zeroshade commented on code in PR #14098:
URL: https://github.com/apache/arrow/pull/14098#discussion_r969683781
##########
go/arrow/compute/internal/kernels/helpers.go:
##########
@@ -513,3 +514,134 @@ func (v *validityBuilder) Finish() (buf *memory.Buffer) {
v.buffer = nil
return
}
+
+type execBufBuilder struct {
+ mem memory.Allocator
+ buffer *memory.Buffer
+ data []byte
+ sz int
+}
+
+func (bldr *execBufBuilder) resize(newcap int) {
+ if bldr.buffer == nil {
+ bldr.buffer = memory.NewResizableBuffer(bldr.mem)
+ }
+
+ bldr.buffer.ResizeNoShrink(newcap)
+ bldr.data = bldr.buffer.Bytes()
+}
+
+func (bldr *execBufBuilder) reserve(additional int) {
+ if bldr.buffer == nil {
+ bldr.buffer = memory.NewResizableBuffer(bldr.mem)
+ }
+
+ mincap := bldr.sz + additional
+ if mincap <= cap(bldr.data) {
+ return
+ }
+ bldr.buffer.ResizeNoShrink(mincap)
+ bldr.data = bldr.buffer.Buf()
+}
+
+func (bldr *execBufBuilder) unsafeAppend(data []byte) {
+ copy(bldr.data[bldr.sz:], data)
+ bldr.sz += len(data)
+}
+
+func (bldr *execBufBuilder) unsafeAppendN(n int, val byte) {
+ bldr.data[bldr.sz] = val
+ for i := 1; i < n; i *= 2 {
+ copy(bldr.data[bldr.sz+i:], bldr.data[bldr.sz:bldr.sz+i])
+ }
Review Comment:
The built-in `copy` function never copies more than the destination slice can
hold (it caps the copy at the length of the destination), so we don't have to
do that bounds check manually and can leave it to the optimized `copy`
implementation. So the last call to `copy`, which would otherwise write 2 extra
bytes, is internally capped by the slice's bounds if it only has space for 3.
If the capacity is higher, subsequent appends will simply overwrite the
extra values. This doubling-copy technique is one of the fastest ways to
implement a `fill` operation for slices.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]