Skip to content

Commit

Permalink
*: replace len(<type>.Fields()) with <type>.NumFields()
Browse files Browse the repository at this point in the history
This commit is purely a mechanical change that avoids the allocations incurred
by calling <type>.Fields() just to get the number of fields.
  • Loading branch information
asubiotto committed Nov 28, 2023
1 parent 6a27eec commit 054464a
Show file tree
Hide file tree
Showing 26 changed files with 55 additions and 55 deletions.
2 changes: 1 addition & 1 deletion go/arrow/array/concat.go
Original file line number Diff line number Diff line change
Expand Up @@ -695,7 +695,7 @@ func concat(data []arrow.ArrayData, mem memory.Allocator) (arr arrow.ArrayData,
}
out.childData = []arrow.ArrayData{children}
case *arrow.StructType:
out.childData = make([]arrow.ArrayData, len(dt.Fields()))
out.childData = make([]arrow.ArrayData, dt.NumFields())
for i := range dt.Fields() {
children := gatherChildren(data, i)
for _, c := range children {
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/array/record.go
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ func NewRecordBuilder(mem memory.Allocator, schema *arrow.Schema) *RecordBuilder
refCount: 1,
mem: mem,
schema: schema,
fields: make([]Builder, len(schema.Fields())),
fields: make([]Builder, schema.NumFields()),
}

for i, f := range schema.Fields() {
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/array/struct.go
Original file line number Diff line number Diff line change
Expand Up @@ -224,7 +224,7 @@ func NewStructBuilder(mem memory.Allocator, dtype *arrow.StructType) *StructBuil
b := &StructBuilder{
builder: builder{refCount: 1, mem: mem},
dtype: dtype,
fields: make([]Builder, len(dtype.Fields())),
fields: make([]Builder, dtype.NumFields()),
}
for i, f := range dtype.Fields() {
b.fields[i] = NewBuilder(b.mem, f.Type)
Expand Down
8 changes: 4 additions & 4 deletions go/arrow/array/table.go
Original file line number Diff line number Diff line change
Expand Up @@ -137,11 +137,11 @@ func NewTable(schema *arrow.Schema, cols []arrow.Column, rows int64) *simpleTabl
// - the total length of each column's array slice (ie: number of rows
// in the column) aren't the same for all columns.
func NewTableFromSlice(schema *arrow.Schema, data [][]arrow.Array) *simpleTable {
if len(data) != len(schema.Fields()) {
if len(data) != schema.NumFields() {
panic("array/table: mismatch in number of columns and data for creating a table")
}

cols := make([]arrow.Column, len(schema.Fields()))
cols := make([]arrow.Column, schema.NumFields())
for i, arrs := range data {
field := schema.Field(i)
chunked := arrow.NewChunked(field.Type, arrs)
Expand Down Expand Up @@ -177,7 +177,7 @@ func NewTableFromSlice(schema *arrow.Schema, data [][]arrow.Array) *simpleTable
// NewTableFromRecords panics if the records and schema are inconsistent.
func NewTableFromRecords(schema *arrow.Schema, recs []arrow.Record) *simpleTable {
arrs := make([]arrow.Array, len(recs))
cols := make([]arrow.Column, len(schema.Fields()))
cols := make([]arrow.Column, schema.NumFields())

defer func(cols []arrow.Column) {
for i := range cols {
Expand Down Expand Up @@ -224,7 +224,7 @@ func (tbl *simpleTable) NumCols() int64 { return int64(len(tbl.cols)
func (tbl *simpleTable) Column(i int) *arrow.Column { return &tbl.cols[i] }

func (tbl *simpleTable) validate() {
if len(tbl.cols) != len(tbl.schema.Fields()) {
if len(tbl.cols) != tbl.schema.NumFields() {
panic(errors.New("arrow/array: table schema mismatch"))
}
for i, col := range tbl.cols {
Expand Down
4 changes: 2 additions & 2 deletions go/arrow/array/union.go
Original file line number Diff line number Diff line change
Expand Up @@ -896,7 +896,7 @@ func NewEmptySparseUnionBuilder(mem memory.Allocator) *SparseUnionBuilder {
// children and type codes. Builders will be constructed for each child
// using the fields in typ
func NewSparseUnionBuilder(mem memory.Allocator, typ *arrow.SparseUnionType) *SparseUnionBuilder {
children := make([]Builder, len(typ.Fields()))
children := make([]Builder, typ.NumFields())
for i, f := range typ.Fields() {
children[i] = NewBuilder(mem, f.Type)
defer children[i].Release()
Expand Down Expand Up @@ -1129,7 +1129,7 @@ func NewEmptyDenseUnionBuilder(mem memory.Allocator) *DenseUnionBuilder {
// children and type codes. Builders will be constructed for each child
// using the fields in typ
func NewDenseUnionBuilder(mem memory.Allocator, typ *arrow.DenseUnionType) *DenseUnionBuilder {
children := make([]Builder, 0, len(typ.Fields()))
children := make([]Builder, 0, typ.NumFields())
defer func() {
for _, child := range children {
child.Release()
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/array/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -428,7 +428,7 @@ func (n *nullArrayFactory) create() *Data {
}

if nf, ok := dt.(arrow.NestedType); ok {
childData = make([]arrow.ArrayData, len(nf.Fields()))
childData = make([]arrow.ArrayData, nf.NumFields())
}

switch dt := dt.(type) {
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/cdata/cdata_exports.go
Original file line number Diff line number Diff line change
Expand Up @@ -274,7 +274,7 @@ func (exp *schemaExporter) export(field arrow.Field) {
exp.dict = new(schemaExporter)
exp.dict.export(arrow.Field{Type: dt.ValueType})
case arrow.NestedType:
exp.children = make([]schemaExporter, len(dt.Fields()))
exp.children = make([]schemaExporter, dt.NumFields())
for i, f := range dt.Fields() {
exp.children[i].export(f)
}
Expand Down
4 changes: 2 additions & 2 deletions go/arrow/compute/cast.go
Original file line number Diff line number Diff line change
Expand Up @@ -266,8 +266,8 @@ func CastStruct(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult)
opts = ctx.State.(kernels.CastState)
inType = batch.Values[0].Array.Type.(*arrow.StructType)
outType = out.Type.(*arrow.StructType)
inFieldCount = len(inType.Fields())
outFieldCount = len(outType.Fields())
inFieldCount = inType.NumFields()
outFieldCount = outType.NumFields()
)

fieldsToSelect := make([]int, outFieldCount)
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/compute/exec/span.go
Original file line number Diff line number Diff line change
Expand Up @@ -633,7 +633,7 @@ func FillZeroLength(dt arrow.DataType, span *ArraySpan) {
return
}

span.resizeChildren(len(nt.Fields()))
span.resizeChildren(nt.NumFields())
for i, f := range nt.Fields() {
FillZeroLength(f.Type, &span.Children[i])
}
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/compute/exprs/builders.go
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ func NewFieldRefFromDotPath(dotpath string, rootSchema *arrow.Schema) (expr.Refe
idx, _ := strconv.Atoi(dotpath[:subend])
switch ct := curType.(type) {
case *arrow.StructType:
if idx > len(ct.Fields()) {
if idx > ct.NumFields() {
return nil, fmt.Errorf("%w: field out of bounds in dotpath", arrow.ErrIndex)
}
curType = ct.Field(idx).Type
Expand Down
4 changes: 2 additions & 2 deletions go/arrow/compute/exprs/exec.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ func makeExecBatch(ctx context.Context, schema *arrow.Schema, partial compute.Da
partialBatch := partial.(*compute.RecordDatum).Value
batchSchema := partialBatch.Schema()

out.Values = make([]compute.Datum, len(schema.Fields()))
out.Values = make([]compute.Datum, schema.NumFields())
out.Len = partialBatch.NumRows()

for i, field := range schema.Fields() {
Expand Down Expand Up @@ -99,7 +99,7 @@ func makeExecBatch(ctx context.Context, schema *arrow.Schema, partial compute.Da
return makeExecBatch(ctx, schema, compute.NewDatumWithoutOwning(batch))
case *compute.ScalarDatum:
out.Len = 1
out.Values = make([]compute.Datum, len(schema.Fields()))
out.Values = make([]compute.Datum, schema.NumFields())

s := part.Value.(*scalar.Struct)
dt := s.Type.(*arrow.StructType)
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/compute/exprs/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -594,7 +594,7 @@ func ToSubstraitType(dt arrow.DataType, nullable bool, ext ExtensionIDSet) (type
Precision: dt.GetPrecision(), Scale: dt.GetScale()}, nil
case arrow.STRUCT:
dt := dt.(*arrow.StructType)
fields := make([]types.Type, len(dt.Fields()))
fields := make([]types.Type, dt.NumFields())
var err error
for i, f := range dt.Fields() {
fields[i], err = ToSubstraitType(f.Type, f.Nullable, ext)
Expand Down
4 changes: 2 additions & 2 deletions go/arrow/compute/fieldref_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ func TestFieldPathBasics(t *testing.T) {
assert.Nil(t, f)
assert.ErrorIs(t, err, compute.ErrEmpty)

f, err = compute.FieldPath{len(s.Fields()) * 2}.Get(s)
f, err = compute.FieldPath{s.NumFields() * 2}.Get(s)
assert.Nil(t, f)
assert.ErrorIs(t, err, compute.ErrIndexRange)
}
Expand All @@ -63,7 +63,7 @@ func TestFieldRefBasics(t *testing.T) {
}

// out of range index results in failure to match
assert.Empty(t, compute.FieldRefIndex(len(s.Fields())*2).FindAll(s.Fields()))
assert.Empty(t, compute.FieldRefIndex(s.NumFields()*2).FindAll(s.Fields()))

// lookup by name returns the indices of both matching fields
assert.Equal(t, []compute.FieldPath{{0}, {2}}, compute.FieldRefName("alpha").FindAll(s.Fields()))
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/datatype_nested_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -205,7 +205,7 @@ func TestStructOf(t *testing.T) {
t.Fatalf("invalid name. got=%q, want=%q", got, want)
}

if got, want := len(got.Fields()), len(tc.fields); got != want {
if got, want := got.NumFields(), len(tc.fields); got != want {
t.Fatalf("invalid number of fields. got=%d, want=%d", got, want)
}

Expand Down
2 changes: 1 addition & 1 deletion go/arrow/flight/flightsql/driver/driver.go
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ func (s *Stmt) NumInput() int {
// If NumInput returns >= 0, the sql package will sanity check argument
// counts from callers and return errors to the caller before the
// statement's Exec or Query methods are called.
return len(schema.Fields())
return schema.NumFields()
}

// Exec executes a query that doesn't return rows, such
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/flight/flightsql/example/sql_batch_reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ type SqlBatchReader struct {
}

func NewSqlBatchReaderWithSchema(mem memory.Allocator, schema *arrow.Schema, rows *sql.Rows) (*SqlBatchReader, error) {
rowdest := make([]interface{}, len(schema.Fields()))
rowdest := make([]interface{}, schema.NumFields())
for i, f := range schema.Fields() {
switch f.Type.ID() {
case arrow.DENSE_UNION, arrow.SPARSE_UNION:
Expand Down
8 changes: 4 additions & 4 deletions go/arrow/internal/arrjson/arrjson.go
Original file line number Diff line number Diff line change
Expand Up @@ -1181,7 +1181,7 @@ func arrayFromJSON(mem memory.Allocator, dt arrow.DataType, arr Array) arrow.Arr

nulls := arr.Count - bitutil.CountSetBits(bitmap.Bytes(), 0, arr.Count)

fields := make([]arrow.ArrayData, len(dt.Fields()))
fields := make([]arrow.ArrayData, dt.NumFields())
for i := range fields {
child := arrayFromJSON(mem, dt.Field(i).Type, arr.Children[i])
defer child.Release()
Expand Down Expand Up @@ -1328,7 +1328,7 @@ func arrayFromJSON(mem memory.Allocator, dt arrow.DataType, arr Array) arrow.Arr
return array.NewData(dt, arr.Count, []*memory.Buffer{nil}, []arrow.ArrayData{runEnds, values}, 0, 0)

case arrow.UnionType:
fields := make([]arrow.ArrayData, len(dt.Fields()))
fields := make([]arrow.ArrayData, dt.NumFields())
for i, f := range dt.Fields() {
child := arrayFromJSON(mem, f.Type, arr.Children[i])
defer child.Release()
Expand Down Expand Up @@ -1620,7 +1620,7 @@ func arrayToJSON(field arrow.Field, arr arrow.Array) Array {
Name: field.Name,
Count: arr.Len(),
Valids: validsToJSON(arr),
Children: make([]Array, len(dt.Fields())),
Children: make([]Array, dt.NumFields()),
}
for i := range o.Children {
o.Children[i] = arrayToJSON(dt.Field(i), arr.Field(i))
Expand Down Expand Up @@ -1741,7 +1741,7 @@ func arrayToJSON(field arrow.Field, arr arrow.Array) Array {
Count: arr.Len(),
Valids: validsToJSON(arr),
TypeID: arr.RawTypeCodes(),
Children: make([]Array, len(dt.Fields())),
Children: make([]Array, dt.NumFields()),
}
if dt.Mode() == arrow.DenseMode {
o.Offset = arr.(*array.DenseUnion).RawValueOffsets()
Expand Down
6 changes: 3 additions & 3 deletions go/arrow/ipc/file_reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -351,7 +351,7 @@ func newRecord(schema *arrow.Schema, memo *dictutils.Memo, meta *memory.Buffer,
}

pos := dictutils.NewFieldPos()
cols := make([]arrow.Array, len(schema.Fields()))
cols := make([]arrow.Array, schema.NumFields())
for i, field := range schema.Fields() {
data := ctx.loadArray(field.Type)
defer data.Release()
Expand Down Expand Up @@ -663,7 +663,7 @@ func (ctx *arrayLoaderContext) loadStruct(dt *arrow.StructType) arrow.ArrayData
field, buffers := ctx.loadCommon(dt.ID(), 1)
defer releaseBuffers(buffers)

subs := make([]arrow.ArrayData, len(dt.Fields()))
subs := make([]arrow.ArrayData, dt.NumFields())
for i, f := range dt.Fields() {
subs[i] = ctx.loadChild(f.Type)
}
Expand Down Expand Up @@ -705,7 +705,7 @@ func (ctx *arrayLoaderContext) loadUnion(dt arrow.UnionType) arrow.ArrayData {
}

defer releaseBuffers(buffers)
subs := make([]arrow.ArrayData, len(dt.Fields()))
subs := make([]arrow.ArrayData, dt.NumFields())
for i, f := range dt.Fields() {
subs[i] = ctx.loadChild(f.Type)
}
Expand Down
6 changes: 3 additions & 3 deletions go/arrow/ipc/metadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -373,7 +373,7 @@ func (fv *fieldVisitor) visit(field arrow.Field) {

case *arrow.StructType:
fv.dtype = flatbuf.TypeStruct_
offsets := make([]flatbuffers.UOffsetT, len(dt.Fields()))
offsets := make([]flatbuffers.UOffsetT, dt.NumFields())
for i, field := range dt.Fields() {
offsets[i] = fieldToFB(fv.b, fv.pos.Child(int32(i)), field, fv.memo)
}
Expand Down Expand Up @@ -472,7 +472,7 @@ func (fv *fieldVisitor) visit(field arrow.Field) {

case arrow.UnionType:
fv.dtype = flatbuf.TypeUnion
offsets := make([]flatbuffers.UOffsetT, len(dt.Fields()))
offsets := make([]flatbuffers.UOffsetT, dt.NumFields())
for i, field := range dt.Fields() {
offsets[i] = fieldToFB(fv.b, fv.pos.Child(int32(i)), field, fv.memo)
}
Expand Down Expand Up @@ -1100,7 +1100,7 @@ func schemaFromFB(schema *flatbuf.Schema, memo *dictutils.Memo) (*arrow.Schema,
}

func schemaToFB(b *flatbuffers.Builder, schema *arrow.Schema, memo *dictutils.Mapper) flatbuffers.UOffsetT {
fields := make([]flatbuffers.UOffsetT, len(schema.Fields()))
fields := make([]flatbuffers.UOffsetT, schema.NumFields())
pos := dictutils.NewFieldPos()
for i, field := range schema.Fields() {
fields[i] = fieldToFB(b, pos.Child(int32(i)), field, memo)
Expand Down
16 changes: 8 additions & 8 deletions go/arrow/scalar/nested.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ func NewLargeListScalarData(val arrow.ArrayData) *LargeList {
}

func makeMapType(typ *arrow.StructType) *arrow.MapType {
debug.Assert(len(typ.Fields()) == 2, "must pass struct with only 2 fields for MapScalar")
debug.Assert(typ.NumFields() == 2, "must pass struct with only 2 fields for MapScalar")
return arrow.MapOf(typ.Field(0).Type, typ.Field(1).Type)
}

Expand Down Expand Up @@ -265,7 +265,7 @@ func (s *Struct) Validate() (err error) {
}

st := s.Type.(*arrow.StructType)
num := len(st.Fields())
num := st.NumFields()
if len(s.Value) != num {
return fmt.Errorf("non-null %s scalar should have %d child values, got %d", s.Type, num, len(s.Value))
}
Expand Down Expand Up @@ -303,7 +303,7 @@ func (s *Struct) ValidateFull() (err error) {
}

st := s.Type.(*arrow.StructType)
num := len(st.Fields())
num := st.NumFields()
if len(s.Value) != num {
return fmt.Errorf("non-null %s scalar should have %d child values, got %d", s.Type, num, len(s.Value))
}
Expand Down Expand Up @@ -571,8 +571,8 @@ func (s *SparseUnion) Release() {

func (s *SparseUnion) Validate() (err error) {
dt := s.Type.(*arrow.SparseUnionType)
if len(dt.Fields()) != len(s.Value) {
return fmt.Errorf("sparse union scalar value had %d fields but type has %d fields", len(dt.Fields()), len(s.Value))
if dt.NumFields() != len(s.Value) {
return fmt.Errorf("sparse union scalar value had %d fields but type has %d fields", dt.NumFields(), len(s.Value))
}

if s.TypeCode < 0 || int(s.TypeCode) >= len(dt.ChildIDs()) || dt.ChildIDs()[s.TypeCode] == arrow.InvalidUnionChildID {
Expand All @@ -593,8 +593,8 @@ func (s *SparseUnion) Validate() (err error) {

func (s *SparseUnion) ValidateFull() (err error) {
dt := s.Type.(*arrow.SparseUnionType)
if len(dt.Fields()) != len(s.Value) {
return fmt.Errorf("sparse union scalar value had %d fields but type has %d fields", len(dt.Fields()), len(s.Value))
if dt.NumFields() != len(s.Value) {
return fmt.Errorf("sparse union scalar value had %d fields but type has %d fields", dt.NumFields(), len(s.Value))
}

if s.TypeCode < 0 || int(s.TypeCode) >= len(dt.ChildIDs()) || dt.ChildIDs()[s.TypeCode] == arrow.InvalidUnionChildID {
Expand Down Expand Up @@ -643,7 +643,7 @@ func NewSparseUnionScalar(val []Scalar, code arrow.UnionTypeCode, dt *arrow.Spar

func NewSparseUnionScalarFromValue(val Scalar, idx int, dt *arrow.SparseUnionType) *SparseUnion {
code := dt.TypeCodes()[idx]
values := make([]Scalar, len(dt.Fields()))
values := make([]Scalar, dt.NumFields())
for i, f := range dt.Fields() {
if i == idx {
values[i] = val
Expand Down
8 changes: 4 additions & 4 deletions go/arrow/scalar/scalar.go
Original file line number Diff line number Diff line change
Expand Up @@ -512,26 +512,26 @@ func init() {
arrow.LIST: func(dt arrow.DataType) Scalar { return &List{scalar: scalar{dt, false}} },
arrow.STRUCT: func(dt arrow.DataType) Scalar {
typ := dt.(*arrow.StructType)
values := make([]Scalar, len(typ.Fields()))
values := make([]Scalar, typ.NumFields())
for i, f := range typ.Fields() {
values[i] = MakeNullScalar(f.Type)
}
return &Struct{scalar: scalar{dt, false}, Value: values}
},
arrow.SPARSE_UNION: func(dt arrow.DataType) Scalar {
typ := dt.(*arrow.SparseUnionType)
if len(typ.Fields()) == 0 {
if typ.NumFields() == 0 {
panic("cannot make scalar of empty union type")
}
values := make([]Scalar, len(typ.Fields()))
values := make([]Scalar, typ.NumFields())
for i, f := range typ.Fields() {
values[i] = MakeNullScalar(f.Type)
}
return NewSparseUnionScalar(values, typ.TypeCodes()[0], typ)
},
arrow.DENSE_UNION: func(dt arrow.DataType) Scalar {
typ := dt.(*arrow.DenseUnionType)
if len(typ.Fields()) == 0 {
if typ.NumFields() == 0 {
panic("cannot make scalar of empty union type")
}
return NewDenseUnionScalar(MakeNullScalar(typ.Fields()[0].Type), typ.TypeCodes()[0], typ)
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/scalar/scalar_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1194,7 +1194,7 @@ func makeDenseUnionScalar(ty *arrow.DenseUnionType, val scalar.Scalar, idx int)
func makeSpecificNullScalar(dt arrow.UnionType, idx int) scalar.Scalar {
switch dt.Mode() {
case arrow.SparseMode:
values := make([]scalar.Scalar, len(dt.Fields()))
values := make([]scalar.Scalar, dt.NumFields())
for i, f := range dt.Fields() {
values[i] = scalar.MakeNullScalar(f.Type)
}
Expand Down
2 changes: 1 addition & 1 deletion go/arrow/schema.go
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,7 @@ func (s *Schema) AddField(i int, field Field) (*Schema, error) {

func (s *Schema) String() string {
o := new(strings.Builder)
fmt.Fprintf(o, "schema:\n fields: %d\n", len(s.Fields()))
fmt.Fprintf(o, "schema:\n fields: %d\n", s.NumFields())
for i, f := range s.Fields() {
if i > 0 {
o.WriteString("\n")
Expand Down
Loading

0 comments on commit 054464a

Please sign in to comment.