@@ -34,7 +34,7 @@ const (
3434 defaultBatchSize = 1000
3535 defaultPayloadBytes = 100
3636 defaultRanges = 10
37- defaultNumTables = 1
37+ defaultTables = 1
3838 maxTransfer = 999
3939)
4040
@@ -46,7 +46,7 @@ type bank struct {
4646
4747 rows , batchSize int
4848 payloadBytes , ranges int
49- numTables int
49+ tables int
5050}
5151
5252func init () {
@@ -68,7 +68,7 @@ var bankMeta = workload.Meta{
6868 g .flags .IntVar (& g .batchSize , `batch-size` , defaultBatchSize , `Number of rows in each batch of initial data.` )
6969 g .flags .IntVar (& g .payloadBytes , `payload-bytes` , defaultPayloadBytes , `Size of the payload field in each initial row.` )
7070 g .flags .IntVar (& g .ranges , `ranges` , defaultRanges , `Initial number of ranges in bank table.` )
71- g .flags .IntVar (& g .numTables , `num-tables` , defaultNumTables , `Number of bank tables to create.` )
71+ g .flags .IntVar (& g .tables , `tables` , defaultTables , `Initial number of bank tables to create.` )
7272 RandomSeed .AddFlag (& g .flags )
7373 g .connFlags = workload .NewConnFlags (& g .flags )
7474 // Because this workload can create a large number of objects, the import
@@ -123,8 +123,8 @@ func (b *bank) Hooks() workload.Hooks {
123123 if b .batchSize <= 0 {
124124 return errors .Errorf (`Value of batch-size must be greater than zero; was %d` , b .batchSize )
125125 }
126- if b .numTables <= 0 {
127- return errors .Errorf (`Value of num-tables must be greater than zero; was %d` , b .numTables )
126+ if b .tables <= 0 {
127+ return errors .Errorf (`Value of tables must be greater than zero; was %d` , b .tables )
128128 }
129129 return nil
130130 },
@@ -133,7 +133,7 @@ func (b *bank) Hooks() workload.Hooks {
133133
134134// tableName returns the table name with optional schema prefix and table number.
135135func (b * bank ) tableName (baseName string , tableIdx int ) string {
136- if b .numTables > 1 {
136+ if b .tables > 1 {
137137 return fmt .Sprintf ("%s_%d" , baseName , tableIdx )
138138 }
139139 return baseName
@@ -149,8 +149,8 @@ var bankTypes = []*types.T{
149149func (b * bank ) Tables () []workload.Table {
150150 numBatches := (b .rows + b .batchSize - 1 ) / b .batchSize // ceil(b.rows/b.batchSize)
151151
152- tables := make ([]workload.Table , b .numTables )
153- for tableIdx := range b .numTables {
152+ tables := make ([]workload.Table , b .tables )
153+ for tableIdx := range b .tables {
154154 table := workload.Table {
155155 Name : b .tableName (`bank` , tableIdx ),
156156 Schema : bankSchema ,
@@ -208,8 +208,8 @@ func (b *bank) Ops(
208208 db .SetMaxIdleConns (b .connFlags .Concurrency + 1 )
209209
210210 // TODO(dan): Move the various queries in the backup/restore tests here.
211- updateStmts := make ([]* gosql.Stmt , b .numTables )
212- for tableIdx := range b .numTables {
211+ updateStmts := make ([]* gosql.Stmt , b .tables )
212+ for tableIdx := range b .tables {
213213 updateStmt , err := db .Prepare (fmt .Sprintf (`
214214 UPDATE %s
215215 SET balance = CASE id WHEN $1 THEN balance-$3 WHEN $2 THEN balance+$3 END
@@ -233,7 +233,7 @@ func (b *bank) Ops(
233233 hists := reg .GetHandle ()
234234
235235 workerFn := func (ctx context.Context ) error {
236- tableIdx := rng .IntN (b .numTables )
236+ tableIdx := rng .IntN (b .tables )
237237 updateStmt := updateStmts [tableIdx ]
238238
239239 from := rng .IntN (b .rows )
0 commit comments