Commit e28d106

Merge pull request #839 from mhucka/mhucka-fix-type-mismatch-warnings
Fix type mismatch warnings
2 parents 9101a91 + 67b41af commit e28d106

18 files changed: +92 / -89 lines

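For context: most of these warnings are presumably the compiler's signed/unsigned comparison diagnostics (-Wsign-compare), raised because loop indices were declared int while std::vector::size() returns an unsigned size_t. A minimal sketch of the pattern the diffs below fix, assuming nothing TFQ-specific (names are illustrative):

#include <cstddef>
#include <vector>

int CountNonZero(const std::vector<int>& xs) {
  int count = 0;
  // With `int i`, the comparison `i < xs.size()` mixes a signed and an
  // unsigned operand and triggers -Wsign-compare; a size_t index, as used
  // throughout this commit, keeps both sides unsigned.
  for (std::size_t i = 0; i < xs.size(); ++i) {
    if (xs[i] != 0) count++;
  }
  return count;
}
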
tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc

Lines changed: 6 additions & 6 deletions
@@ -179,7 +179,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
  // Simulate programs one by one. Parallelizing over state vectors
  // we no longer parallelize over circuits. Each time we encounter a
  // a larger circuit we will grow the Statevector as necessary.
- for (int i = 0; i < fused_circuits.size(); i++) {
+ for (size_t i = 0; i < fused_circuits.size(); i++) {
  int nq = num_qubits[i];
  if (nq > largest_nq) {
  // need to switch to larger statespace.
@@ -191,18 +191,18 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
  // the state if there is a possibility that circuit[i] and
  // circuit[i + 1] produce the same state.
  ss.SetStateZero(sv);
- for (int j = 0; j < fused_circuits[i].size(); j++) {
+ for (size_t j = 0; j < fused_circuits[i].size(); j++) {
  qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
  }
- for (int j = 0; j < other_fused_circuits[i].size(); j++) {
+ for (size_t j = 0; j < other_fused_circuits[i].size(); j++) {
  // (#679) Just ignore empty program
  if (fused_circuits[i].size() == 0) {
  (*output_tensor)(i, j) = std::complex<float>(1, 0);
  continue;
  }

  ss.SetStateZero(scratch);
- for (int k = 0; k < other_fused_circuits[i][j].size(); k++) {
+ for (size_t k = 0; k < other_fused_circuits[i][j].size(); k++) {
  qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch);
  }

@@ -260,13 +260,13 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
  // no need to update scratch_state since ComputeExpectation
  // will take care of things for us.
  ss.SetStateZero(sv);
- for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
+ for (size_t j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
  qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv);
  }
  }

  ss.SetStateZero(scratch);
- for (int k = 0;
+ for (size_t k = 0;
  k <
  other_fused_circuits[cur_batch_index][cur_internal_index].size();
  k++) {

tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc

Lines changed: 5 additions & 5 deletions
@@ -61,9 +61,9 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel {
  "other_programs must be rank 2. Got ", context->input(3).dims())));

  // Create the output Tensor.
- const int output_dim_batch_size = context->input(0).dim_size(0);
- const int output_dim_internal_size = context->input(3).dim_size(1);
- const int output_dim_symbol_size = context->input(1).dim_size(0);
+ const size_t output_dim_batch_size = context->input(0).dim_size(0);
+ const size_t output_dim_internal_size = context->input(3).dim_size(1);
+ const size_t output_dim_symbol_size = context->input(1).dim_size(0);
  OP_REQUIRES(context, output_dim_symbol_size > 0,
  tensorflow::errors::InvalidArgument(absl::StrCat(
  "The number of symbols must be a positive integer, got ",
@@ -403,13 +403,13 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel {
  // if applicable compute control qubit mask and control value bits.
  uint64_t mask = 0;
  uint64_t cbits = 0;
- for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
+ for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) {
  uint64_t control_loc = cur_gate.controlled_by[k];
  mask |= uint64_t{1} << control_loc;
  cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
  }

- for (int k = 0;
+ for (size_t k = 0;
  k < gradient_gates[cur_batch_index][l - 1].grad_gates.size();
  k++) {
  // Copy sv_adj onto scratch2 in anticipation of non-unitary
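
A side note (my gloss, not part of the commit): TensorFlow's dim_size() returns a signed 64-bit value, so assigning it to a size_t as above converts it to unsigned; the payoff is that later comparisons against container sizes or size_t loop indices no longer mix signedness. A minimal sketch under that assumption, with hypothetical names:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for TensorFlow's signed dimension accessor
// (TensorShape::dim_size returns int64_t).
std::int64_t DimSize() { return 8; }

void FillRow(const std::vector<float>& row) {
  const std::size_t output_dim_internal_size = DimSize();  // signed -> unsigned
  // Both operands of each comparison are unsigned, so -Wsign-compare stays quiet.
  for (std::size_t j = 0; j < output_dim_internal_size && j < row.size(); ++j) {
    // ... consume row[j] ...
  }
}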

tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc

Lines changed: 14 additions & 14 deletions
@@ -181,8 +181,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {

  tensorflow::GuardedPhiloxRandom random_gen;
  int max_n_shots = 1;
- for (int i = 0; i < num_samples.size(); i++) {
- for (int j = 0; j < num_samples[i].size(); j++) {
+ for (size_t i = 0; i < num_samples.size(); i++) {
+ for (size_t j = 0; j < num_samples[i].size(); j++) {
  max_n_shots = std::max(max_n_shots, num_samples[i][j]);
  }
  }
@@ -194,12 +194,12 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
  // Simulate programs one by one. Parallelizing over state vectors
  // we no longer parallelize over circuits. Each time we encounter a
  // a larger circuit we will grow the Statevector as necessary.
- for (int i = 0; i < ncircuits.size(); i++) {
+ for (size_t i = 0; i < ncircuits.size(); i++) {
  int nq = num_qubits[i];

  // (#679) Just ignore empty program
  if (ncircuits[i].channels.size() == 0) {
- for (int j = 0; j < pauli_sums[i].size(); j++) {
+ for (size_t j = 0; j < pauli_sums[i].size(); j++) {
  (*output_tensor)(i, j) = -2.0;
  }
  continue;
@@ -226,7 +226,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
  sv, unused_stats);

  // Use this trajectory as a source for all expectation calculations.
- for (int j = 0; j < pauli_sums[i].size(); j++) {
+ for (size_t j = 0; j < pauli_sums[i].size(); j++) {
  if (run_samples[j] >= num_samples[i][j]) {
  continue;
  }
@@ -238,14 +238,14 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
  run_samples[j]++;
  }
  bool break_loop = true;
- for (int j = 0; j < num_samples[i].size(); j++) {
+ for (size_t j = 0; j < num_samples[i].size(); j++) {
  if (run_samples[j] < num_samples[i][j]) {
  break_loop = false;
  break;
  }
  }
  if (break_loop) {
- for (int j = 0; j < num_samples[i].size(); j++) {
+ for (size_t j = 0; j < num_samples[i].size(); j++) {
  rolling_sums[j] /= num_samples[i][j];
  (*output_tensor)(i, j) = static_cast<float>(rolling_sums[j]);
  }
@@ -286,8 +286,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {

  tensorflow::GuardedPhiloxRandom random_gen;
  int max_n_shots = 1;
- for (int i = 0; i < num_samples.size(); i++) {
- for (int j = 0; j < num_samples[i].size(); j++) {
+ for (size_t i = 0; i < num_samples.size(); i++) {
+ for (size_t j = 0; j < num_samples[i].size(); j++) {
  max_n_shots = std::max(max_n_shots, num_samples[i][j]);
  }
  }
@@ -310,13 +310,13 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
  random_gen.ReserveSamples128(ncircuits.size() * max_n_shots + 1);
  tensorflow::random::SimplePhilox rand_source(&local_gen);

- for (int i = 0; i < ncircuits.size(); i++) {
+ for (size_t i = 0; i < ncircuits.size(); i++) {
  int nq = num_qubits[i];
  int rep_offset = rep_offsets[start][i];

  // (#679) Just ignore empty program
  if (ncircuits[i].channels.size() == 0) {
- for (int j = 0; j < pauli_sums[i].size(); j++) {
+ for (size_t j = 0; j < pauli_sums[i].size(); j++) {
  (*output_tensor)(i, j) = -2.0;
  }
  continue;
@@ -343,7 +343,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
  sim, sv, unused_stats);

  // Compute expectations across all ops using this trajectory.
- for (int j = 0; j < pauli_sums[i].size(); j++) {
+ for (size_t j = 0; j < pauli_sums[i].size(); j++) {
  int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
  if (run_samples[j] >= p_reps + rep_offset) {
  continue;
@@ -360,7 +360,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {

  // Check if we have run enough trajectories for all ops.
  bool break_loop = true;
- for (int j = 0; j < num_samples[i].size(); j++) {
+ for (size_t j = 0; j < num_samples[i].size(); j++) {
  int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
  if (run_samples[j] < p_reps + rep_offset) {
  break_loop = false;
@@ -370,7 +370,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
  if (break_loop) {
  // Lock writing to this batch index in output_tensor.
  batch_locks[i].lock();
- for (int j = 0; j < num_samples[i].size(); j++) {
+ for (size_t j = 0; j < num_samples[i].size(); j++) {
  rolling_sums[j] /= num_samples[i][j];
  (*output_tensor)(i, j) += static_cast<float>(rolling_sums[j]);
  }

tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc

Lines changed: 14 additions & 14 deletions
@@ -183,8 +183,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
  tensorflow::GuardedPhiloxRandom random_gen;
  int max_psum_length = 1;
  int max_n_shots = 1;
- for (int i = 0; i < pauli_sums.size(); i++) {
- for (int j = 0; j < pauli_sums[i].size(); j++) {
+ for (size_t i = 0; i < pauli_sums.size(); i++) {
+ for (size_t j = 0; j < pauli_sums[i].size(); j++) {
  max_psum_length =
  std::max(max_psum_length, pauli_sums[i][j].terms().size());
  max_n_shots = std::max(max_n_shots, num_samples[i][j]);
@@ -198,12 +198,12 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
  // Simulate programs one by one. Parallelizing over state vectors
  // we no longer parallelize over circuits. Each time we encounter a
  // a larger circuit we will grow the Statevector as necessary.
- for (int i = 0; i < ncircuits.size(); i++) {
+ for (size_t i = 0; i < ncircuits.size(); i++) {
  int nq = num_qubits[i];

  // (#679) Just ignore empty program
  if (ncircuits[i].channels.empty()) {
- for (int j = 0; j < pauli_sums[i].size(); j++) {
+ for (size_t j = 0; j < pauli_sums[i].size(); j++) {
  (*output_tensor)(i, j) = -2.0;
  }
  continue;
@@ -230,7 +230,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
  sv, unused_stats);

  // Use this trajectory as a source for all expectation calculations.
- for (int j = 0; j < pauli_sums[i].size(); j++) {
+ for (size_t j = 0; j < pauli_sums[i].size(); j++) {
  if (run_samples[j] >= num_samples[i][j]) {
  continue;
  }
@@ -242,14 +242,14 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
  run_samples[j]++;
  }
  bool break_loop = true;
- for (int j = 0; j < num_samples[i].size(); j++) {
+ for (size_t j = 0; j < num_samples[i].size(); j++) {
  if (run_samples[j] < num_samples[i][j]) {
  break_loop = false;
  break;
  }
  }
  if (break_loop) {
- for (int j = 0; j < num_samples[i].size(); j++) {
+ for (size_t j = 0; j < num_samples[i].size(); j++) {
  rolling_sums[j] /= num_samples[i][j];
  (*output_tensor)(i, j) = static_cast<float>(rolling_sums[j]);
  }
@@ -291,8 +291,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
  tensorflow::GuardedPhiloxRandom random_gen;
  int max_psum_length = 1;
  int max_n_shots = 1;
- for (int i = 0; i < pauli_sums.size(); i++) {
- for (int j = 0; j < pauli_sums[i].size(); j++) {
+ for (size_t i = 0; i < pauli_sums.size(); i++) {
+ for (size_t j = 0; j < pauli_sums[i].size(); j++) {
  max_psum_length =
  std::max(max_psum_length, pauli_sums[i][j].terms().size());
  max_n_shots = std::max(max_n_shots, num_samples[i][j]);
@@ -316,13 +316,13 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
  auto local_gen = random_gen.ReserveSamples128(num_rand);
  tensorflow::random::SimplePhilox rand_source(&local_gen);

- for (int i = 0; i < ncircuits.size(); i++) {
+ for (size_t i = 0; i < ncircuits.size(); i++) {
  int nq = num_qubits[i];
  int rep_offset = rep_offsets[start][i];

  // (#679) Just ignore empty program
  if (ncircuits[i].channels.empty()) {
- for (int j = 0; j < pauli_sums[i].size(); j++) {
+ for (size_t j = 0; j < pauli_sums[i].size(); j++) {
  (*output_tensor)(i, j) = -2.0;
  }
  continue;
@@ -349,7 +349,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
  sim, sv, unused_stats);

  // Compute expectations across all ops using this trajectory.
- for (int j = 0; j < pauli_sums[i].size(); j++) {
+ for (size_t j = 0; j < pauli_sums[i].size(); j++) {
  int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
  if (run_samples[j] >= p_reps + rep_offset) {
  continue;
@@ -366,7 +366,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {

  // Check if we have run enough trajectories for all ops.
  bool break_loop = true;
- for (int j = 0; j < num_samples[i].size(); j++) {
+ for (size_t j = 0; j < num_samples[i].size(); j++) {
  int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
  if (run_samples[j] < p_reps + rep_offset) {
  break_loop = false;
@@ -376,7 +376,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
  if (break_loop) {
  // Lock writing to this batch index in output_tensor.
  batch_locks[i].lock();
- for (int j = 0; j < num_samples[i].size(); j++) {
+ for (size_t j = 0; j < num_samples[i].size(); j++) {
  rolling_sums[j] /= num_samples[i][j];
  (*output_tensor)(i, j) += static_cast<float>(rolling_sums[j]);
  }

tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc

Lines changed: 4 additions & 4 deletions
@@ -160,7 +160,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
  // Simulate programs one by one. Parallelizing over state vectors
  // we no longer parallelize over circuits. Each time we encounter a
  // a larger circuit we will grow the Statevector as nescessary.
- for (int i = 0; i < ncircuits.size(); i++) {
+ for (size_t i = 0; i < ncircuits.size(); i++) {
  int nq = num_qubits[i];

  if (nq > largest_nq) {
@@ -182,7 +182,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {

  QTSimulator::RunOnce(param, ncircuits[i], rand_source.Rand64(), ss, sim,
  sv, gathered_samples);
- uint64_t q_ind = 0;
+ int q_ind = 0;
  uint64_t mask = 1;
  bool val = 0;
  while (q_ind < nq) {
@@ -253,7 +253,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
  auto local_gen = random_gen.ReserveSamples32(needed_random);
  tensorflow::random::SimplePhilox rand_source(&local_gen);

- for (int i = 0; i < ncircuits.size(); i++) {
+ for (size_t i = 0; i < ncircuits.size(); i++) {
  int nq = num_qubits[i];
  int j = start > 0 ? offset_prefix_sum[start - 1][i] : 0;
  int needed_samples = offset_prefix_sum[start][i] - j;
@@ -279,7 +279,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
  QTSimulator::RunOnce(param, ncircuits[i], rand_source.Rand64(), ss,
  sim, sv, gathered_samples);

- uint64_t q_ind = 0;
+ int q_ind = 0;
  uint64_t mask = 1;
  bool val = 0;
  while (q_ind < nq) {
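
Note that this file's fix runs in the opposite direction: q_ind is only ever compared against nq, which is a plain int, so the index is made an int rather than widening everything to uint64_t. A minimal sketch of the bit-unpacking loop this touches, with illustrative names (not the op's actual code):

#include <cstdint>
#include <vector>

// Unpack the low nq measurement bits of one sampled bitstring.
std::vector<bool> UnpackBits(std::uint64_t sample, int nq) {
  std::vector<bool> out(nq);
  int q_ind = 0;             // int, matching the signed qubit count nq
  std::uint64_t mask = 1;
  while (q_ind < nq) {       // int vs. int: no mixed-sign comparison
    out[q_ind] = (sample & mask) != 0;
    q_ind++;
    mask <<= 1;
  }
  return out;
}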

tensorflow_quantum/core/ops/tfq_adj_grad_op.cc

Lines changed: 9 additions & 7 deletions
@@ -212,7 +212,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
  }

  ss.SetStateZero(sv);
- for (int j = 0; j < full_fuse[i].size(); j++) {
+ for (size_t j = 0; j < full_fuse[i].size(); j++) {
  qsim::ApplyFusedGate(sim, full_fuse[i][j], sv);
  }

@@ -241,13 +241,14 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
  // if applicable compute control qubit mask and control value bits.
  uint64_t mask = 0;
  uint64_t cbits = 0;
- for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
+ for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) {
  uint64_t control_loc = cur_gate.controlled_by[k];
  mask |= uint64_t{1} << control_loc;
  cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
  }

- for (int k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) {
+ for (size_t k = 0; k < gradient_gates[i][j - 1].grad_gates.size();
+ k++) {
  // Copy sv onto scratch2 in anticipation of non-unitary "gradient
  // gate".
  ss.Copy(sv, scratch2);
@@ -307,7 +308,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
  auto scratch = ss.Create(largest_nq);
  auto scratch2 = ss.Create(largest_nq);

- for (int i = 0; i < partial_fused_circuits.size(); i++) {
+ for (size_t i = 0; i < partial_fused_circuits.size(); i++) {
  int nq = num_qubits[i];

  if (nq > largest_nq) {
@@ -324,7 +325,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
  }

  ss.SetStateZero(sv);
- for (int j = 0; j < full_fuse[i].size(); j++) {
+ for (size_t j = 0; j < full_fuse[i].size(); j++) {
  qsim::ApplyFusedGate(sim, full_fuse[i][j], sv);
  }

@@ -352,13 +353,14 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
  // if applicable compute control qubit mask and control value bits.
  uint64_t mask = 0;
  uint64_t cbits = 0;
- for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
+ for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) {
  uint64_t control_loc = cur_gate.controlled_by[k];
  mask |= uint64_t{1} << control_loc;
  cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
  }

- for (int k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) {
+ for (size_t k = 0; k < gradient_gates[i][j - 1].grad_gates.size();
+ k++) {
  // Copy sv onto scratch2 in anticipation of non-unitary "gradient
  // gate".
  ss.Copy(sv, scratch2);
