@@ -166,7 +166,7 @@ class BatchingSession : public ServingSession {
   // signatures, and for each one supplies a lambda to construct a batch
   // scheduler given a process-batch callback. See batching_session.h for
   // example usage.
-  static Status Create(
+  static absl::Status Create(
       const BatchingSessionOptions& options, std::unique_ptr<Session> wrapped,
       const std::vector<SignatureWithBatchingSessionSchedulerCreator>&
           signatures_with_scheduler_creators,
@@ -176,7 +176,7 @@ class BatchingSession : public ServingSession {
   // Same as above but allows for specification of a default scheduler creator
   // which enables requests that don't match an exact signature to also
   // have batching.
-  static Status Create(
+  static absl::Status Create(
       const BatchingSessionOptions& options, std::unique_ptr<Session> wrapped,
       const std::vector<SignatureWithBatchingSessionSchedulerCreator>&
           signatures_with_scheduler_creators,
@@ -186,10 +186,10 @@ class BatchingSession : public ServingSession {
 
   ~BatchingSession() override = default;
 
-  Status Run(const std::vector<std::pair<string, Tensor>>& inputs,
-             const std::vector<string>& output_tensor_names,
-             const std::vector<string>& target_node_names,
-             std::vector<Tensor>* outputs) override;
+  absl::Status Run(const std::vector<std::pair<string, Tensor>>& inputs,
+                   const std::vector<string>& output_tensor_names,
+                   const std::vector<string>& target_node_names,
+                   std::vector<Tensor>* outputs) override;
 
   // RunOptions handling:
   // Since multiple of these Run() calls get backed into a single call to the
@@ -206,31 +206,33 @@ class BatchingSession : public ServingSession {
   // assuming all individual tasks in a batch have equal cost, which is the
   // assumption before splitting is introduced), the rest of fields in
   // `RunMetadata` are copied from the processing result of first split.
-  Status Run(const RunOptions& run_options,
-             const std::vector<std::pair<string, Tensor>>& inputs,
-             const std::vector<string>& output_tensor_names,
-             const std::vector<string>& target_node_names,
-             std::vector<Tensor>* outputs, RunMetadata* run_metadata) override;
+  absl::Status Run(const RunOptions& run_options,
+                   const std::vector<std::pair<string, Tensor>>& inputs,
+                   const std::vector<string>& output_tensor_names,
+                   const std::vector<string>& target_node_names,
+                   std::vector<Tensor>* outputs,
+                   RunMetadata* run_metadata) override;
 
   // Similar to the function above, but takes an additional
   // 'thread_pool_options' to pass to the underlying Session's Run(). We select
   // an arbitrary 'thread_pool_options' (typically they are the same across
   // calls).
-  Status Run(const RunOptions& run_options,
-             const std::vector<std::pair<string, Tensor>>& inputs,
-             const std::vector<string>& output_tensor_names,
-             const std::vector<string>& target_node_names,
-             std::vector<Tensor>* outputs, RunMetadata* run_metadata,
-             const thread::ThreadPoolOptions& thread_pool_options) override;
+  absl::Status Run(
+      const RunOptions& run_options,
+      const std::vector<std::pair<string, Tensor>>& inputs,
+      const std::vector<string>& output_tensor_names,
+      const std::vector<string>& target_node_names,
+      std::vector<Tensor>* outputs, RunMetadata* run_metadata,
+      const thread::ThreadPoolOptions& thread_pool_options) override;
 
-  Status ListDevices(std::vector<DeviceAttributes>* response) override;
+  absl::Status ListDevices(std::vector<DeviceAttributes>* response) override;
 
  private:
   explicit BatchingSession(const BatchingSessionOptions& options,
                            const std::string& thread_pool_name);
 
   // Helper function to run the session.
-  Status InternalRun(
+  absl::Status InternalRun(
       const RunOptions& run_options,
       const std::vector<std::pair<string, Tensor>>& inputs,
       const std::vector<string>& output_tensor_names,
@@ -242,28 +244,28 @@ class BatchingSession : public ServingSession {
   // analyzing the 0th dimension size of each of the tensors. All tensors in the
   // list must have the same 0th dimension size to be batchable. If the sizes
   // are not all identical, returns an error.
-  Status ComputeInputSize(const std::vector<std::pair<string, Tensor>>& inputs,
-                          size_t* size) const;
+  absl::Status ComputeInputSize(
+      const std::vector<std::pair<string, Tensor>>& inputs, size_t* size) const;
 
   // Merges the input tensors in a batch, via concatenation of correspondingly-
   // named tensors. Puts the merged inputs in the order they are in in the
   // signature. Assumes 'batch' is non-empty. Returns an error if there are any
   // mismatches among the tasks in the batch that violate the constraints for
   // batchability.
-  Status MergeInputTensors(
+  absl::Status MergeInputTensors(
       const TensorSignature& signature, const Batch<BatchingSessionTask>& batch,
       std::vector<std::pair<string, Tensor>>* merged_inputs);
 
   // Splits the output of a batched call to 'wrapped_->Run()' into individual
   // task outputs. Assumes the output tensor order matches the signature.
-  Status SplitOutputTensors(const TensorSignature& signature,
-                            const std::vector<Tensor>& combined_outputs,
-                            Batch<BatchingSessionTask>* batch);
+  absl::Status SplitOutputTensors(const TensorSignature& signature,
+                                  const std::vector<Tensor>& combined_outputs,
+                                  Batch<BatchingSessionTask>* batch);
 
   // Splits RunMetadata parts (e.g. costgraph attribution) into individual task
   // outputs.
-  Status SplitRunMetadata(RunMetadata* batch_metadata,
-                          Batch<BatchingSessionTask>* batch);
+  absl::Status SplitRunMetadata(RunMetadata* batch_metadata,
+                                Batch<BatchingSessionTask>* batch);
 
   // Processes one batch of Run() calls with 'signature'. Called by
   // 'batch_scheduler_' in a batch thread.
@@ -295,7 +297,7 @@ class BatchingSession : public ServingSession {
   TF_DISALLOW_COPY_AND_ASSIGN(BatchingSession);
 };
 
-Status BatchingSession::Create(
+absl::Status BatchingSession::Create(
     const BatchingSessionOptions& options, std::unique_ptr<Session> wrapped,
     const std::vector<SignatureWithBatchingSessionSchedulerCreator>&
         signatures_with_scheduler_creators,
@@ -309,7 +311,7 @@ Status BatchingSession::Create(
   return status;
 }
 
-Status BatchingSession::Create(
+absl::Status BatchingSession::Create(
     const BatchingSessionOptions& options, std::unique_ptr<Session> wrapped,
     const std::vector<SignatureWithBatchingSessionSchedulerCreator>&
         signatures_with_scheduler_creators,
@@ -339,7 +341,7 @@ Status BatchingSession::Create(
   return absl::OkStatus();
 }
 
-Status BatchingSession::Run(
+absl::Status BatchingSession::Run(
     const std::vector<std::pair<string, Tensor>>& inputs,
     const std::vector<string>& output_tensor_names,
     const std::vector<string>& target_node_names,
@@ -349,7 +351,7 @@ Status BatchingSession::Run(
              outputs, &run_metadata);
 }
 
-Status BatchingSession::Run(
+absl::Status BatchingSession::Run(
     const RunOptions& run_options,
     const std::vector<std::pair<string, Tensor>>& inputs,
     const std::vector<string>& output_tensor_names,
@@ -359,7 +361,7 @@ Status BatchingSession::Run(
                      target_node_names, outputs, run_metadata, absl::nullopt);
 }
 
-Status BatchingSession::Run(
+absl::Status BatchingSession::Run(
     const RunOptions& run_options,
     const std::vector<std::pair<string, Tensor>>& inputs,
     const std::vector<string>& output_tensor_names,
@@ -371,7 +373,7 @@ Status BatchingSession::Run(
                      thread_pool_options);
 }
 
-Status BatchingSession::InternalRun(
+absl::Status BatchingSession::InternalRun(
     const RunOptions& run_options,
     const std::vector<std::pair<string, Tensor>>& inputs,
     const std::vector<string>& output_tensor_names,
@@ -434,7 +436,7 @@ Status BatchingSession::InternalRun(
   outputs->clear();
 
   Notification done;
-  Status status;
+  absl::Status status;
   auto task = std::unique_ptr<BatchingSessionTask>(new BatchingSessionTask);
   task->enqueue_time_micros = EnvTime::NowMicros();
   task->run_options = run_options;
@@ -455,15 +457,16 @@ Status BatchingSession::InternalRun(
   return status;
 }
 
-Status BatchingSession::ListDevices(std::vector<DeviceAttributes>* response) {
+absl::Status BatchingSession::ListDevices(
+    std::vector<DeviceAttributes>* response) {
   return wrapped_->ListDevices(response);
 }
 
 BatchingSession::BatchingSession(const BatchingSessionOptions& options,
                                  const std::string& thread_pool_name)
     : options_(options), thread_pool_name_(thread_pool_name) {}
 
-Status BatchingSession::ComputeInputSize(
+absl::Status BatchingSession::ComputeInputSize(
     const std::vector<std::pair<string, Tensor>>& inputs, size_t* size) const {
   TF_RETURN_IF_ERROR(::tensorflow::serving::ComputeTensorBatchSize(
       inputs, size,
@@ -480,7 +483,7 @@ Status BatchingSession::ComputeInputSize(
   return absl::OkStatus();
 }
 
-Status BatchingSession::MergeInputTensors(
+absl::Status BatchingSession::MergeInputTensors(
     const TensorSignature& signature, const Batch<BatchingSessionTask>& batch,
     std::vector<std::pair<string, Tensor>>* merged_inputs) {
   DCHECK_GE(batch.num_tasks(), 1);
@@ -574,7 +577,8 @@ Status BatchingSession::MergeInputTensors(
           "One or more tasks does not conform to batch signature");
     }
     Tensor concated;
-    const Status concat_status = tensor::Concat(tensors->second, &concated);
+    const absl::Status concat_status =
+        tensor::Concat(tensors->second, &concated);
     DCHECK(concat_status.ok()) << concat_status.ToString();
     if (!concat_status.ok()) {
       return errors::Internal("Tensor concat operation failed: ",
@@ -586,7 +590,7 @@ Status BatchingSession::MergeInputTensors(
   return absl::OkStatus();
 }
 
-Status BatchingSession::SplitOutputTensors(
+absl::Status BatchingSession::SplitOutputTensors(
     const TensorSignature& signature,
     const std::vector<Tensor>& combined_outputs,
     Batch<BatchingSessionTask>* batch) {
@@ -633,7 +637,7 @@ Status BatchingSession::SplitOutputTensors(
     }
 
     std::vector<Tensor> split_tensor;
-    const Status split_status =
+    const absl::Status split_status =
         tensor::Split(tensor, task_sizes_plus_optional_padding, &split_tensor);
     DCHECK(split_status.ok()) << split_status.ToString();
     if (!split_status.ok()) {
@@ -673,8 +677,8 @@ Status BatchingSession::SplitOutputTensors(
   return absl::OkStatus();
 }
 
-Status BatchingSession::SplitRunMetadata(RunMetadata* batch_metadata,
-                                         Batch<BatchingSessionTask>* batch) {
+absl::Status BatchingSession::SplitRunMetadata(
+    RunMetadata* batch_metadata, Batch<BatchingSessionTask>* batch) {
   if (batch->num_tasks() > 0) {
     if (batch_metadata->has_cost_graph()) {
       // Scale the batch aggregated to reflect the cost of an individual request
@@ -725,7 +729,7 @@ void BatchingSession::ProcessBatch(
   // Regardless of the outcome, we need to propagate the status to the
   // individual tasks and signal that they are done. We use MakeCleanup() to
   // ensure that this happens no matter how we exit the method below.
-  Status status;
+  absl::Status status;
   auto finally = gtl::MakeCleanup([&status, &batch] {
     for (int i = 0; i < batch->num_tasks(); ++i) {
       BatchingSessionTask* task = batch->mutable_task(i);
@@ -764,9 +768,9 @@ void BatchingSession::ProcessBatch(
         ->Add(dequeue_time_micros - task.enqueue_time_micros);
   }
   if (all_tasks_timeout_exceeded) {
-    status = Status(static_cast<tensorflow::errors::Code>(
-                        absl::StatusCode::kResourceExhausted),
-                    "Run() timeout exceeded while waiting in batching queue");
+    status = absl::Status(
+        static_cast<absl::StatusCode>(absl::StatusCode::kResourceExhausted),
+        "Run() timeout exceeded while waiting in batching queue");
     return;
   }
 
@@ -817,7 +821,7 @@ void BatchingSession::ProcessBatch(
 // Share implementation between `SplitInputTask` here and
 // `BatchResource::SplitInputTask` by refactoring and unifying the naming or
 // type differences of data members.
-Status SplitInputTask(
+absl::Status SplitInputTask(
     std::unique_ptr<BatchingSessionTask>* input_task_ptr,
     int open_batch_remaining_slot, int max_batch_size,
     std::vector<std::unique_ptr<BatchingSessionTask>>* output_tasks) {
@@ -947,7 +951,7 @@ Status SplitInputTask(
     // TODO(b/158393551):
     // Figure out the optimal implementation of Split, by using
     // 'Tensor::Slice' and eliminating unnecessary memcpy as much as possible.
-    const Status split_status =
+    const absl::Status split_status =
         tensor::Split(input_tensor, output_task_sizes, &split_tensors);
     if (!split_status.ok()) {
       return errors::Internal(
@@ -969,7 +973,7 @@ Status SplitInputTask(
   return absl::OkStatus();
 }
 
-Status CreateBatchingSession(
+absl::Status CreateBatchingSession(
     const BatchingSessionOptions& options,
     const std::vector<SignatureWithBatchingSessionSchedulerCreator>&
         signatures_with_scheduler_creators,
@@ -984,7 +988,7 @@ Status CreateBatchingSession(
   return absl::OkStatus();
 }
 
-Status CreateBatchingSession(
+absl::Status CreateBatchingSession(
     const BatchingSessionOptions& options,
     const std::vector<SignatureWithBatchingSessionSchedulerCreator>&
         signatures_with_scheduler_creators,
@@ -998,7 +1002,7 @@ Status CreateBatchingSession(
   return absl::OkStatus();
 }
 
-Status CreateBasicBatchingSession(
+absl::Status CreateBasicBatchingSession(
     const BasicBatchScheduler<BatchingSessionTask>::Options& schedule_options,
     const BatchingSessionOptions& batching_session_options,
     const TensorSignature& signature, std::unique_ptr<Session> session,
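Note: the diff above only changes the spelling of the return types from Status to absl::Status; callers are unaffected because both names refer to the same type in current TensorFlow. For context, here is a minimal sketch (not part of the commit) of how calling code might wrap a session with CreateBasicBatchingSession and propagate the absl::Status values these factories now advertise. The helper name MakeBatchingSession, the scheduler option values, and the tensor names "x"/"y" are illustrative assumptions, not anything taken from this change.

#include <memory>
#include <utility>

#include "tensorflow/core/public/session.h"
#include "tensorflow_serving/batching/batching_session.h"

using tensorflow::Session;
using tensorflow::serving::BasicBatchScheduler;
using tensorflow::serving::BatchingSessionOptions;
using tensorflow::serving::BatchingSessionTask;
using tensorflow::serving::CreateBasicBatchingSession;
using tensorflow::serving::TensorSignature;

// Hypothetical helper: wraps `session` so that concurrent Run() calls on
// *batching_session are transparently batched, and forwards the absl::Status
// returned by the factory touched in this commit.
absl::Status MakeBatchingSession(std::unique_ptr<Session> session,
                                 std::unique_ptr<Session>* batching_session) {
  BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
  schedule_options.max_batch_size = 64;          // assumed tuning value
  schedule_options.batch_timeout_micros = 1000;  // wait at most 1ms for a batch
  schedule_options.num_batch_threads = 2;        // assumed tuning value

  BatchingSessionOptions batching_session_options;

  // Assumed tensor names; they must match the wrapped model's batched
  // input/output tensors.
  TensorSignature signature = {/*input_tensors=*/{"x"},
                               /*output_tensors=*/{"y"}};

  return CreateBasicBatchingSession(schedule_options, batching_session_options,
                                    signature, std::move(session),
                                    batching_session);
}

Errors from the wrapped session surface through the same absl::Status path shown in InternalRun() and ProcessBatch() in the diff.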