Skip to content

Commit 48213ea

Browse files
aliafzal authored and meta-codesync[bot] committed
Fix CQS signal clang-diagnostic-shadow in fbcode/torchrec/inference (#3508)
Summary: Pull Request resolved: #3508 Differential Revision: D85419668 fbshipit-source-id: b28fa97ef82eb3def7e52d014e94d276466d97d4
1 parent 570eb1c commit 48213ea

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

torchrec/inference/inference_legacy/src/GPUExecutor.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -105,9 +105,9 @@ GPUExecutor::GPUExecutor(
105105
if (gcConfig_->optimizationEnabled) {
106106
gcConfig_->threadIdToNumForwards[threadId] = 0;
107107
// Freeze all python objects in each interpreter
108-
auto model =
108+
auto session =
109109
model_.acquireSession(&manager_->allInstances().at(threadId));
110-
model.global("gc", "freeze")(at::ArrayRef<torch::deploy::Obj>());
110+
session.global("gc", "freeze")(at::ArrayRef<torch::deploy::Obj>());
111111
}
112112

113113
processThreads_.emplace_back([this, threadId] {
@@ -189,7 +189,7 @@ void GPUExecutor::process(int idx) {
189189
auto start = std::chrono::steady_clock::now();
190190
model_.acquireSession(&manager_->allInstances().at(idx));
191191
{
192-
std::lock_guard<std::mutex> lock(warmUpMutex_);
192+
std::lock_guard<std::mutex> warmUpLock(warmUpMutex_);
193193
warmUpCounter_++;
194194
warmUpCV_.notify_one();
195195
}

0 commit comments

Comments (0)