
Commit 7ec13b0

refactor: replace all TORCH_CHECK with CHECK. (#453)
1 parent: ef995ba

8 files changed (+14, −17 lines)

8 files changed

+14
-17
lines changed
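The replacement pattern is the same across all eight files: conditional checks move from PyTorch's TORCH_CHECK macro to the glog-style CHECK_EQ (with the message streamed via operator<<), and unconditional failures written as TORCH_CHECK(false, ...) become LOG(FATAL). A minimal sketch of the pattern, using hypothetical validate_1d and element_bits helpers purely for illustration (assuming glog is available via <glog/logging.h>):

#include <glog/logging.h>   // CHECK_EQ, LOG(FATAL)
#include <torch/torch.h>

// Hypothetical helper showing the 1D-tensor check used in several files.
// Before: TORCH_CHECK(t.dim() == 1, "tensor must be 1D");
void validate_1d(const torch::Tensor& t) {
  CHECK_EQ(t.dim(), 1) << "tensor must be 1D";
}

// Hypothetical dtype switch showing TORCH_CHECK(false, ...) -> LOG(FATAL).
int element_bits(const torch::Tensor& t) {
  switch (t.scalar_type()) {
    case at::kFloat:
      return 32;
    case at::kHalf:
      return 16;
    default:
      // Before: TORCH_CHECK(false, "Unsupported dtype ", t.scalar_type());
      LOG(FATAL) << "Unsupported dtype " << t.scalar_type();
      return 0;  // unreachable; silences missing-return warnings
  }
}

Note that the failure semantics change with this switch: TORCH_CHECK raises a c10::Error that callers can catch, whereas glog's CHECK_EQ and LOG(FATAL) log the message and then terminate the process.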

xllm/core/framework/eplb/eplb_policy.cpp

Lines changed: 2 additions & 2 deletions
@@ -76,7 +76,7 @@ std::pair<torch::Tensor, std::vector<bool>> EplbPolicy::rebalance_experts(
 torch::Tensor EplbPolicy::compute_balanced_pack(
     const torch::Tensor& expert_loads) {
   // Parameter Validation
-  TORCH_CHECK(expert_loads.dim() == 1, "expert_loads must be 1D tensor");
+  CHECK_EQ(expert_loads.dim(), 1) << "expert_loads must be 1D tensor";
   const int64_t num_experts = expert_loads.size(0);

   // Generate Redundant Experts
@@ -139,7 +139,7 @@ std::pair<torch::Tensor, torch::Tensor> EplbPolicy::update_origin_weights(
     torch::Tensor expert_loads,
     int32_t redundancy_experts) {
   // Parameter Validation
-  TORCH_CHECK(expert_loads.dim() == 1, "expert_loads must be 1D tensor");
+  CHECK_EQ(expert_loads.dim(), 1) << "expert_loads must be 1D tensor";
   const int64_t num_experts = expert_loads.size(0);

   // Initialize Data Structures

xllm/core/framework/parallel_state/npu_process_group.cpp

Lines changed: 1 addition & 1 deletion
@@ -69,7 +69,7 @@ HcclDataType to_hccl_data_type(const torch::Tensor& input) {
     case at::kBFloat16:
       return HCCL_DATA_TYPE_BFP16;
     default:
-      TORCH_CHECK(false, "Unconvertible HCCL type ", type);
+      LOG(FATAL) << "Unconvertible HCCL type: " << type;
   }
 }

xllm/core/layers/common/tests/tests_utils.cpp

Lines changed: 2 additions & 2 deletions
@@ -249,10 +249,10 @@ torch::Tensor seeded_tensor(const std::string& key,
         out_cpu = map_mod_span(int64_t{});
         break;
       default:
-        TORCH_CHECK(false, "Unsupported integer dtype: ", dtype);
+        LOG(FATAL) << "Unsupported integer dtype: " << dtype;
     }
   } else {
-    TORCH_CHECK(false, "Unsupported dtype for seeded_tensor");
+    LOG(FATAL) << "Unsupported dtype for seeded_tensor";
   }

   // Shape & device

xllm/core/layers/mlu/deepseek_v2_attention.cpp

Lines changed: 2 additions & 2 deletions
@@ -43,8 +43,8 @@ DeepseekV2AttentionImpl::DeepseekV2AttentionImpl(
   int64_t max_position_embeddings = args.max_position_embeddings();

   qk_head_dim_ = qk_nope_head_dim_ + qk_rope_head_dim_;
-  TORCH_CHECK(num_heads % tp_size == 0,
-              "num_heads must be divisible by tensor parallel size");
+  CHECK_EQ(num_heads % tp_size, 0)
+      << "num_heads must be divisible by tensor parallel size";
   num_local_heads_ = num_heads / tp_size;
   float scaling = std::pow(qk_head_dim_, -0.5f);

xllm/core/layers/npu/npu_deepseek_v2_decoder_layer_impl.h

Lines changed: 2 additions & 5 deletions
@@ -80,11 +80,8 @@ class ExpertBuffer {
   } else {
     auto validate_shape = [](const torch::Tensor& t,
                              const std::vector<int64_t>& expected) {
-      TORCH_CHECK(t.sizes() == expected,
-                  "Shape mismatch. Expected ",
-                  expected,
-                  " got ",
-                  t.sizes());
+      CHECK_EQ(t.sizes(), expected)
+          << "Shape mismatch. Expected " << expected << " got " << t.sizes();
     };

     validate_shape(gateup_weight, gateup_weight_shape);

xllm/models/dit/dit.h

Lines changed: 1 addition & 1 deletion
@@ -592,7 +592,7 @@ inline torch::Tensor get_timestep_embedding(const torch::Tensor& timesteps,
     float downscale_freq_shift = 1.0f,
     float scale = 1.0f,
     int64_t max_period = 10000) {
-  TORCH_CHECK(timesteps.dim() == 1, "Timesteps should be a 1d-array");
+  CHECK_EQ(timesteps.dim(), 1) << "Timesteps should be a 1d-array";
   int64_t half_dim = embedding_dim / 2;
   // -ln(max_period) * [0, 1, ..., half_dim-1] / (half_dim -
   // downscale_freq_shift

xllm/models/dit/pipeline_flux_base.h

Lines changed: 1 addition & 1 deletion
@@ -85,7 +85,7 @@ torch::Tensor get_1d_rotary_pos_embed(
     float ntk_factor = 1.0,
     bool repeat_interleave_real = true,
     torch::Dtype freqs_dtype = torch::kFloat32) {
-  TORCH_CHECK(dim % 2 == 0, "Dimension must be even");
+  CHECK_EQ(dim % 2, 0) << "Dimension must be even";

   torch::Tensor pos_tensor = pos;
   if (pos.dim() == 0) {

xllm/models/vlm/minicpmv.h

Lines changed: 3 additions & 3 deletions
@@ -306,7 +306,7 @@ torch::Tensor get_1d_sincos_pos_embed_from_grid(int embed_dim,
     std::pair<int, int> version = {
         2,
         0}) {
-  TORCH_CHECK(embed_dim % 2 == 0, "embed_dim must be even");
+  CHECK_EQ(embed_dim % 2, 0) << "embed_dim must be even";

   // compute omega
   auto omega = torch::arange(embed_dim / 2, torch::kFloat32);
@@ -332,7 +332,7 @@ torch::Tensor get_2d_sincos_pos_embed_from_grid(int embed_dim,
     std::pair<int, int> version = {
         2,
         0}) {
-  TORCH_CHECK(embed_dim % 2 == 0, "embed_dim must be even");
+  CHECK_EQ(embed_dim % 2, 0) << "embed_dim must be even";

   auto emb_h =
       get_1d_sincos_pos_embed_from_grid(embed_dim / 2, grid[0], version);
@@ -382,7 +382,7 @@ class Resampler2_5Impl : public BaseResamplerImpl {
   }

   torch::Tensor forward(torch::Tensor x, torch::Tensor tgt_sizes) {
-    TORCH_CHECK(x.size(0) == tgt_sizes.size(0), "Batch size mismatch!");
+    CHECK_EQ(x.size(0), tgt_sizes.size(0)) << "Batch size mismatch!";

     int64_t batch_size = x.size(0);
     auto device = x.device();
