CUDA version: 12.8
flashinfer version: 0.5.2
CUDA ARCH (Compute Capability): 9.0
I hit an error when running the following test commands (a minimal Python sketch of the same call path is appended after the log below):
python3 benchmarks/flashinfer_benchmark.py --routine BatchPrefillWithPagedKVCacheWrapper --page_size 64 --batch_size 1 --s_qo 2000 --s_kv 2000 --num_qo_heads 4 --num_kv_heads 1 --head_dim_qk 128 --head_dim_vo 128 -vv --refcheck --q_dtype fp8_e4m3 --kv_dtype fp8_e4m3 --allow_output_mismatch --generate_repro_command --num_iters 20 --backends fa2 fa3
python3 benchmarks/flashinfer_benchmark.py --routine BatchPrefillWithPagedKVCacheWrapper --page_size 64 --batch_size 1 --s_qo 2000 --s_kv 2000 --num_qo_heads 4 --num_kv_heads 1 --head_dim_qk 128 --head_dim_vo 128 -vv --refcheck --q_dtype bfloat16 --kv_dtype fp8_e4m3 --allow_output_mismatch --generate_repro_command --num_iters 20 --backends fa2 fa3
Error info:
Traceback (most recent call last):
File "/usr/local/lib/python3.12/dist-packages/flashinfer/jit/cpp_ext.py", line 287, in run_ninja
subprocess.run(
File "/usr/lib/python3.12/subprocess.py", line 571, in run
raise CalledProcessError(retcode, process.args,
subprocess.CalledProcessError: Command '['ninja', '-v', '-C', '/root/.cache/flashinfer/0.5.2/90_90a/cached_ops', '-f', '/root/.cache/flashinfer/0.5.2/90_90a/cached_ops/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/build.ninja']' returned non-zero exit status 1.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/root/flashinfer/benchmarks/flashinfer_benchmark.py", line 207, in <module>
run_test(args)
File "/root/flashinfer/benchmarks/flashinfer_benchmark.py", line 24, in run_test
res = run_attention_test(args)
^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/flashinfer/benchmarks/routines/attention.py", line 59, in run_attention_test
return testBatchPrefillWithPagedKVCacheWrapper(args)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/flashinfer/benchmarks/routines/attention.py", line 938, in testBatchPrefillWithPagedKVCacheWrapper
backend_wrappers[backend].plan(
File "/usr/local/lib/python3.12/dist-packages/flashinfer/prefill.py", line 1838, in plan
self._cached_module = get_batch_prefill_module(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/flashinfer/prefill.py", line 374, in get_batch_prefill_module
module = gen_batch_prefill_module(backend, *args).build_and_load()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/flashinfer/jit/core.py", line 309, in build_and_load
self.build(verbose, need_lock=False)
File "/usr/local/lib/python3.12/dist-packages/flashinfer/jit/core.py", line 295, in build
run_ninja(jit_env.FLASHINFER_JIT_DIR, self.ninja_path, verbose)
File "/usr/local/lib/python3.12/dist-packages/flashinfer/jit/cpp_ext.py", line 299, in run_ninja
raise RuntimeError(msg) from e
RuntimeError: Ninja build failed. Ninja output:
ninja: Entering directory `/root/.cache/flashinfer/0.5.2/90_90a/cached_ops'
[1/5] /usr/local/cuda/bin/nvcc --generate-dependencies-with-compile --dependency-output batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_3.cuda.o.d -DPy_LIMITED_API=0x03090000 -D_GLIBCXX_USE_CXX11_ABI=1 -isystem /usr/include/python3.12 -isystem /usr/local/cuda/include -isystem /usr/local/cuda/include/cccl -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/csrc -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/tools/util/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/spdlog/include --compiler-options=-fPIC --expt-relaxed-constexpr -static-global-template-stub=false -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -std=c++17 --threads=1 -use_fast_math -DFLASHINFER_ENABLE_F16 -DFLASHINFER_ENABLE_BF16 -DFLASHINFER_ENABLE_FP8_E4M3 -DFLASHINFER_ENABLE_FP8_E5M2 -DNDEBUG -O3 -gencode=arch=compute_90a,code=sm_90a -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -c /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_3.cu -o batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_3.cuda.o
FAILED: [code=1] batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_3.cuda.o
/usr/local/cuda/bin/nvcc --generate-dependencies-with-compile --dependency-output batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_3.cuda.o.d -DPy_LIMITED_API=0x03090000 -D_GLIBCXX_USE_CXX11_ABI=1 -isystem /usr/include/python3.12 -isystem /usr/local/cuda/include -isystem /usr/local/cuda/include/cccl -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/csrc -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/tools/util/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/spdlog/include --compiler-options=-fPIC --expt-relaxed-constexpr -static-global-template-stub=false -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -std=c++17 --threads=1 -use_fast_math -DFLASHINFER_ENABLE_F16 -DFLASHINFER_ENABLE_BF16 -DFLASHINFER_ENABLE_FP8_E4M3 -DFLASHINFER_ENABLE_FP8_E5M2 -DNDEBUG -O3 -gencode=arch=compute_90a,code=sm_90a -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -c /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_3.cu -o batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_3.cuda.o
/usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_traits.hpp(130): error: static assertion failed with "Copy_Traits: dst failed to vectorize into registers. Layout is incompatible with this CopyOp."
static_assert(decltype(size(rD) == Int<RegNumDst>{})::value, "Copy_Traits: dst failed to vectorize into registers. Layout is incompatible with this CopyOp.")
^
detected during:
instantiation of "void cute::copy_unpack(const AnyCPYTraits &, const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &) [with AnyCPYTraits=cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 103 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
instantiation of "void cute::Copy_Atom<cute::Copy_Traits<Args...>, CopyInternalType>::call(const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &) const [with Args=<cute::SM90_U32x4_STSM_N>, CopyInternalType=PagedParams::DTypeO, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 124 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
instantiation of "void cute::Copy_Atom<cute::Copy_Traits<Args...>, CopyInternalType>::call(const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &&) const [with Args=<cute::SM90_U32x4_STSM_N>, CopyInternalType=PagedParams::DTypeO, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 226 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/algorithm/copy.hpp
instantiation of "void cute::copy(const cute::Copy_Atom<CopyArgs...> &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &) [with CopyArgs=<cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, PagedParams::DTypeO>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::_16, cute::_4>, cute::tuple<cute::_1, cute::_16>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>>]" at line 545 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/algorithm/copy.hpp
instantiation of "void cute::copy(const CopyPolicy &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &&) [with CopyPolicy=cute::Copy_Atom<cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, PagedParams::DTypeO>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::_16, cute::_4>, cute::tuple<cute::_1, cute::_16>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>>]" at line 109 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
[ 3 instantiation contexts not shown ]
instantiation of "void cute::copy(const cute::TiledCopy<CopyAtom, TV, Tiler> &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &) [with CopyAtom=cute::Copy_Atom<cute::SM90_U32x4_STSM_N, PagedParams::DTypeO>, TV=cute::Layout<cute::tuple<cute::tuple<cute::_4, cute::_8, cute::C<8>>, cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<16>>, cute::tuple<cute::_1, cute::_1>>>, cute::tuple<cute::tuple<cute::_256, cute::_1, cute::_16>, cute::tuple<cute::tuple<cute::_128, cute::_8, cute::_1024>, cute::tuple<cute::_0, cute::_0>>>>, Tiler=cute::tuple<cute::_128, cute::_128>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::tuple<cute::_16, cute::_4>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::_1, cute::_16>, cute::C<0>, cute::C<0>>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>, cute::C<0>, cute::C<0>>>]" at line 129 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/epilogue.cuh
instantiation of "void flashinfer::FP8CollectiveEpilogue<Ktraits>::store(const flashinfer::FP8CollectiveEpilogue<Ktraits>::Params &, const FrgTensorO &, const FrgTensorLSE &, SharedStorage &, TiledMma, int, const BlockCoord &) [with Ktraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, BlockCoord=cute::tuple<int, int, int, int, int, int, int, int>, SharedStorage=flashinfer::SharedStorageQKVOVt<cutlass::PipelineAsync<2>, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, 128, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>>>>, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>, cute::tuple<cute::_1, cute::_2>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>, cute::tuple<cute::C<0>, cute::_16384>>>>, cute::ComposedLayout<cute::Swizzle<2, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::tuple<cute::_8, cute::_8>, cute::_2>, cute::tuple<cute::tuple<cute::_64, cute::_1>, cute::_2>, cute::tuple<cute::_1, cute::_2>>, cute::tuple<cute::tuple<cute::tuple<cute::_64, cute::_512>, cute::C<4096>>, cute::tuple<cute::tuple<cute::_1, cute::_0>, cute::C<8192>>, cute::tuple<cute::C<0>, cute::_16384>>>>, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>>>>>, FrgTensorO=cute::Tensor<cute::ArrayEngine<float, 64UL>, cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<16>>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::_4>, cute::C<0>, cute::C<0>>>>, FrgTensorLSE=cute::Tensor<cute::ArrayEngine<float, 2UL>, cute::Layout<cute::tuple<cute::_2>, cute::tuple<cute::_1>>>, TiledMma=cute::TiledMMA<cute::MMA_Atom<cute::SM90::GMMA::MMA_64x128x32_F32E4M3E4M3_RS_TN<cute::SM90::GMMA::ScaleIn::One, cute::SM90::GMMA::ScaleIn::One>>, cute::Layout<cute::tuple<cute::_2, cute::_1, cute::_1>, cute::tuple<cute::_1, cute::_0, cute::_0>>, cute::tuple<cute::Underscore, cute::Underscore, cute::Underscore>>]" at line 244 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "void flashinfer::FP8PrefillWithKVCacheKernel<CollectiveMainloop,CollectiveEpilogue,Ktraits,LEFT_SLIDING_WINDOW,CAUSAL,TileScheduler>(CollectiveMainloop::Params, CollectiveEpilogue::Params, TileScheduler::Params) [with CollectiveMainloop=flashinfer::FP8SparseCollectiveMainloop<PagedParams::AdditionalParams, flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, false>, CollectiveEpilogue=flashinfer::FP8CollectiveEpilogue<flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>>, Ktraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, LEFT_SLIDING_WINDOW=false, CAUSAL=false, TileScheduler=flashinfer::BatchPrefillTileScheduler<PagedParams::IdType>]" at line 369 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "cudaError_t flashinfer::BatchFP8PrefillWithPagedKVCacheKernelTraitsDispatched<KernelTraits,LEFT_SLIDING_WINDOW,CAUSAL,SAME_SCHEDULE_FOR_ALL_HEADS,Params>(Params &, cudaStream_t) [with KernelTraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, LEFT_SLIDING_WINDOW=false, CAUSAL=false, SAME_SCHEDULE_FOR_ALL_HEADS=true, Params=PagedParams]" at line 458 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "cudaError_t flashinfer::BatchFP8PrefillWithPagedKVCacheDispatched<HEAD_DIM,MASK_MODE,LEFT_SLIDING_WINDOW,SAME_SCHEDULE_FOR_ALL_HEADS,AttentionVariant,Params>(Params &, __nv_bool, cudaStream_t) [with HEAD_DIM=128U, MASK_MODE=flashinfer::MaskMode::kMultiItemScoring, LEFT_SLIDING_WINDOW=false, SAME_SCHEDULE_FOR_ALL_HEADS=true, AttentionVariant=flashinfer::DefaultFP8Attention, Params=PagedParams]" at line 7 of /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_3.cu
1 error detected in the compilation of "/root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_3.cu".
[2/5] /usr/local/cuda/bin/nvcc --generate-dependencies-with-compile --dependency-output batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_2.cuda.o.d -DPy_LIMITED_API=0x03090000 -D_GLIBCXX_USE_CXX11_ABI=1 -isystem /usr/include/python3.12 -isystem /usr/local/cuda/include -isystem /usr/local/cuda/include/cccl -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/csrc -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/tools/util/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/spdlog/include --compiler-options=-fPIC --expt-relaxed-constexpr -static-global-template-stub=false -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -std=c++17 --threads=1 -use_fast_math -DFLASHINFER_ENABLE_F16 -DFLASHINFER_ENABLE_BF16 -DFLASHINFER_ENABLE_FP8_E4M3 -DFLASHINFER_ENABLE_FP8_E5M2 -DNDEBUG -O3 -gencode=arch=compute_90a,code=sm_90a -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -c /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_2.cu -o batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_2.cuda.o
FAILED: [code=1] batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_2.cuda.o
/usr/local/cuda/bin/nvcc --generate-dependencies-with-compile --dependency-output batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_2.cuda.o.d -DPy_LIMITED_API=0x03090000 -D_GLIBCXX_USE_CXX11_ABI=1 -isystem /usr/include/python3.12 -isystem /usr/local/cuda/include -isystem /usr/local/cuda/include/cccl -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/csrc -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/tools/util/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/spdlog/include --compiler-options=-fPIC --expt-relaxed-constexpr -static-global-template-stub=false -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -std=c++17 --threads=1 -use_fast_math -DFLASHINFER_ENABLE_F16 -DFLASHINFER_ENABLE_BF16 -DFLASHINFER_ENABLE_FP8_E4M3 -DFLASHINFER_ENABLE_FP8_E5M2 -DNDEBUG -O3 -gencode=arch=compute_90a,code=sm_90a -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -c /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_2.cu -o batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_2.cuda.o
/usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_traits.hpp(130): error: static assertion failed with "Copy_Traits: dst failed to vectorize into registers. Layout is incompatible with this CopyOp."
static_assert(decltype(size(rD) == Int<RegNumDst>{})::value, "Copy_Traits: dst failed to vectorize into registers. Layout is incompatible with this CopyOp.")
^
detected during:
instantiation of "void cute::copy_unpack(const AnyCPYTraits &, const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &) [with AnyCPYTraits=cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 103 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
instantiation of "void cute::Copy_Atom<cute::Copy_Traits<Args...>, CopyInternalType>::call(const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &) const [with Args=<cute::SM90_U32x4_STSM_N>, CopyInternalType=PagedParams::DTypeO, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 124 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
instantiation of "void cute::Copy_Atom<cute::Copy_Traits<Args...>, CopyInternalType>::call(const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &&) const [with Args=<cute::SM90_U32x4_STSM_N>, CopyInternalType=PagedParams::DTypeO, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 226 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/algorithm/copy.hpp
instantiation of "void cute::copy(const cute::Copy_Atom<CopyArgs...> &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &) [with CopyArgs=<cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, PagedParams::DTypeO>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::_16, cute::_4>, cute::tuple<cute::_1, cute::_16>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>>]" at line 545 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/algorithm/copy.hpp
instantiation of "void cute::copy(const CopyPolicy &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &&) [with CopyPolicy=cute::Copy_Atom<cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, PagedParams::DTypeO>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::_16, cute::_4>, cute::tuple<cute::_1, cute::_16>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>>]" at line 109 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
[ 3 instantiation contexts not shown ]
instantiation of "void cute::copy(const cute::TiledCopy<CopyAtom, TV, Tiler> &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &) [with CopyAtom=cute::Copy_Atom<cute::SM90_U32x4_STSM_N, PagedParams::DTypeO>, TV=cute::Layout<cute::tuple<cute::tuple<cute::_4, cute::_8, cute::C<8>>, cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<16>>, cute::tuple<cute::_1, cute::_1>>>, cute::tuple<cute::tuple<cute::_256, cute::_1, cute::_16>, cute::tuple<cute::tuple<cute::_128, cute::_8, cute::_1024>, cute::tuple<cute::_0, cute::_0>>>>, Tiler=cute::tuple<cute::_128, cute::_128>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::tuple<cute::_16, cute::_4>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::_1, cute::_16>, cute::C<0>, cute::C<0>>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>, cute::C<0>, cute::C<0>>>]" at line 129 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/epilogue.cuh
instantiation of "void flashinfer::FP8CollectiveEpilogue<Ktraits>::store(const flashinfer::FP8CollectiveEpilogue<Ktraits>::Params &, const FrgTensorO &, const FrgTensorLSE &, SharedStorage &, TiledMma, int, const BlockCoord &) [with Ktraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, BlockCoord=cute::tuple<int, int, int, int, int, int, int, int>, SharedStorage=flashinfer::SharedStorageQKVOVt<cutlass::PipelineAsync<2>, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, 128, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>>>>, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>, cute::tuple<cute::_1, cute::_2>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>, cute::tuple<cute::C<0>, cute::_16384>>>>, cute::ComposedLayout<cute::Swizzle<2, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::tuple<cute::_8, cute::_8>, cute::_2>, cute::tuple<cute::tuple<cute::_64, cute::_1>, cute::_2>, cute::tuple<cute::_1, cute::_2>>, cute::tuple<cute::tuple<cute::tuple<cute::_64, cute::_512>, cute::C<4096>>, cute::tuple<cute::tuple<cute::_1, cute::_0>, cute::C<8192>>, cute::tuple<cute::C<0>, cute::_16384>>>>, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>>>>>, FrgTensorO=cute::Tensor<cute::ArrayEngine<float, 64UL>, cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<16>>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::_4>, cute::C<0>, cute::C<0>>>>, FrgTensorLSE=cute::Tensor<cute::ArrayEngine<float, 2UL>, cute::Layout<cute::tuple<cute::_2>, cute::tuple<cute::_1>>>, TiledMma=cute::TiledMMA<cute::MMA_Atom<cute::SM90::GMMA::MMA_64x128x32_F32E4M3E4M3_RS_TN<cute::SM90::GMMA::ScaleIn::One, cute::SM90::GMMA::ScaleIn::One>>, cute::Layout<cute::tuple<cute::_2, cute::_1, cute::_1>, cute::tuple<cute::_1, cute::_0, cute::_0>>, cute::tuple<cute::Underscore, cute::Underscore, cute::Underscore>>]" at line 244 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "void flashinfer::FP8PrefillWithKVCacheKernel<CollectiveMainloop,CollectiveEpilogue,Ktraits,LEFT_SLIDING_WINDOW,CAUSAL,TileScheduler>(CollectiveMainloop::Params, CollectiveEpilogue::Params, TileScheduler::Params) [with CollectiveMainloop=flashinfer::FP8SparseCollectiveMainloop<PagedParams::AdditionalParams, flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, false>, CollectiveEpilogue=flashinfer::FP8CollectiveEpilogue<flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>>, Ktraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, LEFT_SLIDING_WINDOW=false, CAUSAL=false, TileScheduler=flashinfer::BatchPrefillTileScheduler<PagedParams::IdType>]" at line 369 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "cudaError_t flashinfer::BatchFP8PrefillWithPagedKVCacheKernelTraitsDispatched<KernelTraits,LEFT_SLIDING_WINDOW,CAUSAL,SAME_SCHEDULE_FOR_ALL_HEADS,Params>(Params &, cudaStream_t) [with KernelTraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, LEFT_SLIDING_WINDOW=false, CAUSAL=false, SAME_SCHEDULE_FOR_ALL_HEADS=true, Params=PagedParams]" at line 458 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "cudaError_t flashinfer::BatchFP8PrefillWithPagedKVCacheDispatched<HEAD_DIM,MASK_MODE,LEFT_SLIDING_WINDOW,SAME_SCHEDULE_FOR_ALL_HEADS,AttentionVariant,Params>(Params &, __nv_bool, cudaStream_t) [with HEAD_DIM=128U, MASK_MODE=flashinfer::MaskMode::kCustom, LEFT_SLIDING_WINDOW=false, SAME_SCHEDULE_FOR_ALL_HEADS=true, AttentionVariant=flashinfer::DefaultFP8Attention, Params=PagedParams]" at line 7 of /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_2.cu
1 error detected in the compilation of "/root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_2.cu".
[3/5] /usr/local/cuda/bin/nvcc --generate-dependencies-with-compile --dependency-output batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_0.cuda.o.d -DPy_LIMITED_API=0x03090000 -D_GLIBCXX_USE_CXX11_ABI=1 -isystem /usr/include/python3.12 -isystem /usr/local/cuda/include -isystem /usr/local/cuda/include/cccl -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/csrc -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/tools/util/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/spdlog/include --compiler-options=-fPIC --expt-relaxed-constexpr -static-global-template-stub=false -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -std=c++17 --threads=1 -use_fast_math -DFLASHINFER_ENABLE_F16 -DFLASHINFER_ENABLE_BF16 -DFLASHINFER_ENABLE_FP8_E4M3 -DFLASHINFER_ENABLE_FP8_E5M2 -DNDEBUG -O3 -gencode=arch=compute_90a,code=sm_90a -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -c /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_0.cu -o batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_0.cuda.o
FAILED: [code=1] batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_0.cuda.o
/usr/local/cuda/bin/nvcc --generate-dependencies-with-compile --dependency-output batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_0.cuda.o.d -DPy_LIMITED_API=0x03090000 -D_GLIBCXX_USE_CXX11_ABI=1 -isystem /usr/include/python3.12 -isystem /usr/local/cuda/include -isystem /usr/local/cuda/include/cccl -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/csrc -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/tools/util/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/spdlog/include --compiler-options=-fPIC --expt-relaxed-constexpr -static-global-template-stub=false -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -std=c++17 --threads=1 -use_fast_math -DFLASHINFER_ENABLE_F16 -DFLASHINFER_ENABLE_BF16 -DFLASHINFER_ENABLE_FP8_E4M3 -DFLASHINFER_ENABLE_FP8_E5M2 -DNDEBUG -O3 -gencode=arch=compute_90a,code=sm_90a -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -c /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_0.cu -o batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_0.cuda.o
/usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_traits.hpp(130): error: static assertion failed with "Copy_Traits: dst failed to vectorize into registers. Layout is incompatible with this CopyOp."
static_assert(decltype(size(rD) == Int<RegNumDst>{})::value, "Copy_Traits: dst failed to vectorize into registers. Layout is incompatible with this CopyOp.")
^
detected during:
instantiation of "void cute::copy_unpack(const AnyCPYTraits &, const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &) [with AnyCPYTraits=cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 103 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
instantiation of "void cute::Copy_Atom<cute::Copy_Traits<Args...>, CopyInternalType>::call(const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &) const [with Args=<cute::SM90_U32x4_STSM_N>, CopyInternalType=PagedParams::DTypeO, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 124 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
instantiation of "void cute::Copy_Atom<cute::Copy_Traits<Args...>, CopyInternalType>::call(const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &&) const [with Args=<cute::SM90_U32x4_STSM_N>, CopyInternalType=PagedParams::DTypeO, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 226 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/algorithm/copy.hpp
instantiation of "void cute::copy(const cute::Copy_Atom<CopyArgs...> &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &) [with CopyArgs=<cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, PagedParams::DTypeO>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::_16, cute::_4>, cute::tuple<cute::_1, cute::_16>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>>]" at line 545 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/algorithm/copy.hpp
instantiation of "void cute::copy(const CopyPolicy &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &&) [with CopyPolicy=cute::Copy_Atom<cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, PagedParams::DTypeO>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::_16, cute::_4>, cute::tuple<cute::_1, cute::_16>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>>]" at line 109 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
[ 3 instantiation contexts not shown ]
instantiation of "void cute::copy(const cute::TiledCopy<CopyAtom, TV, Tiler> &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &) [with CopyAtom=cute::Copy_Atom<cute::SM90_U32x4_STSM_N, PagedParams::DTypeO>, TV=cute::Layout<cute::tuple<cute::tuple<cute::_4, cute::_8, cute::C<8>>, cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<16>>, cute::tuple<cute::_1, cute::_1>>>, cute::tuple<cute::tuple<cute::_256, cute::_1, cute::_16>, cute::tuple<cute::tuple<cute::_128, cute::_8, cute::_1024>, cute::tuple<cute::_0, cute::_0>>>>, Tiler=cute::tuple<cute::_128, cute::_128>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::tuple<cute::_16, cute::_4>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::_1, cute::_16>, cute::C<0>, cute::C<0>>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>, cute::C<0>, cute::C<0>>>]" at line 129 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/epilogue.cuh
instantiation of "void flashinfer::FP8CollectiveEpilogue<Ktraits>::store(const flashinfer::FP8CollectiveEpilogue<Ktraits>::Params &, const FrgTensorO &, const FrgTensorLSE &, SharedStorage &, TiledMma, int, const BlockCoord &) [with Ktraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, BlockCoord=cute::tuple<int, int, int, int, int, int, int, int>, SharedStorage=flashinfer::SharedStorageQKVOVt<cutlass::PipelineAsync<2>, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, 128, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>>>>, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>, cute::tuple<cute::_1, cute::_2>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>, cute::tuple<cute::C<0>, cute::_16384>>>>, cute::ComposedLayout<cute::Swizzle<2, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::tuple<cute::_8, cute::_8>, cute::_2>, cute::tuple<cute::tuple<cute::_64, cute::_1>, cute::_2>, cute::tuple<cute::_1, cute::_2>>, cute::tuple<cute::tuple<cute::tuple<cute::_64, cute::_512>, cute::C<4096>>, cute::tuple<cute::tuple<cute::_1, cute::_0>, cute::C<8192>>, cute::tuple<cute::C<0>, cute::_16384>>>>, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>>>>>, FrgTensorO=cute::Tensor<cute::ArrayEngine<float, 64UL>, cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<16>>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::_4>, cute::C<0>, cute::C<0>>>>, FrgTensorLSE=cute::Tensor<cute::ArrayEngine<float, 2UL>, cute::Layout<cute::tuple<cute::_2>, cute::tuple<cute::_1>>>, TiledMma=cute::TiledMMA<cute::MMA_Atom<cute::SM90::GMMA::MMA_64x128x32_F32E4M3E4M3_RS_TN<cute::SM90::GMMA::ScaleIn::One, cute::SM90::GMMA::ScaleIn::One>>, cute::Layout<cute::tuple<cute::_2, cute::_1, cute::_1>, cute::tuple<cute::_1, cute::_0, cute::_0>>, cute::tuple<cute::Underscore, cute::Underscore, cute::Underscore>>]" at line 244 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "void flashinfer::FP8PrefillWithKVCacheKernel<CollectiveMainloop,CollectiveEpilogue,Ktraits,LEFT_SLIDING_WINDOW,CAUSAL,TileScheduler>(CollectiveMainloop::Params, CollectiveEpilogue::Params, TileScheduler::Params) [with CollectiveMainloop=flashinfer::FP8SparseCollectiveMainloop<PagedParams::AdditionalParams, flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, false>, CollectiveEpilogue=flashinfer::FP8CollectiveEpilogue<flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>>, Ktraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, LEFT_SLIDING_WINDOW=false, CAUSAL=false, TileScheduler=flashinfer::BatchPrefillTileScheduler<PagedParams::IdType>]" at line 369 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "cudaError_t flashinfer::BatchFP8PrefillWithPagedKVCacheKernelTraitsDispatched<KernelTraits,LEFT_SLIDING_WINDOW,CAUSAL,SAME_SCHEDULE_FOR_ALL_HEADS,Params>(Params &, cudaStream_t) [with KernelTraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, LEFT_SLIDING_WINDOW=false, CAUSAL=false, SAME_SCHEDULE_FOR_ALL_HEADS=true, Params=PagedParams]" at line 458 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "cudaError_t flashinfer::BatchFP8PrefillWithPagedKVCacheDispatched<HEAD_DIM,MASK_MODE,LEFT_SLIDING_WINDOW,SAME_SCHEDULE_FOR_ALL_HEADS,AttentionVariant,Params>(Params &, __nv_bool, cudaStream_t) [with HEAD_DIM=128U, MASK_MODE=flashinfer::MaskMode::kNone, LEFT_SLIDING_WINDOW=false, SAME_SCHEDULE_FOR_ALL_HEADS=true, AttentionVariant=flashinfer::DefaultFP8Attention, Params=PagedParams]" at line 7 of /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_0.cu
1 error detected in the compilation of "/root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_0.cu".
[4/5] /usr/local/cuda/bin/nvcc --generate-dependencies-with-compile --dependency-output batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_1.cuda.o.d -DPy_LIMITED_API=0x03090000 -D_GLIBCXX_USE_CXX11_ABI=1 -isystem /usr/include/python3.12 -isystem /usr/local/cuda/include -isystem /usr/local/cuda/include/cccl -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/csrc -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/tools/util/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/spdlog/include --compiler-options=-fPIC --expt-relaxed-constexpr -static-global-template-stub=false -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -std=c++17 --threads=1 -use_fast_math -DFLASHINFER_ENABLE_F16 -DFLASHINFER_ENABLE_BF16 -DFLASHINFER_ENABLE_FP8_E4M3 -DFLASHINFER_ENABLE_FP8_E5M2 -DNDEBUG -O3 -gencode=arch=compute_90a,code=sm_90a -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -c /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_1.cu -o batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_1.cuda.o
FAILED: [code=1] batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_1.cuda.o
/usr/local/cuda/bin/nvcc --generate-dependencies-with-compile --dependency-output batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_1.cuda.o.d -DPy_LIMITED_API=0x03090000 -D_GLIBCXX_USE_CXX11_ABI=1 -isystem /usr/include/python3.12 -isystem /usr/local/cuda/include -isystem /usr/local/cuda/include/cccl -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/tvm_ffi/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/csrc -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/tools/util/include -isystem /usr/local/lib/python3.12/dist-packages/flashinfer/data/spdlog/include --compiler-options=-fPIC --expt-relaxed-constexpr -static-global-template-stub=false -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -std=c++17 --threads=1 -use_fast_math -DFLASHINFER_ENABLE_F16 -DFLASHINFER_ENABLE_BF16 -DFLASHINFER_ENABLE_FP8_E4M3 -DFLASHINFER_ENABLE_FP8_E5M2 -DNDEBUG -O3 -gencode=arch=compute_90a,code=sm_90a -DFLASHINFER_ENABLE_FP8_E8M0 -DFLASHINFER_ENABLE_FP4_E2M1 -c /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_1.cu -o batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_1.cuda.o
/usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_traits.hpp(130): error: static assertion failed with "Copy_Traits: dst failed to vectorize into registers. Layout is incompatible with this CopyOp."
static_assert(decltype(size(rD) == Int<RegNumDst>{})::value, "Copy_Traits: dst failed to vectorize into registers. Layout is incompatible with this CopyOp.")
^
detected during:
instantiation of "void cute::copy_unpack(const AnyCPYTraits &, const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &) [with AnyCPYTraits=cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 103 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
instantiation of "void cute::Copy_Atom<cute::Copy_Traits<Args...>, CopyInternalType>::call(const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &) const [with Args=<cute::SM90_U32x4_STSM_N>, CopyInternalType=PagedParams::DTypeO, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 124 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
instantiation of "void cute::Copy_Atom<cute::Copy_Traits<Args...>, CopyInternalType>::call(const cute::Tensor<SEngine, SLayout> &, cute::Tensor<DEngine, DLayout> &&) const [with Args=<cute::SM90_U32x4_STSM_N>, CopyInternalType=PagedParams::DTypeO, SEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SLayout=cute::Layout<cute::tuple<cute::_16>, cute::tuple<cute::_1>>, DEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>>>]" at line 226 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/algorithm/copy.hpp
instantiation of "void cute::copy(const cute::Copy_Atom<CopyArgs...> &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &) [with CopyArgs=<cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, PagedParams::DTypeO>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::_16, cute::_4>, cute::tuple<cute::_1, cute::_16>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>>]" at line 545 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/algorithm/copy.hpp
instantiation of "void cute::copy(const CopyPolicy &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &&) [with CopyPolicy=cute::Copy_Atom<cute::Copy_Traits<cute::SM90_U32x4_STSM_N>, PagedParams::DTypeO>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::_16, cute::_4>, cute::tuple<cute::_1, cute::_16>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>>]" at line 109 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/cutlass/include/cute/atom/copy_atom.hpp
[ 3 instantiation contexts not shown ]
instantiation of "void cute::copy(const cute::TiledCopy<CopyAtom, TV, Tiler> &, const cute::Tensor<SrcEngine, SrcLayout> &, cute::Tensor<DstEngine, DstLayout> &) [with CopyAtom=cute::Copy_Atom<cute::SM90_U32x4_STSM_N, PagedParams::DTypeO>, TV=cute::Layout<cute::tuple<cute::tuple<cute::_4, cute::_8, cute::C<8>>, cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<16>>, cute::tuple<cute::_1, cute::_1>>>, cute::tuple<cute::tuple<cute::_256, cute::_1, cute::_16>, cute::tuple<cute::tuple<cute::_128, cute::_8, cute::_1024>, cute::tuple<cute::_0, cute::_0>>>>, Tiler=cute::tuple<cute::_128, cute::_128>, SrcEngine=cute::ViewEngine<PagedParams::DTypeKV *>, SrcLayout=cute::Layout<cute::tuple<cute::tuple<cute::_16, cute::_4>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::_1, cute::_16>, cute::C<0>, cute::C<0>>>, DstEngine=cute::ViewEngine<cute::swizzle_ptr<cute::Swizzle<3, 4, 3>, cute::smem_ptr<PagedParams::DTypeO *>>>, DstLayout=cute::Layout<cute::tuple<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<4>>, cute::_4>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::tuple<cute::_1, cute::_1024, cute::_2>, cute::C<32>>, cute::C<0>, cute::C<0>>>]" at line 129 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/epilogue.cuh
instantiation of "void flashinfer::FP8CollectiveEpilogue<Ktraits>::store(const flashinfer::FP8CollectiveEpilogue<Ktraits>::Params &, const FrgTensorO &, const FrgTensorLSE &, SharedStorage &, TiledMma, int, const BlockCoord &) [with Ktraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, BlockCoord=cute::tuple<int, int, int, int, int, int, int, int>, SharedStorage=flashinfer::SharedStorageQKVOVt<cutlass::PipelineAsync<2>, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, 128, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>>>>, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>, cute::tuple<cute::_1, cute::_2>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>, cute::tuple<cute::C<0>, cute::_16384>>>>, cute::ComposedLayout<cute::Swizzle<2, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::tuple<cute::_8, cute::_8>, cute::_2>, cute::tuple<cute::tuple<cute::_64, cute::_1>, cute::_2>, cute::tuple<cute::_1, cute::_2>>, cute::tuple<cute::tuple<cute::tuple<cute::_64, cute::_512>, cute::C<4096>>, cute::tuple<cute::tuple<cute::_1, cute::_0>, cute::C<8192>>, cute::tuple<cute::C<0>, cute::_16384>>>>, cute::ComposedLayout<cute::Swizzle<3, 4, 3>, cute::smem_ptr_flag_bits<8>, cute::Layout<cute::tuple<cute::tuple<cute::C<8>, cute::C<16>>, cute::tuple<cute::_128, cute::_1>>, cute::tuple<cute::tuple<cute::_128, cute::_1024>, cute::tuple<cute::_1, cute::_0>>>>>, FrgTensorO=cute::Tensor<cute::ArrayEngine<float, 64UL>, cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::C<16>>, cute::_1, cute::_1>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::_4>, cute::C<0>, cute::C<0>>>>, FrgTensorLSE=cute::Tensor<cute::ArrayEngine<float, 2UL>, cute::Layout<cute::tuple<cute::_2>, cute::tuple<cute::_1>>>, TiledMma=cute::TiledMMA<cute::MMA_Atom<cute::SM90::GMMA::MMA_64x128x32_F32E4M3E4M3_RS_TN<cute::SM90::GMMA::ScaleIn::One, cute::SM90::GMMA::ScaleIn::One>>, cute::Layout<cute::tuple<cute::_2, cute::_1, cute::_1>, cute::tuple<cute::_1, cute::_0, cute::_0>>, cute::tuple<cute::Underscore, cute::Underscore, cute::Underscore>>]" at line 244 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "void flashinfer::FP8PrefillWithKVCacheKernel<CollectiveMainloop,CollectiveEpilogue,Ktraits,LEFT_SLIDING_WINDOW,CAUSAL,TileScheduler>(CollectiveMainloop::Params, CollectiveEpilogue::Params, TileScheduler::Params) [with CollectiveMainloop=flashinfer::FP8SparseCollectiveMainloop<PagedParams::AdditionalParams, flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, true>, CollectiveEpilogue=flashinfer::FP8CollectiveEpilogue<flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>>, Ktraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, LEFT_SLIDING_WINDOW=false, CAUSAL=true, TileScheduler=flashinfer::BatchPrefillTileScheduler<PagedParams::IdType>]" at line 369 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "cudaError_t flashinfer::BatchFP8PrefillWithPagedKVCacheKernelTraitsDispatched<KernelTraits,LEFT_SLIDING_WINDOW,CAUSAL,SAME_SCHEDULE_FOR_ALL_HEADS,Params>(Params &, cudaStream_t) [with KernelTraits=flashinfer::FP8AttentionKernelTraits<false, 128, 128, 128, 2, PagedParams::DTypeQ, PagedParams::DTypeKV, PagedParams::DTypeO, PagedParams::IdType, flashinfer::DefaultFP8Attention>, LEFT_SLIDING_WINDOW=false, CAUSAL=true, SAME_SCHEDULE_FOR_ALL_HEADS=true, Params=PagedParams]" at line 458 of /usr/local/lib/python3.12/dist-packages/flashinfer/data/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh
instantiation of "cudaError_t flashinfer::BatchFP8PrefillWithPagedKVCacheDispatched<HEAD_DIM,MASK_MODE,LEFT_SLIDING_WINDOW,SAME_SCHEDULE_FOR_ALL_HEADS,AttentionVariant,Params>(Params &, __nv_bool, cudaStream_t) [with HEAD_DIM=128U, MASK_MODE=flashinfer::MaskMode::kCausal, LEFT_SLIDING_WINDOW=false, SAME_SCHEDULE_FOR_ALL_HEADS=true, AttentionVariant=flashinfer::DefaultFP8Attention, Params=PagedParams]" at line 7 of /root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_1.cu
1 error detected in the compilation of "/root/.cache/flashinfer/0.5.2/90_90a/generated/batch_prefill_with_kv_cache_dtype_q_e4m3_dtype_kv_e4m3_dtype_o_e4m3_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False_sm90/batch_prefill_paged_sm90_kernel_mask_1.cu".
ninja: build stopped: subcommand failed.
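For reference, here is a minimal Python sketch of the same call path outside the benchmark script, reconstructed from the repro command and the traceback above. It is untested against this exact setup: the workspace size, kv_layout="NHD", the plan() argument names/order, and the torch.float8_e4m3fn dtype spelling follow my reading of the flashinfer 0.5.2 API and may need adjusting.

import math
import torch
import flashinfer

# Shapes matching the failing benchmark command:
# batch_size=1, s_qo=s_kv=2000, page_size=64, 4 QO heads, 1 KV head, head_dim 128.
batch_size, s_qo, s_kv, page_size = 1, 2000, 2000, 64
num_qo_heads, num_kv_heads, head_dim = 4, 1, 128
num_pages = math.ceil(s_kv / page_size)              # 32 pages
last_page_len = s_kv - (num_pages - 1) * page_size   # 16 entries used in the last page

device = "cuda:0"
workspace = torch.empty(128 * 1024 * 1024, dtype=torch.uint8, device=device)
wrapper = flashinfer.BatchPrefillWithPagedKVCacheWrapper(
    workspace,
    kv_layout="NHD",
    backend="fa3",  # the failing JIT build is for the SM90 (fa3) FP8 kernels
)

qo_indptr = torch.tensor([0, s_qo], dtype=torch.int32, device=device)
kv_indptr = torch.tensor([0, num_pages], dtype=torch.int32, device=device)
kv_indices = torch.arange(num_pages, dtype=torch.int32, device=device)
kv_last_page_len = torch.tensor([last_page_len], dtype=torch.int32, device=device)

# plan() is where get_batch_prefill_module() JIT-compiles the SM90 FP8 prefill kernels;
# per the traceback, the ninja build then fails with the Copy_Traits static assertion.
wrapper.plan(
    qo_indptr,
    kv_indptr,
    kv_indices,
    kv_last_page_len,
    num_qo_heads,
    num_kv_heads,
    head_dim,
    page_size,
    q_data_type=torch.float8_e4m3fn,
    kv_data_type=torch.float8_e4m3fn,
)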