Skip to content

Commit c973b0b

Browse files
Ceng23333 authored and wooway777 committed
issue/691: 修复InfiniLM依赖问题
Signed-off-by: Ceng23333 <441651826@qq.com>
1 parent 1c25a90 commit c973b0b

File tree

2 files changed

+6
-15
lines changed

2 files changed

+6
-15
lines changed

src/infinicore/pybind11/ops/rope.hpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,11 @@ namespace py = pybind11;
99
namespace infinicore::ops {
1010

1111
inline void bind_rope(py::module &m) {
12+
13+
py::enum_<infinicore::nn::RoPE::Algo>(m, "RoPEAlgo")
14+
.value("GPT_J", infinicore::nn::RoPE::Algo::GPT_J)
15+
.value("GPT_NEOX", infinicore::nn::RoPE::Algo::GPT_NEOX);
16+
1217
m.def("rope",
1318
&op::rope,
1419
py::arg("x"),

src/infinicore/tensor/copy.cc

Lines changed: 1 addition & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -22,21 +22,7 @@ void TensorImpl::copy_from(Tensor src) {
2222
throw std::runtime_error("Cannot copy from tensor with different shape");
2323
}
2424
if (this->device() == src->device()) {
25-
26-
// If both tensors are contiguous, use direct memcpy (much faster and avoids rearrange issues)
27-
if (this->is_contiguous() && src->is_contiguous()) {
28-
// Use nbytes() to get the actual tensor size
29-
size_t copy_size = std::min(this->nbytes(), src->nbytes());
30-
31-
// For CPU-to-CPU copies, use regular memcpy. For device-to-device, use D2D memcpy
32-
if (this->device().getType() == Device::Type::CPU) {
33-
context::memcpyH2H(this->data(), src->data(), copy_size);
34-
} else {
35-
context::memcpyD2D(this->data(), src->data(), copy_size);
36-
}
37-
} else {
38-
op::rearrange_(Tensor(const_cast<TensorImpl *>(this)->shared_from_this()), src);
39-
}
25+
op::rearrange_(Tensor(const_cast<TensorImpl *>(this)->shared_from_this()), src);
4026
} else {
4127
if (!src->is_contiguous()) {
4228
src = src->contiguous();

0 commit comments

Comments (0)