Skip to content

Commit 6fa8b77

Browse files
committed
Formatted with cargo fmt
1 parent bb0b356 commit 6fa8b77

File tree

1 file changed

+5
-6
lines changed

1 file changed

+5
-6
lines changed

llama-cpp-2/src/model/params.rs

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -109,24 +109,23 @@ impl LlamaModelParams {
109109
}
110110

111111
impl LlamaModelParams {
112-
113112
/// Adds buffer type overrides to move all mixture-of-experts layers to CPU.
114113
pub fn add_cpu_moe_override(self: Pin<&mut Self>) {
115114
self.add_cpu_buft_override(c"\\.ffn_(up|down|gate)_(ch|)exps");
116115
}
117116

118117
/// Appends a buffer type override to the model parameters, to move layers matching pattern to CPU.
119118
/// It must be pinned as this creates a self-referential struct.
120-
pub fn add_cpu_buft_override(
121-
mut self: Pin<&mut Self>,
122-
key: &CStr,
123-
) {
119+
pub fn add_cpu_buft_override(mut self: Pin<&mut Self>, key: &CStr) {
124120
let buft_override = self
125121
.buft_overrides
126122
.get_mut(0)
127123
.expect("buft_overrides did not have a next allocated");
128124

129-
assert!(buft_override.pattern.is_null(), "last buft_override was not empty");
125+
assert!(
126+
buft_override.pattern.is_null(),
127+
"last buft_override was not empty"
128+
);
130129

131130
// There should be some way to do this without iterating over everything.
132131
for (_i, &c) in key.to_bytes_with_nul().iter().enumerate() {

0 commit comments

Comments
 (0)