
Commit d1456c1

Fixed preferred spelling (#1272)
Fixed spellings: `preferred`, `inifinitely`, `inteactive`
1 parent: 1dbab8c

7 files changed, +8 -8 lines changed


LLama/Abstractions/IInferenceParams.cs

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ public interface IInferenceParams
     public int TokensKeep { get; set; }
 
     /// <summary>
-    /// how many new tokens to predict (n_predict), set to -1 to inifinitely generate response
+    /// how many new tokens to predict (n_predict), set to -1 to infinitely generate response
     /// until it complete.
     /// </summary>
     public int MaxTokens { get; set; }

LLama/Common/InferenceParams.cs

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ public record InferenceParams
     public int TokensKeep { get; set; } = 0;
 
     /// <summary>
-    /// how many new tokens to predict (n_predict), set to -1 to inifinitely generate response
+    /// how many new tokens to predict (n_predict), set to -1 to infinitely generate response
     /// until it complete.
     /// </summary>
     public int MaxTokens { get; set; } = -1;
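
For readers skimming the diff, a minimal sketch of how this property is typically set, assuming the `InferenceParams` record from `LLama.Common` shown above (the surrounding model setup is omitted):

```csharp
using LLama.Common;

// MaxTokens = -1 means "no cap": generation runs until the model emits an
// end-of-sequence token (or an antiprompt stops it) rather than stopping
// after a fixed number of new tokens.
var unbounded = new InferenceParams { MaxTokens = -1 };

// A bounded alternative: predict at most 256 new tokens.
var bounded = new InferenceParams { MaxTokens = 256 };
```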

LLama/LLamaInstructExecutor.cs

Lines changed: 1 addition & 1 deletion
@@ -124,7 +124,7 @@ protected override Task PreprocessInputs(string? text, InferStateArgs args)
 
         if (_is_prompt_run)
         {
-            // When running the first input (prompt) in inteactive mode, we should specially process it.
+            // When running the first input (prompt) in interactive mode, we should specially process it.
             if (text == null) throw new ArgumentException("Prompt cannot be null to trigger continuation if a prompt has not been provided previously.");
             _embed_inps = Context.Tokenize(text, true, true).ToList();
         }
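
The guarded branch above implies a usage contract: the first call into the executor must carry a non-null prompt, while later calls may continue the session. A rough sketch under that assumption (the model path and parameter values are placeholders, not part of this commit):

```csharp
using LLama;
using LLama.Common;

var modelParams = new ModelParams("path/to/model.gguf"); // placeholder path
using var weights = LLamaWeights.LoadFromFile(modelParams);
using var context = weights.CreateContext(modelParams);
var executor = new InstructExecutor(context);

// First run: a prompt is required, otherwise PreprocessInputs throws
// the ArgumentException shown in the diff above.
await foreach (var token in executor.InferAsync(
                   "Explain what n_predict controls.",
                   new InferenceParams { MaxTokens = 128 }))
{
    Console.Write(token);
}
```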

LLama/Native/Load/NativeLibraryConfig.cs

Lines changed: 2 additions & 2 deletions
@@ -72,7 +72,7 @@ public NativeLibraryConfig WithVulkan(bool enable = true)
     }
 
     /// <summary>
-    /// Configure the prefferred avx support level of the backend.
+    /// Configure the preferred AVX support level of the backend.
     /// Default value is detected automatically due to your operating system.
     /// </summary>
     /// <param name="level"></param>

@@ -463,7 +463,7 @@ public NativeLibraryConfigContainer WithVulkan(bool enable = true)
     }
 
     /// <summary>
-    /// Configure the prefferred avx support level of the backend.
+    /// Configure the preferred AVX support level of the backend.
     /// </summary>
     /// <param name="level"></param>
     /// <returns></returns>

docs/Tutorials/Executors.md

Lines changed: 1 addition & 1 deletion
@@ -260,7 +260,7 @@ public record InferenceParams
     public int TokensKeep { get; set; } = 0;
 
     /// <summary>
-    /// how many new tokens to predict (n_predict), set to -1 to inifinitely generate response
+    /// how many new tokens to predict (n_predict), set to -1 to infinitely generate response
     /// until it complete.
     /// </summary>
     public int MaxTokens { get; set; } = -1;

docs/xmldocs/llama.abstractions.iinferenceparams.md

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ public abstract int TokensKeep { get; set; }
 
 ### **MaxTokens**
 
-how many new tokens to predict (n_predict), set to -1 to inifinitely generate response
+how many new tokens to predict (n_predict), set to -1 to infinitely generate response
 until it complete.
 
 ```csharp

docs/xmldocs/llama.common.inferenceparams.md

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@ public int TokensKeep { get; set; }
 
 ### **MaxTokens**
 
-how many new tokens to predict (n_predict), set to -1 to inifinitely generate response
+how many new tokens to predict (n_predict), set to -1 to infinitely generate response
 until it complete.
 
 ```csharp
