@@ -311,6 +311,46 @@ const snippetMlxLm = (model: ModelData): LocalAppSnippet[] => {
 	];
 };
 
+const snippetMlxKnife = (model: ModelData): LocalAppSnippet[] => {
+	return [
+		{
+			title: "Terminal Usage",
+			setup: [],
+			content: [
+				"# Install MLX Knife",
+				"pip install mlx-knife",
+				"",
+				"# Check available models",
+				"mlxk list",
+				"",
+				"# Run model (if already installed)",
+				`mlxk run ${model.id.replace("mlx-community/", "")} "Hello!"`,
+				"",
+				"# Interactive chat",
+				`mlxk run ${model.id.replace("mlx-community/", "")}`,
+				"",
+				"# Download if needed",
+				`mlxk pull ${model.id}`,
+			].join("\n"),
+		},
+		{
+			title: "Web Interface",
+			setup: [],
+			content: [
+				"# Install MLX Knife",
+				"pip install mlx-knife",
+				"",
+				"# Start server",
+				"mlxk server --port 8000",
+				"",
+				"# Download web interface",
+				"curl -O https://raw.githubusercontent.com/mzau/mlx-knife/main/simple_chat.html",
+				"open simple_chat.html",
+			].join("\n"),
+		},
+	];
+};
+
 const snippetDockerModelRunner = (model: ModelData, filepath?: string): string => {
 	return `docker model run hf.co/${model.id}${getQuantTag(filepath)}`;
 };
@@ -387,6 +427,13 @@ export const LOCAL_APPS = {
 		displayOnModelPage: (model) => model.pipeline_tag === "text-generation" && isMlxModel(model),
 		snippet: snippetMlxLm,
 	},
+	"mlx-knife": {
+		prettyLabel: "MLX Knife",
+		docsUrl: "https://github.com/mzau/mlx-knife",
+		mainTask: "text-generation",
+		displayOnModelPage: (model) => model.pipeline_tag === "text-generation" && isMlxModel(model),
+		snippet: snippetMlxKnife,
+	},
 	tgi: {
 		prettyLabel: "TGI",
 		docsUrl: "https://huggingface.co/docs/text-generation-inference/",
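For illustration, a minimal standalone sketch of the terminal snippet the new snippetMlxKnife generator builds. The ExampleModel interface and the model id below are assumptions standing in for the real ModelData type and an actual repository; they are not part of this change.

```ts
// Sketch only: approximates the "Terminal Usage" content produced by
// snippetMlxKnife. ExampleModel is a hypothetical stand-in for ModelData,
// and the model id is illustrative.
interface ExampleModel {
	id: string;
}

const buildTerminalSnippet = (model: ExampleModel): string =>
	[
		"# Install MLX Knife",
		"pip install mlx-knife",
		"",
		"# Run model (if already installed)",
		`mlxk run ${model.id.replace("mlx-community/", "")} "Hello!"`,
		"",
		"# Download if needed",
		`mlxk pull ${model.id}`,
	].join("\n");

console.log(buildTerminalSnippet({ id: "mlx-community/Example-7B-4bit" }));
// Prints, among other lines:
//   mlxk run Example-7B-4bit "Hello!"
//   mlxk pull mlx-community/Example-7B-4bit
```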