@@ -1,6 +1,7 @@
 from __future__ import annotations

 import os
+import sys
 import json
 import ctypes
 import dataclasses
@@ -627,6 +628,8 @@ def chat_completion_handler(
                 json.dumps(schema), verbose=llama.verbose
             )
         except Exception as e:
+            if llama.verbose:
+                print(str(e), file=sys.stderr)
             grammar = llama_grammar.LlamaGrammar.from_string(
                 llama_grammar.JSON_GBNF, verbose=llama.verbose
             )
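This hunk and the matching one in the multimodal handler below share a pattern: try to compile a grammar from the request's JSON schema, and on any failure fall back to the generic JSON_GBNF grammar. The added lines surface the previously swallowed exception on stderr when llama.verbose is set. A minimal self-contained sketch of that pattern, with hypothetical stand-ins grammar_from_json_schema and GENERIC_JSON_GBNF in place of llama_grammar.LlamaGrammar.from_json_schema and llama_grammar.JSON_GBNF:

    import json
    import sys

    # Hypothetical stand-in for llama_grammar.JSON_GBNF: any permissive
    # grammar that accepts arbitrary JSON output.
    GENERIC_JSON_GBNF = "root ::= value"

    def grammar_from_json_schema(schema_json: str) -> str:
        # Hypothetical stand-in for LlamaGrammar.from_json_schema;
        # raises here to exercise the fallback path.
        raise ValueError("unsupported schema construct")

    def build_grammar(schema: dict, verbose: bool = False) -> str:
        try:
            return grammar_from_json_schema(json.dumps(schema))
        except Exception as e:
            if verbose:
                # The change in this commit: report why we fell back
                # instead of failing silently.
                print(str(e), file=sys.stderr)
            return GENERIC_JSON_GBNF

    print(build_grammar({"type": "object"}, verbose=True))

Printing only under verbose keeps the default path quiet while preserving the fallback, so a schema the grammar builder cannot handle degrades to generic JSON output rather than raising.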
@@ -1611,12 +1614,12 @@ def message_to_str(msg: llama_types.ChatCompletionRequestMessage):
         function_call = completion_text.split(".")[-1][:-1]
         new_prompt = prompt + completion_text + stop
     elif isinstance(function_call, str) and function_call != "none":
-        new_prompt = prompt + f":\n"
+        new_prompt = prompt + ":\n"
     elif isinstance(function_call, dict):
         new_prompt = prompt + f" to=functions.{function_call['name']}:\n"
         function_call = function_call["name"]
     else:
-        new_prompt = prompt + f":\n"
+        new_prompt = prompt + ":\n"

     function_body = None
     for function in functions or []:
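The two removed/added pairs above drop a redundant f prefix: f":\n" contains no placeholders, so it is just a plain string literal (pyflakes flags this as F541). For context, a free-standing sketch of the branch's dispatch on function_call, with prompt and function_call as plain inputs rather than the handler's real state:

    def continuation_prompt(prompt: str, function_call) -> str:
        # A dict forces a specific function, mirroring the hunk above;
        # anything else just appends ":\n" (a plain literal, so no
        # f-string is needed).
        if isinstance(function_call, dict):
            return prompt + f" to=functions.{function_call['name']}:\n"
        return prompt + ":\n"

    print(continuation_prompt("assistant", {"name": "get_weather"}))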
@@ -2871,6 +2874,8 @@ def embed_image_bytes(image_bytes: bytes):
                 json.dumps(schema), verbose=llama.verbose
             )
         except Exception as e:
+            if llama.verbose:
+                print(str(e), file=sys.stderr)
             grammar = llama_grammar.LlamaGrammar.from_string(
                 llama_grammar.JSON_GBNF, verbose=llama.verbose
             )