@@ -105,28 +105,32 @@ def __init__(self, model_path: str, vision: bool, device: str = "gpu"):
105105 )
106106 self .model_path = snapshot_path
107107
108- # it is case sensitive, only receive all char captilized only
109- self .model = OvPhi3Vision (
110- self .model_path ,
111- self .device .upper ()
112- )
113- logger .info ("Model loaded" )
114-
115- self .processor = AutoProcessor .from_pretrained (
116- self .model_path ,
117- trust_remote_code = True
118- )
119- logger .info ("Processor loaded" )
120- print ("processor directory: " ,dir (self .processor ))
121- self .tokenizer_stream = TextIteratorStreamer (
122- self .processor ,
123- ** {
124- "skip_special_tokens" : True ,
125- "skip_prompt" : True ,
126- "clean_up_tokenization_spaces" : False ,
127- },
128- )
129-
108+ try :
109+         # it is case sensitive, only accepts all characters capitalized
110+ self .model = OvPhi3Vision (
111+ self .model_path ,
112+ self .device .upper ()
113+ )
114+ logger .info ("Model loaded" )
115+
116+ self .processor = AutoProcessor .from_pretrained (
117+ self .model_path ,
118+ trust_remote_code = True
119+ )
120+ logger .info ("Processor loaded" )
121+ print ("processor directory: " ,dir (self .processor ))
122+ self .tokenizer_stream = TextIteratorStreamer (
123+ self .processor ,
124+ ** {
125+ "skip_special_tokens" : True ,
126+ "skip_prompt" : True ,
127+ "clean_up_tokenization_spaces" : False ,
128+ },
129+ )
130+
131+ except Exception as e :
132+ logger .error ("EmbeddedLLM Engine only support Phi 3 Vision Model." )
133+ exit ()
130134
131135 async def generate_vision (
132136 self ,
0 commit comments