This repository was archived by the owner on Oct 25, 2024. It is now read-only.
File tree — 1 file changed: +14 −6 lines.
intel_extension_for_transformers/transformers/llm/evaluation/lm_eval/models — 1 file changed: +14 −6 lines.

Original file line number · Diff line number · Diff line change
@@ -838,12 +838,20 @@ def _create_tokenizer(
838838 else :
839839 # get the HF hub name via accessor on model
840840 model_name = self .model .name_or_path
841- self .tokenizer = transformers .AutoTokenizer .from_pretrained (
842- model_name ,
843- revision = revision ,
844- trust_remote_code = trust_remote_code ,
845- use_fast = use_fast_tokenizer ,
846- )
841+
842+ # chatglm2 tokenizer doesn't support loading from local.
843+ if hasattr (self .model , "config" ) and hasattr (self .model .config , "auto_map" ) and \
844+ "chatglm2" in self .model .config .auto_map ["AutoConfig" ]:
845+ self .tokenizer = transformers .AutoTokenizer .from_pretrained (
846+ "THUDM/chatglm2-6b" , trust_remote_code = True
847+ )
848+ else :
849+ self .tokenizer = transformers .AutoTokenizer .from_pretrained (
850+ model_name ,
851+ revision = revision ,
852+ trust_remote_code = trust_remote_code ,
853+ use_fast = use_fast_tokenizer ,
854+ )
847855 return None
848856
849857 def _detect_batch_size (self , requests = None , pos : int = 0 ):
You can't perform that action at this time.
0 commit comments