From 8fc6d5d78ba2a7ae92e685fbe7934f0d6e1ae4d1 Mon Sep 17 00:00:00 2001
From: eaidova
Date: Mon, 26 May 2025 19:23:40 +0400
Subject: [PATCH 1/5] [wwb]: load transformers model without trust_remote_code
 first, enable it only on failure

---
 .../whowhatbench/model_loaders.py             | 21 ++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/tools/who_what_benchmark/whowhatbench/model_loaders.py b/tools/who_what_benchmark/whowhatbench/model_loaders.py
index 50a28e011f..5ea5fe9d15 100644
--- a/tools/who_what_benchmark/whowhatbench/model_loaders.py
+++ b/tools/who_what_benchmark/whowhatbench/model_loaders.py
@@ -89,7 +89,12 @@ def load_text_hf_pipeline(model_id, device):
     model_kwargs = {}
 
     if not torch.cuda.is_available or device.lower() == "cpu":
-        config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+        trust_remote_code = False
+        try:
+            config = AutoConfig.from_pretrained(model_id)
+        except Exception:
+            config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+            trust_remote_code = True
         is_gptq = False
         is_awq = False
         if getattr(config, "quantization_config", None):
@@ -99,13 +104,19 @@ def load_text_hf_pipeline(model_id, device):
             # infer in FP32
             model_kwargs["torch_dtype"] = torch.float32
         with mock_torch_cuda_is_available(is_gptq or is_awq):
-            model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, device_map="cpu", **model_kwargs)
+            model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=trust_remote_code, device_map="cpu", **model_kwargs)
         if is_awq:
             model.is_awq = is_awq
     else:
-        model = AutoModelForCausalLM.from_pretrained(
-            model_id, trust_remote_code=True, device_map=device.lower(), **model_kwargs
-        )
+        try:
+            model = AutoModelForCausalLM.from_pretrained(
+                model_id, trust_remote_code=False, device_map=device.lower(), **model_kwargs
+            )
+        except Exception:
+            model = AutoModelForCausalLM.from_pretrained(
+                model_id, trust_remote_code=True, device_map=device.lower(), **model_kwargs
+            )
+
     model.eval()
     return model

From fd908049fa52f6d525e1c2e659d5c58c3b386017 Mon Sep 17 00:00:00 2001
From: eaidova
Date: Wed, 28 May 2025 08:56:15 +0400
Subject: [PATCH 2/5] align trust_remote_code handling everywhere

---
 .../examples/openvino_batched_eval.py         |  7 +--
 .../whowhatbench/model_loaders.py             | 48 ++++++++++---------
 .../whowhatbench/whowhat_metrics.py           | 10 +++-
 tools/who_what_benchmark/whowhatbench/wwb.py  | 43 +++++++++++++----
 4 files changed, 69 insertions(+), 39 deletions(-)

diff --git a/tools/who_what_benchmark/examples/openvino_batched_eval.py b/tools/who_what_benchmark/examples/openvino_batched_eval.py
index 5781ddf229..6c08907b49 100644
--- a/tools/who_what_benchmark/examples/openvino_batched_eval.py
+++ b/tools/who_what_benchmark/examples/openvino_batched_eval.py
@@ -16,17 +16,14 @@
 from openvino_tokenizers import convert_tokenizer
 from openvino import serialize
 
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, AutoConfig
 
 model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
 MAX_NEW_TOKENS = 128
 SEQS_PER_REQUEST = 5
 MAX_SEQUENCES = 100
 
-
-model = OVModelForCausalLM.from_pretrained(
-    model_id, export=True, trust_remote_code=True
-)
+model = OVModelForCausalLM.from_pretrained(model_id, export=True)
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model_path = PosixPath(tempfile.gettempdir()) / model_id
 model.save_pretrained(model_path)
diff --git a/tools/who_what_benchmark/whowhatbench/model_loaders.py b/tools/who_what_benchmark/whowhatbench/model_loaders.py
index 5ea5fe9d15..5bd913b4ef 100644
--- a/tools/who_what_benchmark/whowhatbench/model_loaders.py
+++ b/tools/who_what_benchmark/whowhatbench/model_loaders.py
@@ -22,13 +22,16 @@ def __init__(self, model, model_dir, model_type):
         if model_type == "text" or model_type == "visual-text":
             try:
-                self.config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
-            except Exception:
                 self.config = AutoConfig.from_pretrained(model_dir)
+            except Exception:
+                self.config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
         elif model_type == "text-to-image":
             from diffusers import DiffusionPipeline
-            self.config = DiffusionPipeline.load_config(
-                model_dir, trust_remote_code=True)
+            try:
+                self.config = DiffusionPipeline.load_config(model_dir)
+            except Exception:
+                self.config = DiffusionPipeline.load_config(model_dir, trust_remote_code=True)
+
 
     def __getattr__(self, attr):
         if attr in self.__dict__:
@@ -137,7 +140,7 @@ def load_text_model(
         from optimum.intel.openvino import OVModelForCausalLM
         try:
             model = OVModelForCausalLM.from_pretrained(
-                model_id, trust_remote_code=True, device=device, ov_config=ov_config
+                model_id, device=device, ov_config=ov_config
             )
         except Exception:
             try:
@@ -188,8 +191,10 @@ def load_text2image_model(
     elif use_hf:
         from diffusers import DiffusionPipeline
         logger.info("Using HF Transformers API")
-        model = DiffusionPipeline.from_pretrained(
-            model_id, trust_remote_code=True)
+        try:
+            model = DiffusionPipeline.from_pretrained(model_id)
+        except Exception:
+            model = DiffusionPipeline.from_pretrained(model_id, trust_remote_code=True)
     else:
         logger.info("Using Optimum API")
         from optimum.intel import OVPipelineForText2Image
@@ -197,14 +202,11 @@ def load_text2image_model(
 
         try:
             model = TEXT2IMAGEPipeline.from_pretrained(
-                model_id, trust_remote_code=True, device=device, ov_config=ov_config, safety_checker=None,
+                model_id, device=device, ov_config=ov_config, safety_checker=None,
             )
         except ValueError:
-            config = AutoConfig.from_pretrained(
-                model_id, trust_remote_code=True)
             model = TEXT2IMAGEPipeline.from_pretrained(
                 model_id,
-                config=config,
                 trust_remote_code=True,
                 use_cache=True,
                 device=device,
@@ -234,19 +236,25 @@ def load_visual_text_model(
 ):
     if use_hf:
         logger.info("Using HF Transformers API")
-        config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+        trust_remote_code = False
+        try:
+            config = AutoConfig.from_pretrained(model_id, trust_remote_code=False)
+        except Exception:
+            config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+            trust_remote_code = True
+
         try:
             model = AutoModelForVision2Seq.from_pretrained(
-                model_id, trust_remote_code=True, device_map=device.lower()
+                model_id, trust_remote_code=trust_remote_code, device_map=device.lower()
             )
         except ValueError:
             try:
                 model = AutoModel.from_pretrained(
-                    model_id, trust_remote_code=True, device_map=device.lower()
+                    model_id, trust_remote_code=trust_remote_code, device_map=device.lower()
                 )
             except ValueError:
                 model = AutoModelForCausalLM.from_pretrained(
-                    model_id, trust_remote_code=True, device_map=device.lower(), _attn_implementation="eager", use_flash_attention_2=False
+                    model_id, trust_remote_code=trust_remote_code, device_map=device.lower(), _attn_implementation="eager", use_flash_attention_2=False
                 )
         model.eval()
         try:
@@ -266,7 +274,7 @@ def load_visual_text_model(
         from optimum.intel.openvino import OVModelForVisualCausalLM
         try:
             model = OVModelForVisualCausalLM.from_pretrained(
-                model_id, trust_remote_code=True, device=device, ov_config=ov_config
+                model_id, device=device, ov_config=ov_config
             )
         except ValueError:
             config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
@@ -312,13 +320,11 @@ def load_imagetext2image_model(
         from optimum.intel.openvino import OVPipelineForImage2Image
         try:
             model = OVPipelineForImage2Image.from_pretrained(
-                model_id, trust_remote_code=True, device=device, ov_config=ov_config, safety_checker=None,
+                model_id, device=device, ov_config=ov_config, safety_checker=None,
             )
         except ValueError:
-            config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
             model = OVPipelineForImage2Image.from_pretrained(
                 model_id,
-                config=config,
                 trust_remote_code=True,
                 use_cache=True,
                 device=device,
@@ -359,14 +365,12 @@ def load_inpainting_model(
         from optimum.intel.openvino import OVPipelineForInpainting
         try:
             model = OVPipelineForInpainting.from_pretrained(
-                model_id, trust_remote_code=True, device=device, ov_config=ov_config, safety_checker=None,
+                model_id, device=device, ov_config=ov_config, safety_checker=None,
             )
         except ValueError as e:
             logger.error("Failed to load inpaiting pipeline. Details:\n", e)
-            config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
             model = OVPipelineForInpainting.from_pretrained(
                 model_id,
-                config=config,
                 trust_remote_code=True,
                 use_cache=True,
                 device=device,
diff --git a/tools/who_what_benchmark/whowhatbench/whowhat_metrics.py b/tools/who_what_benchmark/whowhatbench/whowhat_metrics.py
index 2d1da24168..a1a24d567e 100644
--- a/tools/who_what_benchmark/whowhatbench/whowhat_metrics.py
+++ b/tools/who_what_benchmark/whowhatbench/whowhat_metrics.py
@@ -110,12 +110,18 @@ def evaluate_divergency(tokenizer, data_gold, data_prediction):
 
 class TextSimilarity:
     def __init__(self, model_id) -> None:
-        tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+        trust_remote_code = False
+        try:
+            tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=False)
+        except Exception:
+            trust_remote_code = True
+            tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+
         if hasattr(tokenizer, "pad_token") and tokenizer.pad_token:
             pad_token = tokenizer.pad_token
         else:
             pad_token = tokenizer.eos_token
-        self.model = SentenceTransformer(model_id, tokenizer_kwargs={"pad_token": pad_token}, trust_remote_code=True)
+        self.model = SentenceTransformer(model_id, tokenizer_kwargs={"pad_token": pad_token}, trust_remote_code=trust_remote_code)
 
     def evaluate(self, gt, prediction):
         return evaluate_similarity(self.model, gt, prediction)
diff --git a/tools/who_what_benchmark/whowhatbench/wwb.py b/tools/who_what_benchmark/whowhatbench/wwb.py
index 49abb865ce..4628f9a819 100644
--- a/tools/who_what_benchmark/whowhatbench/wwb.py
+++ b/tools/who_what_benchmark/whowhatbench/wwb.py
@@ -212,17 +212,33 @@ def load_tokenizer(args):
             from llama_cpp.llama_tokenizer import LlamaHFTokenizer
             tokenizer = LlamaHFTokenizer.from_pretrained(args.tokenizer)
         else:
+            try:
+                tokenizer = AutoTokenizer.from_pretrained(
+                    args.tokenizer, trust_remote_code=False
+                )
+            except Exception:
+                tokenizer = AutoTokenizer.from_pretrained(
+                    args.tokenizer, trust_remote_code=True
+                )
+    elif args.base_model is not None:
+        try:
             tokenizer = AutoTokenizer.from_pretrained(
-                args.tokenizer, trust_remote_code=True
+                args.base_model, trust_remote_code=False
+            )
+        except Exception:
+            tokenizer = AutoTokenizer.from_pretrained(
+                args.base_model, trust_remote_code=True
             )
-    elif args.base_model is not None:
-        tokenizer = AutoTokenizer.from_pretrained(
-            args.base_model, trust_remote_code=True
-        )
     elif args.target_model is not None:
-        tokenizer = AutoTokenizer.from_pretrained(
-            args.target_model, trust_remote_code=True
-        )
+        try:
+            tokenizer = AutoTokenizer.from_pretrained(
+                args.target_model, trust_remote_code=False
+            )
+        except Exception:
+            tokenizer = AutoTokenizer.from_pretrained(
+                args.target_model, trust_remote_code=True
+            )
+
     return tokenizer
@@ -232,13 +248,20 @@ def load_processor(args):
     if model_id is None:
         return None, None
 
-    config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+    trust_remote_code = False
+    try:
+        config = AutoConfig.from_pretrained(model_id, trust_remote_code=False)
+    except Exception:
+        config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+        trust_remote_code = True
     if "llava-qwen" in config.model_type:
         preprocessor_id = config.mm_vision_tower
     else:
         preprocessor_id = model_id
 
-    return AutoProcessor.from_pretrained(preprocessor_id, trust_remote_code=True), config
+        preprocessor = AutoProcessor.from_pretrained(preprocessor_id, trust_remote_code=trust_remote_code)
+
+    return preprocessor, config
 
 
 def diff_strings(a: str, b: str, *, use_loguru_colors: bool = False) -> str:

From 050f30e11a938172cde44c2c131b38e2b4562f14 Mon Sep 17 00:00:00 2001
From: Ekaterina Aidova
Date: Wed, 28 May 2025 09:38:30 +0400
Subject: [PATCH 3/5] Update tools/who_what_benchmark/examples/openvino_batched_eval.py

---
 tools/who_what_benchmark/examples/openvino_batched_eval.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/who_what_benchmark/examples/openvino_batched_eval.py b/tools/who_what_benchmark/examples/openvino_batched_eval.py
index 6c08907b49..fdbaa9acdb 100644
--- a/tools/who_what_benchmark/examples/openvino_batched_eval.py
+++ b/tools/who_what_benchmark/examples/openvino_batched_eval.py
@@ -16,7 +16,7 @@
 from openvino_tokenizers import convert_tokenizer
 from openvino import serialize
 
-from transformers import AutoTokenizer, AutoConfig
+from transformers import AutoTokenizer
 
 model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
 MAX_NEW_TOKENS = 128

From cfc64d847b62becfa913bc29cf8d6b2596413b64 Mon Sep 17 00:00:00 2001
From: eaidova
Date: Wed, 28 May 2025 08:56:15 +0400
Subject: [PATCH 4/5] align trust_remote_code handling everywhere

---
 .../examples/openvino_batched_eval.py         |  5 +-
 .../whowhatbench/model_loaders.py             | 47 ++++++++++---------
 .../whowhatbench/whowhat_metrics.py           | 10 +++-
 tools/who_what_benchmark/whowhatbench/wwb.py  | 41 ++++++++++++----
 4 files changed, 65 insertions(+), 38 deletions(-)

diff --git a/tools/who_what_benchmark/examples/openvino_batched_eval.py b/tools/who_what_benchmark/examples/openvino_batched_eval.py
index 5781ddf229..fdbaa9acdb 100644
--- a/tools/who_what_benchmark/examples/openvino_batched_eval.py
+++ b/tools/who_what_benchmark/examples/openvino_batched_eval.py
@@ -23,10 +23,7 @@
 SEQS_PER_REQUEST = 5
 MAX_SEQUENCES = 100
 
-
-model = OVModelForCausalLM.from_pretrained(
-    model_id, export=True, trust_remote_code=True
-)
+model = OVModelForCausalLM.from_pretrained(model_id, export=True)
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model_path = PosixPath(tempfile.gettempdir()) / model_id
 model.save_pretrained(model_path)
diff --git a/tools/who_what_benchmark/whowhatbench/model_loaders.py b/tools/who_what_benchmark/whowhatbench/model_loaders.py
index 5ea5fe9d15..37d30bef56 100644
--- a/tools/who_what_benchmark/whowhatbench/model_loaders.py
+++ b/tools/who_what_benchmark/whowhatbench/model_loaders.py
@@ -22,13 +22,15 @@ def __init__(self, model, model_dir, model_type):
         if model_type == "text" or model_type == "visual-text":
             try:
-                self.config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
-            except Exception:
                 self.config = AutoConfig.from_pretrained(model_dir)
+            except Exception:
+                self.config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
         elif model_type == "text-to-image":
             from diffusers import DiffusionPipeline
-            self.config = DiffusionPipeline.load_config(
-                model_dir, trust_remote_code=True)
+            try:
+                self.config = DiffusionPipeline.load_config(model_dir)
+            except Exception:
+                self.config = DiffusionPipeline.load_config(model_dir, trust_remote_code=True)
 
     def __getattr__(self, attr):
         if attr in self.__dict__:
@@ -137,7 +139,7 @@ def load_text_model(
         from optimum.intel.openvino import OVModelForCausalLM
         try:
             model = OVModelForCausalLM.from_pretrained(
-                model_id, trust_remote_code=True, device=device, ov_config=ov_config
+                model_id, device=device, ov_config=ov_config
             )
         except Exception:
             try:
@@ -188,8 +190,10 @@ def load_text2image_model(
     elif use_hf:
         from diffusers import DiffusionPipeline
         logger.info("Using HF Transformers API")
-        model = DiffusionPipeline.from_pretrained(
-            model_id, trust_remote_code=True)
+        try:
+            model = DiffusionPipeline.from_pretrained(model_id)
+        except Exception:
+            model = DiffusionPipeline.from_pretrained(model_id, trust_remote_code=True)
     else:
         logger.info("Using Optimum API")
         from optimum.intel import OVPipelineForText2Image
@@ -197,14 +201,11 @@ def load_text2image_model(
 
         try:
             model = TEXT2IMAGEPipeline.from_pretrained(
-                model_id, trust_remote_code=True, device=device, ov_config=ov_config, safety_checker=None,
+                model_id, device=device, ov_config=ov_config, safety_checker=None,
             )
         except ValueError:
-            config = AutoConfig.from_pretrained(
-                model_id, trust_remote_code=True)
             model = TEXT2IMAGEPipeline.from_pretrained(
                 model_id,
-                config=config,
                 trust_remote_code=True,
                 use_cache=True,
                 device=device,
@@ -234,19 +235,25 @@ def load_visual_text_model(
 ):
     if use_hf:
         logger.info("Using HF Transformers API")
-        config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+        trust_remote_code = False
+        try:
+            config = AutoConfig.from_pretrained(model_id, trust_remote_code=False)
+        except Exception:
+            config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+            trust_remote_code = True
+
         try:
             model = AutoModelForVision2Seq.from_pretrained(
-                model_id, trust_remote_code=True, device_map=device.lower()
+                model_id, trust_remote_code=trust_remote_code, device_map=device.lower()
             )
         except ValueError:
             try:
                 model = AutoModel.from_pretrained(
-                    model_id, trust_remote_code=True, device_map=device.lower()
+                    model_id, trust_remote_code=trust_remote_code, device_map=device.lower()
                 )
             except ValueError:
                 model = AutoModelForCausalLM.from_pretrained(
-                    model_id, trust_remote_code=True, device_map=device.lower(), _attn_implementation="eager", use_flash_attention_2=False
+                    model_id, trust_remote_code=trust_remote_code, device_map=device.lower(), _attn_implementation="eager", use_flash_attention_2=False
                 )
         model.eval()
         try:
@@ -266,7 +273,7 @@ def load_visual_text_model(
         from optimum.intel.openvino import OVModelForVisualCausalLM
         try:
             model = OVModelForVisualCausalLM.from_pretrained(
-                model_id, trust_remote_code=True, device=device, ov_config=ov_config
+                model_id, device=device, ov_config=ov_config
             )
         except ValueError:
             config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
@@ -312,13 +319,11 @@ def load_imagetext2image_model(
         from optimum.intel.openvino import OVPipelineForImage2Image
         try:
             model = OVPipelineForImage2Image.from_pretrained(
-                model_id, trust_remote_code=True, device=device, ov_config=ov_config, safety_checker=None,
+                model_id, device=device, ov_config=ov_config, safety_checker=None,
             )
         except ValueError:
-            config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
             model = OVPipelineForImage2Image.from_pretrained(
                 model_id,
-                config=config,
                 trust_remote_code=True,
                 use_cache=True,
                 device=device,
@@ -359,14 +364,12 @@ def load_inpainting_model(
         from optimum.intel.openvino import OVPipelineForInpainting
         try:
             model = OVPipelineForInpainting.from_pretrained(
-                model_id, trust_remote_code=True, device=device, ov_config=ov_config, safety_checker=None,
+                model_id, device=device, ov_config=ov_config, safety_checker=None,
            )
         except ValueError as e:
             logger.error("Failed to load inpaiting pipeline. Details:\n", e)
-            config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
             model = OVPipelineForInpainting.from_pretrained(
                 model_id,
-                config=config,
                 trust_remote_code=True,
                 use_cache=True,
                 device=device,
diff --git a/tools/who_what_benchmark/whowhatbench/whowhat_metrics.py b/tools/who_what_benchmark/whowhatbench/whowhat_metrics.py
index 2d1da24168..a1a24d567e 100644
--- a/tools/who_what_benchmark/whowhatbench/whowhat_metrics.py
+++ b/tools/who_what_benchmark/whowhatbench/whowhat_metrics.py
@@ -110,12 +110,18 @@ def evaluate_divergency(tokenizer, data_gold, data_prediction):
 
 class TextSimilarity:
     def __init__(self, model_id) -> None:
-        tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+        trust_remote_code = False
+        try:
+            tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=False)
+        except Exception:
+            trust_remote_code = True
+            tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+
         if hasattr(tokenizer, "pad_token") and tokenizer.pad_token:
             pad_token = tokenizer.pad_token
         else:
             pad_token = tokenizer.eos_token
-        self.model = SentenceTransformer(model_id, tokenizer_kwargs={"pad_token": pad_token}, trust_remote_code=True)
+        self.model = SentenceTransformer(model_id, tokenizer_kwargs={"pad_token": pad_token}, trust_remote_code=trust_remote_code)
 
     def evaluate(self, gt, prediction):
         return evaluate_similarity(self.model, gt, prediction)
diff --git a/tools/who_what_benchmark/whowhatbench/wwb.py b/tools/who_what_benchmark/whowhatbench/wwb.py
index 49abb865ce..8cc748115e 100644
--- a/tools/who_what_benchmark/whowhatbench/wwb.py
+++ b/tools/who_what_benchmark/whowhatbench/wwb.py
@@ -212,17 +212,32 @@ def load_tokenizer(args):
             from llama_cpp.llama_tokenizer import LlamaHFTokenizer
             tokenizer = LlamaHFTokenizer.from_pretrained(args.tokenizer)
         else:
+            try:
+                tokenizer = AutoTokenizer.from_pretrained(
+                    args.tokenizer, trust_remote_code=False
+                )
+            except Exception:
+                tokenizer = AutoTokenizer.from_pretrained(
+                    args.tokenizer, trust_remote_code=True
+                )
+    elif args.base_model is not None:
+        try:
             tokenizer = AutoTokenizer.from_pretrained(
-                args.tokenizer, trust_remote_code=True
+                args.base_model, trust_remote_code=False
+            )
+        except Exception:
+            tokenizer = AutoTokenizer.from_pretrained(
+                args.base_model, trust_remote_code=True
             )
-    elif args.base_model is not None:
-        tokenizer = AutoTokenizer.from_pretrained(
-            args.base_model, trust_remote_code=True
-        )
     elif args.target_model is not None:
-        tokenizer = AutoTokenizer.from_pretrained(
-            args.target_model, trust_remote_code=True
-        )
+        try:
+            tokenizer = AutoTokenizer.from_pretrained(
+                args.target_model, trust_remote_code=False
+            )
+        except Exception:
+            tokenizer = AutoTokenizer.from_pretrained(
+                args.target_model, trust_remote_code=True
+            )
     return tokenizer
@@ -232,13 +247,19 @@ def load_processor(args):
     if model_id is None:
         return None, None
 
-    config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+    trust_remote_code = False
+    try:
+        config = AutoConfig.from_pretrained(model_id, trust_remote_code=False)
+    except Exception:
+        config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+        trust_remote_code = True
     if "llava-qwen" in config.model_type:
         preprocessor_id = config.mm_vision_tower
     else:
         preprocessor_id = model_id
 
-    return AutoProcessor.from_pretrained(preprocessor_id, trust_remote_code=True), config
+        preprocessor = AutoProcessor.from_pretrained(preprocessor_id, trust_remote_code=trust_remote_code)
+    return preprocessor, config
 
 
 def diff_strings(a: str, b: str, *, use_loguru_colors: bool = False) -> str:

From 177ac0483ef8c8bc877c1078509211cd40955ea0 Mon Sep 17 00:00:00 2001
From: Ekaterina Aidova
Date: Wed, 28 May 2025 13:45:45 +0400
Subject: [PATCH 5/5] Update tools/who_what_benchmark/whowhatbench/wwb.py

Co-authored-by: andreyanufr
---
 tools/who_what_benchmark/whowhatbench/wwb.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/who_what_benchmark/whowhatbench/wwb.py b/tools/who_what_benchmark/whowhatbench/wwb.py
index 9061bb8f5b..8cc748115e 100644
--- a/tools/who_what_benchmark/whowhatbench/wwb.py
+++ b/tools/who_what_benchmark/whowhatbench/wwb.py
@@ -258,7 +258,7 @@ def load_processor(args):
     else:
         preprocessor_id = model_id
 
-        preprocessor = AutoProcessor.from_pretrained(preprocessor_id, trust_remote_code=trust_remote_code)
+    preprocessor = AutoProcessor.from_pretrained(preprocessor_id, trust_remote_code=trust_remote_code)
 
     return preprocessor, config
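
Every loader touched by this series repeats the same pattern: attempt the load
with trust_remote_code=False first, and retry with trust_remote_code=True only
when the plain load fails. A minimal sketch of a shared helper that captures
the pattern is shown below; the name from_pretrained_with_fallback is
hypothetical and not part of these patches, and it assumes any
transformers/optimum class exposing a from_pretrained classmethod:

    from transformers import AutoConfig, AutoModelForCausalLM

    def from_pretrained_with_fallback(loader_cls, model_id, **kwargs):
        # Hypothetical helper, not part of this series: first try loading
        # without executing code from the model repository.
        try:
            return loader_cls.from_pretrained(model_id, trust_remote_code=False, **kwargs), False
        except Exception:
            # Fall back to remote code only when the plain load fails, and
            # report which mode succeeded so callers can reuse the flag.
            return loader_cls.from_pretrained(model_id, trust_remote_code=True, **kwargs), True

    # Usage mirroring load_text_hf_pipeline: resolve the flag once for the
    # config, then reuse it when loading the model itself.
    model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
    config, trust_remote_code = from_pretrained_with_fallback(AutoConfig, model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id, trust_remote_code=trust_remote_code, device_map="cpu"
    )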