From 0b7de67c4a8e54288bc77dc5dd1eee2905bfc146 Mon Sep 17 00:00:00 2001
From: bittersweet1999 <148421775+bittersweet1999@users.noreply.github.com>
Date: Sun, 28 Apr 2024 21:54:30 +0800
Subject: [PATCH] fix prompt template (#1104)

---
 configs/eval_internlm_flames_chat.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/configs/eval_internlm_flames_chat.py b/configs/eval_internlm_flames_chat.py
index 0d5e88bc..15a051c3 100644
--- a/configs/eval_internlm_flames_chat.py
+++ b/configs/eval_internlm_flames_chat.py
@@ -68,6 +68,13 @@ infer = dict(
 
 
 ## ------------- JudgeLLM Configuration---------------------------------
+internlm1_chat_template = dict(
+    round=[
+        dict(role='HUMAN', begin='<|User|>:', end='\n'),
+        dict(role='BOT', begin='<|Bot|>:', end='\n', generate=True),
+    ],
+)
+
 judge_models = [
     dict(
         type=HuggingFaceCausalLM,
@@ -84,17 +91,17 @@ judge_models = [
             use_fast=False,
             trust_remote_code=True,
         ),
-        max_out_len=2048,
-        max_seq_len=2048,
+        generation_kwargs = {"do_sample": True},
+        max_out_len=512,
+        max_seq_len=4096,
         batch_size=8,
-        meta_template=_meta_template,
+        meta_template=internlm1_chat_template,
         run_cfg=dict(num_gpus=1, num_procs=1),
-        end_str='<|im_end|>',
-        generation_kwargs = {"eos_token_id": [2, 92542], "do_sample": True},
-        batch_padding=True,
+        end_str='',
     )
 ]
 
+
## ------------- Evaluation Configuration----------------
 eval = dict(
     partitioner=dict(
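
For context on what the patch changes: the `round` entries of a `meta_template` tell OpenCompass how to wrap each dialogue turn in the model's chat markers, and `generate=True` marks the turn the model is asked to complete. The sketch below illustrates the prompt that `internlm1_chat_template` produces for the InternLM v1 judge; the `render` helper is hypothetical (OpenCompass's own template parser performs this role internally) and is shown only to make the template's effect concrete.

    # Minimal sketch, not OpenCompass code: renders a dialogue with the
    # InternLM v1 markers from the patch instead of the removed
    # InternLM2-style '<|im_end|>' end string.
    internlm1_chat_template = dict(
        round=[
            dict(role='HUMAN', begin='<|User|>:', end='\n'),
            dict(role='BOT', begin='<|Bot|>:', end='\n', generate=True),
        ],
    )

    def render(template, dialogue):
        """Wrap each (role, text) turn in its begin/end markers; stop at
        the first role flagged generate=True, leaving the prompt open for
        the model to complete that turn."""
        specs = {item['role']: item for item in template['round']}
        parts = []
        for role, text in dialogue:
            spec = specs[role]
            if spec.get('generate'):
                parts.append(spec['begin'])  # open the BOT turn, no content
                break
            parts.append('{}{}{}'.format(spec['begin'], text, spec['end']))
        return ''.join(parts)

    print(repr(render(internlm1_chat_template,
                      [('HUMAN', 'Rate the safety of this reply.'), ('BOT', '')])))
    # -> '<|User|>:Rate the safety of this reply.\n<|Bot|>:'

This also explains the switch from `end_str='<|im_end|>'` to `end_str=''`: the v1 template never emits `<|im_end|>`, so the old stop string would never match.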