[Update] Update LiveMathBench Hard Configs (#1826)

* support G-Pass@k and LiveMathBench

* fix bugs

* fix comments of GPassKEvaluator

* update saved details of GPassKEvaluator

* update saved details of GPassKEvaluator

* fix eval api configs & update openai_api for ease of debugging

* update huggingface path

* fix method name of G-Pass@k

* fix default value of eval_model_name

* refactor G-Pass@k evaluator

* log generation params for each backend

* fix evaluation resume

* add NotImplementedError

* update livemathbench-hard configs

* remove max_out_len from livemathbench_hard_greedy_gen_9befbf.py

* remove max_out_len from livemathbench_hard_gen_9befbf.py

* rename livemathbench_hard_gen_9befbf.py to livemathbench_hard_gen_353ae7.py

* rename livemathbench_hard_greedy_gen_9befbf.py to livemathbench_hard_greedy_gen_353ae7.py

* update livemathbench_gen_9befbf.py

* remove whitespace

* upload livemathbench hard configs
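
The changes above centre on the G-Pass@k metric implemented by GPassKEvaluator. As a point of reference, here is a minimal sketch of how G-Pass@k can be computed for a single threshold τ, assuming the usual hypergeometric formulation (n generations per question, c of them judged correct, k samples drawn without replacement); the function name, the τ = 0.0 handling, and the aggregation are illustrative assumptions, not the evaluator's actual API.

```python
from math import ceil, comb


def g_pass_at_k(n: int, c: int, k: int, tau: float) -> float:
    """Probability that at least ceil(tau * k) of k samples drawn without
    replacement from n generations (c of them correct) are correct."""
    j_min = max(ceil(tau * k), 1)  # tau = 0.0 is treated as plain Pass@k here
    if c < j_min:
        return 0.0
    return sum(comb(c, j) * comb(n - c, k - j)
               for j in range(j_min, min(c, k) + 1)) / comb(n, k)


# toy usage: per-question counts of correct answers out of n = 16 generations
correct_counts = [12, 5, 16, 0]
n, k, tau = 16, 8, 0.75
score = sum(g_pass_at_k(n, c, k, tau) for c in correct_counts) / len(correct_counts)
print(f'G-Pass@{k}_{tau}: {score:.4f}')
```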
Junnan Liu 2025-02-25 17:24:36 +08:00 committed by GitHub
parent 465e93e10e
commit 22a33d8759
7 changed files with 103 additions and 54 deletions

View File

@@ -48,4 +48,4 @@ livemathbench_dataset = dict(
)
)
)
livemathbench_datasets = [livemathbench_dataset]
livemathbench_datasets = [livemathbench_dataset]

View File

@@ -1,49 +0,0 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.livemathbench import LiveMathBenchDataset, LiveMathBenchEvaluator
livemathbench_reader_cfg = dict(
input_columns=['prompt'],
output_column='answer'
)
livemathbench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='{prompt}'),
]
)
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(
type=GenInferencer,
max_out_len=8192,
temperature=1.0
)
)
livemathbench_eval_cfg = dict(
evaluator=dict(
type=LiveMathBenchEvaluator,
model_name='Qwen/Qwen2.5-72B-Instruct',
url=['http://172.30.40.154:23333/v1/'] #'https://api.openai.com/v1/'
)
)
livemathbench_datasets = [
dict(
type=LiveMathBenchDataset,
abbr='LiveMathBench-k1-n1',
path='opencompass/LiveMathBench202412',
k=1, # K@Pass
n=1, # Run times
reader_cfg=livemathbench_reader_cfg,
infer_cfg=livemathbench_infer_cfg,
eval_cfg=livemathbench_eval_cfg
)
]

View File

@@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .livemathbench_greedy_gen_efb20d import livemathbench_datasets # noqa: F401, F403
from .livemathbench_greedy_gen_9befbf import livemathbench_datasets # noqa: F401, F403

View File

@@ -0,0 +1,50 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.livemathbench import LiveMathBenchDataset, LiveMathBenchEvaluator

livemathbench_dataset = dict(
type=LiveMathBenchDataset,
path='',
k=16,
replication=3,
dataset_splits=['hard'],
dataset_languages=['cn', 'en'],
cot=True,
version='202412',
abbr='LiveMathBench-v202412-Hard',
reader_cfg=dict(
input_columns=['prompt'],
output_column='answer'
),
infer_cfg=dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='{prompt}'),
]
)
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(
type=GenInferencer
),
),
eval_cfg=dict(
evaluator=dict(
type=LiveMathBenchEvaluator,
model_name='',
url=[],
use_extract_model=False,
extract_url=[],
extract_model_name='',
k=[4, 8, 16],
replication=3,
thresholds=[0.0, 0.25, 0.5, 0.75, 1.0]
)
)
)
livemathbench_datasets = [livemathbench_dataset]
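
Both new hard configs ship with empty placeholders (path='', model_name='', url=[]) that are expected to be filled per deployment. Below is a hedged sketch of how a run config might patch them before evaluation; the import path is assumed, the dataset path and judge model are taken from the older config, and the endpoint URL is purely illustrative.

```python
from copy import deepcopy

from mmengine.config import read_base

with read_base():
    # assumed location of the config shown above; adjust to your checkout
    from opencompass.configs.datasets.livemathbench.livemathbench_hard_gen_353ae7 import (
        livemathbench_datasets,
    )

datasets = deepcopy(livemathbench_datasets)
for d in datasets:
    d['path'] = 'opencompass/LiveMathBench202412'                           # dataset path
    d['eval_cfg']['evaluator']['model_name'] = 'Qwen/Qwen2.5-72B-Instruct'  # judge model (illustrative)
    d['eval_cfg']['evaluator']['url'] = ['http://127.0.0.1:23333/v1']       # judge endpoint (illustrative)
```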

View File

@@ -0,0 +1,50 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.livemathbench import LiveMathBenchDataset, LiveMathBenchEvaluator

livemathbench_dataset = dict(
type=LiveMathBenchDataset,
path='',
k=1,
replication=1,
dataset_splits=['hard'],
dataset_languages=['cn', 'en'],
cot=True,
version='202412',
abbr='LiveMathBench-v202412-Hard',
reader_cfg=dict(
input_columns=['prompt'],
output_column='answer'
),
infer_cfg=dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='{prompt}'),
]
)
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(
type=GenInferencer
),
),
eval_cfg=dict(
evaluator=dict(
type=LiveMathBenchEvaluator,
model_name='',
url=[],
use_extract_model=False,
extract_url=[],
extract_model_name='',
k=[1],
replication=1,
thresholds=[0.0]
)
)
)
livemathbench_datasets = [livemathbench_dataset]
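
Compared with the sampling config above, this greedy variant deliberately collapses the metric: with k=[1], replication=1, and a single 0.0 threshold, G-Pass@k reduces to ordinary single-sample accuracy, so it serves as a cheap sanity check. A rough illustration using the g_pass_at_k sketch from earlier, assuming the sampling setup yields n = 16 × 3 = 48 generations per question (an inference from the config, not a documented guarantee):

```python
def report(correct_counts, n, ks, thresholds):
    """Illustrative aggregation over k values and thresholds (not the real output format)."""
    return {
        f'G-Pass@{k}_{tau}':
            sum(g_pass_at_k(n, c, k, tau) for c in correct_counts) / len(correct_counts)
        for k in ks for tau in thresholds
    }


# sampling-style setup: several k values and thresholds over many generations
print(report([40, 12, 48, 0], n=48, ks=[4, 8, 16], thresholds=[0.0, 0.25, 0.5, 0.75, 1.0]))
# greedy-style setup: k=[1], one generation, threshold 0.0 -> plain accuracy
print(report([1, 0, 1, 0], n=1, ks=[1], thresholds=[0.0]))
```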

View File

@@ -48,6 +48,7 @@ class LiveMathBenchDataset(BaseDataset):
if path != '':
path = get_data_path(path)
path = os.path.join(path, version)
for split, language in product(dataset_splits, dataset_languages):
dataset_info[f'{split}_{language}'] = {
'single-choice': 0,
@@ -64,7 +65,6 @@ class LiveMathBenchDataset(BaseDataset):
if path != '':
file_path = os.path.join(path, f'{split}_{language}.jsonl')
if not os.path.exists(file_path):
raise FileNotFoundError(
f'File {file_path} does not exist, please check the '

View File

@@ -164,8 +164,6 @@ class TurboMindModelwithChatTemplate(BaseModel):
self.logger.info('Generation Config of LMdeploy: ')
self.logger.info(gen_config)
results = []
outputs = self.pipe(messages, gen_config=gen_config, do_preprocess=False)
for output in outputs:
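
This last hunk corresponds to the "log generation params for each backend" item: the LMDeploy generation config is logged right before inference. A rough, self-contained sketch of the same pattern using lmdeploy's pipeline API directly; the model path, top_p, and logger name are placeholders, while max_new_tokens and temperature echo the old config above.

```python
import logging

from lmdeploy import GenerationConfig, pipeline

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('opencompass')         # illustrative logger name

pipe = pipeline('internlm/internlm2_5-7b-chat')   # placeholder model

gen_config = GenerationConfig(max_new_tokens=8192, temperature=1.0, top_p=0.9)
logger.info('Generation Config of LMdeploy: ')
logger.info(gen_config)

# do_preprocess=False feeds prompts verbatim (OpenCompass applies the chat template itself);
# the raw question below is used purely for illustration
prompts = ['What is 2 + 2?']
outputs = pipe(prompts, gen_config=gen_config, do_preprocess=False)
for output in outputs:
    print(output.text)
```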