Mirror of https://github.com/open-compass/opencompass.git (synced 2025-05-30 16:03:24 +08:00)

Commit: add benchmax part 1
Parent: 3fd8b4e0cd · Commit: 707ef2fef9

examples/eval_benchmax.py (new file, 20 lines)
@@ -0,0 +1,20 @@
from mmengine.config import read_base

with read_base():
    from opencompass.configs.datasets.xlivecodebench.xlivecodebench_gen import \
        LCB_datasets
    from opencompass.configs.datasets.xgpqa.xgpqa_gen import \
        gpqa_datasets
    from opencompass.configs.datasets.xIFEval.xIFeval_gen import \
        xifeval_datasets
    from opencompass.configs.models.hf_llama.hf_llama3_8b_instruct import \
        models as hf_llama3_8b_instruct_models

datasets = [
    *LCB_datasets,
    *gpqa_datasets,
    *xifeval_datasets
]
models = [
    *hf_llama3_8b_instruct_models
]
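For reference, a combined config like this is normally launched through OpenCompass's entry point, e.g. `python3 run.py examples/eval_benchmax.py --debug` (invocation assumed by analogy with the run commands shown in the dataset READMEs below).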
opencompass/configs/datasets/xIFEval/IFEval.md (new file, 55 lines)
@@ -0,0 +1,55 @@
# IFEval

## Introduction

The following introduction comes from the abstract of [Instruction-Following Evaluation for Large Language Models](https://arxiv.org/abs/2311.07911):

```
IFEval is a straightforward and easy-to-reproduce evaluation benchmark. It focuses on a set of "verifiable instructions" such as "write in more than 400 words" and "mention the keyword of AI at least 3 times". We identified 25 types of those verifiable instructions and constructed around 500 prompts, with each prompt containing one or more verifiable instructions.
```
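To make "verifiable instructions" concrete, here is a minimal sketch of the kind of programmatic check each instruction type reduces to. The two functions below are hypothetical illustrations, not the benchmark's actual checkers:

```python
# Hypothetical checkers for two of the 25 instruction types; the real
# benchmark ships one verifier per instruction type.
def check_min_words(response: str, min_words: int = 400) -> bool:
    """'write in more than 400 words'"""
    return len(response.split()) > min_words


def check_keyword_frequency(response: str, keyword: str = 'AI', min_count: int = 3) -> bool:
    """'mention the keyword of AI at least 3 times'"""
    return response.lower().count(keyword.lower()) >= min_count


# A prompt can carry several instructions; a strict pass requires all of them.
response = 'AI is discussed here. ' * 110
print(all(f(response) for f in (check_min_words, check_keyword_frequency)))  # True
```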

## Official link

### Paper

[Instruction-Following Evaluation for Large Language Models](https://arxiv.org/abs/2311.07911)

### Repository

[instruction_following_eval](https://github.com/google-research/google-research/tree/master/instruction_following_eval)

## Examples

Input example I:

```
Write a 300+ word summary of the wikipedia page \"https://en.wikipedia.org/wiki/Raymond_III,_Count_of_Tripoli\". Do not use any commas and highlight at least 3 sections that has titles in markdown format, for example *highlighted section part 1*, *highlighted section part 2*, *highlighted section part 3*.
```

Output example I (from GPT-4):

```
Raymond III was the Count of Tripoli from 1152 to 1187 and Prince of Galilee and Tiberias in the Kingdom of Jerusalem. He was born in 1140 to Raymond II of Tripoli and Hodierna of Jerusalem. His mother was the daughter of Baldwin II of Jerusalem. \n\n*Early Life and Succession*\n\nRaymond III was only a child when his father was murdered. His mother Hodierna was regent until Raymond came of age. In 1155 Raymond married Eschiva the daughter of Walter I of Beirut. They had three children: Raymond IV Bertrand and a daughter who married Guy of Lusignan. \n\n*Reign*\n\nRaymond III's reign was marked by a series of conflicts and alliances with the Muslim world. He was involved in the defense of the Kingdom of Jerusalem against Nur ad-Din and later Saladin. He was also a key figure in the internal politics of the kingdom. He was a supporter of the queen mother Amalric of Jerusalem and opposed the succession of Guy of Lusignan. \n\n*Later Life and Death*\n\nIn 1187 Raymond III was part of the disastrous Battle of Hattin where the Christian forces were defeated by Saladin. He was one of the few to escape the battlefield but died later that year. His son Raymond IV succeeded him as Count of Tripoli. \n\nRaymond III's life and reign were marked by the complex politics and warfare of the Crusader states. He was a key figure in the defense of the Kingdom of Jerusalem and his death marked a significant loss for the Christian forces in the region. His legacy is a testament to the turbulent times in which he lived and the challenges faced by the Crusader states in their relations with the Muslim world.
```

Input example II:

```
"I am planning a trip to Japan, and I would like thee to write an itinerary for my journey in a Shakespearean style. You are not allowed to use any commas in your response."
```

Output example II (from GPT-4):

```
Hark! Hearken to the tale of thy journey to the land of the rising sun, Japan. \n\nUpon the morrow of thy arrival, thou shalt find thyself in Tokyo, the city of lights and towers. In the morn, thou shalt explore the grandeur of the Imperial Palace, a sight to behold. As the sun reaches its zenith, thou shalt partake in a feast of sushi, a delicacy of the land. As the day wanes, thou shalt wander through the electric streets of Akihabara, a realm of technology and animation.\n\nOn the second day, thou shalt journey to the ancient city of Kyoto. In the soft light of dawn, thou shalt visit the golden pavilion of Kinkaku-ji, a vision of beauty and tranquility. As the day unfolds, thou shalt wander through the bamboo grove of Arashiyama, a place of peace and serenity. As the sun sets, thou shalt partake in a traditional tea ceremony, a ritual of grace and harmony.\n\nOn the third day, thou shalt travel to the city of Hiroshima. In the morning, thou shalt pay thy respects at the Peace Memorial Park, a place of remembrance and hope. As the day progresses, thou shalt journey to the island of Miyajima, home to the floating torii gate, a sight of wonder and awe. As the day ends, thou shalt feast on okonomiyaki, a dish of delight and satisfaction.\n\nOn the fourth day, thou shalt return to Tokyo. In the morning, thou shalt ascend the Tokyo Skytree, a tower of height and grandeur. As the day unfolds, thou shalt explore the bustling streets of Shibuya, a realm of fashion and culture. As the sun sets, thou shalt dine in the vibrant district of Roppongi, a place of gastronomy and nightlife.\n\nOn the fifth and final day, thou shalt bid farewell to the land of the rising sun. With memories of beauty and wonder, thou shalt embark on thy journey home, carrying with thee the spirit of Japan. \n\nThus ends the tale of thy journey, a tale of exploration and discovery. May thy travels be filled with joy and adventure, and may the land of the rising sun forever hold a place in thy heart.
```

## Evaluation results

```
dataset    version    metric                        mode    baichuan2-7b-chat-hf    baichuan2-13b-chat-hf    internlm2-chat-7b-hf    internlm2-chat-20b-hf    llama-2-7b-chat-hf    llama-2-13b-chat-hf
---------  ---------  ----------------------------  ------  ----------------------  -----------------------  ----------------------  -----------------------  --------------------  ---------------------
IFEval     3321a3     Prompt-level-strict-accuracy  gen     36.04                   35.49                    38.26                   33.09                    33.46                 33.64
IFEval     3321a3     Inst-level-strict-accuracy    gen     46.76                   46.76                    49.16                   45.32                    45.68                 45.44
IFEval     3321a3     Prompt-level-loose-accuracy   gen     37.52                   37.71                    42.51                   39.37                    43.81                 47.32
IFEval     3321a3     Inst-level-loose-accuracy     gen     48.44                   49.16                    53.72                   51.08                    55.64                 58.03
```
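The four metrics differ only in how the per-instruction checks are aggregated: prompt-level accuracy credits a prompt only when every instruction attached to it passes, while inst-level accuracy scores each instruction independently (strict vs. loose refers to how leniently the response is normalized before checking). A toy illustration of the two aggregations, using hypothetical pass/fail data:

```python
# Toy data: per-prompt lists of instruction-level pass/fail outcomes.
results = [
    [True, True],           # prompt 1: both instructions satisfied
    [True, False, False],   # prompt 2: one of three satisfied
]

prompt_level = sum(all(r) for r in results) / len(results)
inst_flags = [flag for r in results for flag in r]
inst_level = sum(inst_flags) / len(inst_flags)
print(prompt_level, inst_level)  # 0.5 0.6
```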
## Reference

```
@article{zhou2023instruction,
  title={Instruction-Following Evaluation for Large Language Models},
  author={Zhou, Jeffrey and Lu, Tianjian and Mishra, Swaroop and Brahma, Siddhartha and Basu, Sujoy and Luan, Yi and Zhou, Denny and Hou, Le},
  journal={arXiv preprint arXiv:2311.07911},
  year={2023}
}
```
opencompass/configs/datasets/xIFEval/IFEval_gen.py (new file, 4 lines)
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .IFEval_gen_3321a3 import xifeval_datasets  # noqa: F401, F403
opencompass/configs/datasets/xIFEval/IFEval_gen_3321a3.py (new file, 34 lines)
@@ -0,0 +1,34 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import xIFEvalDataset, xIFEvaluator

xifeval_reader_cfg = dict(
    input_columns=['prompt'], output_column='reference')

xifeval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='{prompt}'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=2048))

xifeval_eval_cfg = dict(
    evaluator=dict(type=xIFEvaluator),
    pred_role='BOT',
)
LANG = 'de'

xifeval_datasets = [
    dict(
        abbr='xIFEval',
        type=xIFEvalDataset,
        path=f'data/ifeval/input_data_google_{LANG}.jsonl',
        reader_cfg=xifeval_reader_cfg,
        infer_cfg=xifeval_infer_cfg,
        eval_cfg=xifeval_eval_cfg)
]
opencompass/configs/datasets/xIFEval/README.md (new file, 31 lines)
@@ -0,0 +1,31 @@
# IFEval

```bash
python3 run.py --models hf_internlm2_chat_7b --datasets IFEval_gen_3321a3 --debug
```

## Chat Models

| model | Prompt-level-strict-accuracy | Inst-level-strict-accuracy | Prompt-level-loose-accuracy | Inst-level-loose-accuracy |
|:-----------------------------:|-------------------------------:|-----------------------------:|------------------------------:|----------------------------:|
| qwen1.5-0.5b-chat-hf | 13.12 | 23.26 | 15.71 | 26.38 |
| qwen1.5-1.8b-chat-hf | 16.08 | 26.26 | 18.30 | 29.02 |
| qwen1.5-4b-chat-hf | 25.51 | 35.97 | 28.84 | 39.81 |
| qwen1.5-7b-chat-hf | 38.82 | 50.00 | 42.70 | 53.48 |
| qwen1.5-14b-chat-hf | 42.51 | 54.20 | 49.17 | 59.95 |
| qwen1.5-32b-chat-hf | 49.54 | 60.43 | 53.97 | 64.39 |
| qwen1.5-72b-chat-hf | 51.02 | 61.99 | 57.12 | 67.27 |
| qwen1.5-110b-chat-hf | 55.08 | 65.59 | 61.18 | 70.86 |
| internlm2-chat-1.8b-hf | 18.30 | 28.78 | 21.44 | 32.01 |
| internlm2-chat-1.8b-sft-hf | 18.67 | 31.18 | 19.78 | 32.85 |
| internlm2-chat-7b-hf | 34.75 | 46.28 | 40.48 | 51.44 |
| internlm2-chat-7b-sft-hf | 39.19 | 50.12 | 42.33 | 52.76 |
| internlm2-chat-20b-hf | 36.41 | 48.68 | 40.67 | 53.24 |
| internlm2-chat-20b-sft-hf | 44.55 | 55.64 | 46.77 | 58.03 |
| llama-3-8b-instruct-hf | 68.02 | 76.74 | 75.42 | 82.85 |
| llama-3-70b-instruct-hf | 78.00 | 84.65 | 84.29 | 89.21 |
| llama-3-8b-instruct-lmdeploy | 69.13 | 77.46 | 77.26 | 83.93 |
| llama-3-70b-instruct-lmdeploy | 75.97 | 82.97 | 83.18 | 88.37 |
| mistral-7b-instruct-v0.1-hf | 40.30 | 50.96 | 41.96 | 53.48 |
| mistral-7b-instruct-v0.2-hf | 49.17 | 60.43 | 51.94 | 64.03 |
| mixtral-8x7b-instruct-v0.1-hf | 50.09 | 60.67 | 55.64 | 65.83 |
opencompass/configs/datasets/xIFEval/xIFeval_gen.py (new file, 37 lines)
@@ -0,0 +1,37 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import xIFEvalDataset, xIFEvaluator

xifeval_reader_cfg = dict(
    input_columns=['prompt'], output_column='reference')

xifeval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='{prompt}'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=4096))

xifeval_eval_cfg = dict(
    evaluator=dict(type=xIFEvaluator),
    pred_role='BOT',
)

xifeval_datasets = []

LANGS = ['ar', 'bn', 'cs', 'de', 'es', 'fr', 'hu', 'ja', 'ko', 'ru', 'sr', 'sw', 'te', 'th', 'vi', 'zh']
for LANG in LANGS:
    path = f'data/xifeval/input_data_google_{LANG}.jsonl'
    xifeval_datasets.append(dict(
        type=xIFEvalDataset,
        abbr=f'xIFEval_{LANG}',
        path=path,
        reader_cfg=xifeval_reader_cfg,
        infer_cfg=xifeval_infer_cfg,
        eval_cfg=xifeval_eval_cfg,
    ))
opencompass/configs/datasets/xgpqa/README.md (new file, 69 lines)
@@ -0,0 +1,69 @@
# GPQA

```bash
python3 run.py --models hf_internlm2_7b --datasets gpqa_ppl_6bf57a --debug
python3 run.py --models hf_internlm2_chat_7b --datasets gpqa_gen_4baadb --debug
```

## Base Models

| model | GPQA_diamond |
|:------------------------:|---------------:|
| llama-7b-turbomind | 24.24 |
| llama-13b-turbomind | 25.25 |
| llama-30b-turbomind | 22.73 |
| llama-65b-turbomind | 21.72 |
| llama-2-7b-turbomind | 25.25 |
| llama-2-13b-turbomind | 23.74 |
| llama-2-70b-turbomind | 28.28 |
| llama-3-8b-turbomind | 31.82 |
| llama-3-70b-turbomind | 40.91 |
| internlm2-1.8b-turbomind | 24.24 |
| internlm2-7b-turbomind | 28.28 |
| internlm2-20b-turbomind | 31.31 |
| qwen-1.8b-turbomind | 28.79 |
| qwen-7b-turbomind | 24.75 |
| qwen-14b-turbomind | 27.78 |
| qwen-72b-turbomind | 31.31 |
| qwen1.5-0.5b-hf | 23.74 |
| qwen1.5-1.8b-hf | 28.79 |
| qwen1.5-4b-hf | 23.23 |
| qwen1.5-7b-hf | 20.71 |
| qwen1.5-14b-hf | 32.32 |
| qwen1.5-32b-hf | 30.81 |
| qwen1.5-72b-hf | 31.82 |
| qwen1.5-moe-a2-7b-hf | 28.79 |
| mistral-7b-v0.1-hf | 24.75 |
| mistral-7b-v0.2-hf | 23.74 |
| mixtral-8x7b-v0.1-hf | 28.79 |
| mixtral-8x22b-v0.1-hf | 36.36 |
| yi-6b-hf | 28.28 |
| yi-34b-hf | 35.86 |
| deepseek-7b-base-hf | 20.71 |
| deepseek-67b-base-hf | 25.25 |

## Chat Models

| model | GPQA_diamond |
|:-----------------------------:|---------------:|
| qwen1.5-0.5b-chat-hf | 19.70 |
| qwen1.5-1.8b-chat-hf | 29.80 |
| qwen1.5-4b-chat-hf | 25.25 |
| qwen1.5-7b-chat-hf | 31.82 |
| qwen1.5-14b-chat-hf | 30.30 |
| qwen1.5-32b-chat-hf | 31.31 |
| qwen1.5-72b-chat-hf | 32.83 |
| qwen1.5-110b-chat-hf | 35.86 |
| internlm2-chat-1.8b-hf | 25.76 |
| internlm2-chat-1.8b-sft-hf | 26.26 |
| internlm2-chat-7b-hf | 28.28 |
| internlm2-chat-7b-sft-hf | 27.27 |
| internlm2-chat-20b-hf | 30.30 |
| internlm2-chat-20b-sft-hf | 29.29 |
| llama-3-8b-instruct-hf | 25.76 |
| llama-3-70b-instruct-hf | 37.88 |
| llama-3-8b-instruct-lmdeploy | 25.76 |
| llama-3-70b-instruct-lmdeploy | 37.88 |
| mistral-7b-instruct-v0.1-hf | 30.30 |
| mistral-7b-instruct-v0.2-hf | 25.25 |
| mixtral-8x7b-instruct-v0.1-hf | 30.30 |
@@ -0,0 +1,52 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GPQADataset, GPQA_Simple_Eval_postprocess, GPQAEvaluator

# openai_simple_eval prompt
align_prompt = """
Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of ABCD.

{question}

A) {A}
B) {B}
C) {C}
D) {D}
""".strip()

gpqa_reader_cfg = dict(
    input_columns=['question', 'A', 'B', 'C', 'D'],
    output_column='answer')

gpqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt=align_prompt),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

gpqa_eval_cfg = dict(evaluator=dict(type=GPQAEvaluator),
                     pred_postprocessor=dict(type=GPQA_Simple_Eval_postprocess))

gpqa_datasets = []
gpqa_subsets = {
    # 'extended': 'gpqa_extended.csv',
    # 'main': 'gpqa_main.csv',
    'diamond': 'gpqa_diamond.csv'
}

for split in list(gpqa_subsets.keys()):
    gpqa_datasets.append(
        dict(
            abbr='GPQA_' + split,
            type=GPQADataset,
            path='./data/gpqa/',
            name=gpqa_subsets[split],
            reader_cfg=gpqa_reader_cfg,
            infer_cfg=gpqa_infer_cfg,
            eval_cfg=gpqa_eval_cfg)
    )
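Since align_prompt pins the answer to a final 'ANSWER: $LETTER' line, post-processing only has to pull that letter back out. A rough sketch of what a postprocessor like GPQA_Simple_Eval_postprocess has to do (hypothetical code, not the library's implementation):

```python
import re


def extract_answer(prediction: str) -> str:
    # Grab the letter following the last 'ANSWER:' marker, if any.
    matches = re.findall(r'ANSWER\s*:\s*([ABCD])', prediction)
    return matches[-1] if matches else ''


assert extract_answer('Reasoning step by step...\nANSWER: C') == 'C'
```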
@@ -0,0 +1,53 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import FixKRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccwithDetailsEvaluator
from opencompass.datasets import GPQADataset, GPQAEvaluator
from opencompass.utils import first_option_postprocess

gpqa_reader_cfg = dict(
    input_columns=['question', 'A', 'B', 'C', 'D'],
    output_column='answer')

hint = 'For the multiple choice question below, please provide the correct answer option directly.'
question_and_options = 'Question: {question}\n(A){A}\n(B){B}\n(C){C}\n(D){D}\n'
gpqa_infer_cfg = dict(
    ice_template=dict(
        type=PromptTemplate,
        template={
            opt: f'{question_and_options}\nAnswer: {opt}' for opt in ['A', 'B', 'C', 'D']},
    ),
    prompt_template=dict(
        type=PromptTemplate,
        template={
            opt: f'{hint}\n</E>{question_and_options}\nAnswer: {opt}' for opt in ['A', 'B', 'C', 'D']
        },
        ice_token='</E>'
    ),
    retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
    inferencer=dict(type=PPLInferencer))

gpqa_eval_cfg = dict(evaluator=dict(type=AccwithDetailsEvaluator))

gpqa_datasets = []
LANG = ['de']
gpqa_subsets = {
    lang: f'gpqa_main_{lang}.csv' for lang in LANG
}
# gpqa_subsets = {
#     # 'extended': 'gpqa_extended.csv',
#     # 'main': 'gpqa_main.csv',
#     'diamond': 'gpqa_main_{}.csv'
# }

for split in list(gpqa_subsets.keys()):
    gpqa_datasets.append(
        dict(
            abbr='GPQA_' + split,
            type=GPQADataset,
            path='./data/gpqa/',
            name=gpqa_subsets[split],
            reader_cfg=gpqa_reader_cfg,
            infer_cfg=gpqa_infer_cfg,
            eval_cfg=gpqa_eval_cfg)
    )
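Unlike the generative configs, this one uses PPLInferencer: each option letter gets its own fully templated prompt ending in 'Answer: <opt>', the model scores all four continuations, and the option whose template is least surprising wins. A sketch of the selection step, with made-up per-token average log-likelihoods standing in for a real model:

```python
# Made-up scores: average per-token log-likelihood of each option's
# templated prompt; a higher average log-likelihood means a lower perplexity.
scores = {'A': -2.31, 'B': -1.87, 'C': -2.60, 'D': -2.05}


def pick_option(avg_loglikelihoods: dict) -> str:
    return max(avg_loglikelihoods, key=avg_loglikelihoods.get)


print(pick_option(scores))  # 'B'
```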
opencompass/configs/datasets/xgpqa/gpqa_gen_015262.py (new file, 46 lines)
@@ -0,0 +1,46 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GPQADataset, GPQAEvaluator
from opencompass.utils import first_option_postprocess

gpqa_reader_cfg = dict(
    input_columns=['question', 'A', 'B', 'C', 'D'],
    output_column='answer')

gpqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='What is the correct answer to this question: {question}\nChoices:\n'
                     '(A){A}\n'
                     '(B){B}\n'
                     '(C){C}\n'
                     '(D){D}\n'
                     'Format your response as follows: "The correct answer is (insert answer here)"'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

gpqa_eval_cfg = dict(evaluator=dict(type=GPQAEvaluator),
                     pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'))

gpqa_datasets = []
gpqa_subsets = {
    'extended': 'gpqa_extended.csv',
    'main': 'gpqa_main.csv',
    'diamond': 'gpqa_diamond.csv'
}

for split in list(gpqa_subsets.keys()):
    gpqa_datasets.append(
        dict(
            abbr='GPQA_' + split,
            type=GPQADataset,
            path='./data/gpqa/',
            name=gpqa_subsets[split],
            reader_cfg=gpqa_reader_cfg,
            infer_cfg=gpqa_infer_cfg,
            eval_cfg=gpqa_eval_cfg)
    )
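Here the model answers in the free-text form "The correct answer is (...)", so scoring relies on first_option_postprocess to recover the option letter. A rough sketch of first-option extraction (hypothetical, not the utility's actual code):

```python
import re


def first_option(text: str, options: str = 'ABCD') -> str:
    # Return the first standalone option letter, with or without parentheses.
    match = re.search(rf'\(?\b([{options}])\b\)?', text)
    return match.group(1) if match else ''


assert first_option('The correct answer is (B)') == 'B'
```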
opencompass/configs/datasets/xgpqa/gpqa_gen_4baadb.py (new file, 46 lines)
@@ -0,0 +1,46 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GPQADataset, GPQAEvaluator
from opencompass.utils import first_option_postprocess

gpqa_reader_cfg = dict(
    input_columns=['question', 'A', 'B', 'C', 'D'],
    output_column='answer')

gpqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='What is the correct answer to this question: {question}\nChoices:\n'
                     '(A){A}\n'
                     '(B){B}\n'
                     '(C){C}\n'
                     '(D){D}\n'
                     'Format your response as follows: "The correct answer is (insert answer here)"'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

gpqa_eval_cfg = dict(evaluator=dict(type=GPQAEvaluator),
                     pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'))

gpqa_datasets = []
gpqa_subsets = {
    # 'extended': 'gpqa_extended.csv',
    # 'main': 'gpqa_main.csv',
    'diamond': 'gpqa_diamond.csv'
}

for split in list(gpqa_subsets.keys()):
    gpqa_datasets.append(
        dict(
            abbr='GPQA_' + split,
            type=GPQADataset,
            path='./data/gpqa/',
            name=gpqa_subsets[split],
            reader_cfg=gpqa_reader_cfg,
            infer_cfg=gpqa_infer_cfg,
            eval_cfg=gpqa_eval_cfg)
    )
@@ -0,0 +1,57 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GPQADataset, GPQA_Simple_Eval_postprocess, GPQAEvaluator

# openai_simple_eval prompt
align_prompt = """
Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of ABCD. Think step by step before answering.

{question}

A) {A}
B) {B}
C) {C}
D) {D}
""".strip()

gpqa_reader_cfg = dict(
    input_columns=['question', 'A', 'B', 'C', 'D'],
    output_column='answer')

gpqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt=align_prompt),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

gpqa_eval_cfg = dict(evaluator=dict(type=GPQAEvaluator),
                     pred_postprocessor=dict(type=GPQA_Simple_Eval_postprocess))

# gpqa_subsets = {
#     # 'extended': 'gpqa_extended.csv',
#     # 'main': 'gpqa_main.csv',
#     'diamond': 'gpqa_diamond.csv'
# }
gpqa_datasets = []
LANGS = ['en', 'ar', 'bn', 'cs', 'es', 'fr', 'hu', 'ja', 'ko', 'ru', 'sr', 'sw', 'te', 'th', 'vi', 'zh']
gpqa_subsets = {
    lang: f'gpqa_main_{lang}.csv' for lang in LANGS
}

for split in list(gpqa_subsets.keys()):
    gpqa_datasets.append(
        dict(
            abbr='GPQA_' + split,
            type=GPQADataset,
            path='./data/gpqa/',  # need to change
            name=gpqa_subsets[split],
            reader_cfg=gpqa_reader_cfg,
            infer_cfg=gpqa_infer_cfg,
            eval_cfg=gpqa_eval_cfg)
    )
opencompass/configs/datasets/xgpqa/gpqa_ppl_6bf57a.py (new file, 40 lines)
@@ -0,0 +1,40 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.datasets import GPQADataset, GPQAEvaluator
from opencompass.utils import first_option_postprocess

gpqa_reader_cfg = dict(
    input_columns=['question', 'A', 'B', 'C', 'D'],
    output_column='answer')

gpqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            opt: f'Question: {{question}}\n(A){{A}}\n(B){{B}}\n(C){{C}}\n(D){{D}}\nAnswer: {opt}' for opt in ['A', 'B', 'C', 'D']
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

gpqa_eval_cfg = dict(evaluator=dict(type=GPQAEvaluator),
                     pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'))

gpqa_datasets = []
gpqa_subsets = {
    # 'extended': 'gpqa_extended.csv',
    # 'main': 'gpqa_main.csv',
    'diamond': 'gpqa_diamond.csv'
}

for split in list(gpqa_subsets.keys()):
    gpqa_datasets.append(
        dict(
            abbr='GPQA_' + split,
            type=GPQADataset,
            path='./data/gpqa/',
            name=gpqa_subsets[split],
            reader_cfg=gpqa_reader_cfg,
            infer_cfg=gpqa_infer_cfg,
            eval_cfg=gpqa_eval_cfg)
    )
opencompass/configs/datasets/xgpqa/xgpqa_gen.py (new file, 7 lines)
@@ -0,0 +1,7 @@
from mmengine.config import read_base

# with read_base():
#     from .gpqa_openai_simple_evals_gen_5aeece import gpqa_datasets

with read_base():
    from .gpqa_openai_simple_evals_gen_5aeece import gpqa_datasets
opencompass/configs/datasets/xlivecodebench/README.md (new file, 42 lines)
@@ -0,0 +1,42 @@
# LiveCodeBench

## Dataset

LiveCodeBench provides holistic and contamination-free evaluation of the coding capabilities of LLMs. In particular, LiveCodeBench continuously collects new problems over time from contests on three competition platforms -- LeetCode, AtCoder, and CodeForces. Beyond code generation, it also covers a broader range of code-related capabilities, such as self-repair, code execution, and test output prediction. Currently, LiveCodeBench hosts four hundred high-quality coding problems published between May 2023 and March 2024.

- Original project: https://livecodebench.github.io/leaderboard.html

## Setting

| Model Type | Code Generation | Test Output Prediction | Code Execution |
|------------|--------|--------|--------|
| Base Model | ❌ | ❌ | ❌ |
| Chat Model | ✅ | ✅ | ✅ |

## Baseline Performance

| Model | Code Generation(pass@1) | Test Output Prediction(pass@1) | Code Execution(pass@1) |
|------------|--------|--------|--------|
| Qwen2.5-7B-Instruct(HF) | 39.25 | 48.64 | 41.96 |
| Meta-Llama-3.1-8B-Instruct(HF) | 20.25 | 24.66 | 17.12 |
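The pass@1 numbers above are the fraction of problems for which a sampled program passes every test; when more than one sample is drawn per problem, pass@k is usually computed with the unbiased estimator of Chen et al. (2021). A reference sketch:

```python
from math import comb


def pass_at_k(n: int, c: int, k: int) -> float:
    """n: samples drawn, c: samples passing all tests, k: evaluation budget."""
    if n - c < k:
        return 1.0  # every size-k subset contains at least one passing sample
    return 1.0 - comb(n - c, k) / comb(n, k)


print(pass_at_k(n=10, c=3, k=1))  # 0.3, the expected single-sample pass rate
```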
## Citation

```bibtex
@article{jain2024livecodebench,
    author = {Naman Jain and King Han and Alex Gu and Wen-Ding Li and Fanjia Yan and Tianjun Zhang and Sida Wang and Armando Solar-Lezama and Koushik Sen and Ion Stoica},
    title = {LiveCodeBench: Holistic and Contamination Free Evaluation of Large Language Models for Code},
    year = {2024},
    journal = {arXiv preprint},
}
@misc{2023opencompass,
    title={OpenCompass: A Universal Evaluation Platform for Foundation Models},
    author={OpenCompass Contributors},
    howpublished = {\url{https://github.com/open-compass/opencompass}},
    year={2023}
}
```
@@ -0,0 +1,166 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.xlivecodebench import (
    LCBCodeExecutionDataset,
    LCBTestOutputPredictionDataset,
    LCBCodeExecutionEvaluator,
    LCBTestOutputEvaluator
)
from opencompass.datasets.xlivecodebench import xLCBCodeGenerationDataset, xLCBCodeGenerationEvaluator
from opencompass.datasets.livecodebench import TestOutputPromptConstants


lcb_code_generation_reader_cfg = dict(
    input_columns=[
        'question_content',
        'format_prompt',
    ],
    # output_column='evaluation_sample',
    output_column='question_id',
)

SYSTEM_MESSAGE_GENERIC = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

prompt_template = '### Question:\n{question_content}\n\n{format_prompt}' + \
    '### Answer: (use the provided format with backticks)\n\n'


# Code Generation Tasks
lcb_code_generation_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt=prompt_template
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_code_generation_eval_cfg = dict(
    evaluator=dict(
        type=xLCBCodeGenerationEvaluator,
        path='data/code_generation_lite/lcb_gpt4o_ar.jsonl',
        num_process_evaluate=4,
        timeout=6,
    ),
    pred_role='BOT',
)

LCB_datasets = []
LANGS = ['ar', 'bn', 'cs', 'de', 'es', 'fr', 'hu', 'ja', 'ko', 'ru', 'sr', 'sw', 'te', 'th', 'vi', 'zh']
for LANG in LANGS:
    LCBCodeGeneration_dataset = dict(
        type=xLCBCodeGenerationDataset,
        abbr=f'lcb_code_generation_{LANG}',
        path=f'data/code_generation_lite/lcb_gpt4o_{LANG}.jsonl',
        local_mode=True,
        reader_cfg=lcb_code_generation_reader_cfg,
        infer_cfg=lcb_code_generation_infer_cfg,
        eval_cfg=lcb_code_generation_eval_cfg
    )

    # Code Execution Dataset
    lcb_code_execution_reader_cfg = dict(
        input_columns=[
            'prompt',
        ],
        output_column='evaluation_sample',
    )

    lcb_code_execution_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt='You are an expert at Python programming, code execution, test case generation, and fuzzing.'
                    ),
                ],
                round=[
                    dict(
                        role='HUMAN',
                        prompt='{prompt}'
                    )
                ]
            )
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=1024)
    )

    lcb_code_execution_eval_cfg = dict(
        evaluator=dict(
            type=LCBCodeExecutionEvaluator,
        ),
        pred_role='BOT',
    )

    LCBCodeExecution_dataset = dict(
        type=LCBCodeExecutionDataset,
        abbr=f'lcb_code_execution_{LANG}',
        path='opencompass/execution-v2',
        reader_cfg=lcb_code_execution_reader_cfg,
        infer_cfg=lcb_code_execution_infer_cfg,
        eval_cfg=lcb_code_execution_eval_cfg,
    )

    # Test Output Prediction Dataset
    lcb_test_output_reader_cfg = dict(
        input_columns=[
            'prompt',
        ],
        output_column='evaluation_sample',
    )

    system_prompt = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

    lcb_test_output_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                # begin=[
                #     dict(
                #         role='SYSTEM',
                #         prompt=system_prompt
                #     ),
                # ],
                round=[
                    dict(
                        role='HUMAN',
                        prompt='{prompt}'
                    )
                ]
            )
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=1024)
    )

    lcb_test_output_eval_cfg = dict(
        evaluator=dict(
            type=LCBTestOutputEvaluator,
        ),
        pred_role='BOT',
    )

    LCBTestOutput_dataset = dict(
        type=LCBTestOutputPredictionDataset,
        abbr=f'lcb_test_output_{LANG}',
        path='opencompass/test_generation',
        reader_cfg=lcb_test_output_reader_cfg,
        infer_cfg=lcb_test_output_infer_cfg,
        eval_cfg=lcb_test_output_eval_cfg,
    )

    LCB_datasets += [
        LCBCodeGeneration_dataset,
        LCBCodeExecution_dataset,
        LCBTestOutput_dataset,
    ]
@@ -0,0 +1,163 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    LCBCodeGenerationDataset,
    LCBCodeExecutionDataset,
    LCBTestOutputPredictionDataset,
    LCBCodeGenerationEvaluator,
    LCBCodeExecutionEvaluator,
    LCBTestOutputEvaluator
)
from opencompass.datasets.livecodebench import TestOutputPromptConstants


lcb_code_generation_reader_cfg = dict(
    input_columns=[
        'question_content',
        'format_prompt',
    ],
    # output_column='evaluation_sample',
    output_column='question_id',
)

SYSTEM_MESSAGE_GENERIC = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

prompt_template = '### Question:\n{question_content}\n\n{format_prompt}' + \
    '### Answer: (use the provided format with backticks)\n\n'


# Code Generation Tasks
lcb_code_generation_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt=prompt_template
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_code_generation_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeGenerationEvaluator,
        num_process_evaluate=4,
        timeout=6,
    ),
    pred_role='BOT',
)

LCBCodeGeneration_dataset = dict(
    type=LCBCodeGenerationDataset,
    abbr='lcb_code_generation',
    path='opencompass/code_generation_lite',
    reader_cfg=lcb_code_generation_reader_cfg,
    infer_cfg=lcb_code_generation_infer_cfg,
    eval_cfg=lcb_code_generation_eval_cfg
)

# Code Execution Dataset
lcb_code_execution_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

lcb_code_execution_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(
                    role='SYSTEM',
                    prompt='You are an expert at Python programming, code execution, test case generation, and fuzzing.'
                ),
            ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_code_execution_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeExecutionEvaluator,
    ),
    pred_role='BOT',
)

LCBCodeExecution_dataset = dict(
    type=LCBCodeExecutionDataset,
    abbr='lcb_code_execution',
    path='opencompass/execution-v2',
    reader_cfg=lcb_code_execution_reader_cfg,
    infer_cfg=lcb_code_execution_infer_cfg,
    eval_cfg=lcb_code_execution_eval_cfg,
)

# Test Output Prediction Dataset
lcb_test_output_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

system_prompt = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

lcb_test_output_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            # begin=[
            #     dict(
            #         role='SYSTEM',
            #         prompt=system_prompt
            #     ),
            # ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_test_output_eval_cfg = dict(
    evaluator=dict(
        type=LCBTestOutputEvaluator,
    ),
    pred_role='BOT',
)

LCBTestOutput_dataset = dict(
    type=LCBTestOutputPredictionDataset,
    abbr='lcb_test_output',
    path='opencompass/test_generation',
    reader_cfg=lcb_test_output_reader_cfg,
    infer_cfg=lcb_test_output_infer_cfg,
    eval_cfg=lcb_test_output_eval_cfg,
)

LCB_datasets = [
    LCBCodeGeneration_dataset,
    LCBCodeExecution_dataset,
    LCBTestOutput_dataset,
]
@@ -0,0 +1,165 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    LCBCodeGenerationDataset,
    LCBCodeExecutionDataset,
    LCBTestOutputPredictionDataset,
    LCBCodeGenerationEvaluator,
    LCBCodeExecutionEvaluator,
    LCBTestOutputEvaluator
)
from opencompass.datasets.livecodebench import TestOutputPromptConstants


lcb_code_generation_reader_cfg = dict(
    input_columns=[
        'question_content',
        'format_prompt',
    ],
    # output_column='evaluation_sample',
    output_column='question_id',
)

SYSTEM_MESSAGE_GENERIC = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

prompt_template = '### Question:\n{question_content}\n\n{format_prompt}' + \
    '### Answer: (use the provided format with backticks)\n\n'


# Code Generation Tasks
lcb_code_generation_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt=prompt_template
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_code_generation_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeGenerationEvaluator,
        num_process_evaluate=4,
        timeout=6,
        release_version='release_v4',
    ),
    pred_role='BOT',
)

LCBCodeGeneration_dataset = dict(
    type=LCBCodeGenerationDataset,
    abbr='lcb_code_generation_v4',
    path='opencompass/code_generation_lite',
    reader_cfg=lcb_code_generation_reader_cfg,
    infer_cfg=lcb_code_generation_infer_cfg,
    eval_cfg=lcb_code_generation_eval_cfg,
    release_version='release_v4',
)

# Code Execution Dataset
lcb_code_execution_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

lcb_code_execution_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(
                    role='SYSTEM',
                    prompt='You are an expert at Python programming, code execution, test case generation, and fuzzing.'
                ),
            ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_code_execution_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeExecutionEvaluator,
    ),
    pred_role='BOT',
)

LCBCodeExecution_dataset = dict(
    type=LCBCodeExecutionDataset,
    abbr='lcb_code_execution',
    path='opencompass/execution-v2',
    reader_cfg=lcb_code_execution_reader_cfg,
    infer_cfg=lcb_code_execution_infer_cfg,
    eval_cfg=lcb_code_execution_eval_cfg,
)

# Test Output Prediction Dataset
lcb_test_output_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

system_prompt = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

lcb_test_output_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            # begin=[
            #     dict(
            #         role='SYSTEM',
            #         prompt=system_prompt
            #     ),
            # ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_test_output_eval_cfg = dict(
    evaluator=dict(
        type=LCBTestOutputEvaluator,
    ),
    pred_role='BOT',
)

LCBTestOutput_dataset = dict(
    type=LCBTestOutputPredictionDataset,
    abbr='lcb_test_output',
    path='opencompass/test_generation',
    reader_cfg=lcb_test_output_reader_cfg,
    infer_cfg=lcb_test_output_infer_cfg,
    eval_cfg=lcb_test_output_eval_cfg,
)

LCB_datasets = [
    LCBCodeGeneration_dataset,
    # LCBCodeExecution_dataset,
    # LCBTestOutput_dataset,
]
@@ -0,0 +1,165 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    LCBCodeGenerationDataset,
    LCBCodeExecutionDataset,
    LCBTestOutputPredictionDataset,
    LCBCodeGenerationEvaluator,
    LCBCodeExecutionEvaluator,
    LCBTestOutputEvaluator
)
from opencompass.datasets.livecodebench import TestOutputPromptConstants


lcb_code_generation_reader_cfg = dict(
    input_columns=[
        'question_content',
        'format_prompt',
    ],
    # output_column='evaluation_sample',
    output_column='question_id',
)

SYSTEM_MESSAGE_GENERIC = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

prompt_template = '### Question:\n{question_content}\n\n{format_prompt}' + \
    '### Answer: (use the provided format with backticks)\n\n'


# Code Generation Tasks
lcb_code_generation_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt=prompt_template
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_code_generation_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeGenerationEvaluator,
        num_process_evaluate=4,
        timeout=6,
        release_version='release_split_v4',
    ),
    pred_role='BOT',
)

LCBCodeGeneration_dataset = dict(
    type=LCBCodeGenerationDataset,
    abbr='lcb_code_generation_split_v4',
    path='opencompass/code_generation_lite',
    reader_cfg=lcb_code_generation_reader_cfg,
    infer_cfg=lcb_code_generation_infer_cfg,
    eval_cfg=lcb_code_generation_eval_cfg,
    release_version='release_split_v4',
)

# Code Execution Dataset
lcb_code_execution_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

lcb_code_execution_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(
                    role='SYSTEM',
                    prompt='You are an expert at Python programming, code execution, test case generation, and fuzzing.'
                ),
            ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_code_execution_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeExecutionEvaluator,
    ),
    pred_role='BOT',
)

LCBCodeExecution_dataset = dict(
    type=LCBCodeExecutionDataset,
    abbr='lcb_code_execution',
    path='opencompass/execution-v2',
    reader_cfg=lcb_code_execution_reader_cfg,
    infer_cfg=lcb_code_execution_infer_cfg,
    eval_cfg=lcb_code_execution_eval_cfg,
)

# Test Output Prediction Dataset
lcb_test_output_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

system_prompt = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

lcb_test_output_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            # begin=[
            #     dict(
            #         role='SYSTEM',
            #         prompt=system_prompt
            #     ),
            # ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_test_output_eval_cfg = dict(
    evaluator=dict(
        type=LCBTestOutputEvaluator,
    ),
    pred_role='BOT',
)

LCBTestOutput_dataset = dict(
    type=LCBTestOutputPredictionDataset,
    abbr='lcb_test_output',
    path='opencompass/test_generation',
    reader_cfg=lcb_test_output_reader_cfg,
    infer_cfg=lcb_test_output_infer_cfg,
    eval_cfg=lcb_test_output_eval_cfg,
)

LCB_datasets = [
    LCBCodeGeneration_dataset,
    # LCBCodeExecution_dataset,
    # LCBTestOutput_dataset,
]
@@ -0,0 +1,164 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    LCBCodeGenerationDataset,
    LCBCodeExecutionDataset,
    LCBTestOutputPredictionDataset,
    LCBCodeGenerationEvaluator,
    LCBCodeExecutionEvaluator,
    LCBTestOutputEvaluator
)
from opencompass.datasets.livecodebench import TestOutputPromptConstants


lcb_code_generation_reader_cfg = dict(
    input_columns=[
        'question_content',
        'format_prompt',
    ],
    # output_column='evaluation_sample',
    output_column='question_id',
)

SYSTEM_MESSAGE_GENERIC = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

prompt_template = '### Question:\n{question_content}\n\n{format_prompt}' + \
    '### Answer: (use the provided format with backticks)\n\n'


# Code Generation Tasks
lcb_code_generation_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt=prompt_template
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_code_generation_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeGenerationEvaluator,
        num_process_evaluate=4,
        timeout=6,
    ),
    pred_role='BOT',
)

LCBCodeGeneration_dataset = dict(
    type=LCBCodeGenerationDataset,
    abbr='lcb_code_generation_v1',
    path='opencompass/code_generation_lite',
    reader_cfg=lcb_code_generation_reader_cfg,
    infer_cfg=lcb_code_generation_infer_cfg,
    eval_cfg=lcb_code_generation_eval_cfg,
    release_version='release_v1',
)

# Code Execution Dataset
lcb_code_execution_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

lcb_code_execution_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(
                    role='SYSTEM',
                    prompt='You are an expert at Python programming, code execution, test case generation, and fuzzing.'
                ),
            ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_code_execution_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeExecutionEvaluator,
    ),
    pred_role='BOT',
)

LCBCodeExecution_dataset = dict(
    type=LCBCodeExecutionDataset,
    abbr='lcb_code_execution',
    path='opencompass/execution-v2',
    reader_cfg=lcb_code_execution_reader_cfg,
    infer_cfg=lcb_code_execution_infer_cfg,
    eval_cfg=lcb_code_execution_eval_cfg,
)

# Test Output Prediction Dataset
lcb_test_output_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

system_prompt = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

lcb_test_output_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            # begin=[
            #     dict(
            #         role='SYSTEM',
            #         prompt=system_prompt
            #     ),
            # ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024)
)

lcb_test_output_eval_cfg = dict(
    evaluator=dict(
        type=LCBTestOutputEvaluator,
    ),
    pred_role='BOT',
)

LCBTestOutput_dataset = dict(
    type=LCBTestOutputPredictionDataset,
    abbr='lcb_test_output',
    path='opencompass/test_generation',
    reader_cfg=lcb_test_output_reader_cfg,
    infer_cfg=lcb_test_output_infer_cfg,
    eval_cfg=lcb_test_output_eval_cfg,
)

LCB_datasets = [
    LCBCodeGeneration_dataset,
    # LCBCodeExecution_dataset,
    # LCBTestOutput_dataset,
]
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .livecodebench_gen_6966bc import LCB_datasets  # noqa: F401, F403
opencompass/configs/datasets/xmgsm/README.md (new file, 67 lines)
@@ -0,0 +1,67 @@
# MGSM

## Introduction

The following introduction comes from the abstract of [Language models are multilingual chain-of-thought reasoners](https://arxiv.org/abs/2210.03057):

```
We introduce the Multilingual Grade School Math (MGSM) benchmark, by manually translating 250 grade-school math problems from the GSM8K dataset into ten typologically diverse languages.
```

## Official link

### Paper

[Language models are multilingual chain-of-thought reasoners](https://arxiv.org/abs/2210.03057)

### Repository

[MGSM](https://github.com/google-research/url-nlp)

## Examples

Input example I:

```
Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of "Answer:". Do not add anything other than the integer answer after "Answer:".

Janet’s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?
```

Output example I (from GPT-4):

```
Answer: 18
```
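Because every language's instruction forces the result onto a final "Answer:" line (with a language-specific marker such as "Antwort:" or "答案:"), scoring reduces to pulling the integer off that line. A simplified sketch in the spirit of mgsm_postprocess (hypothetical, not the library's code):

```python
def parse_final_answer(text: str, marker: str = 'Answer:') -> str:
    # Keep only the digits after the last occurrence of the marker.
    tail = text.rsplit(marker, 1)[-1]
    return ''.join(ch for ch in tail if ch.isdigit())


assert parse_final_answer('16 - 3 - 4 = 9 eggs sold at $2 each... Answer: 18') == '18'
```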
## Evaluation results

```
dataset         version    metric         mode    llama-3-8b-instruct-hf
--------------  ---------  -------------  ------  ------------------------
mgsm_bn         b65151     accuracy       gen     14.4
mgsm_de         2cc8ae     accuracy       gen     60
mgsm_en         5de71e     accuracy       gen     76
mgsm_es         d6b459     accuracy       gen     61.6
mgsm_fr         813e3c     accuracy       gen     54.4
mgsm_ja         04424f     accuracy       gen     42.8
mgsm_ru         400469     accuracy       gen     62.8
mgsm_sw         9e41ed     accuracy       gen     0.8
mgsm_te         346d97     accuracy       gen     0
mgsm_th         e70bee     accuracy       gen     44
mgsm_zh         d5cf30     accuracy       gen     28.4
mgsm_latin      -          naive_average  gen     50.56
mgsm_non_latin  -          naive_average  gen     32.07
mgsm            -          naive_average  gen     40.47
```

## Reference

```
@article{shi2022language,
  title={Language models are multilingual chain-of-thought reasoners},
  author={Shi, Freda and Suzgun, Mirac and Freitag, Markus and Wang, Xuezhi and Srivats, Suraj and Vosoughi, Soroush and Chung, Hyung Won and Tay, Yi and Ruder, Sebastian and Zhou, Denny and others},
  journal={arXiv preprint arXiv:2210.03057},
  year={2022}
}
```
opencompass/configs/datasets/xmgsm/mgsm_gen.py (new file, 4 lines)
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .mgsm_gen_d967bc import mgsm_datasets
opencompass/configs/datasets/xmgsm/mgsm_gen_d967bc.py (new file, 62 lines)
@@ -0,0 +1,62 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import JiebaRougeEvaluator
from opencompass.datasets import xMGSMSDataset, xMGSM_Evaluator, mgsm_postprocess


ALL_LANGUAGES = ['bn', 'de', 'en', 'es', 'fr', 'ja', 'ru', 'sw', 'te', 'th', 'zh', 'ar', 'sr', 'hu', 'cs', 'vi', 'ko']

LANG_TO_INSTRUCTIONS = {
    'en': """Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of "Answer:". Do not add anything other than the integer answer after "Answer:".\n\n{question}""",
    'bn': """এই গণিতের সমস্যাটি সমাধান করুন। চূড়ান্ত উত্তর দেওয়ার আগে যুক্তিসম্পন্ন পদক্ষেপ প্রদান করুন। চূড়ান্ত উত্তরটি একক সংখ্যা হিসাবে "উত্তর:" এর পরে শেষ লাইনে দিন। "উত্তর:" এর পরে অন্য কিছু যুক্ত করবেন না।.\n\n{question}""",
    'de': """Löse dieses Mathematikproblem. Gib die Schritte zur Begründung an, bevor du die endgültige Antwort in der letzten Zeile alleine im Format "Antwort:" gibst. Füge nichts anderes als die ganzzahlige Antwort nach "Antwort:" hinzu.\n\n{question}""",
    'es': """Resuelve este problema matemático. Proporciona los pasos de razonamiento antes de dar la respuesta final en la última línea por sí misma en el formato de "Respuesta:". No añadas nada más que la respuesta entera después de "Respuesta:".\n\n{question}""",
    'fr': """Résolvez ce problème de mathématiques. Donnez les étapes de raisonnement avant de fournir la réponse finale sur la dernière ligne elle-même dans le format de "Réponse:". N'ajoutez rien d'autre que la réponse entière après "Réponse:".\n\n{question}""",
    'ja': """この数学の問題を解いてください。最終的な答えを出す前に、解答の推論過程を記述してください。そして最後の行には "答え:" の形式で答えを記述し、その後には整数の答え以外何も追加しないでください。\n\n{question}""",
    'ru': """Решите эту математическую задачу. Объясните шаги рассуждения перед тем, как дать окончательный ответ в последней строке сам по себе в формате "Ответ:". Не добавляйте ничего, кроме целочисленного ответа после "Ответ:".\n\n{question}""",
    'sw': """Suluhisha tatizo hili la hesabu. Toa hatua za mantiki kabla ya kutoa jibu la mwisho kwenye mstari wa mwisho peke yake katika muundo wa "Jibu:". Usiongeze chochote kingine isipokuwa jibu la integer baada ya "Jibu:".\n\n{question}""",
    'te': """ఈ గణిత సమస్యను పరిష్కరించండి. చివరి సమాధానాన్ని ఇవ్వదానికి ముందు తర్కాత్మక అదుగులను ఇవ్వండి. చివరి పంక్తిలో మాత్రమే 'సమాధానం:' అనే ఆకారంలో చివరి సమాధానాద్ని ఇవ్వండి సమాధానం: తర్వాత పూర్ణాంక సమాధానానికి తప్పించి ఎదేనా చేర్చవద్దు.\n\n{question}""",
    'th': """แก้ปัญหาคณิตศาสตร์นี้ ให้ให้ขั้นตอนการใช้เหตุผลก่อนที่จะให้คำตอบสุดท้ายในบรรทัดสุดท้ายโดยอยู่ในรูปแบบ "คำตอบ:" ไม่ควรเพิ่มอะไรนอกจากคำตอบที่เป็นจำนวนเต็มหลังจาก "คำตอบ:"\n\n{question}""",
    'zh': """解决这个数学问题。在最后一行给出答案前,请提供推理步骤。最后一行应该以 "答案: " 的形式独立给出答案。在 "答案:" 后不要添加除整数答案之外的任何内容。\n\n{question}""",
    'ar': """حل هذه المسألة الرياضية. قبل أن تعطي الإجابة النهائية، يرجى إعطاء الخطوات التفكيرية. يجب أن تكون الإجابة النهائية في السطر الأخير بالشكل "الإجابة:". لا تضيف أي شيء آخر غير الإجابة العددية النهائية بعد "الإجابة:".\n\n{question}""",
    'sr': """Решење овог математичког проблема. Пре него што дате коначни одговор, пошаљите логички кораци. Коначни одговор мора бити на последњој линији у формату "Одговор:". Не додавајте ништа друго осим коначног целобројног одговора после "Одговор:".\n\n{question}""",
    'hu': """Megoldás ezt a matematikai problémát. Mielőtt megadná a végső választ, adjon meg logikai lépéseket. A végső válasz a végső sorban kell legyen a "Válasz:" formátumban. Ne adjon hozzá semmit mást, csak a végső egész szám választ a "Válasz:" után.\n\n{question}""",
    'cs': """Řešení tohoto matematického problému. Předtím, než dáte finální odpověď, přidejte logické kroky. Finální odpověď musí být na posledním řádku ve formátu "Odpověď:". Nezahrněte nic jiného než celočíselnou finální odpověď po "Odpověď:".\n\n{question}""",
    'vi': """Giải bài toán toán học này. Trước khi bạn cung cấp câu trả lời cuối cùng, hãy cung cấp các bước suy luận. Câu trả lời cuối cùng phải ở trên dòng cuối cùng trong định dạng "Câu trả lời:". Không thêm bất kỳ thứ gì khác ngoài câu trả lời số nguyên sau "Câu trả lời:".\n\n{question}""",
    'ko': """이 수학 문제를 풀어주세요. 마지막 줄에 최종 답을 제시하기 전에 논리적 단계를 제공해주세요. 마지막 줄에는 "답:" 형식으로 최종 답을 제시해주세요. "답:" 뒤에 정수 답만 추가해주세요.\n\n{question}""",
}

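# Build one zero-shot MGSM dataset config per language.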
mgsm_datasets = []
for lang in ALL_LANGUAGES:
    mgsm_reader_cfg = dict(input_columns=['question'], output_column='answer')

    mgsm_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt=LANG_TO_INSTRUCTIONS[lang]),
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=512),
    )

    mgsm_eval_cfg = dict(
        evaluator=dict(type=xMGSM_Evaluator),
        pred_role='BOT',
        pred_postprocessor=dict(type=mgsm_postprocess, lang=lang),
    )

    mgsm_datasets.append(
        dict(
            type=xMGSMSDataset,
            abbr=f'mgsm_{lang}',
            path=f'data/mgsm/mgsm_test_{lang}_google.jsonl',
            reader_cfg=mgsm_reader_cfg,
            infer_cfg=mgsm_infer_cfg,
            eval_cfg=mgsm_eval_cfg,
        )
    )
90
opencompass/configs/datasets/xmgsm/mgsm_gen_d967bc_8shot.py
Normal file
@ -0,0 +1,90 @@
import json
import os

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import JiebaRougeEvaluator
from opencompass.datasets import xMGSMSDataset, xMGSM_Evaluator, mgsm_postprocess

ALL_LANGUAGES = ['bn', 'de', 'en', 'es', 'fr', 'ja', 'ru', 'sw', 'te', 'th', 'zh', 'ar', 'sr', 'hu', 'cs', 'vi', 'ko']

LANG_TO_INSTRUCTIONS = {
    'en': """Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of "Answer:". Do not add anything other than the integer answer after "Answer:".\n\n{question}""",
    'bn': """এই গণিতের সমস্যাটি সমাধান করুন। চূড়ান্ত উত্তর দেওয়ার আগে যুক্তিসম্পন্ন পদক্ষেপ প্রদান করুন। চূড়ান্ত উত্তরটি একক সংখ্যা হিসাবে "উত্তর:" এর পরে শেষ লাইনে দিন। "উত্তর:" এর পরে অন্য কিছু যুক্ত করবেন না।.\n\n{question}""",
    'de': """Löse dieses Mathematikproblem. Gib die Schritte zur Begründung an, bevor du die endgültige Antwort in der letzten Zeile alleine im Format "Antwort:" gibst. Füge nichts anderes als die ganzzahlige Antwort nach "Antwort:" hinzu.\n\n{question}""",
    'es': """Resuelve este problema matemático. Proporciona los pasos de razonamiento antes de dar la respuesta final en la última línea por sí misma en el formato de "Respuesta:". No añadas nada más que la respuesta entera después de "Respuesta:".\n\n{question}""",
    'fr': """Résolvez ce problème de mathématiques. Donnez les étapes de raisonnement avant de fournir la réponse finale sur la dernière ligne elle-même dans le format de "Réponse:". N'ajoutez rien d'autre que la réponse entière après "Réponse:".\n\n{question}""",
    'ja': """この数学の問題を解いてください。最終的な答えを出す前に、解答の推論過程を記述してください。そして最後の行には "答え:" の形式で答えを記述し、その後には整数の答え以外何も追加しないでください。\n\n{question}""",
    'ru': """Решите эту математическую задачу. Объясните шаги рассуждения перед тем, как дать окончательный ответ в последней строке сам по себе в формате "Ответ:". Не добавляйте ничего, кроме целочисленного ответа после "Ответ:".\n\n{question}""",
    'sw': """Suluhisha tatizo hili la hesabu. Toa hatua za mantiki kabla ya kutoa jibu la mwisho kwenye mstari wa mwisho peke yake katika muundo wa "Jibu:". Usiongeze chochote kingine isipokuwa jibu la integer baada ya "Jibu:".\n\n{question}""",
    'te': """ఈ గణిత సమస్యను పరిష్కరించండి. చివరి సమాధానాన్ని ఇవ్వదానికి ముందు తర్కాత్మక అదుగులను ఇవ్వండి. చివరి పంక్తిలో మాత్రమే 'సమాధానం:' అనే ఆకారంలో చివరి సమాధానాద్ని ఇవ్వండి సమాధానం: తర్వాత పూర్ణాంక సమాధానానికి తప్పించి ఎదేనా చేర్చవద్దు.\n\n{question}""",
    'th': """แก้ปัญหาคณิตศาสตร์นี้ ให้ให้ขั้นตอนการใช้เหตุผลก่อนที่จะให้คำตอบสุดท้ายในบรรทัดสุดท้ายโดยอยู่ในรูปแบบ "คำตอบ:" ไม่ควรเพิ่มอะไรนอกจากคำตอบที่เป็นจำนวนเต็มหลังจาก "คำตอบ:"\n\n{question}""",
    'zh': """解决这个数学问题。在最后一行给出答案前,请提供推理步骤。最后一行应该以 "答案: " 的形式独立给出答案。在 "答案:" 后不要添加除整数答案之外的任何内容。\n\n{question}""",
    'ar': """حل هذه المسألة الرياضية. قبل أن تعطي الإجابة النهائية، يرجى إعطاء الخطوات التفكيرية. يجب أن تكون الإجابة النهائية في السطر الأخير بالشكل "الإجابة:". لا تضيف أي شيء آخر غير الإجابة العددية النهائية بعد "الإجابة:".\n\n{question}""",
    'sr': """Решење овог математичког проблема. Пре него што дате коначни одговор, пошаљите логички кораци. Коначни одговор мора бити на последњој линији у формату "Одговор:". Не додавајте ништа друго осим коначног целобројног одговора после "Одговор:".\n\n{question}""",
    'hu': """Megoldás ezt a matematikai problémát. Mielőtt megadná a végső választ, adjon meg logikai lépéseket. A végső válasz a végső sorban kell legyen a "Válasz:" formátumban. Ne adjon hozzá semmit mást, csak a végső egész szám választ a "Válasz:" után.\n\n{question}""",
    'cs': """Řešení tohoto matematického problému. Předtím, než dáte finální odpověď, přidejte logické kroky. Finální odpověď musí být na posledním řádku ve formátu "Odpověď:". Nezahrněte nic jiného než celočíselnou finální odpověď po "Odpověď:".\n\n{question}""",
    'vi': """Giải bài toán toán học này. Trước khi bạn cung cấp câu trả lời cuối cùng, hãy cung cấp các bước suy luận. Câu trả lời cuối cùng phải ở trên dòng cuối cùng trong định dạng "Câu trả lời:". Không thêm bất kỳ thứ gì khác ngoài câu trả lời số nguyên sau "Câu trả lời:".\n\n{question}""",
    'ko': """이 수학 문제를 풀어주세요. 마지막 줄에 최종 답을 제시하기 전에 논리적 단계를 제공해주세요. 마지막 줄에는 "답:" 형식으로 최종 답을 제시해주세요. "답:" 뒤에 정수 답만 추가해주세요.\n\n{question}""",
}


def load_jsonl(path):
    with open(path, 'r', encoding='utf-8') as f:
        return [json.loads(line) for line in f]


USER_HOME = os.path.expanduser("~")
DEFAULT_DATA_FOLDER = os.path.join(USER_HOME, '.cache/opencompass/')

mgsm_datasets = []
for lang in ALL_LANGUAGES:
    mgsm_reader_cfg = dict(input_columns=['question'], output_column='answer')
    cache_dir = DEFAULT_DATA_FOLDER

    # For an absolute path customized by the user
    local_path = os.path.join(cache_dir, f'data/mgsm/mgsm_train_{lang}_google.jsonl')
    data = load_jsonl(local_path)
    questions = [item['question'] for item in data]
    answers = [item['answer'] for item in data]

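    # Build an 8-shot prompt: eight HUMAN/BOT exemplar pairs from the train
    # split, followed by the test instruction for this language.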
    mgsm_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt=questions[0]),
                    dict(role='BOT', prompt=answers[0]),
                    dict(role='HUMAN', prompt=questions[1]),
                    dict(role='BOT', prompt=answers[1]),
                    dict(role='HUMAN', prompt=questions[2]),
                    dict(role='BOT', prompt=answers[2]),
                    dict(role='HUMAN', prompt=questions[3]),
                    dict(role='BOT', prompt=answers[3]),
                    dict(role='HUMAN', prompt=questions[4]),
                    dict(role='BOT', prompt=answers[4]),
                    dict(role='HUMAN', prompt=questions[5]),
                    dict(role='BOT', prompt=answers[5]),
                    dict(role='HUMAN', prompt=questions[6]),
                    dict(role='BOT', prompt=answers[6]),
                    dict(role='HUMAN', prompt=questions[7]),
                    dict(role='BOT', prompt=answers[7]),
                    dict(role='HUMAN', prompt=LANG_TO_INSTRUCTIONS[lang]),
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=512),
    )

    mgsm_eval_cfg = dict(
        evaluator=dict(type=xMGSM_Evaluator),
        pred_role='BOT',
        pred_postprocessor=dict(type=mgsm_postprocess, lang=lang),
    )

    mgsm_datasets.append(
        dict(
            type=xMGSMSDataset,
            abbr=f'mgsm_{lang}_8shot',
            path=f'data/mgsm/mgsm_test_{lang}_google.jsonl',
            reader_cfg=mgsm_reader_cfg,
            infer_cfg=mgsm_infer_cfg,
            eval_cfg=mgsm_eval_cfg,
        )
    )
4
opencompass/configs/datasets/xmgsm/xmgsm_ge_8shot.py
Normal file
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .mgsm_gen_d967bc_8shot import mgsm_datasets
4
opencompass/configs/datasets/xmgsm/xmgsm_gen.py
Normal file
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .mgsm_gen_d967bc import mgsm_datasets
14
opencompass/configs/datasets/xruler/README.md
Normal file
@ -0,0 +1,14 @@
# Ruler

OpenCompass now supports the brand-new long-context language model evaluation benchmark [RULER](https://arxiv.org/pdf/2404.06654). RULER evaluates long-context capabilities, including retrieval, multi-hop tracing, aggregation, and question answering, through flexible configurations.

OpenCompass provides two types of evaluation demos for using different tokenizers.

To use the same tokenizer for every model (typically the GPT-4 tokenizer), you can follow the demo (configs/eval_ruler_fix_tokenizer.py), where most of the settings are already defined.

For evaluation using each model's own tokenizer, you have to build the settings yourself when you run the demo (we do not know which model you are trying to evaluate!): create a new evaluation script following the example (configs/eval_ruler.py) and change the context window sizes or add models according to your setup.

```bash
python run.py configs/eval_ruler_fix_tokenizer.py  # For evaluation with the GPT-4 tokenizer
python run.py configs/eval_ruler.py  # For evaluation with each model's own tokenizer
```
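
As a minimal sketch of such a script (assuming the combined xruler config defined below in this commit, and an HF model config shipped with OpenCompass; adjust the import paths and context lengths to your setup):

```python
from mmengine.config import read_base

with read_base():
    # Combined RULER datasets over all configured context lengths.
    from opencompass.configs.datasets.xruler.ruler_combined_gen import \
        ruler_combined_datasets
    # Any model config works here; this one is only an illustration.
    from opencompass.configs.models.hf_llama.hf_llama3_8b_instruct import \
        models as hf_llama3_8b_instruct_models

datasets = [*ruler_combined_datasets]
models = [*hf_llama3_8b_instruct_models]
```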
28
opencompass/configs/datasets/xruler/ruler_128k_gen.py
Normal file
@ -0,0 +1,28 @@
from mmengine.config import read_base

with read_base():
    from .ruler_cwe_gen import cwe_datasets as cwe  # CWE
    from .ruler_fwe_gen import fwe_datasets as fwe  # FWE
    from .ruler_niah_gen import niah_datasets as niah  # Niah
    from .ruler_qa_gen import qa_datasets as qa  # QA
    from .ruler_vt_gen import vt_datasets as vt  # VT


import_ds = sum((cwe, fwe, niah, qa, vt), [])

# Evaluation config
NUM_SAMPLES = 100  # Change to the number of samples you need
# Change the context lengths to be tested
max_seq_lens = [1024 * 128]
abbr_suffixs = ['128k']

ruler_datasets = []

# Different seq length
for max_seq_len, abbr_suffix in zip(max_seq_lens, abbr_suffixs):
    for dataset in import_ds:
        tmp_dataset = dataset.deepcopy()
        tmp_dataset['abbr'] = tmp_dataset['abbr'] + '_' + abbr_suffix
        tmp_dataset['num_samples'] = NUM_SAMPLES
        tmp_dataset['max_seq_length'] = max_seq_len
        ruler_datasets.append(tmp_dataset)
29
opencompass/configs/datasets/xruler/ruler_16k_gen.py
Normal file
@ -0,0 +1,29 @@

from mmengine.config import read_base

with read_base():
    from .ruler_cwe_gen import cwe_datasets as cwe  # CWE
    from .ruler_fwe_gen import fwe_datasets as fwe  # FWE
    from .ruler_niah_gen import niah_datasets as niah  # Niah
    from .ruler_qa_gen import qa_datasets as qa  # QA
    from .ruler_vt_gen import vt_datasets as vt  # VT


import_ds = sum((cwe, fwe, niah, qa, vt), [])

# Evaluation config
NUM_SAMPLES = 100  # Change to the number of samples you need
# Change the context lengths to be tested
max_seq_lens = [1024 * 16]
abbr_suffixs = ['16k']

ruler_datasets = []

# Different seq length
for max_seq_len, abbr_suffix in zip(max_seq_lens, abbr_suffixs):
    for dataset in import_ds:
        tmp_dataset = dataset.deepcopy()
        tmp_dataset['abbr'] = tmp_dataset['abbr'] + '_' + abbr_suffix
        tmp_dataset['num_samples'] = NUM_SAMPLES
        tmp_dataset['max_seq_length'] = max_seq_len
        ruler_datasets.append(tmp_dataset)
29
opencompass/configs/datasets/xruler/ruler_1m_gen.py
Normal file
@ -0,0 +1,29 @@

from mmengine.config import read_base

with read_base():
    from .ruler_cwe_gen import cwe_datasets as cwe  # CWE
    from .ruler_fwe_gen import fwe_datasets as fwe  # FWE
    from .ruler_niah_gen import niah_datasets as niah  # Niah
    from .ruler_qa_gen import qa_datasets as qa  # QA
    from .ruler_vt_gen import vt_datasets as vt  # VT


import_ds = sum((cwe, fwe, niah, qa, vt), [])

# Evaluation config
NUM_SAMPLES = 100  # Change to the number of samples you need
# Change the context lengths to be tested
max_seq_lens = [1024 * 1024]
abbr_suffixs = ['1m']

ruler_datasets = []

# Different seq length
for max_seq_len, abbr_suffix in zip(max_seq_lens, abbr_suffixs):
    for dataset in import_ds:
        tmp_dataset = dataset.deepcopy()
        tmp_dataset['abbr'] = tmp_dataset['abbr'] + '_' + abbr_suffix
        tmp_dataset['num_samples'] = NUM_SAMPLES
        tmp_dataset['max_seq_length'] = max_seq_len
        ruler_datasets.append(tmp_dataset)
29
opencompass/configs/datasets/xruler/ruler_32k_gen.py
Normal file
@ -0,0 +1,29 @@

from mmengine.config import read_base

with read_base():
    from .ruler_cwe_gen import cwe_datasets as cwe  # CWE
    from .ruler_fwe_gen import fwe_datasets as fwe  # FWE
    from .ruler_niah_gen import niah_datasets as niah  # Niah
    from .ruler_qa_gen import qa_datasets as qa  # QA
    from .ruler_vt_gen import vt_datasets as vt  # VT


import_ds = sum((cwe, fwe, niah, qa, vt), [])

# Evaluation config
NUM_SAMPLES = 100  # Change to the number of samples you need
# Change the context lengths to be tested
max_seq_lens = [1024 * 32]
abbr_suffixs = ['32k']

ruler_datasets = []

# Different seq length
for max_seq_len, abbr_suffix in zip(max_seq_lens, abbr_suffixs):
    for dataset in import_ds:
        tmp_dataset = dataset.deepcopy()
        tmp_dataset['abbr'] = tmp_dataset['abbr'] + '_' + abbr_suffix
        tmp_dataset['num_samples'] = NUM_SAMPLES
        tmp_dataset['max_seq_length'] = max_seq_len
        ruler_datasets.append(tmp_dataset)
28
opencompass/configs/datasets/xruler/ruler_4k_gen.py
Normal file
@ -0,0 +1,28 @@
from mmengine.config import read_base

with read_base():
    from .ruler_cwe_gen import cwe_datasets as cwe  # CWE
    from .ruler_fwe_gen import fwe_datasets as fwe  # FWE
    from .ruler_niah_gen import niah_datasets as niah  # Niah
    from .ruler_qa_gen import qa_datasets as qa  # QA
    from .ruler_vt_gen import vt_datasets as vt  # VT


import_ds = sum((cwe, fwe, niah, qa, vt), [])

# Evaluation config
NUM_SAMPLES = 100  # Change to the number of samples you need
# Change the context lengths to be tested
max_seq_lens = [1024 * 4]
abbr_suffixs = ['4k']

ruler_datasets = []

# Different seq length
for max_seq_len, abbr_suffix in zip(max_seq_lens, abbr_suffixs):
    for dataset in import_ds:
        tmp_dataset = dataset.deepcopy()
        tmp_dataset['abbr'] = tmp_dataset['abbr'] + '_' + abbr_suffix
        tmp_dataset['num_samples'] = NUM_SAMPLES
        tmp_dataset['max_seq_length'] = max_seq_len
        ruler_datasets.append(tmp_dataset)
28
opencompass/configs/datasets/xruler/ruler_64k_gen.py
Normal file
@ -0,0 +1,28 @@
from mmengine.config import read_base

with read_base():
    from .ruler_cwe_gen import cwe_datasets as cwe  # CWE
    from .ruler_fwe_gen import fwe_datasets as fwe  # FWE
    from .ruler_niah_gen import niah_datasets as niah  # Niah
    from .ruler_qa_gen import qa_datasets as qa  # QA
    from .ruler_vt_gen import vt_datasets as vt  # VT


import_ds = sum((cwe, fwe, niah, qa, vt), [])

# Evaluation config
NUM_SAMPLES = 100  # Change to the number of samples you need
# Change the context lengths to be tested
max_seq_lens = [1024 * 64]
abbr_suffixs = ['64k']

ruler_datasets = []

# Different seq length
for max_seq_len, abbr_suffix in zip(max_seq_lens, abbr_suffixs):
    for dataset in import_ds:
        tmp_dataset = dataset.deepcopy()
        tmp_dataset['abbr'] = tmp_dataset['abbr'] + '_' + abbr_suffix
        tmp_dataset['num_samples'] = NUM_SAMPLES
        tmp_dataset['max_seq_length'] = max_seq_len
        ruler_datasets.append(tmp_dataset)
29
opencompass/configs/datasets/xruler/ruler_8k_gen.py
Normal file
@ -0,0 +1,29 @@

from mmengine.config import read_base

with read_base():
    from .ruler_cwe_gen import cwe_datasets as cwe  # CWE
    from .ruler_fwe_gen import fwe_datasets as fwe  # FWE
    from .ruler_niah_gen import niah_datasets as niah  # Niah
    from .ruler_qa_gen import qa_datasets as qa  # QA
    from .ruler_vt_gen import vt_datasets as vt  # VT


import_ds = sum((cwe, fwe, niah, qa, vt), [])

# Evaluation config
NUM_SAMPLES = 100  # Change to the number of samples you need
# Change the context lengths to be tested
max_seq_lens = [1024 * 8]
abbr_suffixs = ['8k']

ruler_datasets = []

# Different seq length
for max_seq_len, abbr_suffix in zip(max_seq_lens, abbr_suffixs):
    for dataset in import_ds:
        tmp_dataset = dataset.deepcopy()
        tmp_dataset['abbr'] = tmp_dataset['abbr'] + '_' + abbr_suffix
        tmp_dataset['num_samples'] = NUM_SAMPLES
        tmp_dataset['max_seq_length'] = max_seq_len
        ruler_datasets.append(tmp_dataset)
12
opencompass/configs/datasets/xruler/ruler_combined_gen.py
Normal file
@ -0,0 +1,12 @@
from mmengine.config import read_base

with read_base():
    from .ruler_1m_gen import ruler_datasets as ruler_1m_ds
    from .ruler_4k_gen import ruler_datasets as ruler_4k_ds
    from .ruler_8k_gen import ruler_datasets as ruler_8k_ds
    from .ruler_16k_gen import ruler_datasets as ruler_16k_ds
    from .ruler_32k_gen import ruler_datasets as ruler_32k_ds
    from .ruler_64k_gen import ruler_datasets as ruler_64k_ds
    from .ruler_128k_gen import ruler_datasets as ruler_128k_ds

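# Gather every imported '*_ds' dataset list from the local namespace into one flat list.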
ruler_combined_datasets = sum((v for k, v in locals().items() if k.endswith('_ds')), [])
34
opencompass/configs/datasets/xruler/ruler_cwe_gen.py
Normal file
@ -0,0 +1,34 @@
from opencompass.datasets.ruler.ruler_cwe import RulerCweDataset
from opencompass.datasets.ruler.ruler_cwe import RulerCweEvaluator
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer

# CWE Dataset
cwe_datasets = [
    {
        'abbr': 'ruler_cwe',
        'type': RulerCweDataset,
        'freq_cw': 30,
        'freq_ucw': 3,
        'num_cw': 10,
        'tokens_to_generate': 120,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=RulerCweEvaluator),
        ),
    }
]
33
opencompass/configs/datasets/xruler/ruler_fwe_gen.py
Normal file
@ -0,0 +1,33 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.ruler.ruler_fwe import RulerFweDataset
from opencompass.datasets.ruler.ruler_fwe import RulerFweEvaluator

# FWE Dataset
fwe_datasets = [
    {
        'abbr': 'ruler_fwe',
        'type': RulerFweDataset,
        'tokens_to_generate': 50,
        'alpha': 2.0,
        'coded_wordlen': 6,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=RulerFweEvaluator),
        ),
    }
]
122
opencompass/configs/datasets/xruler/ruler_niah_gen.py
Normal file
@ -0,0 +1,122 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.xruler.xruler_niah import xRulerNiahDataset
from opencompass.datasets.xruler.xruler_niah import xRulerNiahEvaluator


# Ruler Dataset settings
niah_configurations = [
    {
        'abbr': 'single_1',
        'type_haystack': 'repeat',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'single_2',
        'type_haystack': 'essay',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'single_3',
        'type_haystack': 'essay',
        'type_needle_k': 'words',
        'type_needle_v': 'uuids',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'multikey_1',
        'type_haystack': 'essay',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 4,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'multikey_2',
        'type_haystack': 'needle',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'multikey_3',
        'type_haystack': 'needle',
        'type_needle_k': 'uuids',
        'type_needle_v': 'uuids',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'multivalue',
        'type_haystack': 'essay',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 1,
        'num_needle_v': 4,
        'num_needle_q': 1,
    },
    {
        'abbr': 'multiquery',
        'type_haystack': 'essay',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 4,
    },
]

niah_datasets = []
LANGS = ['en', 'zh']
# NIAH Dataset
base_path = './data/xruler/un_test'
for lang in LANGS:
    file_path = f'un_test.{lang}.jsonl'
    for index, config in enumerate(niah_configurations):
        dataset_dict = {
            'abbr': f'ruler_niah_{config["abbr"]}_{lang}',
            'type': xRulerNiahDataset,
            'base_path': base_path,
            'file_path': file_path,
            # 'tokenizer_model': model_path,
            'tokens_to_generate': 128,
            'type_haystack': config['type_haystack'],
            'type_needle_k': config['type_needle_k'],
            'type_needle_v': config['type_needle_v'],
            'num_needle_k': config['num_needle_k'],
            'num_needle_v': config['num_needle_v'],
            'num_needle_q': config['num_needle_q'],
            'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
            'infer_cfg': dict(
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        round=[
                            dict(role='HUMAN', prompt='{prompt}'),
                            dict(role='BOT', prompt='{answer}\n'),
                        ]
                    ),
                ),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=GenInferencer),
            ),
            'eval_cfg': dict(
                evaluator=dict(type=xRulerNiahEvaluator),
            ),
        }
        niah_datasets.append(dataset_dict)
32
opencompass/configs/datasets/xruler/ruler_vt_gen.py
Normal file
@ -0,0 +1,32 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.ruler.ruler_vt import RulerVtDataset
from opencompass.datasets.ruler.ruler_vt import RulerVtEvaluator

# VT Dataset
vt_datasets = [
    {
        'abbr': 'ruler_vt',
        'type': RulerVtDataset,
        'num_chains': 1,
        'num_hops': 4,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=RulerVtEvaluator),
        ),
    }
]
122
opencompass/configs/datasets/xruler/xruler_niah_gen.py
Normal file
@ -0,0 +1,122 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.xruler.xruler_niah import xRulerNiahDataset
from opencompass.datasets.xruler.xruler_niah import xRulerNiahEvaluator


# Ruler Dataset settings
niah_configurations = [
    {
        'abbr': 'single_1',
        'type_haystack': 'repeat',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'single_2',
        'type_haystack': 'essay',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'single_3',
        'type_haystack': 'essay',
        'type_needle_k': 'words',
        'type_needle_v': 'uuids',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'multikey_1',
        'type_haystack': 'essay',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 4,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'multikey_2',
        'type_haystack': 'needle',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'multikey_3',
        'type_haystack': 'needle',
        'type_needle_k': 'uuids',
        'type_needle_v': 'uuids',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 1,
    },
    {
        'abbr': 'multivalue',
        'type_haystack': 'essay',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 1,
        'num_needle_v': 4,
        'num_needle_q': 1,
    },
    {
        'abbr': 'multiquery',
        'type_haystack': 'essay',
        'type_needle_k': 'words',
        'type_needle_v': 'numbers',
        'num_needle_k': 1,
        'num_needle_v': 1,
        'num_needle_q': 4,
    },
]

niah_datasets = []
LANGS = ['en', 'zh', 'ar', 'bn', 'cs', 'de', 'es', 'fr', 'hu']
# NIAH Dataset
base_path = './data/xruler/un_test'
for lang in LANGS:
    file_path = f'un_test.{lang}.json'
    for index, config in enumerate(niah_configurations):
        dataset_dict = {
            'abbr': f'ruler_niah_{config["abbr"]}_{lang}',
            'type': xRulerNiahDataset,
            'base_path': base_path,
            'file_path': file_path,
            # 'tokenizer_model': model_path,
            'tokens_to_generate': 128,
            'type_haystack': config['type_haystack'],
            'type_needle_k': config['type_needle_k'],
            'type_needle_v': config['type_needle_v'],
            'num_needle_k': config['num_needle_k'],
            'num_needle_v': config['num_needle_v'],
            'num_needle_q': config['num_needle_q'],
            'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
            'infer_cfg': dict(
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        round=[
                            dict(role='HUMAN', prompt='{prompt}'),
                            dict(role='BOT', prompt='{answer}\n'),
                        ]
                    ),
                ),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=GenInferencer),
            ),
            'eval_cfg': dict(
                evaluator=dict(type=xRulerNiahEvaluator),
            ),
        }
        niah_datasets.append(dataset_dict)
40
opencompass/configs/datasets/xruler/xruler_qa_gen.py
Normal file
@ -0,0 +1,40 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.xruler.ruler_qa import xRulerQaDataset
from opencompass.datasets.xruler.ruler_qa import xRulerQaEvaluator

LANGS = ['ar', 'bn', 'cs']
qa_configurations = [
    {'lang': lang, 'dataset': 'squad', 'path': f'./data/xruler/xquad/xquad_{lang}.json'} for lang in LANGS
    # {'dataset': 'hotpotqa', 'path': './data/ruler/hotpotqa.json'},
]

qa_datasets = []
for index, config in enumerate(qa_configurations):
    dataset_dict = {
        'abbr': f'xruler_qa_{config["dataset"]}_{config["lang"]}',
        'dataset': config['dataset'],
        'path': config['path'],
        'lang': config['lang'],
        'type': xRulerQaDataset,
        'tokens_to_generate': 50,
        'max_seq_length': 1024 * 8,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=xRulerQaEvaluator),
        ),
    }
    qa_datasets.append(dataset_dict)
@ -146,3 +146,6 @@ from .xcopa import *  # noqa: F401, F403
from .xiezhi import XiezhiDataset, XiezhiRetriever  # noqa: F401, F403
from .xlsum import *  # noqa: F401, F403
from .xsum import *  # noqa: F401, F403
from .xIFEval.ifeval import xIFEvalDataset, xIFEvaluator  # noqa: F401, F403
from .xlivecodebench import xLCBCodeGenerationDataset, xLCBCodeGenerationEvaluator
from .xmgsm import *  # noqa: F401, F403
0
opencompass/datasets/xIFEval/__init__.py
Normal file
256
opencompass/datasets/xIFEval/evaluation_main.py
Normal file
@ -0,0 +1,256 @@
# flake8: noqa
# yapf: disable

# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
from typing import Dict, List, Optional, Union

import opencompass.datasets.xIFEval.instructions_registry as instructions_registry

@dataclasses.dataclass
class InputExample:
    key: int
    instruction_id_list: List[str]
    prompt: str
    kwargs: List[Dict[str, Optional[Union[str, int]]]]
    lang: str


@dataclasses.dataclass
class OutputExample:
    instruction_id_list: List[str]
    prompt: str
    response: str
    follow_all_instructions: bool
    follow_instruction_list: List[bool]

def test_instruction_following_strict(
    inp,
    response,
):
    """Tests response to see if instructions are followed."""
    instruction_list = inp.instruction_id_list
    is_following_list = []

    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id, inp.lang)

        # Remove None values from kwargs to avoid unexpected keyword
        # argument errors in the build_description method.
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            instruction.build_description(prompt=inp.prompt)

        if response.strip() and instruction.check_following(response):
            is_following_list.append(True)
        else:
            is_following_list.append(False)

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )

def test_instruction_following_loose(
    inp,
    response,
):
    """Tests response for an upper bound for following instructions."""
    r = response.split("\n")
    response_remove_first = "\n".join(r[1:]).strip()
    response_remove_last = "\n".join(r[:-1]).strip()
    response_remove_both = "\n".join(r[1:-1]).strip()
    revised_response = response.replace("*", "")
    revised_response_remove_first = response_remove_first.replace("*", "")
    revised_response_remove_last = response_remove_last.replace("*", "")
    revised_response_remove_both = response_remove_both.replace("*", "")
    all_responses = [
        response,
        revised_response,
        response_remove_first,
        response_remove_last,
        response_remove_both,
        revised_response_remove_first,
        revised_response_remove_last,
        revised_response_remove_both,
    ]
    instruction_list = inp.instruction_id_list
    is_following_list = []

    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id, inp.lang)

        # Remove None values from kwargs to avoid unexpected keyword
        # argument errors in the build_description method.
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            instruction.build_description(prompt=inp.prompt)

        is_following = False
        for r in all_responses:
            if r.strip() and instruction.check_following(r):
                is_following = True
                break

        is_following_list.append(is_following)

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )

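# Convert a single dataset doc and its model response into strict/loose metrics.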
def process_results(doc, results):
    inp = InputExample(
        key=doc["key"],
        instruction_id_list=doc["instruction_id_list"],
        prompt=doc["prompt"],
        kwargs=doc["kwargs"],
        lang=doc.get("lang", "en"),
    )
    response = results[0]

    out_strict = test_instruction_following_strict(inp, response)
    out_loose = test_instruction_following_loose(inp, response)

    return {
        "prompt_level_strict_acc": out_strict.follow_all_instructions,
        "inst_level_strict_acc": out_strict.follow_instruction_list,
        "prompt_level_loose_acc": out_loose.follow_all_instructions,
        "inst_level_loose_acc": out_loose.follow_instruction_list,
    }

def agg_inst_level_acc(items):
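    # Flatten the per-prompt lists of booleans and average over all instructions.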
    flat_items = [item for sublist in items for item in sublist]
    inst_level_acc = sum(flat_items) / len(flat_items)
    return inst_level_acc
99
opencompass/datasets/xIFEval/ifeval.py
Normal file
@ -0,0 +1,99 @@
import json

from datasets import Dataset

from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import LOAD_DATASET
from opencompass.utils import get_data_path

from ..base import BaseDataset
from .evaluation_main import (InputExample, test_instruction_following_loose,
                              test_instruction_following_strict)


@LOAD_DATASET.register_module()
class xIFEvalDataset(BaseDataset):

    @staticmethod
    def load(path):
        path = get_data_path(path, local_mode=True)
        datasets = []
        with open(path, 'r', encoding='utf-8') as file:
            for line in file:
                tmp = json.loads(line.strip())
                dataset = dict(prompt=tmp['prompt'], reference=tmp)
                datasets.append(dataset)
        return Dataset.from_list(datasets)


class xIFEvaluator(BaseEvaluator):

    def score(self, predictions, references, origin_prompt):
        prompt_strict_correct, prompt_strict_total = 0, 0
        inst_strict_correct, inst_strict_total = 0, 0
        prompt_loose_correct, prompt_loose_total = 0, 0
        inst_loose_correct, inst_loose_total = 0, 0
        details = {}
        for index, (pred, refer) in enumerate(zip(predictions, references)):
            input = InputExample(
                key=refer['key'],
                instruction_id_list=refer['instruction_id_list'],
                prompt=refer['prompt'],
                kwargs=refer['kwargs'],
                lang=refer['lang'],
            )
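            # Drop None-valued kwargs so build_description does not receive
            # unexpected keyword arguments.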
            for kwarg in input.kwargs:
                for k in list(kwarg.keys()):
                    if kwarg[k] is None:
                        kwarg.pop(k, None)

            # strict
            example = test_instruction_following_strict(input, pred)
            follow_instruction_list = example.follow_instruction_list
            instruction_id_list = example.instruction_id_list
            prompt_strict_total += 1
            is_strict_correct = all(follow_instruction_list)
            prompt_strict_correct += is_strict_correct
            inst_strict_total += len(instruction_id_list)
            inst_strict_correct += sum(follow_instruction_list)

            # loose
            example = test_instruction_following_loose(input, pred)
            follow_instruction_list = example.follow_instruction_list
            instruction_id_list = example.instruction_id_list
            prompt_loose_total += 1
            is_loose_correct = all(follow_instruction_list)
            prompt_loose_correct += is_loose_correct
            inst_loose_total += len(instruction_id_list)
            inst_loose_correct += sum(follow_instruction_list)

            if is_strict_correct:
                grade = 'strict'
            elif is_loose_correct:
                grade = 'loose'
            else:
                grade = 'none'

            details[str(index)] = {
                'prompt': origin_prompt[index],
                'pred': pred,
                'refer': refer,
                'is_strict_correct': is_strict_correct,
                'is_loose_correct': is_loose_correct,
                'is_correct': is_strict_correct,
                'grade': grade,
            }

        results = {
            'Prompt-level-strict-accuracy':
            prompt_strict_correct / prompt_strict_total * 100,
            'Inst-level-strict-accuracy':
            inst_strict_correct / inst_strict_total * 100,
            'Prompt-level-loose-accuracy':
            prompt_loose_correct / prompt_loose_total * 100,
            'Inst-level-loose-accuracy':
            inst_loose_correct / inst_loose_total * 100,
            'details':
            details,
        }
        return results
1727
opencompass/datasets/xIFEval/instructions.py
Normal file
File diff suppressed because it is too large
167
opencompass/datasets/xIFEval/instructions_registry.py
Normal file
@ -0,0 +1,167 @@
|
||||
# Copyright 2023 The Google Research Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Registry of all instructions."""
|
||||
|
||||
from opencompass.datasets.xIFEval import instructions
|
||||
|
||||
_KEYWORD = "keywords:"
|
||||
|
||||
_LANGUAGE = "language:"
|
||||
|
||||
_LENGTH = "length_constraints:"
|
||||
|
||||
_CONTENT = "detectable_content:"
|
||||
|
||||
_FORMAT = "detectable_format:"
|
||||
|
||||
_MULTITURN = "multi-turn:"
|
||||
|
||||
_COMBINATION = "combination:"
|
||||
|
||||
_STARTEND = "startend:"
|
||||
|
||||
_CHANGE_CASES = "change_case:"
|
||||
|
||||
_PUNCTUATION = "punctuation:"
|
||||
|
||||
INSTRUCTION_DICT = {
|
||||
_KEYWORD + "existence": instructions.KeywordChecker,
|
||||
_KEYWORD + "frequency": instructions.KeywordFrequencyChecker,
|
||||
# TODO(jeffreyzhou): make a proper set of sentences to choose from
|
||||
# _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
|
||||
_KEYWORD + "forbidden_words": instructions.ForbiddenWords,
|
||||
_KEYWORD + "letter_frequency": instructions.LetterFrequencyChecker,
|
||||
_LANGUAGE + "response_language": instructions.ResponseLanguageChecker,
|
||||
_LENGTH + "number_sentences": instructions.NumberOfSentences,
|
||||
_LENGTH + "number_paragraphs": instructions.ParagraphChecker,
|
||||
_LENGTH + "number_words": instructions.NumberOfWords,
|
||||
_LENGTH + "nth_paragraph_first_word": instructions.ParagraphFirstWordCheck,
|
||||
_CONTENT + "number_placeholders": instructions.PlaceholderChecker,
|
||||
_CONTENT + "postscript": instructions.PostscriptChecker,
|
||||
_FORMAT + "number_bullet_lists": instructions.BulletListChecker,
|
||||
# TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
|
||||
# _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
|
||||
_FORMAT + "constrained_response": instructions.ConstrainedResponseChecker,
|
||||
_FORMAT + "number_highlighted_sections": (instructions.HighlightSectionChecker),
|
||||
_FORMAT + "multiple_sections": instructions.SectionChecker,
|
||||
# TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
|
||||
# _FORMAT + "rephrase": instructions.RephraseChecker,
|
||||
_FORMAT + "json_format": instructions.JsonFormat,
|
||||
_FORMAT + "title": instructions.TitleChecker,
|
||||
# TODO(tianjianlu): Re-enable with specific prompts.
|
||||
# _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": instructions.TwoResponsesChecker,
    _COMBINATION + "repeat_prompt": instructions.RepeatPromptThenAnswer,
    _STARTEND + "end_checker": instructions.EndChecker,
    _CHANGE_CASES + "capital_word_frequency": instructions.CapitalWordFrequencyChecker,
    _CHANGE_CASES + "english_capital": instructions.CapitalLettersEnglishChecker,
    _CHANGE_CASES + "english_lowercase": instructions.LowercaseLettersEnglishChecker,
    _PUNCTUATION + "no_comma": instructions.CommaChecker,
    _STARTEND + "quotation": instructions.QuotationChecker,
}


INSTRUCTION_CONFLICTS = {
    _KEYWORD + "existence": {_KEYWORD + "existence"},
    _KEYWORD + "frequency": {_KEYWORD + "frequency"},
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": {_KEYWORD + "forbidden_words"},
    _KEYWORD + "letter_frequency": {_KEYWORD + "letter_frequency"},
    _LANGUAGE + "response_language": {
        _LANGUAGE + "response_language",
        _FORMAT + "multiple_sections",
        _KEYWORD + "existence",
        _KEYWORD + "frequency",
        _KEYWORD + "forbidden_words",
        _STARTEND + "end_checker",
        _CHANGE_CASES + "english_capital",
        _CHANGE_CASES + "english_lowercase",
    },
    _LENGTH + "number_sentences": {_LENGTH + "number_sentences"},
    _LENGTH + "number_paragraphs": {
        _LENGTH + "number_paragraphs",
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_sentences",
    },
    _LENGTH + "number_words": {_LENGTH + "number_words"},
    _LENGTH + "nth_paragraph_first_word": {
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_paragraphs",
    },
    _CONTENT + "number_placeholders": {_CONTENT + "number_placeholders"},
    _CONTENT + "postscript": {_CONTENT + "postscript"},
    _FORMAT + "number_bullet_lists": {_FORMAT + "number_bullet_lists"},
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    _FORMAT + "constrained_response": set(INSTRUCTION_DICT.keys()),
    _FORMAT + "number_highlighted_sections": {_FORMAT + "number_highlighted_sections"},
    _FORMAT + "multiple_sections": {
        _FORMAT + "multiple_sections",
        _LANGUAGE + "response_language",
        _FORMAT + "number_highlighted_sections",
    },
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "forbidden_words", _KEYWORD + "existence"}
    ),
    _FORMAT + "title": {_FORMAT + "title"},
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": set(INSTRUCTION_DICT.keys()).difference(
        {
            _KEYWORD + "forbidden_words",
            _KEYWORD + "existence",
            _LANGUAGE + "response_language",
            _FORMAT + "title",
            _PUNCTUATION + "no_comma",
        }
    ),
    _COMBINATION + "repeat_prompt": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "existence", _FORMAT + "title", _PUNCTUATION + "no_comma"}
    ),
    _STARTEND + "end_checker": {_STARTEND + "end_checker"},
    _CHANGE_CASES + "capital_word_frequency": {
        _CHANGE_CASES + "capital_word_frequency",
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _CHANGE_CASES + "english_capital": {_CHANGE_CASES + "english_capital"},
    _CHANGE_CASES + "english_lowercase": {
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _PUNCTUATION + "no_comma": {_PUNCTUATION + "no_comma"},
    _STARTEND + "quotation": {_STARTEND + "quotation", _FORMAT + "title"},
}


def conflict_make(conflicts):
    """Makes sure if A conflicts with B, B will conflict with A.

    Args:
      conflicts: Dictionary of potential conflicts where key is instruction id
        and value is set of instruction ids that it conflicts with.

    Returns:
      Revised version of the dictionary. All instructions conflict with
      themselves. If A conflicts with B, B will conflict with A.
    """
    for key in conflicts:
        for k in conflicts[key]:
            conflicts[k].add(key)
        conflicts[key].add(key)
    return conflicts
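

# Illustrative sketch (editor's addition, not part of the original file):
# conflict_make makes the conflict relation reflexive and symmetric.
def _demo_conflict_make():  # hypothetical helper, never called at import
    table = {'a': {'b'}, 'b': set()}
    table = conflict_make(table)
    assert table == {'a': {'a', 'b'}, 'b': {'a', 'b'}}
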
1737  opencompass/datasets/xIFEval/instructions_util.py  Normal file
File diff suppressed because it is too large.
9  opencompass/datasets/xlivecodebench/__init__.py  Normal file
@@ -0,0 +1,9 @@
from .evaluator import LCBCodeExecutionEvaluator  # noqa: F401, F403
from .evaluator import xLCBCodeGenerationEvaluator  # noqa: F401, F403
from .evaluator import LCBTestOutputEvaluator  # noqa: F401, F403
from .livecodebench import CompassBenchCodeExecutionDataset  # noqa: F401, F403
from .livecodebench import LCBCodeExecutionDataset  # noqa: F401, F403
from .livecodebench import xLCBCodeGenerationDataset  # noqa: F401, F403
from .livecodebench import LCBTestOutputPredictionDataset  # noqa: F401, F403
from .prompts import TestOutputPromptConstants  # noqa: F401, F403
# from .xlivecodebench import xLCBCodeGenerationDataset  # noqa: F401, F403
456  opencompass/datasets/xlivecodebench/evaluator.py  Normal file
@@ -0,0 +1,456 @@
import ast
import json
import multiprocessing
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor, as_completed

import numpy as np
from tqdm import tqdm

from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import ICL_EVALUATORS
from opencompass.utils import get_logger

from .execute_utils import BASE_IMPORTS, codeexecute_check_correctness
from .extract_utils import (extract_code_execution, extract_code_generation,
                            extract_test_output_code)
from .livecodebench import xLCBCodeGenerationDataset
from .pass_k_utils import compute_metrics_from_results


def codegen_check_correctness(sample, generation, timeout, debug=True):
    """Check correctness of code generation with a global timeout.

    The global timeout is to catch some extreme/rare cases not handled by the
    timeouts inside `run_test`.
    """

    def _temp_run(sample, generation, debug, result, metadata_list, timeout):
        from .testing_util import run_test
        res, metadata = run_test(sample,
                                 test=generation,
                                 debug=debug,
                                 timeout=timeout)
        result.append(res)
        metadata_list.append(metadata)

    manager = multiprocessing.Manager()
    result = manager.list()
    metadata_list = manager.list()
    p = multiprocessing.Process(
        target=_temp_run,
        args=(sample, generation, debug, result, metadata_list, timeout),
    )
    p.start()
    p.join(timeout=(timeout + 1) *
           len(json.loads(sample['input_output'])['inputs']) + 5)
    if p.is_alive():
        p.kill()
    if not result:
        in_outs = json.loads(sample['input_output'])
        # consider that all tests failed
        result = [[-1 for i in range(len(in_outs['inputs']))]]
        if debug:
            logger = get_logger()
            logger.info('global timeout')
    return result[0], metadata_list[0]


def evaluate_generations_by_problem(problem_generations: list, sample: list,
                                    debug: bool, timeout: int):
    """Evaluate each problem.

    Args:
        problem_generations: list of candidate programs for one problem.
        sample: the problem record, including its serialized test cases.
        debug: whether to log per-generation results.
        timeout: per-test-case timeout in seconds.
    """
    logger = get_logger()

    res = []
    metadata = []
    for o_idx, o in enumerate(problem_generations):
        curr_res = [-2]
        try:
            curr_res, curr_metadata = codegen_check_correctness(
                sample, o, timeout=timeout, debug=debug)
            if debug:
                logger.info(f'\nSuccessful compilation of task {o_idx}!')
            fixed = []
            for e in curr_res:
                if isinstance(e, np.ndarray):
                    e = e.item(0)
                if isinstance(e, np.bool_):
                    e = bool(e)
                fixed.append(e)
            curr_res = fixed
            if not np.all(curr_res):
                if debug:
                    logger.info(
                        f'Results were not True for all test cases'  # noqa: F541, E501
                        f' {curr_res=}\n')
        except Exception as e:
            if debug:
                logger.info(
                    f'Compilation failed, test framework exception'  # noqa: F541, E501
                    f' = {repr(e)}{e}\n')
            curr_metadata = {}
        finally:
            assert isinstance(curr_res, list)
            assert isinstance(curr_metadata, dict)
            res.append(curr_res)
            metadata.append(curr_metadata)
    if debug:
        for i, r in enumerate(problem_generations):
            logger.info(f'Sample\n{r}\nResult\n{res[i]}')
            logger.info('*' * 30 + '\n\n')
    return res, metadata


def evaluate_generations(
    samples_list: list,
    generations_list: list[list[str]],
    debug: bool = False,
    num_process_evaluate: int = 16,
    timeout=6,
):
    """We take the list of code generations, try to compile them, and then run
    their corresponding unit tests, which are retrieved from the APPS dataset.

    Args:
        generations: list of code generations (same order as samples in the
            APPS dataset)
        level: difficulty level used in the generation, can be "all",
            "introductory", "interview" or "competition"

    Returns:
        results: dictionary of results; key is the problem index, value is
            a list of results for each generation.
            [-2] = compile error, [-1] = runtime error, [False] = failed test
            case, [True] = passed test case
    """

    # generations are code generations in the same order as the dataset

    inputs = [[(generations_list[index], samples_list[index], debug, timeout),
               index] for index in range(len(generations_list))]

    with tqdm(total=len(inputs)) as pbar:
        with ProcessPoolExecutor(
                max_workers=1 if debug else num_process_evaluate) as executor:
            futures = {
                executor.submit(evaluate_generations_by_problem,
                                problem_generations, sample, debug, timeout):
                index
                for (problem_generations, sample, debug,
                     timeout), index in inputs
            }

            results = {}
            metadata = {}
            for future in as_completed(futures):
                index = futures[future]
                results[index], metadata[index] = future.result()
                pbar.update(1)

    assert len(results) == len(
        inputs), f'results = {len(results)} inputs = {len(inputs)} {results=}'

    return results, metadata


def codegen_metrics(
    samples_list,
    generations_list,
    k_list=[1, 5, 10, 20, 40, 50, 75, 100, 125, 150, 200, 500, 1000],
    num_process_evaluate=16,
    timeout=6,
    debug=False,
):
    logger = get_logger()

    samples_linear = []
    generations_linear = []
    remap_index = []
    results = defaultdict(list)
    metadatas = defaultdict(list)
    for idx, (sample,
              generation_list) in enumerate(zip(samples_list,
                                                generations_list)):
        assert isinstance(generation_list, list), generations_list[0]
        for generation in generation_list:
            assert isinstance(generation, str), generations_list[0]
            samples_linear.append(sample)
            generations_linear.append([generation])
            remap_index.append(idx)

    logger.info(f'LCBCodeGeneration: Evaluating {len(samples_linear)}...')

    results_linear, metadatas_linear = evaluate_generations(
        samples_linear,
        generations_linear,
        debug=debug,
        num_process_evaluate=num_process_evaluate,
        timeout=timeout,
    )

    for idx, sub_results in sorted(results_linear.items(),
                                   key=lambda x: x[0]):
        results[remap_index[idx]].append(sub_results[0])

    for idx, sub_metadatas in sorted(metadatas_linear.items(),
                                     key=lambda x: x[0]):
        metadatas[remap_index[idx]].append(sub_metadatas[0])

    metrics = compute_metrics_from_results(results, k_list=k_list)

    final_metadata = []
    for key in sorted(list(metadatas.keys())):
        final_metadata.append(metadatas[key])
    for i in range(len(final_metadata)):
        if type(final_metadata[i]) is not list:
            final_metadata[i] = [json.dumps(final_metadata[i])]
        else:
            final_metadata[i] = [json.dumps(x) for x in final_metadata[i]]

        assert len(final_metadata[i]) == len(
            generations_list[0]), f'{len(final_metadata[i])=}'

    return [metrics, results, final_metadata]
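

# Usage sketch (editor's addition, not part of the original file; the values
# below are hypothetical). Each reference carries serialized tests under
# 'input_output'; each problem supplies a list of candidate programs.
def _demo_codegen_metrics():  # hypothetical helper, never called at import
    sample = {'input_output': json.dumps(
        {'inputs': ['3\n'], 'outputs': ['6\n'], 'fn_name': None})}
    metrics, results, meta = codegen_metrics(
        [sample], [['print(int(input()) * 2)']], k_list=[1])
    return metrics  # e.g. {'pass@1': ..., 'detail': {...}}, in percent

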
@ICL_EVALUATORS.register_module()
class xLCBCodeGenerationEvaluator(BaseEvaluator):

    def __init__(self,
                 num_process_evaluate,
                 path,
                 timeout=6,
                 release_version='release_v1'):
        super().__init__()
        self.num_process_evaluate = num_process_evaluate
        self.timeout = timeout
        self.dataset = xLCBCodeGenerationDataset.load(
            path=path, release_version=release_version,
            local_mode=True)['test']

    def score(self, predictions, references):
        predictions = [[extract_code_generation(item)]
                       for item in predictions]

        evaluation_samples = dict()
        for idx in range(len(self.dataset)):
            evaluation_samples[self.dataset[idx][
                'question_id']] = self.dataset[idx]['evaluation_sample']

        references = [evaluation_samples[item] for item in references]

        references = [{'input_output': item} for item in references]

        BaseEvaluator.is_num_equal(predictions, references)

        metrics, eval_results, final_metadata = codegen_metrics(
            references,
            predictions,
            k_list=[1],
            num_process_evaluate=self.num_process_evaluate,
            timeout=self.timeout,
        )

        return metrics


def evaluate_score(args) -> list[bool]:
    gs, (c, i, o) = args

    execution_results = []
    for g in gs:
        if i in g:
            # skip generations that merely echo the raw input
            pass
        else:
            code_to_execute = f'{BASE_IMPORTS}\n{c}\nassert {o} == {g}'
            execution_results.append(
                codeexecute_check_correctness(code_to_execute, 3))
    if len(execution_results) == 0:
        execution_results = [False] * len(gs)
    return execution_results
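

# Illustrative sketch (editor's addition, not part of the original file):
# for a sample with code c, input call i and ground-truth output literal o,
# each predicted literal g is checked by executing an assertion in a
# sandboxed subprocess. Hypothetical values:
def _demo_evaluate_score():  # hypothetical helper, never called at import
    gs = ['5']  # one predicted output literal
    reference = ('def f(x):\n    return x + 1', 'f(4)', '5')
    # builds f'{BASE_IMPORTS}\n{code}\nassert 5 == 5' and executes it
    return evaluate_score((gs, reference))  # expected: [True]

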
def code_execution_metrics(
    samples,
    generations,
):

    def pass_at_k(n, c, k):
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    # execute the code
    references = [(doc['code'], doc['input'], doc['output'])
                  for doc in samples]
    with ProcessPoolExecutor() as executor:
        args_list = zip(generations, references)
        results = executor.map(evaluate_score, args_list)
    all_results = list(results)

    # serial version
    # all_results = []
    # for i in range(len(generations)):
    #     generation = generations[i]
    #     result = evaluate_score([generation, references[i]])
    #     all_results.append(result)

    # compute pass@1
    pass_at_1s = []
    for execution_result in all_results:
        c, n = execution_result.count(True), len(execution_result)
        pass_at_1s.append(pass_at_k(n, c, 1))
    metrics = {'pass@1': sum(pass_at_1s) / len(pass_at_1s) * 100}

    results = {}
    for i, r in enumerate(all_results):
        r_new = []
        for _r in r:
            r_new.append([_r])
        results[i] = r_new
    return [metrics, results]
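

# Worked example (editor's addition, not part of the original file): the
# inner pass_at_k above is the unbiased estimator 1 - C(n-c, k)/C(n, k).
# For n=2 candidate outputs with c=1 correct, pass@1 = 1 - C(1,1)/C(2,1)
# = 0.5; code_execution_metrics then averages these and scales by 100.

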
@ICL_EVALUATORS.register_module()
class LCBCodeExecutionEvaluator(BaseEvaluator):

    def __init__(self):
        super().__init__()

    def score(self, predictions, references):
        predictions = [[extract_code_execution(item)] for item in predictions]
        references = [json.loads(item) for item in references]
        metrics, results = code_execution_metrics(references, predictions)
        return metrics


def parse_assert_statement(statement):
    """Parse a Python assert statement and extract the expected output from
    the right side of the '==' operator as a string.

    :param statement: A string containing the assert statement.
    :return: The expected output from the assert statement as a string.
    """
    try:
        parsed = ast.parse(statement, mode='exec')
    except SyntaxError:
        return 'Invalid syntax'

    if len(parsed.body) == 0:
        return 'Empty statement'

    if not isinstance(parsed.body[0], ast.Assert):
        return 'Not an assert statement'

    comparison = parsed.body[0].test

    if not isinstance(comparison, ast.Compare) or not isinstance(
            comparison.ops[0], ast.Eq):
        return 'Not an equality assertion'

    # Extract and return the right side of the '==' operator as a string
    return ast.get_source_segment(statement, comparison.comparators[0])
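

# Illustrative sketch (editor's addition, not part of the original file);
# ast.get_source_segment requires Python >= 3.8.
def _demo_parse_assert_statement():  # hypothetical, never called at import
    assert parse_assert_statement('assert f(1) == [2, 3]') == '[2, 3]'
    assert parse_assert_statement('x = 1') == 'Not an assert statement'

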
def check_testcase_output(testcase_str, expected_output):

    if len(testcase_str.splitlines()) > 1:
        for line in testcase_str.splitlines():
            if line.startswith('#'):
                continue
            if 'assert' in line:
                testcase_str = line
                break

    testcase_str = testcase_str.strip()

    if 'assert' in testcase_str:
        testcase_output_str = str(parse_assert_statement(testcase_str))
    else:
        testcase_output_str = testcase_str

    global_result = None

    try:
        testcase_output_eval = eval(testcase_output_str)
    except Exception as e:
        print(e)
        global_result = False
        # print("Failed to eval testcase output", testcase_output_str)
        # breakpoint()

    try:
        expected_output_eval = json.loads(expected_output)
    except Exception as e:
        print(e)
        global_result = False
        print('Failed to eval expected testcase output', expected_output)

    if global_result is None:
        global_result = testcase_output_eval == expected_output_eval

    return global_result


def test_output_metrics(
    samples,
    generations,
    k_list=[1, 5],
):
    num_samples = len(samples)
    results = []
    for idx in tqdm(list(range(num_samples))):
        idx_results = []
        sample = samples[idx]
        extracted_generation_list = generations[idx]
        for extracted_generation in extracted_generation_list:
            global_result = check_testcase_output(extracted_generation,
                                                  sample['output'])
            idx_results.append([global_result])
        results.append(idx_results)

    results = {
        result_idx: results[result_idx]
        for result_idx in range(len(results))
    }

    metrics = compute_metrics_from_results(results, k_list=k_list)

    return [metrics, results]


@ICL_EVALUATORS.register_module()
class LCBTestOutputEvaluator(BaseEvaluator):

    def __init__(self):
        super().__init__()

    def score(self, predictions, references):

        predictions = [[extract_test_output_code(item)]
                       for item in predictions]
        references = [json.loads(item) for item in references]
        metrics, results = test_output_metrics(references,
                                               predictions,
                                               k_list=[1])
        return metrics
271  opencompass/datasets/xlivecodebench/execute_utils.py  Normal file
@@ -0,0 +1,271 @@
# Copyright 2020 The HuggingFace Datasets Authors and the
# current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile

BASE_IMPORTS = """from itertools import accumulate, chain, combinations, count, permutations, product, groupby, islice, repeat
from copy import deepcopy
from string import ascii_lowercase
from math import floor, log2, log10, sqrt, comb, gcd, ceil, inf, isqrt
from collections import defaultdict, deque, Counter
from bisect import bisect, bisect_left, bisect_right, insort
from heapq import heappush, heappop, heapify, merge
from functools import reduce, cache, lru_cache
from random import randrange, shuffle
from operator import itemgetter, sub
from re import search as re_search  # Assuming 're' refers to a regex search
from os.path import commonprefix
from typing import List, Tuple, Dict, Set, Optional, Union, Any, Callable, Iterable, Iterator, Generator
import copy
import string
import math
import collections
import bisect
import heapq
import functools
import random
import itertools
import operator
import re
import numpy as np
import pandas as pd
from math import log, prod  # 'log' and 'prod' are functions in the math module
from collections import deque, defaultdict, Counter, OrderedDict
from itertools import accumulate, permutations, combinations, product, groupby, islice, chain, repeat, zip_longest, cycle
from functools import lru_cache, reduce, partial
# from sortedcontainers import SortedList, SortedDict, SortedSet
# import sortedcontainers
from operator import iand
import sys
"""  # noqa: E501


def codeexecute_check_correctness(check_program, timeout=3):
    """Evaluate functional correctness by executing ``check_program`` in a
    sandboxed subprocess and checking that it finishes without error within
    ``timeout`` seconds.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute,
                                args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append('timed out')

    return result[0] == 'passed'
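

# Illustrative sketch (editor's addition, not part of the original file):
def _demo_codeexecute_check_correctness():  # hypothetical, never called
    assert codeexecute_check_correctness('assert 1 + 1 == 2', timeout=3)
    assert not codeexecute_check_correctness('assert 1 + 1 == 3', timeout=3)
    # infinite loops come back as a timeout, i.e. False
    assert not codeexecute_check_correctness('while True: pass', timeout=3)

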
def unsafe_execute(check_program, result, timeout):

    with create_tempdir():

        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes
        # to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append('passed')
        except TimeoutException:
            result.append('timed out')
        except BaseException as e:
            result.append(f'failed: {e}')

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):

    def signal_handler(signum, frame):
        raise TimeoutException('Timed out!')

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
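

# Usage sketch (editor's addition, not part of the original file): SIGALRM
# based limits only work on the main thread of a Unix process.
def _demo_time_limit():  # hypothetical helper, never called at import
    try:
        with time_limit(0.1):
            while True:
                pass
    except TimeoutException:
        return 'timed out'

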
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = 'stdin'


@contextlib.contextmanager
def chdir(root):
    if root == '.':
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """This disables various destructive functions and prevents the generated
    code from interfering with the test (e.g. fork bomb, killing other
    processes, removing filesystem files, etc.)

    WARNING This function is NOT a security sandbox. Untrusted code, including
    model-generated code, should not be blindly executed outside of one. See
    the Codex paper for more information about OpenAI's code sandbox, and
    proceed with caution.
    """

    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS,
                           (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA,
                           (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == 'Darwin':
            resource.setrlimit(resource.RLIMIT_STACK,
                               (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ['OMP_NUM_THREADS'] = '1'

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__['help'] = None

    import sys

    sys.modules['ipdb'] = None
    sys.modules['joblib'] = None
    sys.modules['resource'] = None
    sys.modules['psutil'] = None
    sys.modules['tkinter'] = None
75  opencompass/datasets/xlivecodebench/extract_utils.py  Normal file
@@ -0,0 +1,75 @@
# Copyright LiveCodeBench @ 2024,

import re


def extract_code_generation(model_output: str, model_type: str = 'chat'):
    # modified from
    outputlines = model_output.split('\n')
    # TODO: handle codellama

    if model_type == 'base':
        return model_output.strip()
    elif model_type == 'chat':
        indexlines = [i for i, line in enumerate(outputlines) if '```' in line]
    else:
        raise ValueError(f'Invalid model type: {model_type}')

    if len(indexlines) < 2:
        return ''
    return '\n'.join(outputlines[indexlines[0] + 1:indexlines[1]])
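

# Illustrative sketch (editor's addition, not part of the original file):
# in chat mode the first fenced block is returned without its fences.
def _demo_extract_code_generation():  # hypothetical, never called at import
    out = 'Sure, here you go:\n```python\nprint(1)\n```\nHope this helps.'
    assert extract_code_generation(out) == 'print(1)'

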
def extract_code_execution(model_output: str, cot: bool = False):
    pattern = r'\[PYTHON\](.*?)\[\/PYTHON\]'
    matches = re.findall(pattern, model_output, re.DOTALL)
    if matches:
        # fetch the last one
        model_output = matches[-1]

    # NOTE: the original file had a dangling no-op `if '[PYTHON]' in
    # model_output:` branch here; the regex above already strips the tags.
    if cot:
        if '[ANSWER]' in model_output:
            model_output = model_output.split('[ANSWER]')[1].strip()
    if '==' in model_output:
        model_output = model_output.split('==')[1].strip()
    if '[/ANSWER]' in model_output:
        model_output = model_output.split('[/ANSWER]')[0].strip()
    else:
        model_output = model_output.split('\n')[0].strip()
    return model_output.strip()
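

# Illustrative sketch (editor's addition, not part of the original file):
# the predicted output literal is pulled out of [PYTHON]/[ANSWER] tags.
def _demo_extract_code_execution():  # hypothetical, never called at import
    out = '[ANSWER]assert f(2) == 4[/ANSWER]'
    assert extract_code_execution(out, cot=True) == '4'

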
def extract_test_output_code(model_output: str):
    outputlines = model_output.split('\n')
    # find the last line starting with 'assert'
    indexlines = [
        i for i, line in enumerate(outputlines) if line.startswith('assert')
    ]
    if indexlines:
        return outputlines[indexlines[-1]]

    # TODO: handle codellama format
    # if lmstyle and lmstyle == LMStyle.CodeLLaMaInstruct:
    #     indexlines = \
    #         [i for i, line in enumerate(outputlines) if "PYTHON]" in line]
    # else:

    # first try to extract ```python; if that fails, try ```
    indexlines = [
        i for i, line in enumerate(outputlines)
        if '```python' in line or '```Python' in line
    ]
    if indexlines:
        start_index = indexlines[0]
    else:
        start_index = None
    indexlines = [i for i, line in enumerate(outputlines) if '```' in line]
    if start_index is not None:
        indexlines = [i for i in indexlines if i > start_index]
        indexlines = [start_index] + indexlines

    if len(indexlines) < 2:
        return ''
    return '\n'.join(outputlines[indexlines[0] + 1:indexlines[1]])
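

# Illustrative sketch (editor's addition, not part of the original file):
def _demo_extract_test_output_code():  # hypothetical, never called at import
    out = 'Reasoning first.\nassert f(1) == 2'
    assert extract_test_output_code(out) == 'assert f(1) == 2'
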
262  opencompass/datasets/xlivecodebench/livecodebench.py  Normal file
@@ -0,0 +1,262 @@
# Copyright (c) 2024, LiveCodeBench and its contributors.
# Copyright (c) 2023, OpenCompass and its contributors.

import base64
import json
import pickle
import zlib
from dataclasses import dataclass
from enum import Enum

from datasets import Dataset, DatasetDict, load_dataset, load_from_disk

from opencompass.utils import get_data_path  # noqa: F401, F403

from ..base import BaseDataset
from .prompts import SelfRepairPromptConstants  # noqa: F401, F403
from .prompts import TestOutputPromptConstants  # noqa: F401, F403
from .prompts import (CodeGenerationPromptConstants,
                      get_generic_question_template_answer_self_repair,
                      get_generic_question_template_test_completion,
                      make_code_execution_prompt)


class Platform(Enum):
    LEETCODE = 'leetcode'
    CODEFORCES = 'codeforces'
    ATCODER = 'atcoder'


class Difficulty(Enum):
    EASY = 'easy'
    MEDIUM = 'medium'
    HARD = 'hard'


class TestType(Enum):
    STDIN = 'stdin'
    FUNCTIONAL = 'functional'


@dataclass
class Test:
    input: str
    output: str
    testtype: TestType

    def __post_init__(self):
        self.testtype = TestType(self.testtype)


class xLCBCodeGenerationDataset(BaseDataset):

    @staticmethod
    def load(path: str = 'opencompass/code_generation_lite',
             local_mode: bool = False,
             release_version: str = 'release_v1'):

        def transform(item):
            # Define the data item mapping logic

            # starter_code
            if item['starter_code']:
                format_prompt = f'### Format: {CodeGenerationPromptConstants.FORMATTING_MESSAGE_WITH_STARTER_CODE}\n'  # noqa: E501
                format_prompt += f"```python\n{item['starter_code']}\n```\n\n"
            else:
                format_prompt = f'### Format: {CodeGenerationPromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n'  # noqa: E501
                format_prompt += '```python\n# YOUR CODE HERE\n```\n\n'

            item['format_prompt'] = format_prompt

            # load test cases
            public_test_cases = json.loads(item['public_test_cases'])

            private_test_cases = item['private_test_cases']
            try:
                private_test_cases = json.loads(item['private_test_cases'])
            except Exception as e:  # noqa: F841
                # fall back to the compressed encoding used by some releases
                private_test_cases = json.loads(
                    pickle.loads(
                        zlib.decompress(
                            base64.b64decode(private_test_cases.encode(
                                'utf-8'))  # type: ignore
                        )))  # type: ignore

            # load metadata
            metadata = json.loads(item['metadata'])
            evaluation_sample = json.dumps({
                'inputs':
                [t['input'] for t in public_test_cases + private_test_cases],
                'outputs':
                [t['output'] for t in public_test_cases + private_test_cases],
                'fn_name':
                metadata.get('func_name', None),
            })
            item['evaluation_sample'] = evaluation_sample

            return item

        path = get_data_path(path, local_mode=local_mode)

        def load_jsonl(path):
            dataset_dict = []
            with open(path, 'r') as f:
                for line in f:
                    dataset_dict.append(json.loads(line))
            return dataset_dict

        dataset_dict = load_jsonl(path)
        # transform the list of records into a `datasets.Dataset`
        dataset = Dataset.from_list(dataset_dict)
        # dataset = load_dataset(
        #     path,  # 'livecodebench/code_generation_lite'
        #     split='test',
        #     version_tag=release_version,
        #     trust_remote_code=True)

        dataset = dataset.map(transform)

        return DatasetDict({'test': dataset, 'train': dataset})


class LCBCodeExecutionDataset(BaseDataset):

    @staticmethod
    def load(
        path: str = 'opencompass/execution-v2',
        local_mode: bool = False,
        cot: bool = False,
        # release_version: str = "release_v1"
    ):

        def transform(item):
            code, input = item['code'], item['input']
            prompt = make_code_execution_prompt(code, input, cot=cot)

            item['prompt'] = prompt

            evaluation_sample = json.dumps({
                'code': item['code'],
                'input': item['input'],
                'output': item['output']
            })
            item['evaluation_sample'] = evaluation_sample

            return item

        path = get_data_path(path, local_mode=local_mode)
        dataset = load_dataset(path,
                               split='test')  # 'livecodebench/execution-v2'
        dataset = dataset.map(transform)

        return DatasetDict({'test': dataset, 'train': dataset})


class LCBTestOutputPredictionDataset(BaseDataset):

    @staticmethod
    def load(
        path: str = 'opencompass/test_generation',
        local_mode: bool = False,
        # release_version: str = "release_v1"
    ):

        def transform(item):
            question_content = item['question_content']
            starter_code = item['starter_code']
            test = json.loads(item['test'])

            testcase_input = test[0]['input']
            testcase_output = test[0]['output']

            item['testcase_input'] = testcase_input
            item['testcase_output'] = testcase_output

            item['prompt'] = get_generic_question_template_test_completion(
                question_content=question_content,
                starter_code=starter_code,
                testcase_input=testcase_input)

            evaluation_sample = json.dumps({
                'input':
                item['question_content'],
                'output':
                json.loads(item['test'])[0]['output']
            })
            item['evaluation_sample'] = evaluation_sample

            return item

        path = get_data_path(path, local_mode=local_mode)
        # 'livecodebench/test_generation'
        dataset = load_dataset(path, split='test', trust_remote_code=True)
        dataset = dataset.map(transform)

        return DatasetDict({'test': dataset, 'train': dataset})


class LCBSelfRepairDataset(BaseDataset):

    @staticmethod
    def load(path: str = 'livecodebench/code_generation_lite',
             local_mode: bool = False,
             release_version: str = 'release_v1'):

        def transform(item):
            # define the data item mapping logic

            question = item['question_content']
            code = item['code_list'][0]
            metadata = item['metadata']

            prompt = get_generic_question_template_answer_self_repair(
                question=question, code=code, metadata=metadata)
            item['prompt'] = prompt

            return item

        dataset = load_dataset(path,
                               split='test',
                               version_tag=release_version,
                               trust_remote_code=True)
        dataset = dataset.map(transform)

        return DatasetDict({'test': dataset, 'train': dataset})


class CompassBenchCodeExecutionDataset(BaseDataset):

    @staticmethod
    def load(
        path: str = 'opencompass/execution-v2',
        local_mode: bool = False,
        cot: bool = False,
        # release_version: str = "release_v1"
    ):

        def transform(item):
            code, input = item['code'], item['input']
            prompt = make_code_execution_prompt(code, input, cot=cot)

            item['prompt'] = prompt

            evaluation_sample = json.dumps({
                'code': item['code'],
                'input': item['input'],
                'output': item['output']
            })
            item['evaluation_sample'] = evaluation_sample

            return item

        path = get_data_path(path, local_mode=local_mode)
        dataset = load_from_disk(path)  # 'livecodebench/execution-v2'
        dataset = dataset['test']
        dataset = dataset.map(transform)

        return DatasetDict({'test': dataset, 'train': dataset})
72  opencompass/datasets/xlivecodebench/pass_k_utils.py  Normal file
@@ -0,0 +1,72 @@
# Copyright LiveCodeBench @ 2024,

import itertools

import numpy as np


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 100 * (1 - comb(n - c, k) / comb(n, k))."""
        if n - c < k:
            return 1.0 * 100
        return 100 * (1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([
        estimator(int(n), int(c), k)
        for n, c in zip(num_samples_it, num_correct)
    ])
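

# Worked example (editor's addition, not part of the original file): with
# n=5 samples of which c=1 is correct,
#   pass@1 = 100 * (1 - C(4, 1) / C(5, 1)) = 20.0
def _demo_estimate_pass_at_k():  # hypothetical, never called at import
    assert np.allclose(estimate_pass_at_k(5, [1], 1), [20.0])

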
def compute_metrics_from_results(results, k_list=[1, 5]):
    total = []
    correct = []
    task_ids = []
    for task_id, res in results.items():
        all_correct = []
        for generation in res:
            gen = np.array(generation)
            all_correct.append(np.all(gen > 0))
        task_ids.append(task_id)
        total.append(len(all_correct))
        correct.append(sum(all_correct))
    total = np.array(total)
    correct = np.array(correct)
    ks = k_list
    detail_pass_at_k = {
        f'pass@{k}': estimate_pass_at_k(total, correct, k).tolist()
        for k in ks if (total >= k).all()
    }
    pass_at_k = {
        f'pass@{k}': estimate_pass_at_k(total, correct, k).mean()
        for k in ks if (total >= k).all()
    }
    detail_metrics = {
        k: dict(zip(task_ids, v))
        for k, v in detail_pass_at_k.items()
    }
    pass_at_k['detail'] = detail_metrics
    return pass_at_k


def extract_instance_results(results):
    instance_wise_grades = {}
    for task_id, res in results.items():
        instance_wise_grades[task_id] = []
        for generation in res:
            instance_wise_grades[task_id].append(
                all([g > 0 for g in generation]))

    instance_wise_grades = [
        v for _, v in sorted(instance_wise_grades.items(),
                             key=lambda item: item[0])
    ]
    return instance_wise_grades
210  opencompass/datasets/xlivecodebench/prompts.py  Normal file
@@ -0,0 +1,210 @@
# Copyright LiveCodeBench @ 2024,

import json


class CodeGenerationPromptConstants:
    SYSTEM_MESSAGE_GENERIC = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'  # noqa: E501

    SYSTEM_MESSAGE_GEMINI = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program. Do NOT use system calls like `exit` in the generated program.'  # noqa: E501

    SYSTEM_MESSAGE_DEEPSEEK = 'You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you answer questions related to computer science.'  # noqa: E501

    SYSTEM_MESSAGE_MAGIC = 'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n@@ Instruction\n'  # noqa: E501

    SYSTEM_MESSAGE_WIZARD = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.'  # noqa: E501

    SYSTEM_MESSAGE_PHIND = """You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program. Put your fixed program within code delimiters, for example:
```python
# YOUR CODE HERE
```"""  # noqa: E501

    SYSTEM_MESSAGE_CODEQWEN = (
        '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user'  # noqa: E501
    )

    FORMATTING_MESSAGE_WITH_STARTER_CODE = 'You will use the following starter code to write the solution to the problem and enclose your code within delimiters.'  # noqa: E501

    FORMATTING_WITHOUT_STARTER_CODE = 'Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows.'  # noqa: E501

    PYTHON_FORMAT = '```python\n# YOUR CODE HERE\n```\n\n'


class TestOutputPromptConstants:
    SYSTEM_MESSAGE_CHAT_GENERIC = 'You are a helpful programming assistant and an expert Python programmer. You are helping a user to write a test case to help to check the correctness of the function. The user has written a input for the testcase. You will calculate the output of the testcase and write the whole assertion statement in the markdown code block with the correct output.'  # noqa: E501

    SYSTEM_MESSAGE_COMPLETION_GENERIC = 'You are a helpful programming assistant and an expert Python programmer. You are helping a user to write a test case to help to check the correctness of the function.'  # noqa: E501

    SYSTEM_MESSAGE_INST_CLLAMA = 'You are a helpful programming assistant and an expert Python programmer. You are helping a user to write a test case to help to check the correctness of the function. The user has written a input for the testcase. You will calculate the output of the testcase and write out the complete assertion statement between [PYTHON] and [/PYTHON] tags.'  # noqa: E501

    SYSTEM_MESSAGE_WIZARD = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.'  # noqa: E501

    SYSTEM_MESSAGE_PHIND = """You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program. You must put the entire fixed program within code delimiters only for once., for example:
```python
# YOUR CODE HERE
```"""  # noqa: E501

    FORMATTING_MESSAGE = 'You will use the following starter code to write the solution to the problem and enclose your code within delimiters.'  # noqa: E501

    FORMATTING_WITHOUT_STARTER_MESSAGE = 'Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows.'  # noqa: E501


class SelfRepairPromptConstants:
    SYSTEM_MESSAGE_GENERIC = 'You are a helpful programming assistant and an expert Python programmer. You are helping a user write a program to solve a problem. The user has written some code, but it has some errors and is not passing the tests. You will help the user by first giving a concise (at most 2-3 sentences) textual explanation of what is wrong with the code. After you have pointed out what is wrong with the code, you will then generate a fixed version of the program. You must put the entire fixed program within code delimiters only for once.'  # noqa: E501

    SYSTEM_MESSAGE_DEEPSEEK = 'You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you are helping a user correct a error program for code competition. The user has written some code, but it has some errors and is not passing the tests. You will help the user by first giving a concise (at most 2-3 sentences) textual explanation of what is wrong with the code. After you have pointed out what is wrong with the code, you will then generate a fixed version of the entire executable program. You must put the entire fixed executable program within code delimiters.'  # noqa: E501

    SYSTEM_MESSAGE_MAGIC = 'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n@@ Instruction\n'  # noqa: E501

    SYSTEM_MESSAGE_WIZARD = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.'  # noqa: E501

    SYSTEM_MESSAGE_PHIND = """You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program. You must put the entire fixed program within code delimiters only for once., for example:
```python
# YOUR CODE HERE
```"""  # noqa: E501

    FORMATTING_REPEAT = 'First reason about the code providing a textual explanation of what is wrong with the code and then generate a fixed of the program enclosed code delimiters.'  # noqa: E501

    FORMATTING_MESSAGE = 'You will use the following starter code to write the solution to the problem and enclose your code within delimiters.'  # noqa: E501

    FORMATTING_WITHOUT_STARTER_CODE = 'Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows.'  # noqa: E501


def make_code_execution_prompt(code, input, cot):
    if cot:
        # make_cot_output_prompt
        return f"""You are given a Python function and an assertion containing an input to the function. Complete the assertion with a literal (no unsimplified expressions, no function calls) containing the output when executing the provided code on the given input, even if the function is incorrect or incomplete. Do NOT output any extra information. Execute the program step by step before arriving at an answer, and provide the full assertion with the correct output in [ANSWER] and [/ANSWER] tags, following the examples.

[PYTHON]
def performOperation(s):
    s = s + s
    return "b" + s + "a"
assert performOperation(s = "hi") == ??
[/PYTHON]
[THOUGHT]
Let's execute the code step by step:

1. The function performOperation is defined, which takes a single argument s.
2. The function is called with the argument "hi", so within the function, s is initially "hi".
3. Inside the function, s is concatenated with itself, so s becomes "hihi".
4. The function then returns a new string that starts with "b", followed by the value of s (which is now "hihi"), and ends with "a".
5. The return value of the function is therefore "bhihia".
[/THOUGHT]
[ANSWER]
assert performOperation(s = "hi") == "bhihia"
[/ANSWER]

[PYTHON]
{code}
assert {input} == ??
[/PYTHON]
[THOUGHT]
"""  # noqa: E501
    else:
        # make_direct_output_prompt
        return f"""You are given a Python function and an assertion containing an input to the function. Complete the assertion with a literal (no unsimplified expressions, no function calls) containing the output when executing the provided code on the given input, even if the function is incorrect or incomplete. Do NOT output any extra information. Provide the full assertion with the correct output in [ANSWER] and [/ANSWER] tags, following the examples.

[PYTHON]
def repeatNumber(number : int) -> int:
    return number
assert repeatNumber(number = 17) == ??
[/PYTHON]
[ANSWER]
assert repeatNumber(number = 17) == 17
[/ANSWER]

[PYTHON]
def addCharacterA(string : str) -> str:
    return string + "a"
assert addCharacterA(string = "x9j") == ??
[/PYTHON]
[ANSWER]
assert addCharacterA(string = "x9j") == "x9ja"
[/ANSWER]

[PYTHON]
{code}
assert {input} == ??
[/PYTHON]
[ANSWER]
"""  # noqa: E501


def get_generic_question_template_test_completion(question_content,
                                                  starter_code,
                                                  testcase_input: str):

    def format_testcase_func_name_input(function_name, testcase):
        """Use the form of "assert func_name(input) == "."""
        # TODO should there be a space after the == ?
        input_str = ', '.join(testcase.split('\n'))
        return f'assert {function_name}({input_str}) == # TODO'

    def parse_function_name_from_starter_code(starter_code):
        """
        starter_code : str
        """
        import ast

        tree = ast.parse(starter_code)
        fn = None
        for node in ast.walk(tree):
            if isinstance(node, ast.FunctionDef):
                assert fn is None
                fn = node.name
        return fn

    prompt = f'Problem:\n{question_content}'
    prompt += f'Function:\n```\n{starter_code}\n```\n'

    # parse function name from starter_code
    func_name = parse_function_name_from_starter_code(starter_code)
    prompt += 'Please complete the following test case:\n\n'
    prompt += (
        f'```\n{format_testcase_func_name_input(func_name, testcase_input)}\n```\n'  # noqa: E501
    )

    return prompt


def get_generic_question_template_answer_self_repair(question: str, code,
                                                     metadata):

    def get_check_prompt(metadata):
        # def get_check_prompt(question: str, result, metadata):
        #     # assumes i/o examples are already truncated!
        #     # less pressure on storing 10 MB json because on a single large
        #     # input-output pair
        #     result_by_test_case = result
        #     assert len(metadata) == 1, f"metadata = {metadata}"
        #     metadata = metadata[0]
        metadata = json.loads(metadata)
        if 'error_code' not in metadata:
            return ''
        if metadata['error_code'] == -1:
            # compilation error
            message = f"The above code is incorrect and got the following compilation error.\n{metadata['error']}"  # noqa: E501
        elif metadata['error_code'] == -2:
            # wrong answer
            message = f"The above code is incorrect and got a wrong answer.\nInput: {metadata['inputs']}\nGenerated Output: {metadata['output']}\nExpected: {metadata['expected']}"  # noqa: E501
        elif metadata['error_code'] == -3:
            # time limit exceeded
            message = f"The above code is incorrect and got time limit exceeded.\n{metadata['error']}\nInput: {metadata['inputs']}\nExpected: {metadata['expected']}"  # noqa: E501
        elif metadata['error_code'] == -4:
            # runtime error
            message = f"The above code is incorrect and got a runtime error.\nInput: {metadata['inputs']}\nExpected: {metadata['expected']}\n{metadata['error']}"  # noqa: E501
        else:
            raise NotImplementedError(
                f"metadata['error_code'] = {metadata['error_code']} not implemented || {metadata=}"  # noqa: E501
            )
        return message

    prompt = f'### Question:\n{question}\n\n'
    prompt += f'### Answer:\n```python\n{code}\n```\n\n'
    # prompt += get_check_prompt(question, result, metadata) + "\n"
    prompt += get_check_prompt(metadata) + '\n'
    prompt += f'### Format: {SelfRepairPromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n'  # noqa: E501
    prompt += '```python\n# YOUR CODE HERE\n```\n\n'
    prompt += '### Answer: (use the provided format with backticks)\n\n'
    return prompt
762  opencompass/datasets/xlivecodebench/testing_util.py  Normal file
@@ -0,0 +1,762 @@
# Copyright LiveCodeBench @ 2024,

import ast
import faulthandler
import json
import platform
# to run the solution files we're using a timing based approach
import signal
import sys
# used for debugging to time steps
from datetime import datetime
from enum import Enum
# for capturing the stdout
from io import StringIO
# used for testing the code that reads from input
from unittest.mock import mock_open, patch

import numpy as np
from pyext import RuntimeModule


def truncatefn(s, length=300):
    assert isinstance(s, str)
    if len(s) <= length:
        return s

    return s[:length // 2] + '...(truncated) ...' + s[-length // 2:]
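

# Illustrative sketch (editor's addition, not part of the original file):
# long strings keep their head and tail around a truncation marker.
def _demo_truncatefn():  # hypothetical helper, never called at import
    s = truncatefn('a' * 400)
    assert s == 'a' * 150 + '...(truncated) ...' + 'a' * 150

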
class CODE_TYPE(Enum):
|
||||
call_based = 0
|
||||
standard_input = 1
|
||||
|
||||
|
||||
# stuff for setting up signal timer
|
||||
class TimeoutException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def timeout_handler(signum, frame):
|
||||
print('alarm went off')
|
||||
# return
|
||||
raise TimeoutException
|
||||
|
||||
|
||||
signal.signal(signal.SIGALRM, timeout_handler)
|
||||
|
||||
# timeout = 6 # seconds
|
||||
|
||||
|
||||
# used to capture stdout as a list
|
||||
# from https://stackoverflow.com/a/16571630/6416660
|
||||
# alternative use redirect_stdout() from contextlib
|
||||
class Capturing(list):
|
||||
|
||||
def __enter__(self):
|
||||
self._stdout = sys.stdout
|
||||
sys.stdout = self._stringio = StringIO()
|
||||
# Make closing the StringIO a no-op
|
||||
self._stringio.close = lambda x: 1
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
self.append(self._stringio.getvalue())
|
||||
del self._stringio # free up some memory
|
||||
sys.stdout = self._stdout
|
||||
|
||||
|
||||
def only_int_check(val):
|
||||
return isinstance(val, int)
|
||||
|
||||
|
||||
def string_int_check(val):
|
||||
return isinstance(val, str) and val.isdigit()
|
||||
|
||||
|
||||
def combined_int_check(val):
|
||||
return only_int_check(val) or string_int_check(val)
|
||||
|
||||
|
||||
def run_test(sample, test=None, debug=False, timeout=6):
|
||||
"""if test(generated_code) is not None it'll try to run the code.
|
||||
|
||||
otherwise it'll just return an input and output pair.
|
||||
"""
|
||||
# Disable functionalities that can make destructive changes to the test.
|
||||
reliability_guard()
|
||||
|
||||
if debug:
|
||||
print(f'start = {datetime.now().time()}')
|
||||
|
||||
try:
|
||||
in_outs = json.loads(sample['input_output'])
|
||||
except ValueError:
|
||||
in_outs = None
|
||||
if in_outs:
|
||||
if in_outs.get('fn_name') is None:
|
||||
which_type = CODE_TYPE.standard_input # Standard input
|
||||
method_name = None
|
||||
else:
|
||||
which_type = CODE_TYPE.call_based # Call-based
|
||||
method_name = in_outs['fn_name']
|
||||
|
||||
if debug:
|
||||
print(f'loaded input_output = {datetime.now().time()}')
|
||||
|
||||
if test is None:
|
||||
assert False, 'should not happen: test code is none'
|
||||
return in_outs, {'error': 'no test code provided'}
|
||||
elif test is not None:
        results = []
        sol = 'from string import *\nfrom re import *\nfrom datetime import *\nfrom collections import *\nfrom heapq import *\nfrom bisect import *\nfrom copy import *\nfrom math import *\nfrom random import *\nfrom statistics import *\nfrom itertools import *\nfrom functools import *\nfrom operator import *\nfrom io import *\nfrom sys import *\nfrom json import *\nfrom builtins import *\nfrom typing import *\nimport string\nimport re\nimport datetime\nimport collections\nimport heapq\nimport bisect\nimport copy\nimport math\nimport random\nimport statistics\nimport itertools\nimport functools\nimport operator\nimport io\nimport sys\nimport json\nsys.setrecursionlimit(6*10**5)\n'  # noqa: E501
        if debug:
            print(f'loading test code = {datetime.now().time()}')

        if which_type == CODE_TYPE.call_based:

            sol += test
            if debug:
                print(f'sol = {sol}')
            signal.alarm(timeout)
            try:
                tmp_sol = RuntimeModule.from_string('tmp_sol', '', sol)
                if 'class Solution' not in test:
                    tmp = tmp_sol
                else:
                    tmp = tmp_sol.Solution()
                signal.alarm(0)
            except Exception as e:
                signal.alarm(0)
                if debug:
                    print(f'type 0 compilation error = {e}')
                results.append(-2)
                return results, {
                    'error': repr(e),
                    'error_code': -1,
                    'error_message': 'Compilation Error',
                }
            signal.alarm(0)

        elif which_type == CODE_TYPE.standard_input:
            # if the code has `if __name__ == "__main__":` then remove it
            try:
                astree = ast.parse(test)
                last_block = astree.body[-1]
                if isinstance(last_block, ast.If):
                    condition = last_block.test
                    if ast.unparse(
                            condition).strip() == "__name__ == '__main__'":
                        test = (ast.unparse(astree.body[:-1]) + '\n' +
                                ast.unparse(last_block.body))
            except Exception:
                pass

            tmp_test = test.split('\n')

            new_test = []
            for x in tmp_test:
                if (not x.startswith('from ')) and (
                        not x.startswith('import ')):
                    new_test.append('\t' + x + '\n')
                else:
                    new_test.append(x + '\n')
            tmp_test = new_test

            new_test = ''
            started = False
            for i in tmp_test:
                if i.startswith('\t') and not started:
                    new_test += 'stdin = sys.stdin\nstdout = sys.stdout\n'
                    new_test += 'def code():\n'
                    new_test += i
                    started = True
                elif started and ((i.startswith('from ')) or
                                  (i.startswith('import '))):
                    new_test += '\t' + i
                else:
                    new_test += i
            tmp_test = new_test

            sol += tmp_test
            if debug:
                print(f'sol = {sol}')
            method_name = 'code'
            signal.alarm(timeout)
            try:
                tmp_sol = RuntimeModule.from_string('tmp_sol', '', sol)
                tmp = tmp_sol
                signal.alarm(0)
            except Exception as e:
                signal.alarm(0)
                if debug:
                    print(f'type 1 compilation error = {e}')
                results.append(-2)
                return results, {
                    'error': repr(e),
                    'error_code': -1,
                    'error_message': 'Compilation Error',
                }
            signal.alarm(0)
        if debug:
            print(f'get method = {datetime.now().time()}')

        try:
            method = getattr(tmp,
                             method_name)  # getattr's second arg must be str
        except Exception as e:
            signal.alarm(0)
            e = sys.exc_info()
            print(f'unable to get function error = {e}')
            results.append(-2)
            return results, {
                'error': repr(e),
                'error_code': -1,
                'error_message': 'Unable to extract code',
            }

        for index, inputs in enumerate(in_outs['inputs']):
            raw_inputs = inputs
            raw_outputs = in_outs['outputs'][index]
            if which_type == CODE_TYPE.call_based:
                inputs = [json.loads(line) for line in inputs.split('\n')]
                in_outs['outputs'][index] = json.loads(
                    in_outs['outputs'][index])

                truncate_line_size = 300 // (raw_inputs.count('\n') + 1)
                raw_inputs = '\n'.join([
                    truncatefn(line, truncate_line_size)
                    for line in raw_inputs.strip().split('\n')
                ])
                raw_outputs = truncatefn(raw_outputs, 200)
            else:
                raw_inputs = truncatefn(raw_inputs)
                raw_outputs = truncatefn(raw_outputs, 200)
            # JSON forces dictionaries to have string keys; this undoes this
            # (assuming a singleton list)
            try:
                if isinstance(inputs[0], dict):
                    inputs = [{int(k): v for k, v in inputs[0].items()}]
            except Exception:
                pass
            try:
                if isinstance(in_outs['outputs'][index], dict):
                    in_outs['outputs'][index] = [{
                        int(k): v
                        for k, v in in_outs['outputs'][index].items()
                    }]
            except Exception:
                pass
            try:
                if isinstance(in_outs['outputs'][index][0], dict):
                    in_outs['outputs'][index] = [{
                        int(k): v
                        for k, v in in_outs['outputs'][index][0].items()
                    }]
            except Exception:
                pass

            if debug:
                print(
                    f'time: {datetime.now().time()} testing index = {index} '
                    f'inputs = {inputs}, {type(inputs)}. type = {which_type}')
            if which_type == CODE_TYPE.call_based:  # Call-based
                signal.alarm(timeout)
                faulthandler.enable()
                try:
                    output = method(*inputs)
                    raw_true_output = output

                    raw_true_output_copy = json.dumps(output)
                    raw_true_output_copy = truncatefn(raw_true_output_copy,
                                                      200)

                    # ground truth sequences are not tuples
                    if isinstance(output, tuple):
                        output = list(output)

                    tmp_result = output == in_outs['outputs'][index]
                    if (isinstance(in_outs['outputs'][index], list)
                            and in_outs['outputs'][index]):
                        tmp_result = tmp_result or (
                            output == in_outs['outputs'][index][0])

                    # ground truth sequences are not tuples
                    try:
                        if isinstance(output[0], tuple):
                            tmp_result = tmp_result or ([
                                list(x) for x in output
                            ] == in_outs['outputs'][index][0])
                    except Exception:
                        pass
                    results.append(tmp_result)
                    if tmp_result is not True:
                        return results, {
                            'output': raw_true_output_copy,
                            'expected': raw_outputs,
                            'inputs': raw_inputs,
                            'error_code': -2,
                            'error_message': 'Wrong Answer',
                        }
                    # reset the alarm
                    signal.alarm(0)
                except Exception as e:
                    signal.alarm(0)
                    faulthandler.disable()
                    if debug:
                        print(
                            f'Call-based runtime error or time limit exceeded error = {e}'  # noqa: E501
                        )
                    results.append(-1)
                    if 'timeoutexception' in repr(e).lower():
                        return results, {
                            'error': repr(e),
                            'error_code': -3,
                            'error_message': 'Time Limit Exceeded',
                            'inputs': raw_inputs,
                            'expected': raw_outputs,
                        }
                    else:
                        return results, {
                            'error': repr(e),
                            'error_code': -4,
                            'error_message': 'Runtime Error',
                            'inputs': raw_inputs,
                            'expected': raw_outputs,
                        }
                faulthandler.disable()
                signal.alarm(0)
                if debug:
                    print(
                        f"outputs = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs}, {type(inputs)}, {output == [in_outs['outputs'][index]]}"  # noqa: E501
                    )
            elif which_type == CODE_TYPE.standard_input:  # Standard input
                faulthandler.enable()
                passed = False

                if isinstance(inputs, list):
                    inputs = '\n'.join(inputs)
                if isinstance(in_outs['outputs'][index], list):
                    in_outs['outputs'][index] = '\n'.join(
                        in_outs['outputs'][index])

                signal.alarm(timeout)
                with Capturing() as output:
                    try:
                        call_method(method, inputs)
                        # reset the alarm
                        signal.alarm(0)
                        passed = True
                    except Exception as e:
                        # runtime error or took too long
                        signal.alarm(0)
                        print(
                            f'Standard input runtime error or time limit exceeded error = {repr(e)}{e}'  # noqa: E501
                        )
                        results.append(-1)
                        if 'timeoutexception' in repr(e).lower():
                            return results, {
                                'error': repr(e),
                                'error_code': -3,
                                'error_message': 'Time Limit Exceeded',
                                'inputs': raw_inputs,
                                'expected': raw_outputs,
                            }
                        else:
                            return results, {
                                'error': repr(e),
                                'error_code': -4,
                                'error_message': 'Runtime Error',
                                'inputs': raw_inputs,
                                'expected': raw_outputs,
                            }
                    signal.alarm(0)
                raw_true_output = output[0]
                raw_true_output_copy = truncatefn(raw_true_output, 200)
                output = raw_true_output.splitlines()
                if not passed:
                    if debug:
                        nl = '\n'
                        if not isinstance(inputs, list):
                            print(
                                f"not passed output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs.replace(nl,' new-line ')}, {type(inputs)}, {output == [in_outs['outputs'][index]]}"  # noqa: E501
                            )
                        else:
                            print(
                                f"not passed output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs}, {type(inputs)}, {output == [in_outs['outputs'][index]]}"  # noqa: E501
                            )
                    continue

                if passed and debug:
                    print(
                        f"==> output = {output}, test outputs = {in_outs['outputs'][index]}"  # noqa: E501
                    )

                if custom_compare_(output, in_outs['outputs'][index]):
                    tmp_result = True
                    results.append(tmp_result)
                    continue

                # ground truth sequences are expressed as lists not tuples
                if isinstance(output, tuple):
                    output = list(output)

                tmp_result = False
                try:
                    tmp_result = output == [in_outs['outputs'][index]]
                    if isinstance(in_outs['outputs'][index], list):
                        tmp_result = tmp_result or (
                            output == in_outs['outputs'][index])
                        if isinstance(output[0], str):
                            tmp_result = tmp_result or ([
                                e.strip() for e in output
                            ] == in_outs['outputs'][index])
                except Exception as e:
                    if debug:
                        print(f'Failed check1 exception = {e}')

                if tmp_result is True:
                    results.append(tmp_result)
                    continue

                # try one more time without \n
                if isinstance(in_outs['outputs'][index], list):
                    for tmp_index, i in enumerate(in_outs['outputs'][index]):
                        in_outs['outputs'][index][tmp_index] = i.split('\n')
                        in_outs['outputs'][index][tmp_index] = [
                            x.strip()
                            for x in in_outs['outputs'][index][tmp_index] if x
                        ]
                else:
                    in_outs['outputs'][index] = in_outs['outputs'][
                        index].split('\n')
                    in_outs['outputs'][index] = list(
                        filter(len, in_outs['outputs'][index]))
                    in_outs['outputs'][index] = list(
                        map(lambda x: x.strip(), in_outs['outputs'][index]))

                try:
                    tmp_result = output == [in_outs['outputs'][index]]
                    if isinstance(in_outs['outputs'][index], list):
                        tmp_result = tmp_result or (
                            output == in_outs['outputs'][index])
                except Exception as e:
                    if debug:
                        print(f'Failed check2 exception = {e}')

                if tmp_result is True:
                    results.append(tmp_result)
                    continue

                # try by converting the output into a split up list too
                if isinstance(output, list):
                    output = list(filter(len, output))

                if debug:
                    nl = '\n'
                    if not isinstance(inputs, list):
                        print(
                            f"@1 output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs.replace(nl,' new-line ')}, {type(inputs)}, {output == [in_outs['outputs'][index]]} {tmp_result=}"  # noqa: E501
                        )
                    else:
                        print(
                            f"@1 output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs}, {type(inputs)}, {output == [in_outs['outputs'][index]]} {tmp_result=}"  # noqa: E501
                        )

                if tmp_result is True:
                    results.append(tmp_result)
                    continue

                if debug:
                    print(f'{tmp_result=} @a')

                try:
                    tmp_result = output == [in_outs['outputs'][index]]
                    if isinstance(in_outs['outputs'][index], list):
                        tmp_result = tmp_result or (
                            output == in_outs['outputs'][index])
                except Exception as e:
                    if debug:
                        print(f'Failed check3 exception = {e}')

                if debug:
                    print(f'{tmp_result=} @b')

                try:
                    all_ints = all(
                        combined_int_check(e1) and combined_int_check(e2)
                        for e1, e2 in zip(output, in_outs['outputs'][index]))
                    if not all_ints:
                        if debug:
                            print([
                                combined_int_check(e1)
                                and combined_int_check(e2) for e1, e2 in zip(
                                    output, in_outs['outputs'][index])
                            ])
                        output_float = [float(e) for e in output]
                        gt_float = [
                            float(e) for e in in_outs['outputs'][index]
                        ]
                        tmp_result = tmp_result or (
                            (len(output_float) == len(gt_float))
                            and np.allclose(output_float, gt_float))
                except Exception:
                    pass

                if debug:
                    print(f'{tmp_result=} @c')

                try:
                    if isinstance(output[0], list):
                        all_ints = all(
                            combined_int_check(e1) and combined_int_check(e2)
                            for e1, e2 in zip(output[0], in_outs['outputs']
                                              [index]))
                        if not all_ints:
                            output_float = [float(e) for e in output[0]]
                            gt_float = [
                                float(e) for e in in_outs['outputs'][index][0]
                            ]
                            tmp_result = tmp_result or (
                                (len(output_float) == len(gt_float))
                                and np.allclose(output_float, gt_float))
                except Exception:
                    pass

                if tmp_result is True:
                    results.append(tmp_result)
                    continue

                if debug:
                    print(f'{tmp_result=} @d')
                # try by converting the stuff into split up list
                if isinstance(in_outs['outputs'][index], list):
                    for tmp_index, i in enumerate(in_outs['outputs'][index]):
                        in_outs['outputs'][index][tmp_index] = set(i.split())
                else:
                    in_outs['outputs'][index] = set(
                        in_outs['outputs'][index].split())

                if debug:
                    print(f'{tmp_result=} @e')

                try:
                    tmp_result = output == in_outs['outputs'][index]
                except Exception as e:
                    if debug:
                        print(f'Failed check4 exception = {e}')
                    continue

                if tmp_result is True:
                    results.append(tmp_result)
                    continue

                if debug:
                    print(f'{tmp_result=} @f')

                # try by converting the output into a split up list too
                if isinstance(output, list):
                    for tmp_index, i in enumerate(output):
                        output[tmp_index] = i.split()
                    output = list(filter(len, output))
                    for tmp_index, i in enumerate(output):
                        output[tmp_index] = set(i)
                else:
                    output = output.split()
                    output = list(filter(len, output))
                    output = set(output)

                if debug:
                    print(f'{tmp_result=} @g')
                # try:
                #     tmp_result = set(frozenset(s) for s in output) == set(
                #         frozenset(s) for s in in_outs["outputs"][index]
                #     )
                # except Exception as e:
                #     if debug:
                #         print(f"Failed check5 exception = {e}")

                # if they are all numbers, round so that similar numbers are
                # treated as identical
                # try:
                #     all_ints = all(
                #         combined_int_check(e1) and combined_int_check(e2)
                #         for e1, e2 in zip(output, in_outs["outputs"][index])
                #     )
                #     tmp_result = tmp_result or (
                #         set(
                #             frozenset(round(float(t), 3) for t in s)
                #             for s in output)
                #         == set(
                #             frozenset(round(float(t), 3) for t in s)
                #             for s in in_outs["outputs"][index]
                #         )
                #     )
                # except Exception as e:
                #     if debug:
                #         print(f"Failed check6 exception = {e}")

                if debug:
                    print(f'{tmp_result=} @h')

                if tmp_result is True and debug:
                    print('PASSED')

                results.append(tmp_result)
                if tmp_result is not True:
                    return results, {
                        'output': raw_true_output_copy,
                        'expected': raw_outputs,
                        'inputs': raw_inputs,
                        'error_code': -2,
                        'error_message': 'Wrong Answer',
                    }

                if debug:
                    nl = '\n'
                    if not isinstance(inputs, list):
                        print(
                            f"@2 output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs.replace(nl,' new-line ')}, {type(inputs)}, {output == [in_outs['outputs'][index]]}"  # noqa: E501
                        )
                    else:
                        print(
                            f"@2 output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs}, {type(inputs)}, {output == [in_outs['outputs'][index]]}"  # noqa: E501
                        )

                    print(f'results = {results}')

    return results, {}
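
# Illustrative call (a hedged sketch; the dict layout follows the parsing
# logic above):
#
#     sample = {'input_output': json.dumps({'inputs': ['1 2\n'],
#                                           'outputs': ['3\n']})}
#     code = 'print(sum(map(int, input().split())))'
#     results, meta = run_test(sample, test=code)
#     # results -> [True] on success; meta describes the first failure, if any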


def custom_compare_(output, ground_truth):

    if isinstance(output, list):
        output_1 = '\n'.join(output)
        if stripped_string_compare(output_1, ground_truth):
            return True

    if isinstance(output, list):
        output_2 = [o.strip() for o in output]
        output_2 = '\n'.join(output_2)
        if stripped_string_compare(output_2, ground_truth):
            return True

    return False


def stripped_string_compare(s1, s2):
    s1 = s1.strip()
    s2 = s2.strip()
    return s1 == s2


def call_method(method, inputs):

    if isinstance(inputs, list):
        inputs = '\n'.join(inputs)

    inputs_line_iterator = iter(inputs.split('\n'))

    # sys.setrecursionlimit(10000)

    # @patch('builtins.input', side_effect=inputs.split("\n"))
    @patch('builtins.open', mock_open(read_data=inputs))
    @patch('sys.stdin', StringIO(inputs))
    @patch('sys.stdin.readline', lambda *args: next(inputs_line_iterator))
    @patch('sys.stdin.readlines', lambda *args: inputs.split('\n'))
    @patch('sys.stdin.read', lambda *args: inputs)
    # @patch('sys.stdout.write', print)
    def _inner_call_method(_method):
        try:
            return _method()
        except SystemExit:
            pass
        finally:
            pass

    return _inner_call_method(method)
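
# The patches above redirect every common way of reading input: code inside
# `method` that calls input(), sys.stdin.read()/readline()/readlines(), or
# even open() sees the test's input string instead of the real stdin or
# filesystem, and SystemExit is swallowed so a solution calling exit() does
# not kill the harness.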


def reliability_guard(maximum_memory_bytes=None):
    """Disable various destructive functions and prevent the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.).

    WARNING: this function is NOT a security sandbox. Untrusted code,
    including model-generated code, should not be blindly executed outside
    of one. See the Codex paper for more information about OpenAI's code
    sandbox, and proceed with caution.
    """

    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS,
                           (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA,
                           (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == 'Darwin':
            resource.setrlimit(resource.RLIMIT_STACK,
                               (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ['OMP_NUM_THREADS'] = '1'

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__['help'] = None

    import sys

    sys.modules['ipdb'] = None
    sys.modules['joblib'] = None
    sys.modules['resource'] = None
    sys.modules['psutil'] = None
    sys.modules['tkinter'] = None
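
# Note: reliability_guard mutates builtin and module state for the whole
# process and is not reversible; run_test calls it once at startup, so each
# evaluation is expected to run in its own worker process.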
96
opencompass/datasets/xmgsm.py
Normal file
@ -0,0 +1,96 @@
import json
import re

from datasets import Dataset

from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import LOAD_DATASET
from opencompass.utils import get_data_path

from .base import BaseDataset


@LOAD_DATASET.register_module()
class xMGSMSDataset(BaseDataset):

    @staticmethod
    def load(path: str):

        def read_jsonl(path: str):
            data = []
            with open(path, 'r', encoding='utf-8') as f:
                for line in f:
                    data.append(json.loads(line))
            return data

        path = get_data_path(path, local_mode=True)
        data = {'question': [], 'answer': []}
        all_data = read_jsonl(path)
        for lines in all_data:
            data['question'].append(lines['question'])
            data['answer'].append(lines['answer_number'])

        dataset = Dataset.from_dict({
            'question': data['question'],
            'answer': data['answer']
        })
        return dataset


LANG_TO_ANSWER_PREFIX = {
    'en': 'Answer',
    'bn': 'উত্তর',
    'de': 'Antwort',
    'es': 'Respuesta',
    'fr': 'Réponse',
    'ja': '答え',
    'ru': 'Ответ',
    'sw': 'Jibu',
    'te': 'సమాధానం',
    'th': 'คำตอบ',
    'zh': '答案',
    'sr': 'Одговор',
    'ar': 'الإجاب',
    'hu': 'Válasz',
    'cs': 'Odpověď',
    'vi': 'Câu trả lời',
    'ko': '답',
}


def mgsm_postprocess(text: str, lang: str) -> str:
    answer_prefix = LANG_TO_ANSWER_PREFIX[lang]
    if answer_prefix not in text:
        return ''
    answer_text = text.split(answer_prefix)[-1].strip()
    numbers = re.findall(r'\d+\.?\d*', answer_text.replace(',', ''))
    return numbers[-1].rstrip('.') if numbers else ''
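
# For example (illustrative): mgsm_postprocess('... Answer: 1,234.', 'en')
# drops the thousands separator and the trailing period and returns '1234'.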


class xMGSM_Evaluator(BaseEvaluator):

    def score(self, predictions, references):
        assert len(predictions) == len(references)

        num_correct, total = 0, 0
        details = {}
        for index, (references_answer, predictions_answer) in enumerate(
                zip(references, predictions)):
            is_correct = str(references_answer) == str(predictions_answer)

            num_correct += is_correct
            total += 1
            details[str(index)] = {
                'references': references_answer,
                'predictions': predictions_answer,
                'correct': is_correct,
            }

        accuracy = num_correct / total * 100
        final_result = {'accuracy': accuracy, 'details': details}
        return final_result
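
# Illustrative use (hedged): with predictions=['3', '5'] and references=[3, 4],
# score() compares the string forms of each pair and returns
# {'accuracy': 50.0, 'details': {...}}.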