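# Expected baseline scores for the OpenCompass daily regression runs
# (referenced by the daily-run-test.yml workflow). Layout: each top-level key
# is a model run config; under it, capability groups (objective, subjective,
# long_context) map "<dataset>_<metric>" keys to expected scores. Naming
# follows OpenCompass conventions: "-hf" runs use the HuggingFace transformers
# backend, "-turbomind" runs use the LMDeploy TurboMind engine, and the
# "_fullbench" suffix appears to mark runs on small smoke-test subsets (most
# of those scores are multiples of 6.25, i.e. n/16).
#
# Minimal sketch of how a CI check might consume this file (file name and
# tolerance are assumptions, not the actual workflow code):
#
#   import yaml
#   with open("oc_score_baseline_fullbench.yaml") as f:  # path assumed
#       baseline = yaml.safe_load(f)
#   expected = baseline["internlm2_5-7b-chat-hf_fullbench"]["objective"]["gsm8k_accuracy"]
#   assert abs(measured_score - expected) <= 2.0         # tolerance assumed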
internlm2_5-7b-chat-hf_fullbench:
  objective:
    race-high_accuracy: 93.75
    ARC-c_accuracy: 93.75
    BoolQ_accuracy: 81.25
    triviaqa_wiki_1shot_score: 50
    nq_open_1shot_score: 25
    IFEval_Prompt-level-strict-accuracy: 50
    drop_accuracy: 81.25
    GPQA_diamond_accuracy: 25
    hellaswag_accuracy: 87.5
    TheoremQA_score: 18.75
    musr_average_naive_average: 39.58
    korbench_single_naive_average: 40
    gsm8k_accuracy: 62.50
    math_accuracy: 75
    cmo_fib_accuracy: 6.25
    aime2024_accuracy: 6.25
    wikibench-wiki-single_choice_cncircular_perf_4: 50
    sanitized_mbpp_score: 68.75
    ds1000_naive_average: 16.96
    lcb_code_generation_pass@1: 12.5
    lcb_code_execution_pass@1: 43.75
    lcb_test_output_pass@1: 18.75
    bbh-logical_deduction_seven_objects_score: 50
    bbh-multistep_arithmetic_two_score: 68.75
    mmlu-other_naive_average: 72.6
    cmmlu-china-specific_naive_average: 76.25
    mmlu_pro_math_accuracy: 25
    ds1000_Pandas_accuracy: 12.5
    ds1000_Numpy_accuracy: 0
    ds1000_Tensorflow_accuracy: 12.5
    ds1000_Scipy_accuracy: 18.75
    ds1000_Sklearn_accuracy: 18.75
    ds1000_Pytorch_accuracy: 12.5
    ds1000_Matplotlib_accuracy: 43.75
    openai_mmmlu_lite_AR-XY_accuracy: 37.5
    college_naive_average: 12.5
    college_knowledge_naive_average: 87.5
  subjective:
    alignment_bench_v1_1_总分: 0.66
    alpaca_eval_total: 20
    arenahard_score: 50
    Followbench_naive_average: 1
    CompassArena_naive_average: 44.00
    mtbench101_avg: 7.8
    wildbench_average: -12.78
    simpleqa_accuracy_given_attempted: 0
    chinese_simpleqa_given_attempted_accuracy: 1
    alignment_bench_v1_1_专业能力: 7.90
    alignment_bench_v1_1_数学计算: 0
    alignment_bench_v1_1_基本任务: 0
    alignment_bench_v1_1_逻辑推理: 0
    alignment_bench_v1_1_中文理解: 0
    alignment_bench_v1_1_文本写作: 0
    alignment_bench_v1_1_角色扮演: 0
    alignment_bench_v1_1_综合问答: 0
    alpaca_eval_helpful_base: 20
    compassarena_language_naive_average: 35
    compassarena_knowledge_naive_average: 55
    compassarena_reason_v2_naive_average: 45.00
    compassarena_math_v2_naive_average: 55
    compassarena_creationv2_zh_naive_average: 30
    followbench_llmeval_en_HSR_AVG: 1
    followbench_llmeval_en_SSR_AVG: 1
    followbench_llmeval_en_HSR_L1: 1
    followbench_llmeval_en_HSR_L2: 1
    followbench_llmeval_en_HSR_L3: 1
    followbench_llmeval_en_HSR_L4: 1
    followbench_llmeval_en_HSR_L5: 1
    followbench_llmeval_en_SSR_L1: 1
    followbench_llmeval_en_SSR_L2: 1
    followbench_llmeval_en_SSR_L3: 1
    followbench_llmeval_en_SSR_L4: 1
    followbench_llmeval_en_SSR_L5: 1
    simpleqa_f1: 0

internlm2_5-7b-chat-turbomind_fullbench:
  objective:
    race-high_accuracy: 93.75
    ARC-c_accuracy: 93.75
    BoolQ_accuracy: 68.75
    triviaqa_wiki_1shot_score: 50
    nq_open_1shot_score: 25
    IFEval_Prompt-level-strict-accuracy: 56.25
    drop_accuracy: 81.25
    GPQA_diamond_accuracy: 31.25
    hellaswag_accuracy: 81.25
    TheoremQA_score: 6.25
    musr_average_naive_average: 39.58
    korbench_single_naive_average: 37.50
    gsm8k_accuracy: 68.75
    math_accuracy: 68.75
    cmo_fib_accuracy: 6.25
    aime2024_accuracy: 6.25
    wikibench-wiki-single_choice_cncircular_perf_4: 50.00
    sanitized_mbpp_score: 68.75
    ds1000_naive_average: 16.96
    lcb_code_generation_pass@1: 12.5
    lcb_code_execution_pass@1: 43.75
    lcb_test_output_pass@1: 25.00
    bbh-logical_deduction_seven_objects_score: 50.00
    bbh-multistep_arithmetic_two_score: 68.75
    mmlu-other_naive_average: 69.71
    cmmlu-china-specific_naive_average: 75.83
    mmlu_pro_math_accuracy: 31.25
    ds1000_Pandas_accuracy: 0
    ds1000_Numpy_accuracy: 0
    ds1000_Tensorflow_accuracy: 12.5
    ds1000_Scipy_accuracy: 18.75
    ds1000_Sklearn_accuracy: 18.75
    ds1000_Pytorch_accuracy: 18.75
    ds1000_Matplotlib_accuracy: 50.00
    openai_mmmlu_lite_AR-XY_accuracy: 37.5
    college_naive_average: 12.50
    college_knowledge_naive_average: 87.5
  subjective:
    alignment_bench_v1_1_总分: 0.70
    alpaca_eval_total: 0
    arenahard_score: 50
    Followbench_naive_average: 1
    CompassArena_naive_average: 38
    mtbench101_avg: 7.80
    wildbench_average: -4.86
    simpleqa_accuracy_given_attempted: 0
    chinese_simpleqa_given_attempted_accuracy: 1
    alignment_bench_v1_1_专业能力: 8.4
    alignment_bench_v1_1_数学计算: 0
    alignment_bench_v1_1_基本任务: 0
    alignment_bench_v1_1_逻辑推理: 0
    alignment_bench_v1_1_中文理解: 0
    alignment_bench_v1_1_文本写作: 0
    alignment_bench_v1_1_角色扮演: 0
    alignment_bench_v1_1_综合问答: 0
    alpaca_eval_helpful_base: 0
    compassarena_language_naive_average: 35
    compassarena_knowledge_naive_average: 50
    compassarena_reason_v2_naive_average: 30
    compassarena_math_v2_naive_average: 50
    compassarena_creationv2_zh_naive_average: 25
    followbench_llmeval_en_HSR_AVG: 1
    followbench_llmeval_en_SSR_AVG: 1
    followbench_llmeval_en_HSR_L1: 1
    followbench_llmeval_en_HSR_L2: 1
    followbench_llmeval_en_HSR_L3: 1
    followbench_llmeval_en_HSR_L4: 1
    followbench_llmeval_en_HSR_L5: 1
    followbench_llmeval_en_SSR_L1: 1
    followbench_llmeval_en_SSR_L2: 1
    followbench_llmeval_en_SSR_L3: 1
    followbench_llmeval_en_SSR_L4: 1
    followbench_llmeval_en_SSR_L5: 1
    simpleqa_f1: 0

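# The base (non-chat) model runs below carry objective metrics only.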
internlm2_5-7b-hf_fullbench:
  objective:
    race-high_accuracy: 100
    ARC-c_accuracy: 68.75
    BoolQ_accuracy: 87.5
    triviaqa_wiki_1shot_score: 43.75
    nq_open_1shot_score: 43.75
    drop_accuracy: 62.5
    GPQA_diamond_accuracy: 62.5
    hellaswag_accuracy: 93.75
    TheoremQA_score: 25
    winogrande_accuracy: 75
    gsm8k_accuracy: 37.5
    GaokaoBench_2010-2022_Math_II_MCQs_score: 62.5
    GaokaoBench_2010-2022_Math_II_Fill-in-the-Blank_score: 0
    math_accuracy: 12.5
    wikibench-wiki-single_choice_cncircular_perf_4: 25
    sanitized_mbpp_score: 56.25
    dingo_en_192_score: 37.5
    dingo_zh_170_score: 100
    mmlu-other_accuracy: 76.92
    cmmlu-china-specific_accuracy: 84.17
    mmlu_pro_math_accuracy: 18.75
    bbh-logical_deduction_seven_objects_score: 43.75
    bbh-multistep_arithmetic_two_score: 56.25
    college_naive_average: 12.5
    college_knowledge_naive_average: 87.5

internlm2_5-7b-turbomind_fullbench:
  objective:
    race-high_accuracy: 100
    ARC-c_accuracy: 68.75
    BoolQ_accuracy: 87.5
    triviaqa_wiki_1shot_score: 43.75
    nq_open_1shot_score: 43.75
    drop_accuracy: 62.5
    GPQA_diamond_accuracy: 62.5
    hellaswag_accuracy: 93.75
    TheoremQA_score: 25.00
    winogrande_accuracy: 87.5
    gsm8k_accuracy: 62.50
    GaokaoBench_2010-2022_Math_II_MCQs_score: 81.25
    GaokaoBench_2010-2022_Math_II_Fill-in-the-Blank_score: 0
    math_accuracy: 18.75
    wikibench-wiki-single_choice_cncircular_perf_4: 25
    sanitized_mbpp_score: 62.50
    dingo_en_192_score: 31.25
    dingo_zh_170_score: 93.75
    mmlu-other_accuracy: 76.92
    cmmlu-china-specific_accuracy: 84.17
    mmlu_pro_math_accuracy: 18.75
    bbh-logical_deduction_seven_objects_score: 50
    bbh-multistep_arithmetic_two_score: 56.25
    college_naive_average: 12.5
    college_knowledge_naive_average: 87.5

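# Blocks without the "_fullbench" suffix appear to be baselines from
# full-dataset runs (note the finer, non-n/16 score granularity).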
internlm2_5-7b-turbomind:
  objective:
    race-high_accuracy: 89.28
    ARC-c_accuracy: 52.2
    BoolQ_accuracy: 89.72
    triviaqa_wiki_1shot_score: 65.88
    nq_open_1shot_score: 34.82
    drop_accuracy: 68.1
    bbh_naive_average: 72.15
    GPQA_diamond_accuracy: 32.83
    hellaswag_accuracy: 88.36
    TheoremQA_score: 25
    winogrande_accuracy: 81.29
    gsm8k_accuracy: 74.68
    GaokaoBench_weighted_average: 58.19
    math_accuracy: 33.98
    Mathbench_naive_average: 48.38
    wikibench-wiki-single_choice_cncircular_perf_4: 29.1
    cmmlu_naive_average: 78.94
    mmlu_naive_average: 71.44
    mmlu_pro_naive_average: 38.18
    openai_humaneval_humaneval_pass@1: 59.76
    openai_humaneval_v2_humaneval_pass@1: 51.22
    sanitized_mbpp_score: 55.25
    dingo_en_192_score: 60.94
    dingo_zh_170_score: 67.65
    mmlu-stem_naive_average: 63.72
    mmlu-social-science_naive_average: 80.15
    mmlu-humanities_naive_average: 74.27
    mmlu-other_naive_average: 71.85
    cmmlu-stem_naive_average: 67.07
    cmmlu-social-science_naive_average: 81.49
    cmmlu-humanities_naive_average: 85.84
    cmmlu-other_naive_average: 82.69
    cmmlu-china-specific_naive_average: 79.88
    mmlu_pro_biology_accuracy: 58.58
    mmlu_pro_business_accuracy: 28.01
    mmlu_pro_chemistry_accuracy: 22.79
    mmlu_pro_computer_science_accuracy: 39.02
    mmlu_pro_economics_accuracy: 53.08
    mmlu_pro_engineering_accuracy: 25.7
    mmlu_pro_health_accuracy: 46.94
    mmlu_pro_history_accuracy: 43.04
    mmlu_pro_law_accuracy: 29.7
    mmlu_pro_math_accuracy: 24.2
    mmlu_pro_philosophy_accuracy: 42.48
    mmlu_pro_physics_accuracy: 26.02
    mmlu_pro_psychology_accuracy: 52.76
    mmlu_pro_other_accuracy: 42.21
    college_naive_average: 10.67
    high_naive_average: 6.67
    middle_naive_average: 26.67
    primary_naive_average: 60
    arithmetic_naive_average: 55
    mathbench-a (average)_naive_average: 31.8
    college_knowledge_naive_average: 62.34
    high_knowledge_naive_average: 59.83
    middle_knowledge_naive_average: 71.15
    primary_knowledge_naive_average: 66.55
    mathbench-t (average)_naive_average: 64.97
  long_context:
    Single-Needle-Retrieval(S-RT)-32000_naive_average: 100
    Single-Needle-Retrieval-EN-32000_naive_average: 100
    Single-Needle-Retrieval-ZH-32000_naive_average: 100
    Single-Needle-Retrieval(S-RT)-100000_naive_average: 100
    Single-Needle-Retrieval-EN-100000_naive_average: 100
    Single-Needle-Retrieval-ZH-100000_naive_average: 100
    Single-Needle-Retrieval(S-RT)-200000_naive_average: 100
    Single-Needle-Retrieval-EN-200000_naive_average: 100
    Single-Needle-Retrieval-ZH-200000_naive_average: 100
    longbench_naive_average: 46.19
    longbench_zh_naive_average: 49.3
    longbench_en_naive_average: 43.97
    longbench_single-document-qa_naive_average: 42.84
    longbench_multi-document-qa_naive_average: 37.29
    longbench_summarization_naive_average: 23.21
    longbench_few-shot-learning_naive_average: 61.67
    longbench_synthetic-tasks_naive_average: 60.05
    longbench_code-completion_naive_average: 52.09

internlm2_5-7b-chat-turbomind:
  objective:
    race-high_accuracy: 86.16
    ARC-c_accuracy: 90.17
    BoolQ_accuracy: 87.89
    triviaqa_wiki_1shot_score: 64.91
    nq_open_1shot_score: 22.69
    mmmlu_lite_naive_average: 44.96
    IFEval_Prompt-level-strict-accuracy: 58.04
    drop_accuracy: 77.68
    bbh_naive_average: 73.14
    GPQA_diamond_accuracy: 25.76
    hellaswag_accuracy: 94.79
    TheoremQA_score: 21.5
    musr_average_naive_average: 51.03
    korbench_single_naive_average: 31.92
    ARC_Prize_Public_Evaluation_accuracy: 0.01
    gsm8k_accuracy: 86.73
    GaokaoBench_weighted_average: 77.89
    math_accuracy: 61.5
    cmo_fib_accuracy: 12.5
    aime2024_accuracy: 3.33
    Mathbench_naive_average: 65.17
    wikibench-wiki-single_choice_cncircular_perf_4: 31.55
    cmmlu_naive_average: 74.14
    mmlu_naive_average: 70.52
    mmlu_pro_naive_average: 44.98
    openai_humaneval_humaneval_pass@1: 70.73
    sanitized_mbpp_score: 63.81
    humanevalx_naive_average: 38.17
    ds1000_naive_average: 14.15
    lcb_code_generation_pass@1: 17.75
    lcb_code_execution_pass@1: 32.57
    lcb_test_output_pass@1: 24.89
    bigcodebench_hard_instruct_pass@1: 0.08
    bigcodebench_hard_complete_pass@1: 0.06
    teval_naive_average: 80.03
    qa_dingo_cn_score: 99.01
    mmlu-stem_naive_average: 68.2
    mmlu-social-science_naive_average: 76.11
    mmlu-humanities_naive_average: 68.71
    mmlu-other_naive_average: 70.56
    cmmlu-stem_naive_average: 66.27
    cmmlu-social-science_naive_average: 75.7
    cmmlu-humanities_naive_average: 77.7
    cmmlu-other_naive_average: 77.71
    cmmlu-china-specific_naive_average: 72.94
    mmlu_pro_biology_accuracy: 66.25
    mmlu_pro_business_accuracy: 48.42
    mmlu_pro_chemistry_accuracy: 35.25
    mmlu_pro_computer_science_accuracy: 47.56
    mmlu_pro_economics_accuracy: 55.92
    mmlu_pro_engineering_accuracy: 30.44
    mmlu_pro_health_accuracy: 45.97
    mmlu_pro_history_accuracy: 41.21
    mmlu_pro_law_accuracy: 25.79
    mmlu_pro_math_accuracy: 54.03
    mmlu_pro_philosophy_accuracy: 36.47
    mmlu_pro_physics_accuracy: 37.41
    mmlu_pro_psychology_accuracy: 58.77
    mmlu_pro_other_accuracy: 46.21
    humanevalx-python_pass@1: 53.66
    humanevalx-cpp_pass@1: 24.39
    humanevalx-go_pass@1: 0
    humanevalx-java_pass@1: 57.93
    humanevalx-js_pass@1: 54.88
    ds1000_Pandas_accuracy: 12.03
    ds1000_Numpy_accuracy: 4.09
    ds1000_Tensorflow_accuracy: 11.11
    ds1000_Scipy_accuracy: 8.49
    ds1000_Sklearn_accuracy: 6.96
    ds1000_Pytorch_accuracy: 7.35
    ds1000_Matplotlib_accuracy: 49.03
    openai_mmmlu_lite_AR-XY_accuracy: 17.89
    openai_mmmlu_lite_BN-BD_accuracy: 27.58
    openai_mmmlu_lite_DE-DE_accuracy: 51.16
    openai_mmmlu_lite_ES-LA_accuracy: 56.84
    openai_mmmlu_lite_FR-FR_accuracy: 57.96
    openai_mmmlu_lite_HI-IN_accuracy: 33.68
    openai_mmmlu_lite_ID-ID_accuracy: 51.02
    openai_mmmlu_lite_IT-IT_accuracy: 50.46
    openai_mmmlu_lite_JA-JP_accuracy: 50.53
    openai_mmmlu_lite_KO-KR_accuracy: 45.05
    openai_mmmlu_lite_PT-BR_accuracy: 57.68
    openai_mmmlu_lite_SW-KE_accuracy: 32.77
    openai_mmmlu_lite_YO-NG_accuracy: 31.79
    openai_mmmlu_lite_ZH-CN_accuracy: 65.05
    college_naive_average: 20.33
    high_naive_average: 47.67
    middle_naive_average: 62
    primary_naive_average: 72
    arithmetic_naive_average: 62.33
    mathbench-a (average)_naive_average: 52.87
    college_knowledge_naive_average: 70.57
    high_knowledge_naive_average: 70.13
    middle_knowledge_naive_average: 81.17
    primary_knowledge_naive_average: 88.01
    mathbench-t (average)_naive_average: 77.47
  subjective:
    alignment_bench_v1_1_总分: 5.68
    alpaca_eval_total: 25.96
    arenahard_score: 17.15
    Followbench_naive_average: 0.81
    CompassArena_naive_average: 34.61
    FoFo_naive_average: 0.38
    mtbench101_avg: 8.01
    wildbench_average: -15.69
    simpleqa_accuracy_given_attempted: 0.04
    chinese_simpleqa_given_attempted_accuracy: 0.34
    alignment_bench_v1_1_专业能力: 6.05
    alignment_bench_v1_1_数学计算: 5.87
    alignment_bench_v1_1_基本任务: 6.01
    alignment_bench_v1_1_逻辑推理: 4.48
    alignment_bench_v1_1_中文理解: 6.17
    alignment_bench_v1_1_文本写作: 6.06
    alignment_bench_v1_1_角色扮演: 6.3
    alignment_bench_v1_1_综合问答: 6.45
    alpaca_eval_helpful_base: 17.83
    alpaca_eval_koala: 28.21
    alpaca_eval_oasst: 23.4
    alpaca_eval_selfinstruct: 30.95
    alpaca_eval_vicuna: 25
    compassarena_language_naive_average: 52.5
    compassarena_knowledge_naive_average: 36
    compassarena_reason_v2_naive_average: 35
    compassarena_math_v2_naive_average: 19.91
    compassarena_creationv2_zh_naive_average: 29.64
    fofo_test_prompts_overall: 0.35
    fofo_test_prompts_cn_overall: 0.41
    followbench_llmeval_en_HSR_AVG: 0.73
    followbench_llmeval_en_SSR_AVG: 0.88
    followbench_llmeval_en_HSR_L1: 0.94
    followbench_llmeval_en_HSR_L2: 0.77
    followbench_llmeval_en_HSR_L3: 0.73
    followbench_llmeval_en_HSR_L4: 0.68
    followbench_llmeval_en_HSR_L5: 0.54
    followbench_llmeval_en_SSR_L1: 0.94
    followbench_llmeval_en_SSR_L2: 0.88
    followbench_llmeval_en_SSR_L3: 0.87
    followbench_llmeval_en_SSR_L4: 0.87
    followbench_llmeval_en_SSR_L5: 0.85
    simpleqa_f1: 0.04

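# internlm2_5-7b-chat-1m is the 1M-token-context variant, hence presumably
# only long-context baselines are tracked for it.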
internlm2_5-7b-chat-1m-turbomind:
  long_context:
    ruler_8k_naive_average: 88.53
    ruler_32k_naive_average: 83.84
    ruler_128k_naive_average: 70.94
    NeedleBench-Overall-Score-8K_weighted_average: 91.89
    NeedleBench-Overall-Score-32K_weighted_average: 91.42
    NeedleBench-Overall-Score-128K_weighted_average: 88.57
    longbench_naive_average: 46.44
    longbench_zh_naive_average: 45.19
    longbench_en_naive_average: 45.71
    babilong_0k_naive_average: 79.3
    babilong_4k_naive_average: 67
    babilong_16k_naive_average: 52.7
    babilong_32k_naive_average: 48.9
    babilong_128k_naive_average: 40.8
    babilong_256k_naive_average: 23.5
    longbench_single-document-qa_naive_average: 43.56
    longbench_multi-document-qa_naive_average: 46.24
    longbench_summarization_naive_average: 24.32
    longbench_few-shot-learning_naive_average: 51.67
    longbench_synthetic-tasks_naive_average: 66.83
    longbench_code-completion_naive_average: 45.99