Mirror of https://github.com/open-compass/opencompass.git (synced 2025-05-30 16:03:24 +08:00)
[API] Update API (#624)
* update api
* update generation_kwargs impl
* update api
* refactor

Co-authored-by: Leymore <zfz-960727@163.com>
parent d4d1330a5a · commit 5202456b4c
@@ -5,8 +5,8 @@ from opencompass.runners.local_api import LocalAPIRunner
 from opencompass.tasks import OpenICLInferTask
 
 with read_base():
-    from .summarizers.medium import summarizer
-    from .datasets.ceval.ceval_gen import ceval_datasets
+    from ..summarizers.medium import summarizer
+    from ..datasets.ceval.ceval_gen import ceval_datasets
 
 datasets = [
     *ceval_datasets,
@@ -33,4 +33,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "./output/360GPT_S2_V9"
+work_dir = "./output/api_360GPT_S2_V9"
@@ -1,13 +1,12 @@
 from mmengine.config import read_base
 from opencompass.models import BaiChuan
-
 from opencompass.partitioners import NaivePartitioner
 from opencompass.runners.local_api import LocalAPIRunner
 from opencompass.tasks import OpenICLInferTask
 
 with read_base():
-    from .summarizers.medium import summarizer
-    from .datasets.ceval.ceval_gen import ceval_datasets
+    from ..summarizers.medium import summarizer
+    from ..datasets.ceval.ceval_gen import ceval_datasets
 
 datasets = [
     *ceval_datasets,
configs/api_examples/eval_api_baidu.py (new file, 38 lines)
@@ -0,0 +1,38 @@
from mmengine.config import read_base
from opencompass.models import ERNIEBot
from opencompass.partitioners import NaivePartitioner
from opencompass.runners.local_api import LocalAPIRunner
from opencompass.tasks import OpenICLInferTask

with read_base():
    from ..summarizers.medium import summarizer
    from ..datasets.ceval.ceval_gen import ceval_datasets

datasets = [
    *ceval_datasets,
]

models = [
    dict(
        abbr='erniebot',
        type=ERNIEBot,
        path='erniebot',
        key='xxxxxx',  # please provide your key
        secretkey='xxxxxxxxx',  # please provide your group_id
        url='xxxxxxxxx',
        query_per_second=1,
        max_out_len=2048,
        max_seq_len=2048,
        batch_size=8),
]

infer = dict(
    partitioner=dict(type=NaivePartitioner),
    runner=dict(
        type=LocalAPIRunner,
        max_num_workers=2,
        concurrent_users=2,
        task=dict(type=OpenICLInferTask)),
)

work_dir = "outputs/api_erniebot/"
configs/api_examples/eval_api_bytedance.py (new file, 39 lines)
@@ -0,0 +1,39 @@
from mmengine.config import read_base
from opencompass.models import ByteDance
from opencompass.partitioners import NaivePartitioner
from opencompass.runners.local_api import LocalAPIRunner
from opencompass.tasks import OpenICLInferTask

with read_base():
    # from .datasets.collections.chat_medium import datasets
    from ..summarizers.medium import summarizer
    from ..datasets.ceval.ceval_gen import ceval_datasets

datasets = [
    *ceval_datasets,
]

models = [
    dict(
        abbr='skylark-pro-public',
        type=ByteDance,
        path='skylark-pro-public',
        accesskey="xxxxxxx",
        secretkey="xxxxxxx",
        url='xxxxxx',
        query_per_second=1,
        max_out_len=2048,
        max_seq_len=2048,
        batch_size=8),
]

infer = dict(
    partitioner=dict(type=NaivePartitioner),
    runner=dict(
        type=LocalAPIRunner,
        max_num_workers=2,
        concurrent_users=2,
        task=dict(type=OpenICLInferTask)),
)

work_dir = "outputs/api_bytedance/"
@@ -1,14 +1,12 @@
 from mmengine.config import read_base
 from opencompass.models import MiniMax
 from opencompass.partitioners import NaivePartitioner
-from opencompass.runners import LocalRunner
 from opencompass.runners.local_api import LocalAPIRunner
 from opencompass.tasks import OpenICLInferTask
 
 with read_base():
     # from .datasets.collections.chat_medium import datasets
-    from .summarizers.medium import summarizer
-    from .datasets.ceval.ceval_gen import ceval_datasets
+    from ..summarizers.medium import summarizer
+    from ..datasets.ceval.ceval_gen import ceval_datasets
 
 datasets = [
     *ceval_datasets,
@@ -35,3 +33,5 @@ infer = dict(
         concurrent_users=4,
         task=dict(type=OpenICLInferTask)),
 )
+
+work_dir = "outputs/api_minimax/"
configs/api_examples/eval_api_moonshot.py (new file, 37 lines)
@@ -0,0 +1,37 @@
from mmengine.config import read_base
from opencompass.models import MoonShot
from opencompass.partitioners import NaivePartitioner
from opencompass.runners.local_api import LocalAPIRunner
from opencompass.tasks import OpenICLInferTask

with read_base():
    from ..summarizers.medium import summarizer
    from ..datasets.ceval.ceval_gen import ceval_datasets

datasets = [
    *ceval_datasets,
]

models = [
    dict(
        abbr='moonshot-v1-32k',
        type=MoonShot,
        path='moonshot-v1-32k',
        key='xxxxxxx',
        url='xxxxxxxx',
        query_per_second=1,
        max_out_len=2048,
        max_seq_len=2048,
        batch_size=8),
]

infer = dict(
    partitioner=dict(type=NaivePartitioner),
    runner=dict(
        type=LocalAPIRunner,
        max_num_workers=4,
        concurrent_users=4,
        task=dict(type=OpenICLInferTask)),
)

work_dir = "outputs/api_moonshot/"
@@ -1,13 +1,12 @@
 from mmengine.config import read_base
 from opencompass.models import PanGu
-
 from opencompass.partitioners import NaivePartitioner
 from opencompass.runners.local_api import LocalAPIRunner
 from opencompass.tasks import OpenICLInferTask
 
 with read_base():
-    from .summarizers.medium import summarizer
-    from .datasets.ceval.ceval_gen import ceval_datasets
+    from ..summarizers.medium import summarizer
+    from ..datasets.ceval.ceval_gen import ceval_datasets
 
 datasets = [
     *ceval_datasets,
@@ -5,8 +5,8 @@ from opencompass.runners.local_api import LocalAPIRunner
 from opencompass.tasks import OpenICLInferTask
 
 with read_base():
-    from .summarizers.medium import summarizer
-    from .datasets.ceval.ceval_gen import ceval_datasets
+    from ..summarizers.medium import summarizer
+    from ..datasets.ceval.ceval_gen import ceval_datasets
 
 datasets = [
     *ceval_datasets,
@@ -33,3 +33,5 @@ infer = dict(
         concurrent_users=2,
         task=dict(type=OpenICLInferTask)),
 )
+
+work_dir = "outputs/api_sensetime/"
@@ -1,14 +1,13 @@
 from mmengine.config import read_base
 from opencompass.models.xunfei_api import XunFei
 from opencompass.partitioners import NaivePartitioner
-from opencompass.runners import LocalRunner
 from opencompass.runners.local_api import LocalAPIRunner
 from opencompass.tasks import OpenICLInferTask
 
 with read_base():
     # from .datasets.collections.chat_medium import datasets
-    from .summarizers.medium import summarizer
-    from .datasets.ceval.ceval_gen import ceval_datasets
+    from ..summarizers.medium import summarizer
+    from ..datasets.ceval.ceval_gen import ceval_datasets
 
 datasets = [
     *ceval_datasets,
@@ -48,3 +47,5 @@ infer = dict(
         concurrent_users=2,
         task=dict(type=OpenICLInferTask)),
 )
+
+work_dir = "outputs/api_xunfei/"
@@ -1,14 +1,13 @@
 from mmengine.config import read_base
 from opencompass.models import ZhiPuAI
 from opencompass.partitioners import NaivePartitioner
-from opencompass.runners import LocalRunner
 from opencompass.runners.local_api import LocalAPIRunner
 from opencompass.tasks import OpenICLInferTask
 
 with read_base():
     # from .datasets.collections.chat_medium import datasets
-    from .summarizers.medium import summarizer
-    from .datasets.ceval.ceval_gen import ceval_datasets
+    from ..summarizers.medium import summarizer
+    from ..datasets.ceval.ceval_gen import ceval_datasets
 
 datasets = [
     *ceval_datasets,
@@ -34,3 +33,5 @@ infer = dict(
         concurrent_users=2,
         task=dict(type=OpenICLInferTask)),
 )
+
+work_dir = "outputs/api_zhipu/"
@@ -15,6 +15,7 @@ from .lightllm_api import LightllmAPI  # noqa: F401
 from .llama2 import Llama2, Llama2Chat  # noqa: F401, F403
 from .minimax_api import MiniMax  # noqa: F401
 from .modelscope import ModelScope, ModelScopeCausalLM  # noqa: F401, F403
+from .moonshot_api import MoonShot  # noqa: F401
 from .openai_api import OpenAI  # noqa: F401
 from .pangu_api import PanGu  # noqa: F401
 from .sensetime_api import SenseTime  # noqa: F401
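With this re-export in place, configs can pull the new wrapper straight from the package, exactly as configs/api_examples/eval_api_moonshot.py above does:

from opencompass.models import MoonShot  # resolves through the new entry in opencompass/models/__init__.py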
@@ -160,7 +160,6 @@ class AI360GPT(BaseAPIModel):
                     or raw_response.status_code == 429
                     or raw_response.status_code == 500):
                 print(raw_response.text)
-                # return ''
                 continue
             print(raw_response)
             max_num_retries += 1
@@ -150,14 +150,14 @@ class BaiChuan(BaseAPIModel):
                 self.wait()
                 continue
             if raw_response.status_code == 200 and response['code'] == 0:
                 # msg = json.load(response.text)
                 # response
-
                 msg = response['data']['messages'][0]['content']
                 return msg
 
             if response['code'] != 0:
                 print(response)
-                return ''
+                time.sleep(1)
+                continue
             print(response)
             max_num_retries += 1
@@ -1,3 +1,4 @@
+import time
 from concurrent.futures import ThreadPoolExecutor
 from typing import Dict, List, Optional, Union
 
@@ -165,8 +166,9 @@ class ByteDance(BaseAPIModel):
 
             if isinstance(response, MaasException):
-                print(response)
-                return ''
+                print(response)
+                time.sleep(1)
+                continue
 
             max_num_retries += 1
 
         raise RuntimeError(response)
@@ -1,3 +1,4 @@
+import time
 from concurrent.futures import ThreadPoolExecutor
 from typing import Dict, List, Optional, Union
 
@@ -168,7 +169,8 @@ class MiniMax(BaseAPIModel):
                     or response.status_code == 1039
                     or response.status_code == 2013):
                 print(response.text)
-                return ''
+                time.sleep(1)
+                continue
             print(response)
             max_num_retries += 1
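Across the AI360GPT, BaiChuan, ByteDance and MiniMax hunks above (and the PanGu hunk further down), the common change is that a transient API failure no longer short-circuits with `return ''`; the wrapper now logs the response, sleeps briefly, and retries until the attempt budget is exhausted, raising only when every retry has failed. A minimal sketch of that pattern, assuming a hypothetical `send_request` callable and `max_retries` budget (not OpenCompass APIs):

import time

def call_with_retry(send_request, max_retries=2):
    """Retry a flaky API call instead of returning '' on the first failure."""
    attempts = 0
    response = None
    while attempts < max_retries:
        response = send_request()
        if response.get('ok'):
            return response['content']
        print(response)  # log the failed attempt
        time.sleep(1)    # back off briefly before retrying
        attempts += 1
    raise RuntimeError(response)  # all retries exhausted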
opencompass/models/moonshot_api.py (new file, 163 lines)
@@ -0,0 +1,163 @@
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Optional, Union

import requests

from opencompass.utils.prompt import PromptList

from .base_api import BaseAPIModel

PromptType = Union[PromptList, str]


class MoonShot(BaseAPIModel):
    """Model wrapper around MoonShot.

    Documentation:

    Args:
        path (str): The name of MoonShot model.
            e.g. `moonshot-v1-32k`
        key (str): Authorization key.
        query_per_second (int): The maximum queries allowed per second
            between two consecutive calls of the API. Defaults to 1.
        max_seq_len (int): Unused here.
        meta_template (Dict, optional): The model's meta prompt
            template if needed, in case the requirement of injecting or
            wrapping of any meta instructions.
        retry (int): Number of retries if the API call fails. Defaults to 2.
    """

    def __init__(
        self,
        path: str,
        key: str,
        url: str,
        query_per_second: int = 2,
        max_seq_len: int = 2048,
        meta_template: Optional[Dict] = None,
        retry: int = 2,
    ):
        super().__init__(path=path,
                         max_seq_len=max_seq_len,
                         query_per_second=query_per_second,
                         meta_template=meta_template,
                         retry=retry)
        self.headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ' + key,
        }
        self.url = url
        self.model = path

    def generate(
        self,
        inputs: List[str or PromptList],
        max_out_len: int = 512,
    ) -> List[str]:
        """Generate results given a list of inputs.

        Args:
            inputs (List[str or PromptList]): A list of strings or PromptDicts.
                The PromptDict should be organized in OpenCompass'
                API format.
            max_out_len (int): The maximum length of the output.

        Returns:
            List[str]: A list of generated strings.
        """
        with ThreadPoolExecutor() as executor:
            results = list(
                executor.map(self._generate, inputs,
                             [max_out_len] * len(inputs)))
        self.flush()
        return results

    def _generate(
        self,
        input: str or PromptList,
        max_out_len: int = 512,
    ) -> str:
        """Generate results given an input.

        Args:
            inputs (str or PromptList): A string or PromptDict.
                The PromptDict should be organized in OpenCompass'
                API format.
            max_out_len (int): The maximum length of the output.

        Returns:
            str: The generated string.
        """
        assert isinstance(input, (str, PromptList))

        if isinstance(input, str):
            messages = [{'role': 'user', 'content': input}]
        else:
            messages = []
            for item in input:
                msg = {'content': item['prompt']}
                if item['role'] == 'HUMAN':
                    msg['role'] = 'user'
                elif item['role'] == 'BOT':
                    msg['role'] = 'assistant'

                messages.append(msg)

        system = {
            'role':
            'system',
            'content':
            '你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。'
            '你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一些涉及恐怖主义,种族歧视,'
            '黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。'
        }

        messages.insert(0, system)

        data = {
            'model': self.model,
            'messages': messages,
        }

        max_num_retries = 0
        while max_num_retries < self.retry:
            self.acquire()
            raw_response = requests.request('POST',
                                            url=self.url,
                                            headers=self.headers,
                                            json=data)

            response = raw_response.json()
            self.release()

            if response is None:
                print('Connection error, reconnect.')
                # if connect error, frequent requests will cause
                # continuous unstable network, therefore wait here
                # to slow down the request
                self.wait()
                continue

            if raw_response.status_code == 200:
                # msg = json.load(response.text)
                # response
                msg = response['choices'][0]['message']['content']
                return msg

            if raw_response.status_code == 403:
                print('请求被拒绝 api_key错误')
                continue
            elif raw_response.status_code == 400:
                print('请求失败,状态码:', raw_response)
                time.sleep(1)
                continue
            elif raw_response.status_code == 429:
                print('请求失败,状态码:', raw_response)
                time.sleep(3)
                continue

            max_num_retries += 1

        raise RuntimeError(raw_response)
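In `_generate` above, OpenCompass `PromptList` items are mapped onto Moonshot-style chat messages: `HUMAN` turns become `user` messages, `BOT` turns become `assistant` messages, and the fixed Kimi system prompt is inserted at position 0. A small illustration of that mapping with a made-up prompt (the example input is not part of the diff):

# Hypothetical PromptList-style input, used only to illustrate the role mapping.
prompt = [
    {'role': 'HUMAN', 'prompt': 'Summarize the C-Eval benchmark in one sentence.'},
    {'role': 'BOT', 'prompt': 'C-Eval is a multi-discipline Chinese evaluation suite.'},
    {'role': 'HUMAN', 'prompt': 'Which subjects does it cover?'},
]

messages = []
for item in prompt:
    msg = {'content': item['prompt']}
    if item['role'] == 'HUMAN':
        msg['role'] = 'user'       # HUMAN -> user
    elif item['role'] == 'BOT':
        msg['role'] = 'assistant'  # BOT -> assistant
    messages.append(msg)

# The wrapper then calls messages.insert(0, system) and posts
# {'model': self.model, 'messages': messages} to self.url.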
@@ -1,3 +1,4 @@
+import time
 from concurrent.futures import ThreadPoolExecutor
 from typing import Dict, List, Optional, Union
 
@@ -56,6 +57,14 @@ class PanGu(BaseAPIModel):
         self.project_name = project_name
         self.model = path
 
+        token_response = self._get_token()
+        if token_response.status_code == 201:
+            self.token = token_response.headers['X-Subject-Token']
+            print('请求成功!')
+        else:
+            self.token = None
+            print('token生成失败')
+
     def generate(
         self,
         inputs: List[str or PromptList],
@@ -139,16 +148,18 @@ class PanGu(BaseAPIModel):
 
         data = {'messages': messages, 'stream': False}
 
-        token_response = self._get_token()
-        if token_response.status_code == 201:
-            token = token_response.headers['X-Subject-Token']
-            print('请求成功!')
-        else:
-            msg = 'token生成失败'
-            print(msg)
-            return ''
+        # token_response = self._get_token()
+        # if token_response.status_code == 201:
+        #     self.token = token_response.headers['X-Subject-Token']
+        #     print('请求成功!')
+        # else:
+        #     self.token = None
+        #     print('token生成失败')
 
-        headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
+        headers = {
+            'Content-Type': 'application/json',
+            'X-Auth-Token': self.token
+        }
 
         max_num_retries = 0
         while max_num_retries < self.retry:
@@ -175,7 +186,9 @@ class PanGu(BaseAPIModel):
 
             if (raw_response.status_code != 200):
                 print(response['error_msg'])
-                return ''
+                # return ''
+                time.sleep(1)
+                continue
             print(response)
             max_num_retries += 1
 
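Net effect of the PanGu hunks: the auth token is requested once in `__init__` and cached on `self.token`, and `_generate` reuses that cached value in its headers instead of fetching a fresh token on every request (the old per-call fetch is left commented out). A minimal sketch of the caching pattern, using a hypothetical `fetch_token` callable purely for illustration:

class TokenCachingClient:
    """Illustrative only: fetch an auth token once and reuse it for every request."""

    def __init__(self, fetch_token):
        token_response = fetch_token()
        # 201 is the status the token endpoint returns on success in the hunk above.
        if token_response.status_code == 201:
            self.token = token_response.headers['X-Subject-Token']
        else:
            self.token = None  # later requests will fail visibly instead of silently

    def request_headers(self):
        return {'Content-Type': 'application/json', 'X-Auth-Token': self.token}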
@@ -127,9 +127,14 @@ class SenseTime(BaseAPIModel):
                 return msg
 
             if (raw_response.status_code != 200):
-                print(raw_response.text)
-                time.sleep(1)
-                continue
+                if response['error']['code'] == 18:
+                    # security issue
+                    return 'error:unsafe'
+                else:
+                    print(raw_response.text)
+                    time.sleep(1)
+                    continue
+
 
             print(response)
             max_num_retries += 1