[Feat] refine docs and codes for more user guides (#409)

This commit is contained in:
Hubert 2023-09-18 16:12:13 +08:00 committed by GitHub
parent a11cb45c83
commit 2c15a0c01d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 22 additions and 10 deletions

View File

@@ -18,8 +18,11 @@ truthfulqa_infer_cfg = dict(
# Metrics such as 'truth' and 'info' needs # Metrics such as 'truth' and 'info' needs
# OPENAI_API_KEY with finetuned models in it. # OPENAI_API_KEY with finetuned models in it.
# Please use your own finetuned openai model with keys and refers to # Please use your own finetuned openai model with keys and refers to
# the source code for more details # the source code of `TruthfulQAEvaluator` for more details.
# Metrics such as 'bleurt', 'rouge', 'bleu' are free to test #
# If you cannot provide available models for 'truth' and 'info',
# and want to perform basic metric eval, please set
# `metrics=('bleurt', 'rouge', 'bleu')`
# When key is set to "ENV", the key will be fetched from the environment # When key is set to "ENV", the key will be fetched from the environment
# variable $OPENAI_API_KEY. Otherwise, set key in here directly. # variable $OPENAI_API_KEY. Otherwise, set key in here directly.

View File

@@ -20,8 +20,11 @@ truthfulqa_infer_cfg = dict(
# Metrics such as 'truth' and 'info' needs # Metrics such as 'truth' and 'info' needs
# OPENAI_API_KEY with finetuned models in it. # OPENAI_API_KEY with finetuned models in it.
# Please use your own finetuned openai model with keys and refers to # Please use your own finetuned openai model with keys and refers to
# the source code for more details # the source code of `TruthfulQAEvaluator` for more details.
# Metrics such as 'bleurt', 'rouge', 'bleu' are free to test #
# If you cannot provide available models for 'truth' and 'info',
# and want to perform basic metric eval, please set
# `metrics=('bleurt', 'rouge', 'bleu')`
# When key is set to "ENV", the key will be fetched from the environment # When key is set to "ENV", the key will be fetched from the environment
# variable $OPENAI_API_KEY. Otherwise, set key in here directly. # variable $OPENAI_API_KEY. Otherwise, set key in here directly.

View File

@@ -39,7 +39,9 @@ class TruthfulQAEvaluator(BaseEvaluator):
Args: Args:
truth_model (str): Truth model name. See "notes" for details. truth_model (str): Truth model name. See "notes" for details.
Defaults to ''.
info_model (str): Informativeness model name. See "notes" for details. info_model (str): Informativeness model name. See "notes" for details.
Defaults to ''.
metrics (tuple): Computing needed metrics for truthfulqa dataset. metrics (tuple): Computing needed metrics for truthfulqa dataset.
Supported metrics are `bleurt`, `rouge`, `bleu`, `truth`, `info`. Supported metrics are `bleurt`, `rouge`, `bleu`, `truth`, `info`.
key (str): Corresponding API key. If set to `ENV`, find it in key (str): Corresponding API key. If set to `ENV`, find it in
@@ -67,12 +69,11 @@ class TruthfulQAEvaluator(BaseEvaluator):
'bleu': 'bleu', 'bleu': 'bleu',
} }
def __init__( def __init__(self,
self, truth_model: str = '',
truth_model: str, # noqa info_model: str = '',
info_model: str, # noqa metrics=('bleurt', 'rouge', 'bleu', 'truth', 'info'),
metrics=('bleurt', 'rouge', 'bleu', 'truth', 'info'), key='ENV'):
key='ENV'):
self.API_MODEL = { self.API_MODEL = {
'truth': truth_model, 'truth': truth_model,
'info': info_model, 'info': info_model,
@@ -85,6 +86,11 @@ class TruthfulQAEvaluator(BaseEvaluator):
if metric in self.SCORE_KEY.keys(): if metric in self.SCORE_KEY.keys():
self.metrics.append(metric) self.metrics.append(metric)
if metric in self.API_MODEL.keys(): if metric in self.API_MODEL.keys():
assert self.API_MODEL.get(metric), \
f'`{metric}_model` should be set to perform API eval.' \
'If you want to perform basic metric eval, ' \
f'please refer to the docstring of {__file__} ' \
'for more details.'
self.api_metrics.append(metric) self.api_metrics.append(metric)
if self.api_metrics: if self.api_metrics: