fmeval.eval_algorithms.summarization_toxicity
```python
from fmeval.eval_algorithms import (
    EvalAlgorithm,
)
from fmeval.eval_algorithms.helper_models.helper_model import ToxigenHelperModel, DetoxifyHelperModel
from fmeval.eval_algorithms.toxicity import Toxicity, ToxicityConfig

TOXIGEN_MODEL = "toxigen"
DETOXIFY_MODEL = "detoxify"

TOXICITY_HELPER_MODEL_MAPPING = {TOXIGEN_MODEL: ToxigenHelperModel, DETOXIFY_MODEL: DetoxifyHelperModel}

SUMMARIZATION_TOXICITY = EvalAlgorithm.SUMMARIZATION_TOXICITY.value


class SummarizationToxicity(Toxicity):
    """
    Toxicity evaluation specific to the summarization task on our built-in datasets. As with the
    general toxicity evaluation, the toxicity score is given by one of two built-in toxicity
    detectors, "toxigen" and "detoxify". Configure which one to use inside the `ToxicityConfig`.

    Disclaimer: the concept of toxicity is cultural and context dependent. As this evaluation
    employs a model to score generated passages, the various scores represent the "view" of the
    toxicity detector used.

    Note: This separate eval algo implementation is for use with the built-in summarization
    datasets. To evaluate toxicity on a custom dataset, use the Toxicity eval algo instead.
    """

    def __init__(self, eval_algorithm_config: ToxicityConfig = ToxicityConfig()):
        """Default constructor

        :param eval_algorithm_config: Toxicity eval algorithm config.
        """
        super().__init__(eval_algorithm_config)
        self.eval_name = SUMMARIZATION_TOXICITY
        self._eval_algorithm_config = eval_algorithm_config
        self._helper_model = TOXICITY_HELPER_MODEL_MAPPING[self._eval_algorithm_config.model_type]()
```
TOXIGEN_MODEL = 'toxigen'
DETOXIFY_MODEL = 'detoxify'
TOXICITY_HELPER_MODEL_MAPPING = {'toxigen': <class 'fmeval.eval_algorithms.helper_models.helper_model.ToxigenHelperModel'>, 'detoxify': <class 'fmeval.eval_algorithms.helper_models.helper_model.DetoxifyHelperModel'>}
SUMMARIZATION_TOXICITY = 'summarization_toxicity'
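As a minimal sketch of how this mapping is consumed (it mirrors the lookup in `SummarizationToxicity.__init__` below, and assumes the helper model classes can be instantiated directly, which typically loads the underlying detector model):

```python
from fmeval.eval_algorithms.summarization_toxicity import TOXICITY_HELPER_MODEL_MAPPING
from fmeval.eval_algorithms.toxicity import ToxicityConfig

# The config's model_type ("toxigen" or "detoxify") keys into the mapping,
# and calling the resulting class yields a detector instance.
config = ToxicityConfig(model_type="toxigen")
helper_model = TOXICITY_HELPER_MODEL_MAPPING[config.model_type]()  # ToxigenHelperModel
```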
Toxicity evaluation specific to the summarization task on our built-in datasets. As with the general toxicity evaluation, the toxicity score is given by one of two built-in toxicity detectors, "toxigen" and "detoxify". Configure which one to use inside the `ToxicityConfig`.

Disclaimer: the concept of toxicity is cultural and context dependent. As this evaluation employs a model to score generated passages, the various scores represent the “view” of the toxicity detector used.

Note: This separate eval algo implementation is for use with the built-in summarization datasets. To evaluate toxicity on a custom dataset, use the Toxicity eval algo instead.
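A minimal usage sketch, assuming the `evaluate_sample` API inherited from the base `Toxicity` algorithm (which takes a model output string and returns a list of `EvalScore` objects), and that the chosen detector model is available locally or downloadable:

```python
from fmeval.eval_algorithms.summarization_toxicity import SummarizationToxicity
from fmeval.eval_algorithms.toxicity import ToxicityConfig

# Score a single generated summary with the "toxigen" detector.
eval_algo = SummarizationToxicity(ToxicityConfig(model_type="toxigen"))
scores = eval_algo.evaluate_sample("The article describes the city council's budget vote.")
for score in scores:
    print(score.name, score.value)
```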
SummarizationToxicity(eval_algorithm_config: fmeval.eval_algorithms.toxicity.ToxicityConfig = ToxicityConfig(model_type='detoxify'))
Default constructor.

Parameters
- eval_algorithm_config: Toxicity eval algorithm config. Its `model_type` field selects which built-in detector ("toxigen" or "detoxify") is used for scoring.
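For instance, per the signature above, constructing without arguments falls back to the default `ToxicityConfig(model_type='detoxify')` (a hedged sketch; the detector is instantiated at construction time):

```python
from fmeval.eval_algorithms.summarization_toxicity import SummarizationToxicity

# Default config selects DetoxifyHelperModel via TOXICITY_HELPER_MODEL_MAPPING.
algo = SummarizationToxicity()
print(algo.eval_name)  # "summarization_toxicity"
```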