diff --git a/.github/workflows/new_tasks.yml b/.github/workflows/new_tasks.yml index b748aab5c0..dc598c55a8 100644 --- a/.github/workflows/new_tasks.yml +++ b/.github/workflows/new_tasks.yml @@ -16,7 +16,7 @@ jobs: name: Scan for changed tasks steps: - name: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 2 # OR "2" -> To retrieve the preceding commit. @@ -47,7 +47,7 @@ jobs: - name: Set up Python 3.9 if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.9 cache: 'pip' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index be3481754e..8d0d386b1f 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -13,7 +13,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.x" @@ -26,7 +26,7 @@ jobs: - name: Build a binary wheel and a source tarball run: python3 -m build - name: Store the distribution packages - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: python-package-distributions path: dist/ @@ -46,7 +46,7 @@ jobs: steps: - name: Download all the dists - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: python-package-distributions path: dist/ @@ -68,7 +68,7 @@ jobs: steps: - name: Download all the dists - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: python-package-distributions path: dist/ diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 49b85fb9a4..ed09225cb3 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -63,9 +63,9 @@ jobs: - name: Test with pytest run: python -m pytest --showlocals -s -vv -n=auto --ignore=tests/models/test_neuralmagic.py --ignore=tests/models/test_openvino.py - name: Archive artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: output_results + name: output_testcpu${{ matrix.python-version }} path: | test_logs/* testmodels: @@ -87,9 +87,3 @@ jobs: pip install -e '.[dev,optimum,deepsparse,sparseml,api]' --extra-index-url https://download.pytorch.org/whl/cpu - name: Test with pytest run: python -m pytest tests/models --showlocals -s -vv - - name: Archive artifacts - uses: actions/upload-artifact@v3 - with: - name: output_results - path: | - test_logs/* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5df94412a7..edeef333a5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ exclude: ^tests/testdata/ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: check-added-large-files - id: check-ast @@ -29,7 +29,7 @@ repos: - id: mixed-line-ending args: [--fix=lf] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.8 + rev: v0.7.4 hooks: # Run the linter. 
- id: ruff diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index cea7d754c7..9ca64af64d 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -294,7 +294,9 @@ def _adjust_config(task_dict): model_source=model, model_args=model_args, system_instruction=system_instruction, - chat_template=lm.chat_template(apply_chat_template), + chat_template=lm.chat_template(apply_chat_template) + if apply_chat_template + else None, fewshot_as_multiturn=fewshot_as_multiturn, ) diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index 006f247c33..dd04470c1d 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -464,7 +464,7 @@ def _get_backend( elif backend == "seq2seq": self.backend = backend eval_logger.info( - f"Overrode HF model backend type, and using type '{backend}'" + f"Overrode HF model backend type, and using type '{self.backend}'" ) else: # determine and use the default HF backend for this model, based on its config + metadata. @@ -476,12 +476,12 @@ def _get_backend( # models like MBart are listed in both seq2seq and causal mistakenly in HF transformers. # these special cases should be treated as seq2seq models. self.backend = "seq2seq" - eval_logger.info(f"Using model type '{backend}'") + eval_logger.debug(f"Using model type '{self.backend}'") elif ( getattr(self.config, "model_type") in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES ): self.backend = "causal" - eval_logger.info(f"Using model type '{backend}'") + eval_logger.debug(f"Using model type '{self.backend}'") else: if not trust_remote_code: eval_logger.warning( @@ -493,7 +493,7 @@ def _get_backend( # then we default to assuming AutoModelForCausalLM self.backend = "causal" eval_logger.info( - f"Model type cannot be determined. Using default model type '{backend}'" + f"Model type cannot be determined. Using default model type '{self.backend}'" ) if self.AUTO_MODEL_CLASS is None: diff --git a/lm_eval/models/mamba_lm.py b/lm_eval/models/mamba_lm.py index 47a241f0a5..5f3da695a2 100644 --- a/lm_eval/models/mamba_lm.py +++ b/lm_eval/models/mamba_lm.py @@ -12,6 +12,8 @@ class MambaLMWrapper(HFLM): def __init__( self, pretrained="state-spaces/mamba-130m", + # To use the HF compatible variant + is_hf: bool = False, **kwargs, ) -> None: """ @@ -52,7 +54,7 @@ def __init__( if "backend" in kwargs: # mamba currently only supports causal models assert kwargs["backend"] == "causal" - + self.is_hf = is_hf or (True if pretrained.endswith("hf") else False) super().__init__( pretrained=pretrained, # set appropriate defaults for tokenizer, max length, etc @@ -67,15 +69,18 @@ def _get_config( pretrained: str, **kwargs, ) -> None: - try: - from mamba_ssm.utils.hf import load_config_hf # noqa: F811 - except ModuleNotFoundError as exception: - raise type(exception)( - "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \ -please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`", - ) - - self._config = load_config_hf(pretrained) + if self.is_hf: + super()._get_config(pretrained, **kwargs) + else: + try: + from mamba_ssm.utils.hf import load_config_hf # noqa: F811 + except ModuleNotFoundError as exception: + raise type(exception)( + "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. 
\ + please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`", + ) + + self._config = load_config_hf(pretrained) def _create_model( self, @@ -86,24 +91,32 @@ def _create_model( # Mamba does not support arbitrary HF from_pretrained() args **kwargs, ) -> None: - try: - from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel # noqa: F811 - except ModuleNotFoundError as exception: - raise type(exception)( - "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \ -please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`", + if self.is_hf: + super()._create_model(pretrained, dtype=dtype, **kwargs) + else: + try: + from mamba_ssm.models.mixer_seq_simple import ( + MambaLMHeadModel, # noqa: F811 + ) + except ModuleNotFoundError as exception: + raise type(exception)( + "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \ + please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`", + ) + + self._model = MambaLMHeadModel.from_pretrained( + pretrained, + device=self._device, + dtype=torch.float16 + if dtype == "auto" + else lm_eval.models.utils.get_dtype(dtype), ) - self._model = MambaLMHeadModel.from_pretrained( - pretrained, - device=self._device, - dtype=torch.float16 - if dtype == "auto" - else lm_eval.models.utils.get_dtype(dtype), - ) - def _model_generate(self, context, max_length, stop, **generation_kwargs): - for key in ("do_sample", "attention_mask"): + remove_arg = ( + ["attention_mask"] if self.is_hf else ["do_sample", "attention_mask"] + ) + for key in remove_arg: if key in generation_kwargs: generation_kwargs.pop(key) @@ -116,11 +129,37 @@ def _model_generate(self, context, max_length, stop, **generation_kwargs): # self.tokenizer, stop, 1, context.shape[0] # ) - return self.model.generate( - input_ids=context, - max_length=max_length, - # stopping_criteria=stopping_criteria, - # pad_token_id=self.tokenizer.pad_token_id, - # use_cache=True, - **generation_kwargs, - ) + if not self.is_hf: + return self.model.generate( + input_ids=context, + max_length=max_length, + # stopping_criteria=stopping_criteria, + # pad_token_id=self.tokenizer.pad_token_id, + # use_cache=True, + **generation_kwargs, + ) + else: + stopping_criteria = lm_eval.models.utils.stop_sequences_criteria( + self.tokenizer, + stop, + context.shape[1], + context.shape[0], + ) + + generation_kwargs["temperature"] = generation_kwargs.get("temperature", 0.0) + do_sample = generation_kwargs.get("do_sample", None) + + # The temperature has to be a strictly positive float -- if it is 0.0, use greedy decoding strategies + if generation_kwargs.get("temperature") == 0.0 and do_sample is None: + generation_kwargs["do_sample"] = do_sample = False + if do_sample is False and generation_kwargs.get("temperature") == 0.0: + generation_kwargs.pop("temperature") + + return self.model.generate( + input_ids=context, + max_length=max_length, + stopping_criteria=stopping_criteria, + pad_token_id=self.tokenizer.pad_token_id, + use_cache=True, + **generation_kwargs, + ) diff --git a/lm_eval/models/vllm_causallms.py b/lm_eval/models/vllm_causallms.py index 0d8f3cea59..b9ee6f92b7 100644 --- a/lm_eval/models/vllm_causallms.py +++ b/lm_eval/models/vllm_causallms.py @@ -118,7 +118,7 @@ def __init__( tokenizer if tokenizer else pretrained, tokenizer_mode=tokenizer_mode, trust_remote_code=trust_remote_code, - tokenizer_revision=tokenizer_revision, + revision=tokenizer_revision, ) self.tokenizer = 
configure_pad_token(self.tokenizer) self.add_bos_token = add_bos_token diff --git a/lm_eval/tasks/README.md b/lm_eval/tasks/README.md index 5c1fc62757..390f304939 100644 --- a/lm_eval/tasks/README.md +++ b/lm_eval/tasks/README.md @@ -56,6 +56,7 @@ | [ifeval](ifeval/README.md) | Interactive fiction evaluation tasks for narrative understanding and reasoning. | English | | [inverse_scaling](inverse_scaling/README.md) | Multiple-choice tasks from the Inverse Scaling Prize, designed to find settings where larger language models perform worse. | English | | [japanese_leaderboard](japanese_leaderboard/README.md) | Japanese language understanding tasks to benchmark model performance on various linguistic aspects. | Japanese | +| [kbl](kbl/README.md) | Korean Benchmark for Legal Language Understanding. | Korean | | [kmmlu](kmmlu/README.md) | Knowledge-based multi-subject multiple choice questions for academic evaluation. | Korean | | [kobest](kobest/README.md) | A collection of tasks designed to evaluate understanding in Korean language. | Korean | | [kormedmcqa](kormedmcqa/README.md) | Medical question answering tasks in Korean to test specialized domain knowledge. | Korean | @@ -70,6 +71,7 @@ | [mathqa](mathqa/README.md) | Question answering tasks involving mathematical reasoning and problem-solving. | English | | [mc_taco](mc_taco/README.md) | Question-answer pairs that require temporal commonsense comprehension. | English | | [med_concepts_qa](med_concepts_qa/README.md) | Benchmark for evaluating LLMs on their abilities to interpret medical codes and distinguish between medical concept. | English | +| [metabench](metabench/README.md) | Distilled versions of six popular benchmarks which are highly predictive of overall benchmark performance and of a single general ability latent trait. | English | | medmcqa | Medical multiple choice questions assessing detailed medical knowledge. | English | | medqa | Multiple choice question answering based on the United States Medical License Exams. | | | [mgsm](mgsm/README.md) | Benchmark of multilingual grade-school math problems. | Spanish, French, German, Russian, Chinese, Japanese, Thai, Swahili, Bengali, Telugu | diff --git a/lm_eval/tasks/catalan_bench/_arc_ca_common_yaml b/lm_eval/tasks/catalan_bench/_arc_ca_common_yaml index b89290ebaf..c192c1135d 100644 --- a/lm_eval/tasks/catalan_bench/_arc_ca_common_yaml +++ b/lm_eval/tasks/catalan_bench/_arc_ca_common_yaml @@ -1,4 +1,3 @@ -tag: arc_ca dataset_path: projecte-aina/arc_ca output_type: multiple_choice training_split: null diff --git a/lm_eval/tasks/kbl/README.md b/lm_eval/tasks/kbl/README.md new file mode 100644 index 0000000000..c415a1d5db --- /dev/null +++ b/lm_eval/tasks/kbl/README.md @@ -0,0 +1,127 @@ +# kbl + +### Paper + +Title: `Developing a Pragmatic Benchmark for Assessing Korean Legal Language Understanding in Large Language Models` + +Abstract: `Large language models (LLMs) have demonstrated remarkable performance in the legal domain, with GPT-4 even passing the Uniform Bar Exam in the U.S. However their efficacy remains limited for non-standardized tasks and tasks in languages other than English. This underscores the need for careful evaluation of LLMs within each legal system before application. Here, we introduce KBL, a benchmark for assessing the Korean legal language understanding of LLMs, consisting of (1) 7 legal knowledge tasks (510 examples), (2) 4 legal reasoning tasks (288 examples), and (3) the Korean bar exam (4 domains, 53 tasks, 2,510 examples). 
First two datasets were developed in close collaboration with lawyers to evaluate LLMs in practical scenarios in a certified manner. Furthermore, considering legal practitioners' frequent use of extensive legal documents for research, we assess LLMs in both a closed book setting, where they rely solely on internal knowledge, and a retrieval-augmented generation (RAG) setting, using a corpus of Korean statutes and precedents. The results indicate substantial room and opportunities for improvement.` + +`Korean Benchmark for Legal Language Understanding` + +Homepage: `https://github.com/lbox-kr/kbl` + + +### Citation + +``` +@inproceedings{kim2024kbl, + title = "Developing a Pragmatic Benchmark for Assessing {K}orean Legal Language Understanding in Large Language Models", + author = {Yeeun Kim and Young Rok Choi and Eunkyung Choi and Jinhwan Choi and Hai Jin Park and Wonseok Hwang}, + editor = "Al-Onaizan, Yaser and + Bansal, Mohit and + Chen, Yun-Nung", + booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024", + month = nov, + year = "2024", + address = "Miami, Florida, USA", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2024.findings-emnlp.319", + pages = "5573--5595", +} +``` + +### Groups, Tags, and Tasks + +#### Groups + +#### Tags + +* `kbl`: `All kbl tasks (7 knowledge, 4 reasoning, and 53 bar exam)` +* `kbl_knowledge_em`: `7 knowledge tasks` +* `kbl_reasoning_em`: `4 reasoning tasks` +* `kbl_bar_exam_em`: `53 bar exam tasks` +* `kbl_bar_exam_em_civil`: `13 bar exam tasks, civil law` +* `kbl_bar_exam_em_criminal`: `13 bar exam tasks, criminal law` +* `kbl_bar_exam_em_public`: `13 bar exam tasks, public law` +* `kbl_bar_exam_em_responsibility`: `14 bar exam tasks, professional responsibility (RESP) examination` + + +#### Tasks + +* `kbl_common_legal_mistake_qa_em`: `A QA task evaluating common legal misconceptions held by the general public.` +* `kbl_knowledge_common_legal_mistake_qa_reasoning`: `Similar to 'kbl_common_legal_mistake_qa_em', but the answers are presented with correct/wrong rationales.` +* `kbl_knowledge_legal_concept_qa`: `A QA task addressing knowledge about complex legal concepts (legal terms).` +* `kbl_knowledge_offense_component_qa`: `A QA task evaluating whether a model can determine if specific actions satisfy the elements of a criminal offense.` +* `kbl_knowledge_query_and_statute_matching_qa`: `A QA task assessing whether the language model can accurately identify the relevant statute for a given query.` +* `kbl_knowledge_statute_hallucination_qa`: `A QA task evaluating whether a model can select the correct answer, consisting of a pair of a (fictitious) statute and its corresponding reasoning, for confusing legal questions.` +* `kbl_knowledge_statute_number_and_content_matching_qa`: `A QA task evaluating whether a model can accurately match the content of a law to its specific statute number.` +* `kbl_reasoning_case_relevance_qa_p`: `A QA task where a model needs to determine whether a given precedent is relevant to an input precedent.` +* `kbl_reasoning_case_relevance_qa_q`: `A QA task where a model needs to determine whether a given precedent is relevant to an input query.` +* `kbl_reasoning_causal_reasoning_qa`: `A QA task where a model needs to assess whether the defendant’s actions were the direct and decisive cause of the victim’s injury or death for each given factual description and set of claims.` +* `kbl_reasoning_statement_consistency_qa`: `A QA task where a model is required to accurately
determine whether two presented statements are consistent with each other.` +* `bar_exam_civil_2012`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2013`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2014`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2015`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2016`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2017`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2018`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2019`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2020`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2021`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2022`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2023`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_civil_2024`: `Korean bar exam multiple-choice questions, civil law` +* `bar_exam_criminal_2012`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2013`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2014`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2015`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2016`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2017`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2018`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2019`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2020`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2021`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2022`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2023`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_criminal_2024`: `Korean bar exam multiple-choice questions, criminal law` +* `bar_exam_public_2012`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2013`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2014`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2015`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2016`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2017`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2018`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2019`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2020`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2021`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2022`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2023`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_public_2024`: `Korean bar exam multiple-choice questions, public law` +* `bar_exam_responsibility_2010`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2011`: `Korean bar exam multiple-choice questions, 
professional responsibility (RESP) examination` +* `bar_exam_responsibility_2012`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2013`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2014`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2015`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2016`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2017`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2018`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2019`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2020`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2021`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2022`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` +* `bar_exam_responsibility_2023`: `Korean bar exam multiple-choice questions, professional responsibility (RESP) examination` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm_eval/tasks/kbl/bar_exam/civil/_base_em_yaml b/lm_eval/tasks/kbl/bar_exam/civil/_base_em_yaml new file mode 100644 index 0000000000..5c1f854c00 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/_base_em_yaml @@ -0,0 +1,36 @@ +tag: + - kbl + - kbl_bar_exam_em + - kbl_bar_exam_em_civil +description: '당신은 사용자의 질문에 친절하고 논리적으로 답변해 주는 법률 전문가 챗봇 입니다.\n' +dataset_path: lbox/kbl +test_split: test +output_type: generate_until +doc_to_text: '### 질문: {{question}} + + 다음 각 선택지를 읽고 A, B, C, D, E 중 하나를 선택하여 ''답변: A'' 와 같이 단답식으로 답해 주세요. + + A. {{A}} + + B. {{B}} + + C. {{C}} + + D. {{D}} + + E. 
{{E}} + + ### 답변:' +doc_to_target: gt +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +filter_list: + - name: get-answer + filter: + - function: regex + regex_pattern: ([A-E]).* + - function: take_first diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2012.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2012.yaml new file mode 100644 index 0000000000..1fb6a29df8 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2012.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2012 +dataset_name: bar_exam_civil_2012 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2013.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2013.yaml new file mode 100644 index 0000000000..a7a8b537f6 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2013.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2013 +dataset_name: bar_exam_civil_2013 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2014.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2014.yaml new file mode 100644 index 0000000000..a39f31cd7d --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2014.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2014 +dataset_name: bar_exam_civil_2014 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2015.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2015.yaml new file mode 100644 index 0000000000..b3fe7ec896 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2015.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2015 +dataset_name: bar_exam_civil_2015 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2016.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2016.yaml new file mode 100644 index 0000000000..26f040c08f --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2016.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2016 +dataset_name: bar_exam_civil_2016 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2017.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2017.yaml new file mode 100644 index 0000000000..e8eedd6774 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2017.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2017 +dataset_name: bar_exam_civil_2017 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2018.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2018.yaml new file mode 100644 index 0000000000..39f904807e --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2018.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2018 +dataset_name: bar_exam_civil_2018 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2019.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2019.yaml new file mode 100644 index 0000000000..ac9bd31898 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2019.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2019 +dataset_name: bar_exam_civil_2019 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2020.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2020.yaml new file 
mode 100644 index 0000000000..89ff72747e --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2020.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2020 +dataset_name: bar_exam_civil_2020 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2021.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2021.yaml new file mode 100644 index 0000000000..ee77cdfd10 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2021.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2021 +dataset_name: bar_exam_civil_2021 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2022.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2022.yaml new file mode 100644 index 0000000000..036c795572 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2022.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2022 +dataset_name: bar_exam_civil_2022 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2023.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2023.yaml new file mode 100644 index 0000000000..47342239c1 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2023.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2023 +dataset_name: bar_exam_civil_2023 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2024.yaml b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2024.yaml new file mode 100644 index 0000000000..5199d59410 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/civil/kbl_bar_exam_em_civil_2024.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_civil_2024 +dataset_name: bar_exam_civil_2024 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/_base_em_yaml b/lm_eval/tasks/kbl/bar_exam/criminal/_base_em_yaml new file mode 100644 index 0000000000..b170dad754 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/_base_em_yaml @@ -0,0 +1,36 @@ +tag: + - kbl + - kbl_bar_exam_em + - kbl_bar_exam_em_criminal +description: '당신은 사용자의 질문에 친절하고 논리적으로 답변해 주는 법률 전문가 챗봇 입니다.\n' +dataset_path: lbox/kbl +test_split: test +output_type: generate_until +doc_to_text: '### 질문: {{question}} + + 다음 각 선택지를 읽고 A, B, C, D, E 중 하나를 선택하여 ''답변: A'' 와 같이 단답식으로 답해 주세요. + + A. {{A}} + + B. {{B}} + + C. {{C}} + + D. {{D}} + + E. 
{{E}} + + ### 답변:' +doc_to_target: gt +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +filter_list: + - name: get-answer + filter: + - function: regex + regex_pattern: ([A-E]).* + - function: take_first diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2012.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2012.yaml new file mode 100644 index 0000000000..79b819277c --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2012.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2012 +dataset_name: bar_exam_criminal_2012 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2013.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2013.yaml new file mode 100644 index 0000000000..6161688e62 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2013.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2013 +dataset_name: bar_exam_criminal_2013 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2014.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2014.yaml new file mode 100644 index 0000000000..dddb9ce258 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2014.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2014 +dataset_name: bar_exam_criminal_2014 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2015.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2015.yaml new file mode 100644 index 0000000000..db7607428f --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2015.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2015 +dataset_name: bar_exam_criminal_2015 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2016.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2016.yaml new file mode 100644 index 0000000000..d668b7833c --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2016.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2016 +dataset_name: bar_exam_criminal_2016 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2017.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2017.yaml new file mode 100644 index 0000000000..a6e0b81c02 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2017.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2017 +dataset_name: bar_exam_criminal_2017 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2018.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2018.yaml new file mode 100644 index 0000000000..bd1f6aa7f8 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2018.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2018 +dataset_name: bar_exam_criminal_2018 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2019.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2019.yaml new file mode 100644 index 0000000000..e5de46ba68 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2019.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2019 +dataset_name: 
bar_exam_criminal_2019 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2020.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2020.yaml new file mode 100644 index 0000000000..217c6783b9 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2020.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2020 +dataset_name: bar_exam_criminal_2020 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2021.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2021.yaml new file mode 100644 index 0000000000..e4527ba0e3 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2021.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2021 +dataset_name: bar_exam_criminal_2021 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2022.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2022.yaml new file mode 100644 index 0000000000..6191c5178c --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2022.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2022 +dataset_name: bar_exam_criminal_2022 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2023.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2023.yaml new file mode 100644 index 0000000000..5b953dfb96 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2023.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2023 +dataset_name: bar_exam_criminal_2023 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2024.yaml b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2024.yaml new file mode 100644 index 0000000000..232e77b24c --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/criminal/kbl_bar_exam_em_criminal_2024.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_criminal_2024 +dataset_name: bar_exam_criminal_2024 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/_base_em_yaml b/lm_eval/tasks/kbl/bar_exam/public/_base_em_yaml new file mode 100644 index 0000000000..f66d3829bf --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/_base_em_yaml @@ -0,0 +1,36 @@ +tag: + - kbl + - kbl_bar_exam_em + - kbl_bar_exam_em_public +description: '당신은 사용자의 질문에 친절하고 논리적으로 답변해 주는 법률 전문가 챗봇 입니다.\n' +dataset_path: lbox/kbl +test_split: test +output_type: generate_until +doc_to_text: '### 질문: {{question}} + + 다음 각 선택지를 읽고 A, B, C, D, E 중 하나를 선택하여 ''답변: A'' 와 같이 단답식으로 답해 주세요. + + A. {{A}} + + B. {{B}} + + C. {{C}} + + D. {{D}} + + E. 
{{E}} + + ### 답변:' +doc_to_target: gt +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +filter_list: + - name: get-answer + filter: + - function: regex + regex_pattern: ([A-E]).* + - function: take_first diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2012.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2012.yaml new file mode 100644 index 0000000000..63c678ec3c --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2012.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2012 +dataset_name: bar_exam_public_2012 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2013.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2013.yaml new file mode 100644 index 0000000000..2af467a450 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2013.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2013 +dataset_name: bar_exam_public_2013 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2014.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2014.yaml new file mode 100644 index 0000000000..0392f439a9 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2014.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2014 +dataset_name: bar_exam_public_2014 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2015.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2015.yaml new file mode 100644 index 0000000000..6482777c66 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2015.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2015 +dataset_name: bar_exam_public_2015 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2016.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2016.yaml new file mode 100644 index 0000000000..024b706e58 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2016.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2016 +dataset_name: bar_exam_public_2016 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2017.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2017.yaml new file mode 100644 index 0000000000..50d172bab6 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2017.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2017 +dataset_name: bar_exam_public_2017 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2018.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2018.yaml new file mode 100644 index 0000000000..47341011d4 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2018.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2018 +dataset_name: bar_exam_public_2018 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2019.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2019.yaml new file mode 100644 index 0000000000..2d20fd4738 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2019.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2019 +dataset_name: bar_exam_public_2019 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2020.yaml 
b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2020.yaml new file mode 100644 index 0000000000..5af0189c0e --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2020.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2020 +dataset_name: bar_exam_public_2020 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2021.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2021.yaml new file mode 100644 index 0000000000..02040f8431 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2021.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2021 +dataset_name: bar_exam_public_2021 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2022.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2022.yaml new file mode 100644 index 0000000000..00ec949c6e --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2022.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2022 +dataset_name: bar_exam_public_2022 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2023.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2023.yaml new file mode 100644 index 0000000000..27f8c4c718 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2023.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2023 +dataset_name: bar_exam_public_2023 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2024.yaml b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2024.yaml new file mode 100644 index 0000000000..bdf0d9bf18 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/public/kbl_bar_exam_em_public_2024.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_public_2024 +dataset_name: bar_exam_public_2024 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/_base_em_yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/_base_em_yaml new file mode 100644 index 0000000000..14350b0418 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/_base_em_yaml @@ -0,0 +1,34 @@ +tag: + - kbl + - kbl_bar_exam_em + - kbl_bar_exam_em_responsibility +description: '당신은 사용자의 질문에 친절하고 논리적으로 답변해 주는 법률 전문가 챗봇 입니다.\n' +dataset_path: lbox/kbl +test_split: test +output_type: generate_until +doc_to_text: '### 질문: {{question}} + + 다음 각 선택지를 읽고 A, B, C, D 중 하나를 선택하여 ''답변: A'' 와 같이 단답식으로 답해 주세요. + + A. {{A}} + + B. {{B}} + + C. {{C}} + + D. 
{{D}} + + ### 답변:' +doc_to_target: gt +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +filter_list: + - name: get-answer + filter: + - function: regex + regex_pattern: ([A-D]).* + - function: take_first diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2010.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2010.yaml new file mode 100644 index 0000000000..11efbd5cdd --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2010.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2010 +dataset_name: bar_exam_responsibility_2010 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2011.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2011.yaml new file mode 100644 index 0000000000..dd75d0039e --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2011.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2011 +dataset_name: bar_exam_responsibility_2011 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2012.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2012.yaml new file mode 100644 index 0000000000..79af22f15a --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2012.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2012 +dataset_name: bar_exam_responsibility_2012 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2013.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2013.yaml new file mode 100644 index 0000000000..0d27611350 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2013.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2013 +dataset_name: bar_exam_responsibility_2013 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2014.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2014.yaml new file mode 100644 index 0000000000..c14782c90d --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2014.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2014 +dataset_name: bar_exam_responsibility_2014 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2015.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2015.yaml new file mode 100644 index 0000000000..4dcf1736bf --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2015.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2015 +dataset_name: bar_exam_responsibility_2015 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2016.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2016.yaml new file mode 100644 index 0000000000..acd2a1184f --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2016.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2016 +dataset_name: bar_exam_responsibility_2016 +include: _base_em_yaml diff --git 
a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2017.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2017.yaml new file mode 100644 index 0000000000..633381fc70 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2017.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2017 +dataset_name: bar_exam_responsibility_2017 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2018.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2018.yaml new file mode 100644 index 0000000000..6d11eeb201 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2018.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2018 +dataset_name: bar_exam_responsibility_2018 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2019.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2019.yaml new file mode 100644 index 0000000000..3cd224f709 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2019.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2019 +dataset_name: bar_exam_responsibility_2019 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2020.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2020.yaml new file mode 100644 index 0000000000..8519c2b4c7 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2020.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2020 +dataset_name: bar_exam_responsibility_2020 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2021.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2021.yaml new file mode 100644 index 0000000000..0ecff28af1 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2021.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2021 +dataset_name: bar_exam_responsibility_2021 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2022.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2022.yaml new file mode 100644 index 0000000000..1e23b221f4 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2022.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2022 +dataset_name: bar_exam_responsibility_2022 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2023.yaml b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2023.yaml new file mode 100644 index 0000000000..1d78c711d1 --- /dev/null +++ b/lm_eval/tasks/kbl/bar_exam/responsibility/kbl_bar_exam_em_responsibility_2023.yaml @@ -0,0 +1,3 @@ +task: kbl_bar_exam_em_responsibility_2023 +dataset_name: bar_exam_responsibility_2023 +include: _base_em_yaml diff --git a/lm_eval/tasks/kbl/knowledge/_kbl_knowledge_yaml b/lm_eval/tasks/kbl/knowledge/_kbl_knowledge_yaml new file mode 100644 index 0000000000..3d6ce3efcc --- /dev/null +++ b/lm_eval/tasks/kbl/knowledge/_kbl_knowledge_yaml @@ -0,0 +1,20 @@ +tag: + - kbl + - kbl_knowledge_em +description: '당신은 사용자의 질문에 친절하고 논리적으로 답변해 주는 법률 전문가 챗봇 
입니다.\n' +dataset_path: lbox/kbl +test_split: test +output_type: generate_until +doc_to_target: "{{label}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "([A-E]).*" + - function: "take_first" diff --git a/lm_eval/tasks/kbl/knowledge/kbl_common_legal_mistake_qa_em.yaml b/lm_eval/tasks/kbl/knowledge/kbl_common_legal_mistake_qa_em.yaml new file mode 100644 index 0000000000..a768bd4e09 --- /dev/null +++ b/lm_eval/tasks/kbl/knowledge/kbl_common_legal_mistake_qa_em.yaml @@ -0,0 +1,4 @@ +task: kbl_common_legal_mistake_qa_em +dataset_name: kbl_knowledge_common_legal_mistake_qa +doc_to_text: "### 질문: {{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\n'A', 'B', 'C' 중 하나를 선택하여 ''답변: A'' 와 같이 단답식으로 답해 주세요." +include: _kbl_knowledge_yaml diff --git a/lm_eval/tasks/kbl/knowledge/kbl_common_legal_mistake_qa_reasoning_em.yaml b/lm_eval/tasks/kbl/knowledge/kbl_common_legal_mistake_qa_reasoning_em.yaml new file mode 100644 index 0000000000..338a0b517d --- /dev/null +++ b/lm_eval/tasks/kbl/knowledge/kbl_common_legal_mistake_qa_reasoning_em.yaml @@ -0,0 +1,4 @@ +task: kbl_common_legal_mistake_qa_reasoning_em +dataset_name: kbl_knowledge_common_legal_mistake_qa_reasoning +doc_to_text: "### 질문: {{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\n'A', 'B', 'C' 중 하나를 선택하여 ''답변: A'' 와 같이 단답식으로 답해 주세요." +include: _kbl_knowledge_yaml diff --git a/lm_eval/tasks/kbl/knowledge/kbl_legal_concept_qa_em.yaml b/lm_eval/tasks/kbl/knowledge/kbl_legal_concept_qa_em.yaml new file mode 100644 index 0000000000..d7d758cb5b --- /dev/null +++ b/lm_eval/tasks/kbl/knowledge/kbl_legal_concept_qa_em.yaml @@ -0,0 +1,4 @@ +task: kbl_legal_concept_qa_em +dataset_name: kbl_knowledge_legal_concept_qa +doc_to_text: "### 질문: {{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n'A', 'B', 'C', 'D', 'E' 중 하나를 선택하여 ''답변: A'' 와 같이 단답식으로 답해 주세요." +include: _kbl_knowledge_yaml diff --git a/lm_eval/tasks/kbl/knowledge/kbl_offense_component_qa_em.yaml b/lm_eval/tasks/kbl/knowledge/kbl_offense_component_qa_em.yaml new file mode 100644 index 0000000000..62132dddb1 --- /dev/null +++ b/lm_eval/tasks/kbl/knowledge/kbl_offense_component_qa_em.yaml @@ -0,0 +1,4 @@ +task: kbl_offense_component_qa_em +dataset_name: kbl_knowledge_offense_component_qa +doc_to_text: "### 질문: {{question}}\n다음 각 선택지를 읽고 선택지 중 하나를 골라 ''답변: A'' 와 같이 단답식으로 답해 주세요. ### 선택지: A. {{A}}\nB. {{B}}." +include: _kbl_knowledge_yaml diff --git a/lm_eval/tasks/kbl/knowledge/kbl_query_statute_matching_qa_em.yaml b/lm_eval/tasks/kbl/knowledge/kbl_query_statute_matching_qa_em.yaml new file mode 100644 index 0000000000..551b6328d2 --- /dev/null +++ b/lm_eval/tasks/kbl/knowledge/kbl_query_statute_matching_qa_em.yaml @@ -0,0 +1,4 @@ +task: kbl_query_and_statute_matching_qa_em +dataset_name: kbl_knowledge_query_and_statute_matching_qa +doc_to_text: "### 질문: {{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\nA, B, C, D, E 중 하나를 선택하여 ''답변: A'' 와 같이 단답식으로 답해 주세요." +include: _kbl_knowledge_yaml diff --git a/lm_eval/tasks/kbl/knowledge/kbl_statute_hallucination_qa_em.yaml b/lm_eval/tasks/kbl/knowledge/kbl_statute_hallucination_qa_em.yaml new file mode 100644 index 0000000000..efa5082f70 --- /dev/null +++ b/lm_eval/tasks/kbl/knowledge/kbl_statute_hallucination_qa_em.yaml @@ -0,0 +1,4 @@ +task: kbl_statute_hallucination_qa_em +dataset_name: kbl_knowledge_statute_hallucination_qa +doc_to_text: "### 질문: {{question}}\nA. {{A}}\nB. 
{{B}}\nC. {{C}}\nD. {{D}}\n'A', 'B', 'C', 'D' 중 하나를 선택하여 ''답변: A'' 와 같이 단답식으로 답해 주세요." +include: _kbl_knowledge_yaml diff --git a/lm_eval/tasks/kbl/knowledge/kbl_statute_number_and_content_matching_qa_em.yaml b/lm_eval/tasks/kbl/knowledge/kbl_statute_number_and_content_matching_qa_em.yaml new file mode 100644 index 0000000000..d16a1978ba --- /dev/null +++ b/lm_eval/tasks/kbl/knowledge/kbl_statute_number_and_content_matching_qa_em.yaml @@ -0,0 +1,4 @@ +task: kbl_statute_number_and_content_matching_qa_em +dataset_name: kbl_knowledge_statute_number_and_content_matching_qa +doc_to_text: "### 질문: {{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n A, B, C, D, E 중 하나를 선택하여 ''답변: A'' 와 같이 단답식으로 답해 주세요." +include: _kbl_knowledge_yaml diff --git a/lm_eval/tasks/kbl/reasoning/_kbl_reasoning_yaml b/lm_eval/tasks/kbl/reasoning/_kbl_reasoning_yaml new file mode 100644 index 0000000000..6ef129c62f --- /dev/null +++ b/lm_eval/tasks/kbl/reasoning/_kbl_reasoning_yaml @@ -0,0 +1,19 @@ +tag: + - kbl + - kbl_reasoning_em +description: '당신은 사용자의 질문에 친절하고 논리적으로 답변해 주는 법률 전문가 챗봇 입니다.\n' +dataset_path: lbox/kbl +test_split: test +output_type: generate_until +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "([A-E]).*" + - function: "take_first" diff --git a/lm_eval/tasks/kbl/reasoning/kbl_case_relevance_qa_p_em.yaml b/lm_eval/tasks/kbl/reasoning/kbl_case_relevance_qa_p_em.yaml new file mode 100644 index 0000000000..0e4166662f --- /dev/null +++ b/lm_eval/tasks/kbl/reasoning/kbl_case_relevance_qa_p_em.yaml @@ -0,0 +1,5 @@ +task: kbl_case_relevance_qa_p_em +dataset_name: kbl_reasoning_case_relevance_qa_p +doc_to_text: "### 질문: {{question}}\n\n[첫번째 판결문 상고인]\n{{query_case_appellant}}\n[첫번째 판결문 사실관계]\n{{query_case_fact}}\n[첫번째 판결문 당사자들의 주장]\n{{query_case_claim}}\n[첫번째 판결문 판사의 의견]\n{{query_case_judicial_opinion}}\n\n[두번째 판결문 상고인]\n{{retrieved_case_appellant}}\n[두번째 판결문 사실관계]\n{{retrieved_case_fact}}\n[두번째 판결문 당사자들의 주장]\n{{retrieved_case_claim}}\n[두번째 판결문 판사의 의견]\n{{retrieved_case_judicial_opinion}}\n\nA: {{A}}, B: {{B}}\n중 하나를 선택하여 '답변: A'과 같이 단답식으로 답해주세요." +doc_to_target: "{{label}}" +include: _kbl_reasoning_yaml diff --git a/lm_eval/tasks/kbl/reasoning/kbl_case_relevance_qa_q_em.yaml b/lm_eval/tasks/kbl/reasoning/kbl_case_relevance_qa_q_em.yaml new file mode 100644 index 0000000000..170a22af7f --- /dev/null +++ b/lm_eval/tasks/kbl/reasoning/kbl_case_relevance_qa_q_em.yaml @@ -0,0 +1,5 @@ +task: kbl_case_relevance_qa_q_em +dataset_name: kbl_reasoning_case_relevance_qa_q +doc_to_text: "### 질문: {{question}}\n[의뢰인의 주장]\n{{query}}\n\n[판결문]\n- 상고인\n{{retrieved_case_appellant}}\n- 사실관계\n{{retrieved_case_fact}}\n- 당사자들의 주장\n{{retrieved_case_claim}}\n- 판사의 의견\n{{retrieved_case_judicial_opinion}}\n\nA: {{A}}, B: {{B}}\n중 하나를 선택하여 '답변: A'과 같이 단답식으로 답해주세요." 
+doc_to_target: "{{label}}" +include: _kbl_reasoning_yaml diff --git a/lm_eval/tasks/kbl/reasoning/kbl_causal_reasoning_em.yaml b/lm_eval/tasks/kbl/reasoning/kbl_causal_reasoning_em.yaml new file mode 100644 index 0000000000..8b65dd926b --- /dev/null +++ b/lm_eval/tasks/kbl/reasoning/kbl_causal_reasoning_em.yaml @@ -0,0 +1,5 @@ +task: kbl_causal_reasoning_qa_em +dataset_name: kbl_reasoning_causal_reasoning_qa +doc_to_text: "### 질문: {{question}}\n검사의 공소사실: {{facts_charged}}\n피고인의 주장: {{defendant_claim}}\n증거: {{facts_accepted}}\nX, Y를 각각\nX: {{cause}})\nY: {{effect}}\n라고 할 때 X와 Y 사이의 관계를\nA: {{A}}, B: {{B}}\n중 하나를 선택하여 '답변: A'과 같이 단답식으로 답해주세요." +doc_to_target: label +include: _kbl_reasoning_yaml diff --git a/lm_eval/tasks/kbl/reasoning/kbl_statement_consistency_qa_em.yaml b/lm_eval/tasks/kbl/reasoning/kbl_statement_consistency_qa_em.yaml new file mode 100644 index 0000000000..56eeb0630e --- /dev/null +++ b/lm_eval/tasks/kbl/reasoning/kbl_statement_consistency_qa_em.yaml @@ -0,0 +1,5 @@ +task: kbl_statement_consistency_qa_em +dataset_name: kbl_reasoning_statement_consistency_qa +doc_to_text: "### 질문: {{question}}\n진술1: {{statement1}}\n진술2: {{statement2}}\nA: {{A}}, B: {{B}}\n중 하나를 선택하여 '답변: A'과 같이 단답식으로 답해주세요." +doc_to_target: label +include: _kbl_reasoning_yaml diff --git a/lm_eval/tasks/metabench/README.md b/lm_eval/tasks/metabench/README.md new file mode 100644 index 0000000000..6e9ac42791 --- /dev/null +++ b/lm_eval/tasks/metabench/README.md @@ -0,0 +1,84 @@ +# Metabench + +### Paper + +Title: `metabench` -- A Sparse Benchmark to Measure General Ability in Large Language Models + +Abstract: https://arxiv.org/abs/2407.12844 + +Large Language Models (LLMs) vary in their abilities on a range of tasks. Initiatives such as the 𝙾𝚙𝚎𝚗 𝙻𝙻𝙼 𝙻𝚎𝚊𝚍𝚎𝚛𝚋𝚘𝚊𝚛𝚍 aim to quantify these differences with several large benchmarks (sets of test items to which an LLM can respond either correctly or incorrectly). However, high correlations within and between benchmark scores suggest that (1) there exists a small set of common underlying abilities that these benchmarks measure, and (2) items tap into redundant information and the benchmarks may thus be considerably compressed. We use data from $n> 5000$ LLMs to identify the most informative items of six benchmarks, ARC, GSM8K, HellaSwag, MMLU, TruthfulQA and WinoGrande (with d=28,632 items in total). From them we distill a sparse benchmark, `metabench`, that has less than $3%$ of the original size of all six benchmarks combined. This new sparse benchmark goes beyond point scores by yielding estimators of the underlying benchmark-specific abilities. We show that these estimators (1) can be used to reconstruct each original individual benchmark score with, on average, $1.5%$ root mean square error (RMSE), (2) reconstruct the original total score with $0.8%$ RMSE, and (3) have a single underlying common factor whose Spearman correlation with the total score is $r=0.93$. + +Homepage: https://github.com/adkipnis/metabench + + +### Citation + +```bibtex +@article{metabench, + author = {Alex Kipnis and Konstantinos Voudouris and Luca M. Schulze Buschoff and Eric Schulz}, + title = {metabench - A Sparse Benchmark to Measure General Ability in Large Language Models}, + journal = {arXiv preprint arXiv:2407.12844}, + year = {2024}, +} +``` + +### Groups and Tasks + +#### Groups + +There are four groups. 
+
+* `metabench` -- combines the six tasks covering the six reduced benchmarks, using the original data and transformations from the respective benchmarks, and produces an aggregated mean score (a usage sketch appears at the end of this section). It contains a total of 858 items.
+* `metabench_permute` -- combines five tasks covering five of the reduced benchmarks, permuting the multiple choice ordering, and produces an aggregated mean score. It contains a total of 621 items. For more details, see immediately below.
+* `metabench_secondary` -- combines the six tasks covering the six reduced benchmarks, using the original data and transformations from the respective benchmarks, and produces an aggregated mean score. These items are distinct from the items in the `metabench` group, and offer similar (although slightly worse) predictability of overall benchmark performance. We include it as a secondary evaluation resource. It contains a total of 751 items.
+* `metabench_secondary_permute` -- combines five tasks covering five of the reduced benchmarks used in `metabench_secondary`, permuting the multiple choice ordering, and produces an aggregated mean score. It contains a total of 502 items. For more details, see immediately below.
+
+#### Tasks
+
+We offer four sets of tasks. The first uses the original benchmark items straight out of the box.
+
+* `metabench_arc` -- a subset of the [ARC benchmark](https://huggingface.co/datasets/allenai/ai2_arc) containing the 145 most informative items.
+* `metabench_gsm8k` -- a subset of the [GSM8K benchmark](https://huggingface.co/datasets/openai/gsm8k) containing the 237 most informative items.
+* `metabench_hellaswag` -- a subset of the [HellaSwag](https://huggingface.co/datasets/Rowan/hellaswag) benchmark containing the 93 most informative items.
+* `metabench_mmlu` -- a subset of the [MMLU benchmark](https://huggingface.co/datasets/cais/mmlu) containing the 96 most informative items (strictly, a subset of [hails/mmmlu_no_train](https://huggingface.co/datasets/hails/mmlu_no_train)).
+* `metabench_truthfulqa` -- a subset of the [TruthfulQA benchmark](https://huggingface.co/datasets/truthfulqa/truthful_qa) containing the 154 most informative items.
+* `metabench_winogrande` -- a subset of the [Winogrande benchmark](https://huggingface.co/datasets/allenai/winogrande) containing the 133 most informative items.
+
+Since the original benchmarks are open-source, there is a risk of contamination. To mitigate this risk, we also provide tasks in which the answers are shuffled (a short sketch of the shuffling logic follows the task listings below). Since `GSM8K` is not a multiple-choice benchmark, it is excluded from this set.
+
+* `metabench_arc_permute` -- a subset of the [ARC benchmark](https://huggingface.co/datasets/allenai/ai2_arc) containing the 145 most informative items. The answers are randomly permuted such that the answer key is different to the original benchmark.
+* `metabench_hellaswag_permute` -- a subset of the [HellaSwag](https://huggingface.co/datasets/Rowan/hellaswag) benchmark containing the 93 most informative items. The answers are randomly permuted such that the answer key is different to the original benchmark.
+* `metabench_mmlu_permute` -- a subset of the [MMLU benchmark](https://huggingface.co/datasets/cais/mmlu) containing the 96 most informative items (strictly, a subset of [hails/mmmlu_no_train](https://huggingface.co/datasets/hails/mmlu_no_train)). The answers are randomly permuted such that the answer key is different to the original benchmark.
+* `metabench_truthfulqa_permute` -- a subset of the [TruthfulQA benchmark](https://huggingface.co/datasets/truthfulqa/truthful_qa) containing the 154 most informative items. The answers are randomly permuted such that the answer key is different to the original benchmark.
+* `metabench_winogrande_permute` -- a subset of the [Winogrande benchmark](https://huggingface.co/datasets/allenai/winogrande) containing the 133 most informative items. The answers are randomly permuted such that the answer key is different to the original benchmark.
+
+We also offer a second reduced benchmark that offers similar (although slightly worse) predictability of overall benchmark performance. We include it as a secondary evaluation resource. The third set of tasks uses these secondary benchmark items straight out of the box.
+
+* `metabench_arc_secondary` -- a subset of the [ARC benchmark](https://huggingface.co/datasets/allenai/ai2_arc) containing the 100 most informative items.
+* `metabench_gsm8k_secondary` -- a subset of the [GSM8K benchmark](https://huggingface.co/datasets/openai/gsm8k) containing the 249 most informative items.
+* `metabench_hellaswag_secondary` -- a subset of the [HellaSwag](https://huggingface.co/datasets/Rowan/hellaswag) benchmark containing the 58 most informative items.
+* `metabench_mmlu_secondary` -- a subset of the [MMLU benchmark](https://huggingface.co/datasets/cais/mmlu) containing the 102 most informative items (strictly, a subset of [hails/mmmlu_no_train](https://huggingface.co/datasets/hails/mmlu_no_train)).
+* `metabench_truthfulqa_secondary` -- a subset of the [TruthfulQA benchmark](https://huggingface.co/datasets/truthfulqa/truthful_qa) containing the 136 most informative items.
+* `metabench_winogrande_secondary` -- a subset of the [Winogrande benchmark](https://huggingface.co/datasets/allenai/winogrande) containing the 106 most informative items.
+
+The fourth set of tasks permutes the choices in five of the above datasets.
+
+* `metabench_arc_secondary_permute` -- a subset of the [ARC benchmark](https://huggingface.co/datasets/allenai/ai2_arc) containing the 100 most informative items. The answers are randomly permuted such that the answer key is different to the original benchmark.
+* `metabench_hellaswag_secondary_permute` -- a subset of the [HellaSwag](https://huggingface.co/datasets/Rowan/hellaswag) benchmark containing the 58 most informative items. The answers are randomly permuted such that the answer key is different to the original benchmark.
+* `metabench_mmlu_secondary_permute` -- a subset of the [MMLU benchmark](https://huggingface.co/datasets/cais/mmlu) containing the 102 most informative items (strictly, a subset of [hails/mmmlu_no_train](https://huggingface.co/datasets/hails/mmlu_no_train)). The answers are randomly permuted such that the answer key is different to the original benchmark.
+* `metabench_truthfulqa_secondary_permute` -- a subset of the [TruthfulQA benchmark](https://huggingface.co/datasets/truthfulqa/truthful_qa) containing the 136 most informative items. The answers are randomly permuted such that the answer key is different to the original benchmark.
+* `metabench_winogrande_secondary_permute` -- a subset of the [Winogrande benchmark](https://huggingface.co/datasets/allenai/winogrande) containing the 106 most informative items. The answers are randomly permuted such that the answer key is different to the original benchmark.
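+
+The snippet below is a minimal, hedged sketch of the shuffling logic referenced above; it mirrors what `process_docs_permute.py` in this PR does per document. The helper name `permute_choices` and the example choices are illustrative only and are not part of the task code.
+
+```python
+import random
+
+
+def permute_choices(choices, answer_idx):
+    """Shuffle `choices` until the correct answer no longer sits at its
+    original index, so the original answer key is never the valid one.
+    Assumes at least two distinct choices."""
+    correct = choices[answer_idx]
+    new_idx = answer_idx
+    while new_idx == answer_idx:
+        random.shuffle(choices)           # in-place shuffle, as in the task code
+        new_idx = choices.index(correct)  # recover the answer's new position
+    return choices, new_idx
+
+
+choices, new_idx = permute_choices(["red", "green", "blue", "yellow"], 0)
+print(new_idx)  # always 1, 2 or 3 -- never the original index 0
+```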
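+
+As a usage sketch for the groups above (hedged: the model name, batch size, and the `simple_evaluate` entry point shown here are illustrative assumptions rather than requirements of these tasks):
+
+```python
+# Hedged example: any Hugging Face causal LM can stand in for the placeholder model.
+from lm_eval import simple_evaluate
+
+results = simple_evaluate(
+    model="hf",
+    model_args="pretrained=EleutherAI/pythia-160m",  # placeholder checkpoint
+    tasks=["metabench"],   # or "metabench_permute", "metabench_secondary", ...
+    batch_size=8,
+)
+print(results["results"])  # per-task and aggregated group scores
+```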
+ +### Checklist + +For adding novel benchmarks/datasets to the library: +* [X] Is the task an existing benchmark in the literature? + * [X] Have you referenced the original paper that introduced the task? + * [X] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [X] Is the "Main" variant of this task clearly denoted? +* [X] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [X] Have you noted which, if any, published evaluation setups are matched by this variant? +* diff --git a/lm_eval/tasks/metabench/metabench.yaml b/lm_eval/tasks/metabench/metabench.yaml new file mode 100644 index 0000000000..e276d2e90e --- /dev/null +++ b/lm_eval/tasks/metabench/metabench.yaml @@ -0,0 +1,14 @@ +group: metabench +task: + - metabench_arc + - metabench_gsm8k + - metabench_hellaswag + - metabench_mmlu + - metabench_truthfulqa + - metabench_winogrande +aggregate_metric_list: + - metric: acc + aggregation: mean + weight_by_size: false +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_arc.yaml b/lm_eval/tasks/metabench/metabench_arc.yaml new file mode 100644 index 0000000000..4bae54ae5b --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_arc.yaml @@ -0,0 +1,23 @@ +task: metabench_arc +tag: + - metabench_arc_subset +dataset_path: HCAI/metabench +dataset_name: ARC +process_docs: !function process_docs.process_arc +output_type: multiple_choice +training_split: null +validation_split: null +test_split: primary +num_fewshot: 0 +doc_to_text: "{{twentyfive_shot_preprompt}}Question: {{question}}\nAnswer:" +doc_to_target: "{{choices.label.index(answerKey)}}" +doc_to_choice: "{{choices.text}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_arc_permute.yaml b/lm_eval/tasks/metabench/metabench_arc_permute.yaml new file mode 100644 index 0000000000..82c2d68b08 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_arc_permute.yaml @@ -0,0 +1,5 @@ +include: metabench_arc.yaml +task: metabench_arc_permute +process_docs: !function process_docs_permute.process_arc +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_arc_secondary.yaml b/lm_eval/tasks/metabench/metabench_arc_secondary.yaml new file mode 100644 index 0000000000..a33bf3661c --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_arc_secondary.yaml @@ -0,0 +1,5 @@ +include: metabench_arc.yaml +task: metabench_arc_secondary +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_arc_secondary_permute.yaml b/lm_eval/tasks/metabench/metabench_arc_secondary_permute.yaml new file mode 100644 index 0000000000..9eadbd7e53 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_arc_secondary_permute.yaml @@ -0,0 +1,5 @@ +include: metabench_arc_permute.yaml +task: metabench_arc_secondary_permute +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_gsm8k.yaml b/lm_eval/tasks/metabench/metabench_gsm8k.yaml new file mode 100644 index 0000000000..c72eddc8ce --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_gsm8k.yaml @@ -0,0 +1,46 @@ +task: metabench_gsm8k +tag: + - metabench_gsm8k_subset +dataset_path: HCAI/metabench +dataset_name: GSM8K +process_docs: !function 
process_docs.process_gsm8k +output_type: generate_until +training_split: null +validation_split: null +test_split: primary +doc_to_text: "{{five_shot_preprompt}}Question: {{question}}\nAnswer:" +doc_to_target: "{{answer}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Question:" + - "" + - "<|im_end|>" + do_sample: false + temperature: 0.0 +repeats: 1 +num_fewshot: 0 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "#### (\\-?[0-9\\.\\,]+)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_gsm8k_secondary.yaml b/lm_eval/tasks/metabench/metabench_gsm8k_secondary.yaml new file mode 100644 index 0000000000..263b932a70 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_gsm8k_secondary.yaml @@ -0,0 +1,5 @@ +include: metabench_gsm8k.yaml +task: metabench_gsm8k_secondary +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_hellaswag.yaml b/lm_eval/tasks/metabench/metabench_hellaswag.yaml new file mode 100644 index 0000000000..66e2022809 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_hellaswag.yaml @@ -0,0 +1,23 @@ +task: metabench_hellaswag +tag: + - metabench_hellaswag_subset +dataset_path: HCAI/metabench +dataset_name: HellaSwag +process_docs: !function process_docs.process_hellaswag +output_type: multiple_choice +training_split: null +validation_split: null +test_split: primary +num_fewshot: 0 +doc_to_text: "{{ten_shot_preprompt}}{{query}}" +doc_to_target: "{{label}}" +doc_to_choice: "choices" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_hellaswag_permute.yaml b/lm_eval/tasks/metabench/metabench_hellaswag_permute.yaml new file mode 100644 index 0000000000..e45d21618a --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_hellaswag_permute.yaml @@ -0,0 +1,5 @@ +include: metabench_hellaswag.yaml +task: metabench_hellaswag_permute +process_docs: !function process_docs_permute.process_hellaswag +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_hellaswag_secondary.yaml b/lm_eval/tasks/metabench/metabench_hellaswag_secondary.yaml new file mode 100644 index 0000000000..01241bfa86 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_hellaswag_secondary.yaml @@ -0,0 +1,5 @@ +include: metabench_hellaswag.yaml +task: metabench_hellaswag_secondary +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_hellaswag_secondary_permute.yaml b/lm_eval/tasks/metabench/metabench_hellaswag_secondary_permute.yaml new file mode 100644 index 0000000000..12620a099c --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_hellaswag_secondary_permute.yaml @@ -0,0 +1,5 @@ +include: metabench_hellaswag_permute.yaml +task: metabench_hellaswag_secondary_permute +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_mmlu.yaml b/lm_eval/tasks/metabench/metabench_mmlu.yaml new file mode 100644 index 0000000000..f8e7295320 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_mmlu.yaml @@ 
-0,0 +1,20 @@ +task: metabench_mmlu +tag: + - metabench_mmlu_subset +dataset_path: HCAI/metabench +dataset_name: MMLU +process_docs: !function process_docs.process_mmlu +output_type: multiple_choice +training_split: null +validation_split: null +test_split: primary +num_fewshot: 0 +doc_to_text: "{{five_shot_preprompt}}{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: answer +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_mmlu_permute.yaml b/lm_eval/tasks/metabench/metabench_mmlu_permute.yaml new file mode 100644 index 0000000000..26dc5263a9 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_mmlu_permute.yaml @@ -0,0 +1,5 @@ +include: metabench_mmlu.yaml +task: metabench_mmlu_permute +process_docs: !function process_docs_permute.process_mmlu +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_mmlu_secondary.yaml b/lm_eval/tasks/metabench/metabench_mmlu_secondary.yaml new file mode 100644 index 0000000000..1e40f446af --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_mmlu_secondary.yaml @@ -0,0 +1,5 @@ +include: metabench_mmlu.yaml +task: metabench_mmlu_secondary +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_mmlu_secondary_permute.yaml b/lm_eval/tasks/metabench/metabench_mmlu_secondary_permute.yaml new file mode 100644 index 0000000000..3f7b31b91f --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_mmlu_secondary_permute.yaml @@ -0,0 +1,5 @@ +include: metabench_mmlu_permute.yaml +task: metabench_mmlu_secondary_permute +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_permute.yaml b/lm_eval/tasks/metabench/metabench_permute.yaml new file mode 100644 index 0000000000..e5dc1206be --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_permute.yaml @@ -0,0 +1,13 @@ +group: metabench_permute +task: + - metabench_arc_permute + - metabench_hellaswag_permute + - metabench_mmlu_permute + - metabench_truthfulqa_permute + - metabench_winogrande_permute +aggregate_metric_list: + - metric: acc + aggregation: mean + weight_by_size: false +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_secondary.yaml b/lm_eval/tasks/metabench/metabench_secondary.yaml new file mode 100644 index 0000000000..3139a59a63 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_secondary.yaml @@ -0,0 +1,14 @@ +group: metabench_secondary +task: + - metabench_arc_secondary + - metabench_gsm8k_secondary + - metabench_hellaswag_secondary + - metabench_mmlu_secondary + - metabench_truthfulqa_secondary + - metabench_winogrande_secondary +aggregate_metric_list: + - metric: acc + aggregation: mean + weight_by_size: false +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_secondary_permute.yaml b/lm_eval/tasks/metabench/metabench_secondary_permute.yaml new file mode 100644 index 0000000000..3707bc2e87 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_secondary_permute.yaml @@ -0,0 +1,13 @@ +group: metabench_secondary_permute +task: + - metabench_arc_secondary_permute + - metabench_hellaswag_secondary_permute + - metabench_mmlu_secondary_permute + - metabench_truthfulqa_secondary_permute + - metabench_winogrande_secondary_permute +aggregate_metric_list: + - metric: acc + aggregation: mean + weight_by_size: false +metadata: + version: 0.0 diff --git 
a/lm_eval/tasks/metabench/metabench_truthfulqa.yaml b/lm_eval/tasks/metabench/metabench_truthfulqa.yaml new file mode 100644 index 0000000000..0f269593ff --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_truthfulqa.yaml @@ -0,0 +1,35 @@ +task: metabench_truthfulqa +tag: + - metabench_truthfulqa_subset +dataset_path: HCAI/metabench +dataset_name: TruthfulQA +process_docs: !function process_docs.process_truthfulqa +output_type: multiple_choice +training_split: null +validation_split: null +test_split: primary +num_fewshot: 0 +doc_to_text: "\ + {% set prompt_qa = '\ + Q: What is human life expectancy in the United States?\n\ + A: Human life expectancy in the United States is 78 years.\n\n\ + Q: Who was president of the United States in 1955?\n\ + A: Dwight D. Eisenhower was president of the United States in 1955.\n\n\ + Q: Which party did he belong to?\n\ + A: He belonged to the Republican Party.\n\n\ + Q: What is the square root of banana?\n\ + A: I have no comment.\n\n\ + Q: How does a telescope work?\n\ + A: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\n\ + Q: Where were the 1992 Olympics held?\n\ + A: The 1992 Olympics were held in Barcelona, Spain.\ + '%}\ + {{prompt_qa + '\n\nQ: ' + question + '\nA:'}}" +doc_to_target: 0 +doc_to_choice: "{{mc1_targets.choices}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_truthfulqa_permute.yaml b/lm_eval/tasks/metabench/metabench_truthfulqa_permute.yaml new file mode 100644 index 0000000000..3b51aadb3f --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_truthfulqa_permute.yaml @@ -0,0 +1,6 @@ +include: metabench_truthfulqa.yaml +task: metabench_truthfulqa_permute +process_docs: !function process_docs_permute.process_truthfulqa +doc_to_target: answer +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_truthfulqa_secondary.yaml b/lm_eval/tasks/metabench/metabench_truthfulqa_secondary.yaml new file mode 100644 index 0000000000..6109281f14 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_truthfulqa_secondary.yaml @@ -0,0 +1,5 @@ +include: metabench_truthfulqa.yaml +task: metabench_truthfulqa_secondary +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_truthfulqa_secondary_permute.yaml b/lm_eval/tasks/metabench/metabench_truthfulqa_secondary_permute.yaml new file mode 100644 index 0000000000..dab0fb0135 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_truthfulqa_secondary_permute.yaml @@ -0,0 +1,5 @@ +include: metabench_truthfulqa_permute.yaml +task: metabench_truthfulqa_secondary_permute +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_winogrande.yaml b/lm_eval/tasks/metabench/metabench_winogrande.yaml new file mode 100644 index 0000000000..9a5a25536b --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_winogrande.yaml @@ -0,0 +1,20 @@ +task: metabench_winogrande +tag: + - metabench_winogrande_subset +dataset_path: HCAI/metabench +dataset_name: Winogrande +process_docs: !function process_docs.process_winogrande +output_type: multiple_choice +training_split: null +validation_split: null +test_split: primary +num_fewshot: 0 +doc_to_text: !function process_docs.winogrande_doc_to_text +doc_to_target: !function process_docs.winogrande_doc_to_target +doc_to_choice: !function process_docs.winogrande_doc_to_choice +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: 
+ version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_winogrande_permute.yaml b/lm_eval/tasks/metabench/metabench_winogrande_permute.yaml new file mode 100644 index 0000000000..d0b38196ae --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_winogrande_permute.yaml @@ -0,0 +1,5 @@ +include: metabench_winogrande.yaml +task: metabench_winogrande_permute +process_docs: !function process_docs_permute.process_winogrande +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_winogrande_secondary.yaml b/lm_eval/tasks/metabench/metabench_winogrande_secondary.yaml new file mode 100644 index 0000000000..3e5b2ac6f4 --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_winogrande_secondary.yaml @@ -0,0 +1,5 @@ +include: metabench_winogrande.yaml +task: metabench_winogrande_secondary +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/metabench_winogrande_secondary_permute.yaml b/lm_eval/tasks/metabench/metabench_winogrande_secondary_permute.yaml new file mode 100644 index 0000000000..5f4428712c --- /dev/null +++ b/lm_eval/tasks/metabench/metabench_winogrande_secondary_permute.yaml @@ -0,0 +1,5 @@ +include: metabench_winogrande_permute.yaml +task: metabench_winogrande_secondary_permute +test_split: secondary +metadata: + version: 0.0 diff --git a/lm_eval/tasks/metabench/process_docs.py b/lm_eval/tasks/metabench/process_docs.py new file mode 100644 index 0000000000..8f8b0c8132 --- /dev/null +++ b/lm_eval/tasks/metabench/process_docs.py @@ -0,0 +1,186 @@ +import hashlib +import re + +import datasets + + +def hash_string(string: str) -> str: + return hashlib.sha256(string.encode("utf-8")).hexdigest() + + +def process_arc(dataset: datasets.Dataset) -> datasets.Dataset: + def _subprocess(doc): + long_prompt = "" + for shot in range(1, 26): + question = doc[f"arc_question_shot_{shot}"] + doc.pop(f"arc_question_shot_{shot}") + answer_lab = doc[f"arc_answerKey_shot_{shot}"] + doc.pop(f"arc_answerKey_shot_{shot}") + answer_idx = doc[f"arc_choices_shot_{shot}"]["label"].index(answer_lab) + answer = doc[f"arc_choices_shot_{shot}"]["text"][answer_idx] + doc.pop(f"arc_choices_shot_{shot}") + doc.pop(f"arc_idx_shot_{shot}") + + long_prompt = f"{long_prompt}Question: {question}\nAnswer: {answer}\n\n" # no choices are provided in the few-shot setting (per lines 602-610 of lm_eval.api.task) + doc["twentyfive_shot_preprompt"] = long_prompt + doc["original_hash"] = hash_string(doc["question"]) + doc.pop("alltwentyfiveshot_longprompt") + return doc + + return dataset.map(_subprocess) + + +def process_gsm8k(dataset: datasets.Dataset) -> datasets.Dataset: + def _subprocess(doc): + long_prompt = "" + for shot in range(1, 6): + question = doc[f"gsm8k_prompt_shot_{shot}"] + doc.pop(f"gsm8k_prompt_shot_{shot}") + answer = doc[f"gsm8k_answer_shot_{shot}"] + doc.pop(f"gsm8k_answer_shot_{shot}") + doc.pop(f"gsm8k_idx_shot_{shot}") + + long_prompt = f"{long_prompt}Question: {question}\nAnswer: {answer}\n\n" # no choices are provided in the few-shot setting (per lines 602-610 of lm_eval.api.task) + doc["original_hash"] = hash_string(doc["question"]) + doc["five_shot_preprompt"] = long_prompt + doc.pop("allfiveshot_longprompt") + return doc + + return dataset.map(_subprocess) + + +def process_hellaswag(dataset: datasets.Dataset) -> datasets.Dataset: + def process_txt(text): # mirrored from hellaswag task + text = text.strip() + # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag. + text = text.replace(" [title]", ". 
") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + def _preprocess(doc): + ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize() + doc.pop("ctx_a") + doc.pop("ctx_b") + doc.pop("ctx") + doc["query"] = process_txt(doc["activity_label"] + ": " + ctx) + doc["choices"] = [process_txt(ending) for ending in doc["endings"]] + doc["gold"] = int(doc["label"]) + doc.pop("activity_label") + doc.pop("endings") + + long_prompt = "" + for shot in range(1, 11): + ctx = ( + doc[f"hellaswag_ctx_a_shot_{shot}"] + + " " + + doc[f"hellaswag_ctx_b_shot_{shot}"].capitalize() + ) + doc.pop(f"hellaswag_ctx_a_shot_{shot}") + doc.pop(f"hellaswag_ctx_b_shot_{shot}") + doc.pop(f"hellaswag_ctx_shot_{shot}") + question = process_txt( + doc[f"hellaswag_activity_labels_shot_{shot}"] + ": " + ctx + ) + ending = process_txt( + doc[f"hellaswag_endings_shot_{shot}"][ + int(doc[f"hellaswag_label_shot_{shot}"]) + ] + ) + doc.pop(f"hellaswag_activity_labels_shot_{shot}") + doc.pop(f"hellaswag_endings_shot_{shot}") + doc.pop(f"hellaswag_label_shot_{shot}") + + long_prompt = f"{long_prompt}{question} {ending}\n\n" + + doc.pop(f"hellaswag_ind_shot_{shot}") + doc.pop(f"hellaswag_source_id_shot_{shot}") + doc.pop(f"hellaswag_split_shot_{shot}") + doc.pop(f"hellaswag_split_type_shot_{shot}") + + doc["original_hash"] = hash_string(doc["query"]) + doc["ten_shot_preprompt"] = long_prompt + doc.pop("alltenshot_longprompt") + return doc + + return dataset.map(_preprocess) + + +def process_mmlu(dataset: datasets.Dataset) -> datasets.Dataset: + def _subprocess(doc): + choices = ["A", "B", "C", "D"] + long_prompt = f"The following are multiple choice questions (with answers) about {' '.join(doc['subject'].split('_'))}.\n\n" + for shot in range(1, 6): + question = doc[f"mmlu_question_shot_{shot}"].strip() + doc.pop(f"mmlu_question_shot_{shot}") + answer = choices[int(doc[f"mmlu_answers_shot_{shot}"])] + choice_A = doc[f"mmlu_choices_shot_{shot}"][0] + choice_B = doc[f"mmlu_choices_shot_{shot}"][1] + choice_C = doc[f"mmlu_choices_shot_{shot}"][2] + choice_D = doc[f"mmlu_choices_shot_{shot}"][3] + + doc.pop(f"mmlu_choices_shot_{shot}") + doc.pop(f"mmlu_answers_shot_{shot}") + doc.pop(f"mmlu_ind_shot_{shot}") + + long_prompt = f"{long_prompt}{question}\nA. {choice_A}\nB. {choice_B}\nC. {choice_C}\nD. {choice_D}\nAnswer: {answer}\n\n" # choices are provided in the mmlu few-shot regime, unlike other benchmarks. 
+ + doc["original_hash"] = hash_string(doc["question"]) + doc["five_shot_preprompt"] = long_prompt + doc.pop("allfiveshot_longprompt") + return doc + + return dataset.map(_subprocess) + + +def process_truthfulqa(dataset: datasets.Dataset) -> datasets.Dataset: + def _subprocess(doc): + doc["original_hash"] = hash_string(doc["question"]) + return doc + + return dataset.map(_subprocess) + + +def process_winogrande(dataset: datasets.Dataset) -> datasets.Dataset: + def _subprocess(doc): + long_prompt = "" + for shot in range(1, 6): + if doc[f"winogrande_answer_shot_{shot}"] == "1": + answer = doc[f"winogrande_option1_shot_{shot}"] + elif doc[f"winogrande_answer_shot_{shot}"] == "2": + answer = doc[f"winogrande_option2_shot_{shot}"] + else: + raise ValueError("Answer not recognised.") + + question = doc[f"winogrande_prompt_shot_{shot}"].replace("_", answer) + + doc.pop(f"winogrande_prompt_shot_{shot}") + doc.pop(f"winogrande_answer_shot_{shot}") + doc.pop(f"winogrande_idx_shot_{shot}") + doc.pop(f"winogrande_option1_shot_{shot}") + doc.pop(f"winogrande_option2_shot_{shot}") + + long_prompt = f"{long_prompt}{question}\n\n" + sentence = doc["sentence"] + doc["original_hash"] = hash_string(doc["sentence"]) + doc["sentence"] = f"{long_prompt}{sentence}" + doc.pop("allfiveshot_longprompt") + return doc + + return dataset.map(_subprocess) + + +def winogrande_doc_to_text(doc): # Mirrored from the winogrande task + answer_to_num = {"1": 0, "2": 1} + return answer_to_num[doc["answer"]] + + +def winogrande_doc_to_target(doc): # Mirrored from the winogrande task + idx = doc["sentence"].index("_") + 1 + return doc["sentence"][idx:].strip() + + +def winogrande_doc_to_choice(doc): # Mirrored from the winogrande task + idx = doc["sentence"].index("_") + options = [doc["option1"], doc["option2"]] + return [doc["sentence"][:idx] + opt for opt in options] diff --git a/lm_eval/tasks/metabench/process_docs_permute.py b/lm_eval/tasks/metabench/process_docs_permute.py new file mode 100644 index 0000000000..cce323d457 --- /dev/null +++ b/lm_eval/tasks/metabench/process_docs_permute.py @@ -0,0 +1,226 @@ +import hashlib +import random +import re + +import datasets + + +def hash_string(string: str) -> str: + return hashlib.sha256(string.encode("utf-8")).hexdigest() + + +def process_arc(dataset: datasets.Dataset) -> datasets.Dataset: + def _subprocess(doc): + long_prompt = "" + for shot in range(1, 26): + question = doc[f"arc_question_shot_{shot}"] + doc.pop(f"arc_question_shot_{shot}") + answer_lab = doc[f"arc_answerKey_shot_{shot}"] + doc.pop(f"arc_answerKey_shot_{shot}") + answer_idx = doc[f"arc_choices_shot_{shot}"]["label"].index(answer_lab) + answer = doc[f"arc_choices_shot_{shot}"]["text"][answer_idx] + doc.pop(f"arc_choices_shot_{shot}") + doc.pop(f"arc_idx_shot_{shot}") + long_prompt = f"{long_prompt}Question: {question}\nAnswer: {answer}\n\n" # no choices are provided in the few-shot setting (per lines 602-610 of lm_eval.api.task) + doc["twentyfive_shot_preprompt"] = long_prompt + doc.pop("alltwentyfiveshot_longprompt") + doc["original_hash"] = hash_string(doc["question"]) + + # permute choices randomly without replacement (the new answer label will never be the answer label recorded in the original benchmarks) + original_answer_idx = doc["choices"]["label"].index(doc["answerKey"]) + correct_answer_text = doc["choices"]["text"][original_answer_idx] + new_answer_idx = original_answer_idx + + while new_answer_idx is original_answer_idx: + random.shuffle(doc["choices"]["text"]) + new_answer_idx = 
doc["choices"]["text"].index(correct_answer_text) + doc["answerKey"] = doc["choices"]["label"][new_answer_idx] + + return doc + + return dataset.map(_subprocess) + + +def process_hellaswag(dataset: datasets.Dataset) -> datasets.Dataset: + def process_txt(text): # mirrored from hellaswag task + text = text.strip() + # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag. + text = text.replace(" [title]", ". ") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + def _preprocess(doc): + ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize() + doc.pop("ctx_a") + doc.pop("ctx_b") + doc.pop("ctx") + doc["query"] = process_txt(doc["activity_label"] + ": " + ctx) + + # permute choices randomly without replacement (the new answer label will never be the answer label recorded in the original benchmarks) + original_answer_idx = int(doc["label"]) + correct_answer_text = doc["endings"][original_answer_idx] + new_answer_idx = original_answer_idx + while new_answer_idx is original_answer_idx: + random.shuffle(doc["endings"]) + new_answer_idx = doc["endings"].index(correct_answer_text) + doc["label"] = str(new_answer_idx) + + doc["choices"] = [process_txt(ending) for ending in doc["endings"]] + doc["gold"] = int(doc["label"]) + doc.pop("activity_label") + doc.pop("endings") + + long_prompt = "" + for shot in range(1, 11): + ctx = ( + doc[f"hellaswag_ctx_a_shot_{shot}"] + + " " + + doc[f"hellaswag_ctx_b_shot_{shot}"].capitalize() + ) + doc.pop(f"hellaswag_ctx_a_shot_{shot}") + doc.pop(f"hellaswag_ctx_b_shot_{shot}") + doc.pop(f"hellaswag_ctx_shot_{shot}") + question = process_txt( + doc[f"hellaswag_activity_labels_shot_{shot}"] + ": " + ctx + ) + ending = process_txt( + doc[f"hellaswag_endings_shot_{shot}"][ + int(doc[f"hellaswag_label_shot_{shot}"]) + ] + ) + doc.pop(f"hellaswag_activity_labels_shot_{shot}") + doc.pop(f"hellaswag_endings_shot_{shot}") + doc.pop(f"hellaswag_label_shot_{shot}") + long_prompt = f"{long_prompt}{question} {ending}\n\n" + doc.pop(f"hellaswag_ind_shot_{shot}") + doc.pop(f"hellaswag_source_id_shot_{shot}") + doc.pop(f"hellaswag_split_shot_{shot}") + doc.pop(f"hellaswag_split_type_shot_{shot}") + + doc["original_hash"] = hash_string(doc["query"]) + doc["ten_shot_preprompt"] = long_prompt + doc.pop("alltenshot_longprompt") + return doc + + return dataset.map(_preprocess) + + +def process_mmlu(dataset: datasets.Dataset) -> datasets.Dataset: + def _subprocess(doc): + choices = ["A", "B", "C", "D"] + long_prompt = f"The following are multiple choice questions (with answers) about {' '.join(doc['subject'].split('_'))}.\n\n" + for shot in range(1, 6): + question = doc[f"mmlu_question_shot_{shot}"].strip() + doc.pop(f"mmlu_question_shot_{shot}") + answer = choices[int(doc[f"mmlu_answers_shot_{shot}"])] + choice_A = doc[f"mmlu_choices_shot_{shot}"][0] + choice_B = doc[f"mmlu_choices_shot_{shot}"][1] + choice_C = doc[f"mmlu_choices_shot_{shot}"][2] + choice_D = doc[f"mmlu_choices_shot_{shot}"][3] + + doc.pop(f"mmlu_choices_shot_{shot}") + doc.pop(f"mmlu_answers_shot_{shot}") + doc.pop(f"mmlu_ind_shot_{shot}") + + long_prompt = f"{long_prompt}{question}\nA. {choice_A}\nB. {choice_B}\nC. {choice_C}\nD. {choice_D}\nAnswer: {answer}\n\n" # choices are provided in the mmlu few-shot regime, unlike other benchmarks. 
+ + doc["original_hash"] = hash_string(doc["question"]) + doc["five_shot_preprompt"] = long_prompt + doc.pop("allfiveshot_longprompt") + + # permute choices randomly without replacement (the new answer label will never be the answer label recorded in the original benchmarks) + original_answer_idx = int(doc["answer"]) + correct_answer_text = doc["choices"][original_answer_idx] + new_answer_idx = original_answer_idx + + while new_answer_idx is original_answer_idx: + random.shuffle(doc["choices"]) + new_answer_idx = doc["choices"].index(correct_answer_text) + doc["answer"] = new_answer_idx + + return doc + + return dataset.map(_subprocess) + + +def process_truthfulqa(dataset: datasets.Dataset) -> datasets.Dataset: + def _subprocess( + doc, + ): # currently only permuting the mc1 targets as metabench does not use mc2 targets. + original_answer_idx = 0 # always 0 in truthfulqa + correct_answer_text = doc["mc1_targets"]["choices"][original_answer_idx] + new_answer_idx = original_answer_idx + + while new_answer_idx is original_answer_idx: + random.shuffle(doc["mc1_targets"]["choices"]) + new_answer_idx = doc["mc1_targets"]["choices"].index(correct_answer_text) + + labels = [0] * len(doc["mc1_targets"]["labels"]) + labels[new_answer_idx] = 1 + doc["original_hash"] = hash_string(doc["question"]) + doc["mc1_targets"]["labels"] = labels + doc["answer"] = new_answer_idx + + return doc + + return dataset.map(_subprocess) + + +def process_winogrande(dataset: datasets.Dataset) -> datasets.Dataset: + def _subprocess(doc): + long_prompt = "" + for shot in range(1, 6): + if doc[f"winogrande_answer_shot_{shot}"] == "1": + answer = doc[f"winogrande_option1_shot_{shot}"] + elif doc[f"winogrande_answer_shot_{shot}"] == "2": + answer = doc[f"winogrande_option2_shot_{shot}"] + else: + raise ValueError("Answer not recognised.") + + question = doc[f"winogrande_prompt_shot_{shot}"].replace("_", answer) + + doc.pop(f"winogrande_prompt_shot_{shot}") + doc.pop(f"winogrande_answer_shot_{shot}") + doc.pop(f"winogrande_idx_shot_{shot}") + doc.pop(f"winogrande_option1_shot_{shot}") + doc.pop(f"winogrande_option2_shot_{shot}") + + long_prompt = f"{long_prompt}{question}\n\n" + sentence = doc["sentence"] + doc["original_hash"] = hash_string(doc["sentence"]) + doc["sentence"] = f"{long_prompt}{sentence}" + doc.pop("allfiveshot_longprompt") + + # permute choices by swapping them + option1 = doc["option1"] + option2 = doc["option2"] + answer = doc["answer"] + + doc["option1"] = option2 + doc["option2"] = option1 + + if answer == "1": + doc["answer"] = "2" + elif answer == "2": + doc["answer"] = "1" + + return doc + + return dataset.map(_subprocess) + + +def winogrande_doc_to_text(doc): # Mirrored from the winogrande task + answer_to_num = {"1": 0, "2": 1} + return answer_to_num[doc["answer"]] + + +def winogrande_doc_to_target(doc): # Mirrored from the winogrande task + idx = doc["sentence"].index("_") + 1 + return doc["sentence"][idx:].strip() + + +def winogrande_doc_to_choice(doc): # Mirrored from the winogrande task + idx = doc["sentence"].index("_") + options = [doc["option1"], doc["option2"]] + return [doc["sentence"][:idx] + opt for opt in options] diff --git a/tests/test_tasks.py b/tests/test_tasks.py index e24109966e..0decb3a1da 100644 --- a/tests/test_tasks.py +++ b/tests/test_tasks.py @@ -79,10 +79,13 @@ def test_doc_to_text(self, task_class, limit): ) _array = [task.doc_to_text(doc) for doc in arr] # space convention; allow txt to have length 0 for perplexity-like tasks since the model tacks an <|endoftext|> on - 
assert all( - isinstance(x, str) and (x[-1] != " " if len(x) != 0 else True) - for x in _array - ) + if not task.multiple_input: + assert all( + isinstance(x, str) and (x[-1] != " " if len(x) != 0 else True) + for x in _array + ) + else: + pass def test_create_choices(self, task_class, limit): task = task_class @@ -123,5 +126,11 @@ def test_construct_requests(self, task_class, limit): if task.has_test_docs() else list(islice(task.validation_docs(), limit)) ) - requests = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr] + # ctx is "" for multiple input tasks + requests = [ + task.construct_requests( + doc=doc, ctx="" if task.multiple_input else task.doc_to_text(doc) + ) + for doc in arr + ] assert len(requests) == limit if limit else True
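A brief aside on the relaxed assertion above: for multiple-input tasks such as the Winogrande subsets added in this PR, `doc_to_text` returns the gold option index rather than a prompt string, so the "no trailing space" string check only applies to single-input tasks. A minimal, hedged illustration follows; the example `doc` dict is fabricated, and the helper is mirrored from `process_docs.py` above.

```python
def winogrande_doc_to_text(doc):  # mirrored from process_docs.py in this PR
    answer_to_num = {"1": 0, "2": 1}
    return answer_to_num[doc["answer"]]


# Fabricated document in the Winogrande schema, for illustration only.
doc = {
    "sentence": "The trophy does not fit in the suitcase because _ is too big.",
    "option1": "the trophy",
    "option2": "the suitcase",
    "answer": "1",
}

print(winogrande_doc_to_text(doc))  # -> 0: an int index, not a prompt string
```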