From fef053d4d1ec13ea8c176beb6948edfdbf9b1bb9 Mon Sep 17 00:00:00 2001 From: Mike Lee Date: Fri, 16 Aug 2024 22:03:01 +0800 Subject: [PATCH] rename to MoE-PEFT --- .gitignore | 2 +- Install.md | 10 +- README.md | 30 ++-- inference.py | 4 +- launch.py | 6 +- misc/finetune-demo.ipynb | 16 +-- moe_peft.py | 2 +- moe_peft/__init__.py | 4 +- moe_peft/modules/mix_lora.py | 2 +- moe_peft/prompter.py | 2 +- pyproject.toml | 2 +- tests/dummy_data.json | 256 +++++++++++++++++------------------ tests/dummy_train.py | 2 +- 13 files changed, 167 insertions(+), 171 deletions(-) diff --git a/.gitignore b/.gitignore index d8abdb6..8d5ab12 100644 --- a/.gitignore +++ b/.gitignore @@ -162,7 +162,7 @@ cython_debug/ # IDEs .vscode/ -# MoE PEFT Factory +# MoE-PEFT __pycache__/ *.egg-info/ *.egg diff --git a/Install.md b/Install.md index e056c99..437ce92 100644 --- a/Install.md +++ b/Install.md @@ -1,4 +1,4 @@ -# Install MoE PEFT Factory +# Install MoE-PEFT ## Table of Contents @@ -44,7 +44,7 @@ moe_peft.backend.check_available() Expected output: ``` -MoE PEFT Factory: NVIDIA CUDA initialized successfully. +MoE-PEFT: NVIDIA CUDA initialized successfully. ``` ## Linux (Ubuntu, Debian, Fedora, etc.) @@ -94,7 +94,7 @@ moe_peft.backend.check_available() Expected output: ``` -MoE PEFT Factory: NVIDIA CUDA initialized successfully. +MoE-PEFT: NVIDIA CUDA initialized successfully. ``` ## Microsoft Windows @@ -144,7 +144,7 @@ moe_peft.backend.check_available() Expected output: ``` -MoE PEFT Factory: NVIDIA CUDA initialized successfully. +MoE-PEFT: NVIDIA CUDA initialized successfully. ``` ## Apple macOS @@ -189,5 +189,5 @@ moe_peft.backend.check_available() Expected output: ``` -MoE PEFT Factory: APPLE MPS initialized successfully. +MoE-PEFT: APPLE MPS initialized successfully. ``` \ No newline at end of file diff --git a/README.md b/README.md index e976b9e..73f9783 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# MoE PEFT Factory: An Efficient LLM Fine-tuning Framework +# MoE-PEFT: An Efficient LLM Fine-Tuning Factory Optimized for MoE PEFT [![](https://github.com/TUDB-Labs/MoE-PEFT/actions/workflows/python-test.yml/badge.svg)](https://github.com/TUDB-Labs/MoE-PEFT/actions/workflows/python-test.yml) [![](https://img.shields.io/github/stars/TUDB-Labs/MoE-PEFT?logo=GitHub&style=flat)](https://github.com/TUDB-Labs/MoE-PEFT/stargazers) [![](https://img.shields.io/github/v/release/TUDB-Labs/MoE-PEFT?logo=Github)](https://github.com/TUDB-Labs/MoE-PEFT/releases/latest) @@ -6,15 +6,15 @@ [![](https://img.shields.io/docker/v/mikecovlee/moe_peft?logo=Docker&label=docker)](https://hub.docker.com/r/mikecovlee/moe_peft/tags) [![](https://img.shields.io/github/license/TUDB-Labs/MoE-PEFT)](http://www.apache.org/licenses/LICENSE-2.0) -MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for high-throughput fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others. Key features of MoE PEFT Factory include: +MoE-PEFT is an open-source *LLMOps* framework built on [m-LoRA](https://github.com/TUDB-Labs/mLoRA) developed by the [IDs Lab](https://ids-lab-asia.github.io) at Sichuan University. It is designed for high-throughput fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others. Key features of MoE-PEFT include: - Concurrent fine-tuning of multiple adapters with a shared pre-trained model. 
- Support for multiple PEFT algorithms and various pre-trained models.

-- Mo-LoRA (Mixture of LoRAs) optimization, mainly for [MixLoRA](https://github.com/TUDB-Labs/MixLoRA).
+- MoE PEFT optimization, mainly for [MixLoRA](https://github.com/TUDB-Labs/MixLoRA).

-You can try MoE PEFT Factory with [Google Colab](https://githubtocolab.com/TUDB-Labs/MoE-PEFT/blob/main/misc/finetune-demo.ipynb) before local installation.
+You can try MoE-PEFT with [Google Colab](https://githubtocolab.com/TUDB-Labs/MoE-PEFT/blob/main/misc/finetune-demo.ipynb) before local installation.

## Supported Platform

| macOS | MPS | FP32, FP16, BF16 | ✗ | ✗ |
| All | CPU | FP32, FP16, BF16 | ✗ | ✗ |

-You can use the `MOE_PEFT_BACKEND_TYPE` environment variable to force MoE PEFT Factory to use a specific backend. For example, if you want MoE PEFT Factory to run only on CPU, you can set `MOE_PEFT_BACKEND_TYPE=CPU` before importing `moe_peft`.
+You can use the `MOE_PEFT_BACKEND_TYPE` environment variable to force MoE-PEFT to use a specific backend. For example, if you want MoE-PEFT to run only on CPU, you can set `MOE_PEFT_BACKEND_TYPE=CPU` before importing `moe_peft`.

## Supported Pre-trained Models

*: Arguments of configuration file

### Notice of PEFT supports
-1. MoE PEFT Factory supports specific optimized operators for these PEFT methods, which can effectively improve the computing performance during training, evaluation and inference. However, these operators may cause a certain degree of accuracy loss (less than 5%). You can disable the optimized operators by defining the `MOE_PEFT_EVALUATE_MODE` environment variable in advance.
+1. MoE-PEFT supports specific optimized operators for these PEFT methods, which can effectively improve computing performance during training, evaluation, and inference. However, these operators may cause a certain degree of accuracy loss (less than 5%). You can disable the optimized operators by defining the `MOE_PEFT_EVALUATE_MODE` environment variable in advance.
2. Auxiliary Loss is not currently supported for Mo-LoRA (Mixture of LoRAs) methods other than MixLoRA.
3. You can check detailed arguments of MixLoRA in [TUDB-Labs/MixLoRA](https://github.com/TUDB-Labs/MixLoRA).

*: Arguments of `moe_peft.py`

-MoE PEFT Factory only supports scaled-dot product attention (eager) by default. Additional requirements are necessary for flash attention.
+MoE-PEFT only supports scaled dot-product attention (eager) by default. Additional requirements are necessary for flash attention.

For flash attention, manual installation of the following dependencies is required:

If any attention method is not specified, flash attention is used if available.

*: Arguments of `moe_peft.py`

-MoE PEFT Factory offers support for various model accuracy and quantization methods. By default, MoE PEFT Factory utilizes full precision (Float32), but users can opt for half precision (Float16) using `--fp16` or BrainFloat16 using `--bf16`. Enabling half precision reduces the model size by half, and for further reduction, quantization methods can be employed.
+MoE-PEFT offers support for various model accuracy and quantization methods. By default, MoE-PEFT utilizes full precision (Float32), but users can opt for half precision (Float16) using `--fp16` or BrainFloat16 using `--bf16`. Enabling half precision reduces the model size by half, and for further reduction, quantization methods can be employed.

-Quantization can be activated using `--load_4bit` for 4-bit quantization or `--load_8bit` for 8-bit quantization. However, when only quantization is enabled, MoE PEFT Factory utilizes Float32 for calculations. To achieve memory savings during training, users can combine quantization and half-precision modes.
+Quantization can be activated using `--load_4bit` for 4-bit quantization or `--load_8bit` for 8-bit quantization. However, when only quantization is enabled, MoE-PEFT utilizes Float32 for calculations. To achieve memory savings during training, users can combine quantization and half-precision modes.

To enable quantization support, please manually install `bitsandbytes`:

```bash
pip3 install bitsandbytes==0.43.1
```

-It's crucial to note that regardless of the settings, **LoRA weights are always calculated and stored at full precision**. For maintaining calculation accuracy, MoE PEFT Factory framework mandates the use of full precision for calculations when accuracy is imperative.
+It's crucial to note that regardless of the settings, **LoRA weights are always calculated and stored at full precision**. To maintain calculation accuracy, the MoE-PEFT framework mandates the use of full precision for calculations when accuracy is imperative.

For users with NVIDIA Ampere or newer GPU architectures, the `--tf32` option can be utilized to enable full-precision calculation acceleration.

## Offline Configuration

-MoE PEFT Factory relies on **HuggingFace Hub** to download necessary models, datasets, etc. If you cannot access the Internet or need to deploy MoE PEFT Factory in an offline environment, please refer to the following guide.
+MoE-PEFT relies on **HuggingFace Hub** to download necessary models, datasets, etc. If you cannot access the Internet or need to deploy MoE-PEFT in an offline environment, please refer to the following guide.

1. Use `git-lfs` manually downloads models and datasets from [HuggingFace Hub](https://huggingface.co).
2. Set `--data_path` to the local path to datasets when executing `launch.py gen`.

Example of (4): `export MOE_PEFT_METRIC_PATH=/path-to-your-git-repo/evaluate/metrics`

## Installation

-Please refer to [MoE PEFT Factory Install Guide](./Install.md).
+Please refer to [MoE-PEFT Install Guide](./Install.md).

## Quickstart

-You can conveniently utilize MoE PEFT Factory via `launch.py`. The following example demonstrates a streamlined approach to training a dummy model with MoE PEFT Factory.
+You can conveniently utilize MoE-PEFT via `launch.py`. The following example demonstrates a streamlined approach to training a dummy model with MoE-PEFT.

```bash
# Generating configuration

For further detailed usage information, please refer to the `help` command:

```bash
python launch.py help
```

-## MoE PEFT Factory
+## MoE-PEFT

The `moe_peft.py` code is a starting point for finetuning on various datasets.

```bash
docker run --gpus all -it --rm mikecovlee/moe_peft
```

You can check all available tags from: [mikecovlee/moe_peft/tags](https://hub.docker.com/r/mikecovlee/moe_peft/tags)

-Please note that this container only provides a proper environment to run MoE PEFT Factory. The codes of MoE PEFT Factory are not included.
+Please note that this container only provides a proper environment to run MoE-PEFT. The MoE-PEFT source code is not included.

## Copyright
Copyright © 2023-2024 IDs Lab, Sichuan University

diff --git a/inference.py b/inference.py
index 8a173b4..7f99bdb 100644
--- a/inference.py
+++ b/inference.py
@@ -61,7 +61,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
self.stop_now = True


-placeholder_text = "Could you provide an introduction to MoE PEFT Factory?"
+placeholder_text = "Could you provide an introduction to MoE-PEFT?"


def main(
@@ -179,7 +179,7 @@ def generate_with_streaming(**kwargs):
label="Output",
)
],
- title="MoE PEFT Factory LLM Evaluator",
+ title="MoE-PEFT LLM Evaluator",
description="Evaluate language models and LoRA weights", # noqa: E501
).queue().launch(server_name=server_name, share=share_gradio)

diff --git a/launch.py b/launch.py
index 56b9afa..f509c13 100755
--- a/launch.py
+++ b/launch.py
@@ -166,15 +166,13 @@ def avail_tasks():
print("Available task names:")
for name in moe_peft.tasks.task_dict.keys():
print(f" {name}")
- print(
- "These tasks can be trained and evaluated automatically using MoE PEFT Factory."
- )
+ print("These tasks can be trained and evaluated automatically using MoE-PEFT.")


def show_help():
print(
"""
- Launcher of MoE PEFT Factory
+ Launcher of MoE-PEFT
Usage: python launch.py COMMAND [ARGS...]
Command:
gen generate a configuration from template

diff --git a/misc/finetune-demo.ipynb b/misc/finetune-demo.ipynb
index 0a2f26e..612e5e8 100644
--- a/misc/finetune-demo.ipynb
+++ b/misc/finetune-demo.ipynb
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# MoE PEFT Factory: An Efficient LLM Fine-tuning Framework\n",
+ "# MoE-PEFT: An Efficient LLM Fine-Tuning Factory Optimized for MoE PEFT\n",
"[![](https://github.com/TUDB-Labs/MoE-PEFT/actions/workflows/python-test.yml/badge.svg)](https://github.com/TUDB-Labs/MoE-PEFT/actions/workflows/python-test.yml)\n",
"[![](https://img.shields.io/github/stars/TUDB-Labs/MoE-PEFT?logo=GitHub&style=flat)](https://github.com/TUDB-Labs/MoE-PEFT/stargazers)\n",
"[![](https://img.shields.io/github/v/release/TUDB-Labs/MoE-PEFT?logo=Github)](https://github.com/TUDB-Labs/MoE-PEFT/releases/latest)\n",
@@ -12,15 +12,13 @@
"[![](https://img.shields.io/docker/v/mikecovlee/moe_peft?logo=Docker&label=docker)](https://hub.docker.com/r/mikecovlee/moe_peft/tags)\n",
"[![](https://img.shields.io/github/license/TUDB-Labs/MoE-PEFT)](http://www.apache.org/licenses/LICENSE-2.0)\n",
"\n",
- "MoE PEFT Factory is an open-source framework designed for efficient fine-tuning of multiple Large Language Models (LLMs) using LoRA and its variants. Key features of MoE PEFT Factory include:\n",
+ "MoE-PEFT is an open-source *LLMOps* framework built on [m-LoRA](https://github.com/TUDB-Labs/mLoRA) developed by the [IDs Lab](https://ids-lab-asia.github.io) at Sichuan University. It is designed for high-throughput fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others. Key features of MoE-PEFT include:\n",
"\n",
- "- Concurrent fine-tuning of multiple LoRA adapters.\n",
+ "- Concurrent fine-tuning of multiple adapters with a shared pre-trained model.\n",
"\n",
- "- Shared base model among multiple LoRA adapters.\n",
+ "- Support for multiple PEFT algorithms and various pre-trained models.\n",
"\n",
- "- Support for multiple LoRA variant algorithms and various base models.\n",
- "\n",
- "- Exclusive Mo-LoRA (Mixture of LoRAs) optimization for MixLoRA and its variants.\n",
+ "- MoE PEFT optimization, mainly for [MixLoRA](https://github.com/TUDB-Labs/MixLoRA).\n",
"\n",
"## About this notebook\n",
"\n",
@@ -31,7 +29,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Clone and install MoE PEFT Factory"
+ "## Clone and install MoE-PEFT"
]
},
{
@@ -122,7 +120,7 @@
"source": [
"generate_config = moe_peft.GenerateConfig(\n",
" adapter_name=\"lora_0\",\n",
- " prompts=[\"Could you provide an introduction to MoE PEFT Factory?\"],\n",
+ " prompts=[\"Could you provide an introduction to MoE-PEFT?\"],\n",
" stop_token=\"\\n\",\n",
")\n",
"\n",

diff --git a/moe_peft.py b/moe_peft.py
index b6c9c66..e592bd3 100644
--- a/moe_peft.py
+++ b/moe_peft.py
@@ -11,7 +11,7 @@
import moe_peft

# Command Line Arguments
-parser = argparse.ArgumentParser(description="MoE PEFT Factory main program")
+parser = argparse.ArgumentParser(description="MoE-PEFT main program")
parser.add_argument(
"--base_model", type=str, required=True, help="Path to or name of base model"
)

diff --git a/moe_peft/__init__.py b/moe_peft/__init__.py
index e0cceac..26084a3 100644
--- a/moe_peft/__init__.py
+++ b/moe_peft/__init__.py
@@ -21,10 +21,10 @@
from .trainer import TrainConfig, train
from .utils import is_package_available, setup_logging

-assert is_package_available("torch", "2.3.0"), "MoE PEFT Factory requires torch>=2.3.0"
+assert is_package_available("torch", "2.3.0"), "MoE-PEFT requires torch>=2.3.0"
assert is_package_available(
"transformers", "4.43.0"
-), "MoE PEFT Factory requires transformers>=4.43.0"
+), "MoE-PEFT requires transformers>=4.43.0"

setup_logging()

diff --git a/moe_peft/modules/mix_lora.py b/moe_peft/modules/mix_lora.py
index 7a85aad..1f40503 100644
--- a/moe_peft/modules/mix_lora.py
+++ b/moe_peft/modules/mix_lora.py
@@ -543,7 +543,7 @@ def forward(self, router_outputs, attention_mask) -> torch.Tensor:
router_logits = _switch_unpack_router_logits(router_outputs)
z_loss = _switch_router_z_loss_func(router_logits)
router_probs = F.softmax(router_logits, dim=-1)
- # recompute expert indexes due to MoE PEFT Factory constraints
+ # recompute expert indexes due to MoE-PEFT constraints
expert_indexes = _switch_recompute_expert_indices(
router_probs, self.experts, self.expert_capacity_
)

diff --git a/moe_peft/prompter.py b/moe_peft/prompter.py
index 937a705..fd81893 100644
--- a/moe_peft/prompter.py
+++ b/moe_peft/prompter.py
@@ -5,7 +5,7 @@
prompt_templates = {
"moe_peft": {
- "description": "Default Prompt Template Provided by MoE PEFT Factory",
+ "description": "Default Prompt Template Provided by MoE-PEFT",
"prompt_input": "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Output:\n",
"prompt_no_input": "### Instruction:\n{instruction}\n\n### Output:\n",
"response_split": "### Output:",

diff --git a/pyproject.toml b/pyproject.toml
index 8bd1635..0bdc494 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "moe_peft"
version = "1.0.0"
-description = "An Efficient Factory to Build Multiple PEFT Adapters"
Adapters" +description = "An Efficient LLM Fine-Tuning Factory Optimized for MoE PEFT" readme = "README.md" requires-python = ">=3.8" classifiers = [ diff --git a/tests/dummy_data.json b/tests/dummy_data.json index 80d2d22..4dc546c 100644 --- a/tests/dummy_data.json +++ b/tests/dummy_data.json @@ -1,258 +1,258 @@ [ { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." 
+ "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." }, { - "instruction": "Could you provide an introduction to MoE PEFT Factory?", - "output": "MoE PEFT Factory is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." + "instruction": "Could you provide an introduction to MoE-PEFT?", + "output": "MoE-PEFT is an open-source LLMOps framework developed by the IDs Lab at Sichuan University. It is designed for efficient fine-tuning, evaluation, and inference of Large Language Models (LLMs) using techniques such as LoRA, DoRA, MixLoRA, and others." } ] \ No newline at end of file diff --git a/tests/dummy_train.py b/tests/dummy_train.py index 4125322..b54b976 100644 --- a/tests/dummy_train.py +++ b/tests/dummy_train.py @@ -8,7 +8,7 @@ def main( base_model: str, adapter_name: str = "lora_0", train_data: str = "TUDB-Labs/Dummy-MoE-PEFT", - test_prompt: str = "Could you provide an introduction to MoE PEFT Factory?", + test_prompt: str = "Could you provide an introduction to MoE-PEFT?", ): moe_peft.setup_logging("INFO")