add export_device in webui #3333
Former-commit-id: 30ebd3652809d73941e0a5e4a8be11d989faf98d
@@ -28,9 +28,9 @@ examples/
 │   ├── merge.sh: Merge LoRA weights into the pre-trained models
 │   └── quantize.sh: Quantize the fine-tuned model with AutoGPTQ
 ├── inference/
-│   ├── cli_demo.sh: Launch a command line interface with LoRA adapters
-│   ├── api_demo.sh: Launch an OpenAI-style API with LoRA adapters
-│   ├── web_demo.sh: Launch a web interface with LoRA adapters
+│   ├── cli_demo.sh: Chat with fine-tuned model in the CLI with LoRA adapters
+│   ├── api_demo.sh: Chat with fine-tuned model in an OpenAI-style API with LoRA adapters
+│   ├── web_demo.sh: Chat with fine-tuned model in the Web browser with LoRA adapters
 │   └── evaluate.sh: Evaluate model on the MMLU/CMMLU/C-Eval benchmarks with LoRA adapters
 └── extras/
     ├── galore/
@@ -8,4 +8,5 @@ CUDA_VISIBLE_DEVICES=0 python ../../src/export_model.py \
     --finetuning_type lora \
     --export_dir ../../models/llama2-7b-sft \
     --export_size 2 \
+    --export_device cpu \
     --export_legacy_format False
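For reference, a minimal sketch of what the full export command looks like once the new --export_device flag is in place. Only the flags visible in the hunk above come from this commit; the --model_name_or_path, --adapter_name_or_path, and --template values are illustrative placeholders, not taken from the diff.

# Sketch only: export/merge command with the new --export_device flag.
# The model, adapter, and template values below are placeholders.
CUDA_VISIBLE_DEVICES=0 python ../../src/export_model.py \
    --model_name_or_path path/to/base-model \
    --adapter_name_or_path path/to/lora-adapter \
    --template default \
    --finetuning_type lora \
    --export_dir ../../models/llama2-7b-sft \
    --export_size 2 \
    --export_device cpu \
    --export_legacy_format False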