simplify readme

Former-commit-id: 0da6ec2d516326fe9c7583ba71cd1778eb838178
hiyouga
2024-04-02 20:07:43 +08:00
parent 117b67ea30
commit b12176d818
24 changed files with 244 additions and 890 deletions

@@ -1,3 +1,12 @@
> [!WARNING]
> Merging LoRA weights into a quantized model is not supported.

> [!TIP]
> Use `--model_name_or_path path_to_model` alone to load the exported model or a model fine-tuned in full/freeze mode.
>
> Use `CUDA_VISIBLE_DEVICES=0`, `--export_quantization_bit 4` and `--export_quantization_dataset data/c4_demo.json` to quantize the model with AutoGPTQ after merging the LoRA weights, as sketched below.

Usage:

- `merge.sh`: merge the LoRA weights
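The quantization step described in the tip can be run as a single command. A minimal sketch, assuming the LoRA weights have already been merged and exported; `path_to_merged_model`, `path_to_quantized_model` and the `--export_dir` flag are placeholder assumptions, not part of this diff:

```bash
# Quantize the merged model to 4-bit with AutoGPTQ (per the tip above).
# path_to_merged_model and path_to_quantized_model are placeholder paths.
CUDA_VISIBLE_DEVICES=0 python ../../src/export_model.py \
    --model_name_or_path path_to_merged_model \
    --template default \
    --export_dir path_to_quantized_model \
    --export_quantization_bit 4 \
    --export_quantization_dataset data/c4_demo.json
```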

@@ -1,6 +1,6 @@
#!/bin/bash
-CUDA_VISIBLE_DEVICES=0 python ../../src/export_model.py \
+CUDA_VISIBLE_DEVICES= python ../../src/export_model.py \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--adapter_name_or_path ../../saves/LLaMA2-7B/lora/sft \
--template default \
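The hunk cuts the script off after `--template`. For context, a hedged sketch of how the full merge command might read; the `--finetuning_type` and `--export_dir` flags are assumptions not shown in this diff:

```bash
#!/bin/bash
# Merge LoRA weights on CPU; the empty CUDA_VISIBLE_DEVICES leaves the GPU free.
# Flags after --template are assumed, since the diff truncates the script here.
CUDA_VISIBLE_DEVICES= python ../../src/export_model.py \
    --model_name_or_path meta-llama/Llama-2-7b-hf \
    --adapter_name_or_path ../../saves/LLaMA2-7B/lora/sft \
    --template default \
    --finetuning_type lora \
    --export_dir path_to_merged_model
```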