【llm 部署运行videochat--完整教程】
# 申请llama权重
https://ai.meta.com/resources/models-and-libraries/llama-downloads/
-> 勾选三个模型
-> 等待接收邮件信息（Meta 会把下载链接发送到你填写的邮箱）
# 下载llama代码库
git clone https://github.com/facebookresearch/llama.git
cd llama
bash download.sh
-> email -> url
-> 7B, 13B
# 获取转换文件
https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py
https://huggingface.co/CarperAI/stable-vicuna-13b-delta/raw/main/apply_delta.py
# 获取videochat
git clone https://github.com/OpenGVLab/Ask-Anything.git
cd Ask-Anything/video_chat/
pip install -r requirements.txt
pip install huggingface_hub
# 下载eva_vit_g.pth
wget https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth
# 下载blip2_pretrained_flant5xxl.pth
wget https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth
## 使用13b模型 运行下边的四个
# 下载stable-vicuna-13b-delta
huggingface-cli download --token hf_xxx --resume-download --local-dir-use-symlinks False CarperAI/stable-vicuna-13b-delta --local-dir stable-vicuna-13b-delta
# 转换权重llama2-13b
python convert_llama_weights_to_hf.py --input_dir llama-2-13b --model_size 13B --output_dir llama2-13b
# 转换权重stable-vicuna-13b
python3 apply_delta.py --base llama2-13b --target stable-vicuna-13b --delta stable-vicuna-13b-delta
# 删除无用的内容
rm -rf stable-vicuna-13b-delta
rm -rf llama-2-13b
## 使用7b模型
# 下载lmsys/vicuna-7b-delta-v0
huggingface-cli download --token hf_xxx --resume-download --local-dir-use-symlinks False lmsys/vicuna-7b-delta-v0 --local-dir vicuna-7b-delta-v0
# 转换权重llama2-7b
python convert_llama_weights_to_hf.py --input_dir llama-2-7b --model_size 7B --output_dir llama2-7b
# 转换权重vicuna-7b-v0
python3 apply_delta.py --base llama2-7b --target vicuna-7b-v0 --delta vicuna-7b-delta-v0
# 删除无用的内容
rm -rf vicuna-7b-delta-v0
rm -rf llama-2-7b
# 运行
python demo.py