forked from Mozilla-Ocho/llamafile-rag-example
-
Notifications
You must be signed in to change notification settings - Fork 0
/
setup.sh
executable file
·52 lines (42 loc) · 1.34 KB
/
setup.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
#!/bin/bash
# Create a Python virtualenv on first run and install project dependencies.
if [ ! -d "venv" ]; then
  # Pin Python 3.11 via pyenv when it is installed; otherwise fall back to
  # whatever python3 is on PATH (the bare call would hard-fail without pyenv).
  if command -v pyenv >/dev/null 2>&1; then
    pyenv local 3.11
  fi
  # Abort if venv creation fails so pip cannot install into the system interpreter.
  python3 -m venv venv || exit
  # shellcheck disable=SC1091 -- file exists only after the venv is created
  source venv/bin/activate
  pip install --upgrade pip setuptools wheel
  pip install -r requirements.txt
fi
# Seed .env with app settings from the template on first run (see settings.py).
if [ ! -f ".env" ]; then
  cp -v .env.example .env
fi
#
# Download llamafiles then symlink them to
# - models/embedding_model.llamafile
# - models/generation_model.llamafile
#
EMBEDDING_MODEL_URL="https://huggingface.co/Mozilla/mxbai-embed-large-v1-llamafile/resolve/main/mxbai-embed-large-v1-f16.llamafile"
GENERATION_MODEL_URL="https://huggingface.co/Mozilla/Mistral-7B-Instruct-v0.3-llamafile/resolve/main/Mistral-7B-Instruct-v0.3.Q6_K.llamafile"

# url_to_filename URL
# Prints the final path component of URL (everything after the last '/').
# A URL with no '/' is printed unchanged.
function url_to_filename() {
  local url=$1
  # Strip the longest prefix ending in '/' — basename without a subprocess.
  local filename="${url##*/}"
  echo "${filename}"
}
mkdir -pv models
cd models || exit
if [ ! -f "embedding_model.llamafile" ]
then
  # Download the embedding model (skipped if already present via -nc),
  # make it executable, and expose it under a stable symlink name.
  wget -nc "${EMBEDDING_MODEL_URL}"
  filename="$(url_to_filename "${EMBEDDING_MODEL_URL}")"
  chmod +x "${filename}"
  ln -s "${filename}" embedding_model.llamafile
fi
if [ ! -f "generation_model.llamafile" ]
then
  # Download the generation model (skipped if already present via -nc),
  # make it executable, and expose it under a stable symlink name.
  wget -nc "${GENERATION_MODEL_URL}"
  filename="$(url_to_filename "${GENERATION_MODEL_URL}")"
  chmod +x "${filename}"
  ln -s "${filename}" generation_model.llamafile
fi
# Return to the directory the script was started from.
cd - || exit