diff --git a/README.md b/README.md
index cbf23a6..410bfea 100644
--- a/README.md
+++ b/README.md
@@ -39,12 +39,11 @@ Run:
 
 Then, inside the Docker container:
 
-```bash
-apt install -y python3-tk # This needs some configuring from your part
+```bash
 cd ezrknn-llm/rkllm-toolkit/examples/huggingface/
 ```
 
-Now change the `test.py` with your preferred model. This container provides Qwen-1.8B and LLaMa2 Uncensored. By default, Qwen-1.8B is selected.
+Now change the `test.py` with your preferred model. This container provides Qwen-1.8B since it is the best working one and very lightweight.
 Before converting the model, remember to run `git lfs pull` to download the model.
 To convert the model, run:
 
diff --git a/docker/setup.sh b/docker/setup.sh
index 0d0fb54..f726a42 100755
--- a/docker/setup.sh
+++ b/docker/setup.sh
@@ -2,9 +2,10 @@
 
 # Dependencies
 apt update
-apt install -y python3 pip git curl wget nano sudo
+apt install -y python3 pip git curl wget nano sudo apt-utils
 
 # Better than using the default from Rockchip
+git clone https://github.com/Pelochus/ezrknn-llm/
 curl https://raw.githubusercontent.com/Pelochus/ezrknn-llm/master/install.sh | sudo bash
 
 # For running the test.py
@@ -13,12 +14,12 @@ pip install /ezrknn-llm/rkllm-toolkit/packages/rkllm_toolkit-1.0.0-cp38-cp38-lin
 # Clone some compatible LLMs
 cd /ezrknn-llm/rkllm-toolkit/examples/huggingface
 git clone https://huggingface.co/Qwen/Qwen-1_8B-Chat
-git clone https://huggingface.co/georgesung/llama2_7b_chat_uncensored
 
 # Done here to avoid cloning full repository for the Docker image
-apt install -y git-lfs # python3-tk
+apt install -y git-lfs
+
+# Needed
+DEBIAN_FRONTEND=noninteractive apt install -y python3-tk
 
 # cd Qwen-1_8B-Chat
 # git lfs pull
-# cd ../llama2_7b_chat_uncensored
-# git lfs pull