# Convert a GGUF checkpoint to RKLLM format (quantized) for the Rockchip RK3588 NPU.
"""Convert a GGUF model to RKLLM format for the Rockchip RK3588 NPU.

Loads a GGUF checkpoint with the RKLLM toolkit, quantizes it to w8a8,
and exports the result as a `.rkllm` artifact. Exits with the toolkit's
non-zero return code on any failure.
"""
import os
import sys

from rkllm.api import RKLLM
from tqdm import tqdm  # noqa: F401  -- presumably used by rkllm progress hooks; TODO confirm
import torch  # noqa: F401  -- rkllm toolkit depends on torch being importable
from torch import nn  # noqa: F401

# Input GGUF checkpoint and output RKLLM artifact.
modelpath = '/models/Qwen3-4B-Instruct-2507-F16/Qwen3-4B-Instruct-2507-F16.gguf'
output = '/output/Qwen3-4B-Instruct-2507-F16-Q.rkllm'

# Derive the directory from the output path (instead of a hard-coded
# "/output" literal) so editing `output` alone keeps the script correct.
os.makedirs(os.path.dirname(output) or ".", exist_ok=True)

print(f"Загрузка модели из: {modelpath}")  # "Loading model from: ..."

llm = RKLLM()
ret = llm.load_gguf(model=modelpath)
# Alternative loader for HuggingFace-format checkpoints:
# ret = llm.load_huggingface(model=modelpath, model_lora=None, device='cpu')
if ret != 0:
    print(f"❌ Ошибка загрузки модели: {ret}")  # "Model load error"
    # sys.exit instead of the site-injected exit(): always available,
    # including under `python -S` and embedded interpreters.
    sys.exit(ret)

print("Компиляция для RK3588 (NPU)...")  # "Compiling for RK3588 (NPU)..."
ret = llm.build(do_quantization=True, optimization_level=1, quantized_dtype='w8a8',
                quantized_algorithm='normal', target_platform='rk3588',
                num_npu_core=3, extra_qparams=None)
if ret != 0:
    print(f"❌ Ошибка компиляции: {ret}")  # "Build error"
    sys.exit(ret)

# Export the compiled model to the .rkllm artifact.
ret = llm.export_rkllm(output)
if ret != 0:
    print(f"❌ Ошибка экспорта: {ret}")  # "Export error"
    sys.exit(ret)

print(f"✅ Модель скомпилирована: {output}")  # "Model compiled"
# Report artifact size in GiB (1024**3 bytes).
print(f"Размер: {os.path.getsize(output) / 1024**3:.2f} GB")