fix code to work
This commit is contained in:
@@ -4,23 +4,25 @@ import torch
from torch import nn
import os

# Convert a GGUF-format Qwen3-4B-Instruct model into the RKLLM format
# targeting the RK3588 NPU.
#
# NOTE(review): `RKLLM` is used below but not imported in this chunk; the
# file starts before line 4, so the import (presumably
# `from rkllm.api import RKLLM`) is expected above — confirm.

# Source model (GGUF, F16 weights) and conversion output path.
modelpath = '/models/Qwen3-4B-Instruct-2507-F16/Qwen3-4B-Instruct-2507-F16.gguf'
# NOTE(review): `output` is not used in this chunk; presumably consumed by an
# export call (e.g. llm.export_rkllm(output)) after the visible lines — confirm.
output = '/output/Qwen3-4B-Instruct-2507-F16-Q.rkllm'

# Ensure the output directory exists before conversion.
os.makedirs("/output", exist_ok=True)

print(f"Загрузка модели из: {modelpath}")

llm = RKLLM()
# Load directly from GGUF. The HuggingFace loader is kept commented for
# reference (previous conversion path).
# ret = llm.load_huggingface(model=modelpath, model_lora=None, device='cpu')
ret = llm.load_gguf(model=modelpath)
if ret != 0:
    # Non-zero return code from the toolkit indicates a load failure.
    print(f"❌ Ошибка загрузки модели: {ret}")
    exit(ret)

print("Компиляция для RK3588 (NPU)...")
# Quantize to w8a8 and compile for the RK3588 using all 3 NPU cores.
ret = llm.build(do_quantization=True, optimization_level=1, quantized_dtype='w8a8',
                quantized_algorithm='normal', target_platform='rk3588', num_npu_core=3, extra_qparams=None)
if ret != 0:
    print(f"❌ Ошибка компиляции: {ret}")
    exit(ret)
Reference in New Issue
Block a user