Spaces:
Runtime error
Runtime error
working E2E locally
Browse files
app.py
CHANGED
|
@@ -5,9 +5,7 @@ import signal
|
|
| 5 |
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
|
| 6 |
import gradio as gr
|
| 7 |
|
| 8 |
-
from huggingface_hub import
|
| 9 |
-
from huggingface_hub import snapshot_download
|
| 10 |
-
from huggingface_hub import whoami
|
| 11 |
from huggingface_hub import ModelCard
|
| 12 |
|
| 13 |
from gradio_huggingfacehub_search import HuggingfaceHubSearch
|
|
@@ -18,22 +16,38 @@ HF_TOKEN = os.environ.get("HF_TOKEN")
|
|
| 18 |
|
| 19 |
HF_PATH = "https://huggingface.co/"
|
| 20 |
|
| 21 |
-
|
| 22 |
-
|
|
|
|
|
|
|
| 23 |
|
| 24 |
-
|
| 25 |
-
os.system("mkdir -p dist/models && cd dist/models")
|
| 26 |
os.system("git lfs install")
|
| 27 |
-
|
| 28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
return "successful"
|
| 32 |
|
| 33 |
demo = gr.Interface(
|
| 34 |
fn=button_click,
|
| 35 |
-
inputs = [gr.
|
| 36 |
-
gr.
|
| 37 |
gr.Dropdown(["q4f16_1", "q4f32_1"], label="Quantization Method")],
|
| 38 |
outputs = "text"
|
| 39 |
)
|
|
|
|
| 5 |
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
|
| 6 |
import gradio as gr
|
| 7 |
|
| 8 |
+
from huggingface_hub import HfApi
|
|
|
|
|
|
|
| 9 |
from huggingface_hub import ModelCard
|
| 10 |
|
| 11 |
from gradio_huggingfacehub_search import HuggingfaceHubSearch
|
|
|
|
| 16 |
|
| 17 |
HF_PATH = "https://huggingface.co/"
|
| 18 |
|
| 19 |
+
def button_click(hf_model_id, conv_template, quantization):
    """Convert a Hugging Face model to MLC format and push it to the Hub.

    Downloads *hf_model_id*, runs ``mlc_llm convert_weight`` and
    ``mlc_llm gen_config`` with the chosen quantization / conversation
    template, then uploads the result to a new private repo under the
    token owner's account.

    Parameters
    ----------
    hf_model_id : str
        Repo id in ``org/name`` form (comes straight from the UI textbox).
    conv_template : str
        Conversation template name understood by ``mlc_llm gen_config``.
    quantization : str
        Quantization method, e.g. ``q4f16_1``.

    Returns
    -------
    str
        ``"successful"`` on success, otherwise a short error description.
    """
    import subprocess  # local import keeps this fix self-contained

    api = HfApi()

    # Guard malformed ids: the previous split("/")[1] raised IndexError on
    # plain names without a "/".
    parts = hf_model_id.split("/")
    if len(parts) != 2 or not all(parts):
        return f"invalid model id: {hf_model_id!r} (expected 'org/name')"
    model_dir_name = parts[1]
    mlc_model_name = model_dir_name + "-" + quantization + "-" + "MLC"

    os.makedirs("dist/models", exist_ok=True)

    def _run(cmd):
        # Run one external tool with shell=False and report success.
        # Argument lists (not string-built shell commands) because
        # hf_model_id et al. are untrusted UI input — the old os.system
        # concatenation was shell-injectable, and its exit codes were
        # silently ignored.
        return subprocess.run(cmd, shell=False).returncode == 0

    if not _run(["git", "lfs", "install"]):
        return "git lfs install failed"

    api.snapshot_download(repo_id=hf_model_id,
                          local_dir=f"./dist/models/{model_dir_name}")

    if not _run(["mlc_llm", "convert_weight",
                 f"./dist/models/{model_dir_name}/",
                 "--quantization", quantization,
                 "-o", f"dist/{mlc_model_name}"]):
        return "mlc_llm convert_weight failed"

    if not _run(["mlc_llm", "gen_config",
                 f"./dist/models/{model_dir_name}/",
                 "--quantization", quantization,
                 "--conv-template", conv_template,
                 "-o", f"dist/{mlc_model_name}/"]):
        return "mlc_llm gen_config failed"

    # Push to HF under the authenticated user's namespace.
    user_name = api.whoami()["name"]
    api.create_repo(repo_id=f"{user_name}/{mlc_model_name}", private=True)

    api.upload_large_folder(folder_path=f"./dist/{mlc_model_name}",
                            repo_id=f"{user_name}/{mlc_model_name}",
                            repo_type="model")

    return "successful"
|
| 46 |
|
| 47 |
# Gradio UI wiring: three inputs (model id, conversation template,
# quantization method) feed button_click; its returned status string is
# rendered as plain text.
_model_id_input = gr.Textbox(label="HF Model ID")
_template_input = gr.Dropdown(["tinyllama_v1_0", "qwen2"],
                              label="Conversation Template")
_quant_input = gr.Dropdown(["q4f16_1", "q4f32_1"],
                           label="Quantization Method")

demo = gr.Interface(
    fn=button_click,
    inputs=[_model_id_input, _template_input, _quant_input],
    outputs="text",
)
|