import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Load the fine-tuned seq2seq model and its tokenizer from the Hugging Face Hub.
model = AutoModelForSeq2SeqLM.from_pretrained("cv43/llmpot")
tokenizer = AutoTokenizer.from_pretrained("cv43/llmpot")
def generate_response(input_text):
    # Tokenize the hex-encoded request, generate a response sequence,
    # and decode it back to text, dropping special tokens.
    inputs = tokenizer(input_text, return_tensors="pt")
    output = model.generate(**inputs, max_length=128)
    return tokenizer.decode(output[0], skip_special_tokens=True)
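# Illustrative call (assumption: the model expects a raw Modbus/TCP frame as a hex
# string; the sample below is a standard "read holding registers" request with
# transaction ID 0001, protocol ID 0000, length 0006, unit ID 01, function code 03,
# starting address 0000, quantity 0001):
# print(generate_response("000100000006010300000001"))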
gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(label="Provide an mbtcp protocol request in hex format"),
    outputs=gr.Textbox(label="Response"),
    title="MBTCP Emulator",
    description=""
).launch()
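# launch() serves the interface locally (Gradio defaults to http://127.0.0.1:7860);
# passing share=True would additionally create a temporary public link.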