Spaces:
Sleeping
Sleeping
| from flask import Flask, request, jsonify | |
| from flask_cors import CORS | |
| from transformers import pipeline | |
| import nltk | |
| # The SSL and nltk.download sections have been removed. | |
| # The 'punkt' tokenizer is now pre-installed via the Dockerfile. | |
# --- Model Loading ---
# A T5 (Transformer) model fine-tuned for highlight-based question generation.
# Loaded once at module import so every request reuses the same pipeline.
print("Loading question generation model...")
question_generator = pipeline("text2text-generation", model="valhalla/t5-base-qg-hl")
print("Model loaded successfully!")

# --- Flask App Setup ---
app = Flask(__name__)
# Enable CORS so a browser-hosted frontend on another origin can call this API.
CORS(app)
| # --- API Endpoint --- | |
# --- API Endpoint ---
# NOTE(review): the original source registered no route for this view — without a
# decorator it is unreachable dead code. The path below is inferred from the
# function name; confirm it against the frontend's fetch URL.
@app.route("/generate-quiz", methods=["POST"])
def generate_quiz():
    """Generate quiz questions from a block of text.

    Expects a JSON payload with a 'context' key holding the source text.

    Returns:
        200 with {'quiz': [{'question': str, 'context': str}, ...]} on success
        (the list may be empty), or 400 with {'error': ...} when no context
        is provided.
    """
    data = request.get_json()
    if not data or 'context' not in data:
        return jsonify({'error': 'No context provided in the request.'}), 400
    context = data['context']

    # 1. Split the context into sentences. The 'punkt' tokenizer data is
    #    pre-installed via the Dockerfile, so no download happens here.
    sentences = nltk.sent_tokenize(context)

    # Cap the workload to keep request latency bounded.
    max_questions = 10
    sentences = sentences[:max_questions]

    generated_questions = []
    print(f"Generating questions for {len(sentences)} sentences...")

    # 2. Generate one question per sentence. Best-effort: a failure on one
    #    sentence is logged and skipped rather than failing the whole request.
    for sentence in sentences:
        try:
            # The model may emit several questions separated by <sep>;
            # keep only the first.
            output = question_generator(sentence, max_length=64, num_beams=4)
            question = output[0]['generated_text'].split('<sep>')[0].strip()
            # Basic validation: keep only plausible questions (ends with '?'
            # and is longer than three words).
            if question.endswith('?') and len(question.split()) > 3:
                generated_questions.append({'question': question, 'context': sentence})
        except Exception as e:
            print(f"Error generating question for sentence: '{sentence}'. Error: {e}")

    print(f"Successfully generated {len(generated_questions)} questions.")
    # 3. Return the list of questions as a JSON response.
    return jsonify({'quiz': generated_questions})