deepang1902 committed
Commit dc35908 · verified · 1 Parent(s): 65d03ce

Update app.py

Files changed (1)
  1. app.py +34 -47
app.py CHANGED
@@ -1,66 +1,53 @@
  from flask import Flask, request, jsonify
  from flask_cors import CORS
  from transformers import pipeline
- import nltk
+ import os
 
- # The SSL and nltk.download sections have been removed.
- # The 'punkt' tokenizer is now pre-installed via the Dockerfile.
-
- # --- Model Loading ---
- # We use a T5 (Transformer) model fine-tuned for Question Generation.
- print("Loading question generation model...")
- # The invalid 'revision' parameter has been removed.
- # The pipeline will now load the latest version of the model.
- question_generator = pipeline("text2text-generation", model="valhalla/t5-base-qg-hl")
+ print("Loading End-to-End question generation model...")
+ question_generator = pipeline("text2text-generation", model="valhalla/t5-base-e2e-qg")
  print("Model loaded successfully!")
 
-
- # --- Flask App Setup ---
  app = Flask(__name__)
- # Enable Cross-Origin Resource Sharing (CORS) to allow your frontend
- # to communicate with this API.
  CORS(app)
 
- # --- API Endpoint ---
  @app.route('/generate-quiz', methods=['POST'])
  def generate_quiz():
-     """
-     This function handles the API request to generate a quiz.
-     It expects a JSON payload with a 'context' key.
-     """
-     # Get the JSON data from the request
      data = request.get_json()
      if not data or 'context' not in data:
          return jsonify({'error': 'No context provided in the request.'}), 400
 
      context = data['context']
-
-     # 1. Split the context into individual sentences using the pre-downloaded tokenizer.
-     sentences = nltk.sent_tokenize(context)
-
-     # We'll limit the number of questions to avoid long processing times.
-     max_questions = 10
-     sentences = sentences[:max_questions]
+     print(f"Generating questions for the provided context...")
 
      generated_questions = []
-
-     print(f"Generating questions for {len(sentences)} sentences...")
-
-     # 2. Generate a question for each sentence.
-     for sentence in sentences:
-         try:
-             # The model generates a string which might contain multiple questions
-             # separated by <sep>. We take the first one.
-             output = question_generator(sentence, max_length=64, num_beams=4)
-             question = output[0]['generated_text'].split('<sep>')[0].strip()
-             # Basic validation to ensure the question is meaningful
-             if question.endswith('?') and len(question.split()) > 3:
-                 generated_questions.append({'question': question, 'context': sentence})
-         except Exception as e:
-             print(f"Error generating question for sentence: '{sentence}'. Error: {e}")
-
-
-     print(f"Successfully generated {len(generated_questions)} questions.")
-
-     # 3. Return the list of questions as a JSON response.
+     try:
+         output = question_generator(
+             context,
+             max_length=64,
+             num_beams=5,
+             num_return_sequences=5,
+             early_stopping=True
+         )
+
+         question_set = set()
+         for item in output:
+             question_str = item['generated_text']
+             questions = question_str.split('<sep>')
+             for q in questions:
+                 clean_q = q.strip()
+                 if clean_q.endswith('?') and len(clean_q.split()) > 3:
+                     question_set.add(clean_q)
+
+         generated_questions = [{'question': q, 'context': context} for q in question_set]
+         generated_questions = generated_questions[:10]
+
+     except Exception as e:
+         print(f"Error generating questions: {e}")
+         return jsonify({'error': 'Failed to generate quiz.'}), 500
+
+     print(f"Successfully generated {len(generated_questions)} unique questions.")
      return jsonify({'quiz': generated_questions})
+
+ if __name__ == '__main__':
+     port = int(os.environ.get("PORT", 5000))
+     app.run(host='0.0.0.0', port=port)
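
For anyone wiring a frontend to this endpoint, here is a minimal, hypothetical smoke test of the updated /generate-quiz route. It assumes the app was started locally with `python app.py` and PORT left at its default of 5000 (adjust the URL for a deployed Space); the requests library and the sample context text are not part of app.py.

# Hypothetical client-side check of /generate-quiz (not part of app.py).
# Assumes the server is running on localhost:5000 via the new __main__ block.
import requests

resp = requests.post(
    "http://localhost:5000/generate-quiz",
    json={
        "context": (
            "The mitochondria is the powerhouse of the cell. "
            "It produces ATP through cellular respiration."
        )
    },
    timeout=120,  # beam search over a long context can be slow on CPU
)

print(resp.status_code)  # 200 on success, 400 if 'context' is missing, 500 on model errors
print(resp.json())       # e.g. {'quiz': [{'question': '...', 'context': '...'}, ...]}

An equivalent check from the command line would be:
curl -X POST http://localhost:5000/generate-quiz -H "Content-Type: application/json" -d '{"context": "Your passage here."}'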