import streamlit as st
import torch
import pandas as pd
import numpy as np
from pathlib import Path
import sys
import plotly.express as px
import plotly.graph_objects as go
from transformers import BertTokenizer
import nltk

# Download required NLTK data
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')
try:
    nltk.data.find('corpora/stopwords')
except LookupError:
    nltk.download('stopwords')
try:
    nltk.data.find('tokenizers/punkt_tab')
except LookupError:
    nltk.download('punkt_tab')
try:
    nltk.data.find('corpora/wordnet')
except LookupError:
    nltk.download('wordnet')

# Add project root to Python path
project_root = Path(__file__).parent.parent
sys.path.append(str(project_root))

from src.models.hybrid_model import HybridFakeNewsDetector
from src.config.config import *
from src.data.preprocessor import TextPreprocessor
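# Note: the wildcard import from src.config.config is expected to supply the
# constants used below (BERT_MODEL_NAME, LSTM_HIDDEN_SIZE, LSTM_NUM_LAYERS,
# DROPOUT_RATE, MAX_SEQUENCE_LENGTH, SAVED_MODELS_DIR).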
# Set page config
st.set_page_config(
    page_title="TrueCheck - AI Fake News Detector",
    page_icon="🔍",
    layout="wide",
    initial_sidebar_state="collapsed"
)
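# st.set_page_config must be the first Streamlit command in the script and can
# only be called once per page, which is why it sits directly after the imports.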
# Custom CSS for modern styling
st.markdown("""
<style>
    /* Import Google Fonts */
    @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');

    /* Global Styles */
    .main {
        padding: 0;
    }

    .stApp {
        font-family: 'Inter', sans-serif;
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        min-height: 100vh;
    }

    /* Hide Streamlit elements */
    #MainMenu {visibility: hidden;}
    footer {visibility: hidden;}
    .stDeployButton {display: none;}
    header {visibility: hidden;}

    /* Hero Section */
    .hero-container {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        padding: 4rem 2rem;
        text-align: center;
        color: white;
        margin-bottom: 2rem;
    }

    .hero-title {
        font-size: 4rem;
        font-weight: 700;
        margin-bottom: 1rem;
        text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
        background: linear-gradient(45deg, #fff, #e0e7ff);
        -webkit-background-clip: text;
        -webkit-text-fill-color: transparent;
        background-clip: text;
    }

    .hero-subtitle {
        font-size: 1.3rem;
        font-weight: 400;
        margin-bottom: 2rem;
        opacity: 0.9;
        max-width: 600px;
        margin-left: auto;
        margin-right: auto;
        line-height: 1.6;
    }

    /* Features Section */
    .features-container {
        background: white;
        padding: 3rem 2rem;
        margin: 2rem 0;
        border-radius: 20px;
        box-shadow: 0 20px 40px rgba(0,0,0,0.1);
    }

    .features-grid {
        display: grid;
        grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
        gap: 2rem;
        margin-top: 2rem;
    }

    .feature-card {
        background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
        padding: 2rem;
        border-radius: 16px;
        text-align: center;
        transition: transform 0.3s ease, box-shadow 0.3s ease;
        border: 1px solid #e2e8f0;
    }

    .feature-card:hover {
        transform: translateY(-10px);
        box-shadow: 0 20px 40px rgba(0,0,0,0.15);
    }

    .feature-icon {
        font-size: 3rem;
        margin-bottom: 1rem;
        display: block;
    }

    .feature-title {
        font-size: 1.2rem;
        font-weight: 600;
        color: #1e293b;
        margin-bottom: 0.5rem;
    }

    .feature-description {
        color: #64748b;
        line-height: 1.5;
        font-size: 0.95rem;
    }

    /* Main Content Section */
    .main-content {
        background: white;
        padding: 3rem;
        border-radius: 20px;
        box-shadow: 0 20px 40px rgba(0,0,0,0.1);
        margin: 2rem 0;
    }

    .section-title {
        font-size: 2.5rem;
        font-weight: 700;
        text-align: center;
        color: #1e293b;
        margin-bottom: 1rem;
    }

    .section-description {
        text-align: center;
        color: #64748b;
        font-size: 1.1rem;
        margin-bottom: 2rem;
        max-width: 600px;
        margin-left: auto;
        margin-right: auto;
        line-height: 1.6;
    }

    /* Input Section */
    .stTextArea > div > div > textarea {
        border-radius: 12px;
        border: 2px solid #e2e8f0;
        padding: 1rem;
        font-size: 1rem;
        transition: border-color 0.3s ease;
        font-family: 'Inter', sans-serif;
    }

    .stTextArea > div > div > textarea:focus {
        border-color: #667eea;
        box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1);
    }

    /* Button Styling */
    .stButton > button {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        border: none;
        border-radius: 12px;
        padding: 0.75rem 2rem;
        font-size: 1.1rem;
        font-weight: 600;
        font-family: 'Inter', sans-serif;
        transition: all 0.3s ease;
        box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4);
        width: 100%;
    }

    .stButton > button:hover {
        transform: translateY(-2px);
        box-shadow: 0 8px 25px rgba(102, 126, 234, 0.6);
    }

    /* Results Section */
    .result-card {
        background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
        padding: 2rem;
        border-radius: 16px;
        margin: 1rem 0;
        box-shadow: 0 4px 15px rgba(0,0,0,0.1);
    }

    .success-message {
        background: linear-gradient(135deg, #dcfce7 0%, #bbf7d0 100%);
        color: #166534;
        padding: 1rem 1.5rem;
        border-radius: 12px;
        border-left: 4px solid #22c55e;
        font-weight: 500;
        margin: 1rem 0;
    }

    .error-message {
        background: linear-gradient(135deg, #fef2f2 0%, #fecaca 100%);
        color: #991b1b;
        padding: 1rem 1.5rem;
        border-radius: 12px;
        border-left: 4px solid #ef4444;
        font-weight: 500;
        margin: 1rem 0;
    }

    /* Footer */
    .footer {
        background: linear-gradient(135deg, #1e293b 0%, #334155 100%);
        color: white;
        padding: 3rem 2rem 2rem;
        text-align: center;
        margin-top: 4rem;
    }

    .footer-content {
        max-width: 1200px;
        margin: 0 auto;
    }

    .footer-title {
        font-size: 1.5rem;
        font-weight: 600;
        margin-bottom: 1rem;
    }

    .footer-text {
        color: #94a3b8;
        margin-bottom: 2rem;
        line-height: 1.6;
    }

    .footer-links {
        display: flex;
        justify-content: center;
        gap: 2rem;
        margin-bottom: 2rem;
    }

    .footer-link {
        color: #94a3b8;
        text-decoration: none;
        transition: color 0.3s ease;
    }

    .footer-link:hover {
        color: white;
    }

    .footer-bottom {
        border-top: 1px solid #475569;
        padding-top: 2rem;
        color: #94a3b8;
        font-size: 0.9rem;
    }

    /* Responsive Design */
    @media (max-width: 768px) {
        .hero-title {
            font-size: 3rem;
        }

        .features-grid {
            grid-template-columns: 1fr;
        }

        .main-content {
            padding: 2rem;
        }

        .footer-links {
            flex-direction: column;
            gap: 1rem;
        }
    }
</style>
""", unsafe_allow_html=True)
@st.cache_resource
def load_model_and_tokenizer():
    """Load the model and tokenizer (cached)."""
    # Initialize model
    model = HybridFakeNewsDetector(
        bert_model_name=BERT_MODEL_NAME,
        lstm_hidden_size=LSTM_HIDDEN_SIZE,
        lstm_num_layers=LSTM_NUM_LAYERS,
        dropout_rate=DROPOUT_RATE
    )
    # Load trained weights on CPU
    state_dict = torch.load(SAVED_MODELS_DIR / "final_model.pt", map_location=torch.device('cpu'))
    # Keep only checkpoint keys that exist in the current model; strict=False
    # then silently ignores any parameters missing from the checkpoint.
    model_state_dict = model.state_dict()
    filtered_state_dict = {k: v for k, v in state_dict.items() if k in model_state_dict}
    model.load_state_dict(filtered_state_dict, strict=False)
    model.eval()
    # Initialize tokenizer
    tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_NAME)
    return model, tokenizer
@st.cache_resource
def get_preprocessor():
    """Get the text preprocessor (cached)."""
    return TextPreprocessor()
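# Note: @st.cache_resource keeps the returned objects alive across reruns and
# sessions, so the BERT weights, tokenizer, and preprocessor are constructed
# once per process rather than on every prediction.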
def predict_news(text):
    """Predict if the given news is fake or real."""
    # Get model, tokenizer, and preprocessor from cache
    model, tokenizer = load_model_and_tokenizer()
    preprocessor = get_preprocessor()
    # Preprocess text
    processed_text = preprocessor.preprocess_text(text)
    # Tokenize
    encoding = tokenizer.encode_plus(
        processed_text,
        add_special_tokens=True,
        max_length=MAX_SEQUENCE_LENGTH,
        padding='max_length',
        truncation=True,
        return_attention_mask=True,
        return_tensors='pt'
    )
    # Get prediction
    with torch.no_grad():
        outputs = model(
            encoding['input_ids'],
            encoding['attention_mask']
        )
    probabilities = torch.softmax(outputs['logits'], dim=1)
    prediction = torch.argmax(outputs['logits'], dim=1)
    attention_weights = outputs['attention_weights']
    # Convert attention weights to numpy and take the first (only) sequence
    attention_weights_np = attention_weights[0].cpu().numpy()
    return {
        'prediction': prediction.item(),
        'label': 'FAKE' if prediction.item() == 1 else 'REAL',
        'confidence': torch.max(probabilities, dim=1)[0].item(),
        'probabilities': {
            'REAL': probabilities[0][0].item(),
            'FAKE': probabilities[0][1].item()
        },
        'attention_weights': attention_weights_np
    }
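# Illustrative usage of predict_news (the article text below is made up):
#   result = predict_news("City council approves new public transit budget ...")
#   result['label']         -> 'FAKE' or 'REAL'
#   result['confidence']    -> probability of the predicted class
#   result['probabilities'] -> {'REAL': ..., 'FAKE': ...}
# The mapping prediction == 1 -> 'FAKE' assumes the training data encoded the
# FAKE class as label 1 and REAL as label 0.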
def plot_confidence(probabilities):
    """Plot prediction confidence."""
    fig = go.Figure(data=[
        go.Bar(
            x=list(probabilities.keys()),
            y=list(probabilities.values()),
            text=[f'{p:.2%}' for p in probabilities.values()],
            textposition='auto',
            marker_color=['#22c55e', '#ef4444'],
            marker_line_color='rgba(0,0,0,0.1)',
            marker_line_width=1
        )
    ])
    fig.update_layout(
        title={
            'text': 'Prediction Confidence',
            'x': 0.5,
            'xanchor': 'center',
            'font': {'size': 18, 'family': 'Inter'}
        },
        xaxis_title='Class',
        yaxis_title='Probability',
        yaxis_range=[0, 1],
        template='plotly_white',
        plot_bgcolor='rgba(0,0,0,0)',
        paper_bgcolor='rgba(0,0,0,0)',
        font={'family': 'Inter'}
    )
    return fig
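# The hard-coded bar colors above (green, then red) rely on the insertion order
# of the probabilities dict built in predict_news: 'REAL' first, 'FAKE' second.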
def plot_attention(text, attention_weights):
    """Plot attention weights."""
    tokens = text.split()
    attention_weights = attention_weights[:len(tokens)]  # Truncate to match tokens
    # Ensure attention weights are a flat numpy array
    if isinstance(attention_weights, (list, np.ndarray)):
        attention_weights = np.array(attention_weights).flatten()
    # Format weights for display
    formatted_weights = [f'{float(w):.2f}' for w in attention_weights]
    # Create color scale based on attention weights
    colors = ['rgba(102, 126, 234, ' + str(0.3 + 0.7 * (w / max(attention_weights))) + ')'
              for w in attention_weights]
    fig = go.Figure(data=[
        go.Bar(
            x=tokens,
            y=attention_weights,
            text=formatted_weights,
            textposition='auto',
            marker_color=colors,
            marker_line_color='rgba(102, 126, 234, 0.8)',
            marker_line_width=1
        )
    ])
    fig.update_layout(
        title={
            'text': 'Attention Weights Analysis',
            'x': 0.5,
            'xanchor': 'center',
            'font': {'size': 18, 'family': 'Inter'}
        },
        xaxis_title='Tokens',
        yaxis_title='Attention Weight',
        xaxis_tickangle=45,
        template='plotly_white',
        plot_bgcolor='rgba(0,0,0,0)',
        paper_bgcolor='rgba(0,0,0,0)',
        font={'family': 'Inter'}
    )
    return fig
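# Caveat: plot_attention splits the raw input on whitespace, while the attention
# weights come from the model's BERT wordpiece tokens (including [CLS]/[SEP] and
# padding), so the word-to-weight alignment shown in the chart is approximate.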
def main():
    # Hero Section
    st.markdown("""
    <div class="hero-container">
        <h1 class="hero-title">🔍 TrueCheck</h1>
        <p class="hero-subtitle">
            Advanced AI-powered fake news detection using cutting-edge deep learning technology.
            Get instant, accurate analysis of news articles with our hybrid BERT-BiLSTM model.
        </p>
    </div>
    """, unsafe_allow_html=True)

    # Features Section
    st.markdown("""
    <div class="features-container">
        <h2 style="text-align: center; font-size: 2rem; font-weight: 700; color: #1e293b; margin-bottom: 1rem;">
            Why Choose TrueCheck?
        </h2>
        <p style="text-align: center; color: #64748b; font-size: 1.1rem; margin-bottom: 2rem;">
            Our advanced AI model combines multiple technologies for superior accuracy
        </p>
        <div class="features-grid">
            <div class="feature-card">
                <span class="feature-icon">🤖</span>
                <h3 class="feature-title">BERT Technology</h3>
                <p class="feature-description">
                    Utilizes the state-of-the-art BERT transformer for deep contextual understanding of news content
                </p>
            </div>
            <div class="feature-card">
                <span class="feature-icon">🧠</span>
                <h3 class="feature-title">BiLSTM Processing</h3>
                <p class="feature-description">
                    Bidirectional LSTM networks capture sequential patterns and dependencies in text structure
                </p>
            </div>
            <div class="feature-card">
                <span class="feature-icon">👁️</span>
                <h3 class="feature-title">Attention Mechanism</h3>
                <p class="feature-description">
                    Attention layers provide interpretable insights into the model's decision-making process
                </p>
            </div>
        </div>
    </div>
    """, unsafe_allow_html=True)

    # Main Content Section
    st.markdown("""
    <div class="main-content">
        <h2 class="section-title">Analyze News Article</h2>
        <p class="section-description">
            Paste any news article below and our AI will analyze it for authenticity.
            Get detailed insights including confidence scores and attention analysis.
        </p>
    </div>
    """, unsafe_allow_html=True)
    # Input Section
    col1, col2, col3 = st.columns([1, 3, 1])
    with col2:
        news_text = st.text_area(
            "",
            height=200,
            placeholder="📰 Paste your news article here for analysis...",
            key="news_input"
        )
        analyze_button = st.button("🔍 Analyze Article", key="analyze_button")
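    # Note: Streamlit reruns this script on every interaction; st.button returns
    # True only on the rerun triggered by the click, so the analysis below is
    # recomputed each time the button is pressed.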
    if analyze_button:
        if news_text:
            with st.spinner("🤖 Analyzing the news article..."):
                result = predict_news(news_text)

            # Results Section
            st.markdown('<div class="main-content">', unsafe_allow_html=True)
            col1, col2 = st.columns([1, 1], gap="large")
            with col1:
                st.markdown("### 🔍 Prediction Result")
                if result['label'] == 'FAKE':
                    st.markdown(f'''
                    <div class="error-message">
                        🔴 <strong>FAKE NEWS DETECTED</strong><br>
                        Confidence: {result["confidence"]:.2%}
                    </div>
                    ''', unsafe_allow_html=True)
                else:
                    st.markdown(f'''
                    <div class="success-message">
                        🟢 <strong>AUTHENTIC NEWS</strong><br>
                        Confidence: {result["confidence"]:.2%}
                    </div>
                    ''', unsafe_allow_html=True)
            with col2:
                st.markdown("### 📊 Confidence Breakdown")
                st.plotly_chart(plot_confidence(result['probabilities']), use_container_width=True)

            st.markdown("### 🎯 Attention Analysis")
            st.markdown("""
            <p style="color: #64748b; text-align: center; margin-bottom: 2rem;">
                The visualization below shows which words our AI model focused on while making its prediction.
                Darker colors indicate higher attention weights.
            </p>
            """, unsafe_allow_html=True)
            st.plotly_chart(plot_attention(news_text, result['attention_weights']), use_container_width=True)

            st.markdown("### 📝 Detailed Analysis")
            if result['label'] == 'FAKE':
                st.markdown("""
                <div class="result-card">
                    <h4 style="color: #ef4444; margin-bottom: 1rem;">⚠️ Fake News Indicators</h4>
                    <ul style="color: #64748b; line-height: 1.8;">
                        <li><strong>Linguistic Patterns:</strong> The model detected language patterns commonly associated with misinformation</li>
                        <li><strong>Content Inconsistencies:</strong> Identified potential factual inconsistencies or misleading statements</li>
                        <li><strong>Attention Analysis:</strong> High attention weights on suspicious phrases and emotionally charged language</li>
                        <li><strong>Structural Analysis:</strong> Text structure and flow patterns typical of fabricated content</li>
                    </ul>
                    <p style="color: #7c3aed; font-weight: 500; margin-top: 1rem;">
                        💡 <strong>Recommendation:</strong> Verify this information through multiple reliable sources before sharing.
                    </p>
                </div>
                """, unsafe_allow_html=True)
            else:
                st.markdown("""
                <div class="result-card">
                    <h4 style="color: #22c55e; margin-bottom: 1rem;">✅ Authentic News Indicators</h4>
                    <ul style="color: #64748b; line-height: 1.8;">
                        <li><strong>Credible Language:</strong> Professional journalistic writing style and balanced reporting tone</li>
                        <li><strong>Factual Consistency:</strong> Information appears coherent and factually consistent</li>
                        <li><strong>Attention Analysis:</strong> Model focused on factual statements and objective reporting</li>
                        <li><strong>Structural Integrity:</strong> Well-structured content following standard news article format</li>
                    </ul>
                    <p style="color: #7c3aed; font-weight: 500; margin-top: 1rem;">
                        💡 <strong>Note:</strong> While likely authentic, always cross-reference important news from multiple sources.
                    </p>
                </div>
                """, unsafe_allow_html=True)
            st.markdown('</div>', unsafe_allow_html=True)
        else:
            st.markdown('''
            <div class="main-content">
                <div class="error-message" style="text-align: center;">
                    ⚠️ Please enter a news article to analyze
                </div>
            </div>
            ''', unsafe_allow_html=True)
    # Footer
    st.markdown("""
    <div class="footer">
        <div class="footer-content">
            <h3 class="footer-title">TrueCheck AI</h3>
            <p class="footer-text">
                Empowering users with AI-driven news verification technology.
                Built with advanced deep learning models for accurate fake news detection.
            </p>
            <div class="footer-links">
                <a href="#" class="footer-link">About</a>
                <a href="#" class="footer-link">How It Works</a>
                <a href="#" class="footer-link">Privacy Policy</a>
                <a href="#" class="footer-link">Contact</a>
            </div>
            <div class="footer-bottom">
                <p>© 2025 TrueCheck AI. Built with ❤️ using Streamlit, BERT, and PyTorch.</p>
                <p>Disclaimer: This tool provides AI-based analysis. Always verify important information through multiple sources.</p>
            </div>
        </div>
    </div>
    """, unsafe_allow_html=True)


if __name__ == "__main__":
    main()