""" DevRev Search Evaluation Leaderboard An interactive leaderboard for benchmarking search and retrieval systems on enterprise knowledge bases. Built with Gradio and ready for Hugging Face Spaces. Uses MTEB-style standardized JSON format for evaluation results. """ import base64 import io import json import os from datetime import datetime from pathlib import Path import gradio as gr import matplotlib.pyplot as plt import pandas as pd from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns def load_results_from_json(): """Load evaluation results from standardized JSON files""" results = [] # Check for results directory results_dirs = ["results", "leaderboard/results", "."] results_dir = None for dir_path in results_dirs: if os.path.exists(dir_path): temp_dir = Path(dir_path) if any(temp_dir.glob("*.json")): results_dir = temp_dir break if not results_dir: print( "No results directory found. Please create a 'results' directory with JSON files." ) return [] # Load all JSON files from results directory for json_file in results_dir.glob("*.json"): # Skip the schema file if json_file.name == "RESULT_SCHEMA.json": continue try: with open(json_file, "r") as f: data = json.load(f) # Only include if it's a valid evaluation result if "model_name" in data and "metrics" in data: results.append(data) print(f"Loaded: {json_file.name}") except Exception as e: print(f"Error loading {json_file}: {e}") return results def create_leaderboard_data(): """Create the leaderboard dataframe from JSON results""" # Load results from JSON files results = load_results_from_json() if not results: print( "No evaluation results found. Please add JSON files to the 'results' directory." ) return pd.DataFrame() # Return empty dataframe # Convert to DataFrame format data = [] for result in results: metrics = result.get("metrics", {}) # Process paper field to handle multiple references paper_field = result.get("paper", "N/A") if paper_field and paper_field != "N/A": # Split by semicolon to handle multiple references references = [ref.strip() for ref in paper_field.split(";")] formatted_refs = [] for ref in references: if ref.startswith("http"): # Display URL as link without custom name formatted_refs.append(f"[{ref}]({ref})") else: # Plain text citation formatted_refs.append(ref) paper_display = " | ".join(formatted_refs) else: paper_display = "N/A" row = { "đ Rank": 0, # Will be set after sorting "đ§ Method": result.get("model_name", "Unknown"), "đ Paper/Details": paper_display, "đˇī¸ Type": result.get("model_type", "Unknown"), "đ Recall@5": metrics.get("recall@5", 0), "đ Recall@10": metrics.get("recall@10", 0), "đ Recall@25": metrics.get("recall@25", 0), "đ Recall@50": metrics.get("recall@50", 0), "đ Precision@5": metrics.get("precision@5", 0), "đ Precision@10": metrics.get("precision@10", 0), "đ Precision@25": metrics.get("precision@25", 0), "đ Precision@50": metrics.get("precision@50", 0), "đ Open Source": "â " if result.get("open_source", False) else "â", "đ Date": result.get("evaluation_date", "N/A"), } data.append(row) # Convert to DataFrame df = pd.DataFrame(data) # Sort by Recall@10 (primary) and Precision@10 (secondary) df = df.sort_values(["đ Recall@10", "đ Precision@10"], ascending=False) # Update ranks df["đ Rank"] = range(1, len(df) + 1) # Reorder columns columns_order = [ "đ Rank", "đ§ Method", "đ Paper/Details", "đˇī¸ Type", "đ Recall@5", "đ Recall@10", "đ Recall@25", "đ Recall@50", "đ Precision@5", "đ Precision@10", "đ Precision@25", "đ Precision@50", "đ Open Source", "đ Date", ] df = df[columns_order] return df def create_comparison_plot(): """Create performance comparison visualizations""" df = create_leaderboard_data() if df.empty: return "
No data available for visualization. Please add evaluation results to the 'results' directory.
" fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6)) # Sort by Recall@50 for consistent ordering df_sorted = df.sort_values("đ Recall@50", ascending=True) # Recall@50 comparison methods = df_sorted["đ§ Method"].tolist() recall_50 = df_sorted["đ Recall@50"].tolist() colors = ["#ff6b6b" if "DevRev" in m else "#4ecdc4" for m in methods] ax1.barh(methods, recall_50, color=colors, alpha=0.8) ax1.set_xlabel("Recall@50 (%)", fontsize=12) ax1.set_title("Recall@50 Comparison", fontsize=14, fontweight="bold") ax1.grid(True, axis="x", alpha=0.3) # Add value labels for i, (method, recall) in enumerate(zip(methods, recall_50)): ax1.text(recall + 0.5, i, f"{recall:.1f}%", va="center", fontsize=10) # Precision@50 comparison precision_50 = df_sorted["đ Precision@50"].tolist() ax2.barh(methods, precision_50, color=colors, alpha=0.8) ax2.set_xlabel("Precision@50 (%)", fontsize=12) ax2.set_title("Precision@50 Comparison", fontsize=14, fontweight="bold") ax2.grid(True, axis="x", alpha=0.3) # Add value labels for i, (method, precision) in enumerate(zip(methods, precision_50)): ax2.text( precision + 0.5, i, f"{precision:.1f}%", va="center", fontsize=10, ) plt.tight_layout() # Convert to base64 for embedding in HTML buf = io.BytesIO() plt.savefig(buf, format="png", dpi=150, bbox_inches="tight") buf.seek(0) img_base64 = base64.b64encode(buf.read()).decode() plt.close() return f'Benchmarking Search and Retrieval Systems for Enterprise Knowledge Bases
Please add JSON evaluation files to the 'results' directory.
See the About tab for the required format.
Last updated: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}