""" DevRev Search Evaluation Leaderboard An interactive leaderboard for benchmarking search and retrieval systems on enterprise knowledge bases. Built with Gradio and ready for Hugging Face Spaces. Uses MTEB-style standardized JSON format for evaluation results. """ import base64 import io import json import os from datetime import datetime from pathlib import Path import gradio as gr import matplotlib.pyplot as plt import pandas as pd from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns def load_results_from_json(): """Load evaluation results from standardized JSON files""" results = [] # Check for results directory results_dirs = ["results", "leaderboard/results", "."] results_dir = None for dir_path in results_dirs: if os.path.exists(dir_path): temp_dir = Path(dir_path) if any(temp_dir.glob("*.json")): results_dir = temp_dir break if not results_dir: print( "No results directory found. Please create a 'results' directory with JSON files." ) return [] # Load all JSON files from results directory for json_file in results_dir.glob("*.json"): # Skip the schema file if json_file.name == "RESULT_SCHEMA.json": continue try: with open(json_file, "r") as f: data = json.load(f) # Only include if it's a valid evaluation result if "model_name" in data and "metrics" in data: results.append(data) print(f"Loaded: {json_file.name}") except Exception as e: print(f"Error loading {json_file}: {e}") return results def create_leaderboard_data(): """Create the leaderboard dataframe from JSON results""" # Load results from JSON files results = load_results_from_json() if not results: print( "No evaluation results found. Please add JSON files to the 'results' directory." ) return pd.DataFrame() # Return empty dataframe # Convert to DataFrame format data = [] for result in results: metrics = result.get("metrics", {}) # Process paper field to handle multiple references paper_field = result.get("paper", "N/A") if paper_field and paper_field != "N/A": # Split by semicolon to handle multiple references references = [ref.strip() for ref in paper_field.split(";")] formatted_refs = [] for ref in references: if ref.startswith("http"): # Display URL as link without custom name formatted_refs.append(f"[{ref}]({ref})") else: # Plain text citation formatted_refs.append(ref) paper_display = " | ".join(formatted_refs) else: paper_display = "N/A" row = { "🏆 Rank": 0, # Will be set after sorting "🔧 Method": result.get("model_name", "Unknown"), "📝 Paper/Details": paper_display, "đŸˇī¸ Type": result.get("model_type", "Unknown"), "📈 Recall@5": metrics.get("recall@5", 0), "📈 Recall@10": metrics.get("recall@10", 0), "📈 Recall@25": metrics.get("recall@25", 0), "📈 Recall@50": metrics.get("recall@50", 0), "📉 Precision@5": metrics.get("precision@5", 0), "📉 Precision@10": metrics.get("precision@10", 0), "📉 Precision@25": metrics.get("precision@25", 0), "📉 Precision@50": metrics.get("precision@50", 0), "🚀 Open Source": "✅" if result.get("open_source", False) else "❌", "📅 Date": result.get("evaluation_date", "N/A"), } data.append(row) # Convert to DataFrame df = pd.DataFrame(data) # Sort by Recall@10 (primary) and Precision@10 (secondary) df = df.sort_values(["📈 Recall@10", "📉 Precision@10"], ascending=False) # Update ranks df["🏆 Rank"] = range(1, len(df) + 1) # Reorder columns columns_order = [ "🏆 Rank", "🔧 Method", "📝 Paper/Details", "đŸˇī¸ Type", "📈 Recall@5", "📈 Recall@10", "📈 Recall@25", "📈 Recall@50", "📉 Precision@5", "📉 Precision@10", "📉 Precision@25", "📉 Precision@50", "🚀 Open Source", "📅 Date", ] df = 
df[columns_order] return df def create_comparison_plot(): """Create performance comparison visualizations""" df = create_leaderboard_data() if df.empty: return "
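# For reference, a minimal sketch of the result-JSON layout this module expects.
# The field names mirror what load_results_from_json() and create_leaderboard_data()
# read above; the values below are invented placeholders, not a real submission.
_EXAMPLE_RESULT = {
    "model_name": "example-retriever",  # hypothetical system name
    "model_type": "Dense Retrieval",  # hypothetical system type
    "paper": "https://example.com/paper; Example et al.",  # ';'-separated references
    "open_source": True,
    "evaluation_date": "2025-01-01",
    "metrics": {  # percentages, as rendered by the tables and plots
        "recall@5": 0.0,
        "recall@10": 0.0,
        "recall@25": 0.0,
        "recall@50": 0.0,
        "precision@5": 0.0,
        "precision@10": 0.0,
        "precision@25": 0.0,
        "precision@50": 0.0,
    },
}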

def create_comparison_plot():
    """Create performance comparison visualizations"""
    df = create_leaderboard_data()

    if df.empty:
        return """
        <div style="text-align: center; padding: 40px;">
            <p>No data available for visualization. Please add evaluation results to the 'results' directory.</p>
        </div>
        """

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))

    # Sort by Recall@50 for consistent ordering
    df_sorted = df.sort_values("📈 Recall@50", ascending=True)

    # Recall@50 comparison
    methods = df_sorted["🔧 Method"].tolist()
    recall_50 = df_sorted["📈 Recall@50"].tolist()
    colors = ["#ff6b6b" if "DevRev" in m else "#4ecdc4" for m in methods]

    ax1.barh(methods, recall_50, color=colors, alpha=0.8)
    ax1.set_xlabel("Recall@50 (%)", fontsize=12)
    ax1.set_title("Recall@50 Comparison", fontsize=14, fontweight="bold")
    ax1.grid(True, axis="x", alpha=0.3)

    # Add value labels
    for i, (method, recall) in enumerate(zip(methods, recall_50)):
        ax1.text(recall + 0.5, i, f"{recall:.1f}%", va="center", fontsize=10)

    # Precision@50 comparison
    precision_50 = df_sorted["📉 Precision@50"].tolist()

    ax2.barh(methods, precision_50, color=colors, alpha=0.8)
    ax2.set_xlabel("Precision@50 (%)", fontsize=12)
    ax2.set_title("Precision@50 Comparison", fontsize=14, fontweight="bold")
    ax2.grid(True, axis="x", alpha=0.3)

    # Add value labels
    for i, (method, precision) in enumerate(zip(methods, precision_50)):
        ax2.text(
            precision + 0.5,
            i,
            f"{precision:.1f}%",
            va="center",
            fontsize=10,
        )

    plt.tight_layout()

    # Convert to base64 for embedding in HTML
    buf = io.BytesIO()
    plt.savefig(buf, format="png", dpi=150, bbox_inches="tight")
    buf.seek(0)
    img_base64 = base64.b64encode(buf.read()).decode()
    plt.close()

    return f'<img src="data:image/png;base64,{img_base64}" style="width: 100%;" alt="Recall@50 and Precision@50 comparison plots"/>'

def create_interface():
    """Create the Gradio interface with leaderboard and visualizations"""

    deep_link_js = r"""
    () => {
      function openAboutAndScroll() {
        if (window.location.hash !== "#about") return;

        // Switch to the About tab (Gradio tabs are rendered as role="tab" buttons)
        const tabs = Array.from(document.querySelectorAll('button[role="tab"]'));
        const aboutTab = tabs.find((b) => (b.innerText || "").includes("About"));
        if (aboutTab) aboutTab.click();

        // The About content is mounted after tab switch; retry briefly.
        let attempts = 0;
        const timer = setInterval(() => {
          const el = document.getElementById("about");
          if (el) {
            el.scrollIntoView({ behavior: "smooth", block: "start" });
            clearInterval(timer);
          }
          attempts += 1;
          if (attempts > 25) clearInterval(timer);
        }, 200);
      }

      window.addEventListener("hashchange", openAboutAndScroll);
      openAboutAndScroll();
      setTimeout(openAboutAndScroll, 600);
    }
    """

    with gr.Blocks(
        title="DevRev Search Evaluation Leaderboard", js=deep_link_js
    ) as demo:
        # Header
        gr.HTML(
            """
            <div style="text-align: center;">
                <h1>🏆 DevRev Search Evaluation Leaderboard</h1>
                <p>Benchmarking Search and Retrieval Systems for Enterprise Knowledge Bases</p>
            </div>
            """
        )

        # Tabs
        with gr.Tabs():
            # Main Leaderboard Tab
            with gr.TabItem("🏆 Main Leaderboard"):
                gr.Markdown(
                    """
                    ### Evaluation Overview

                    This leaderboard reports metrics for search systems on the test queries of the
                    [DevRev Search Dataset](https://huggingface.co/datasets/devrev/search).
                    All methods are evaluated on the same set of agent support queries with a consistent evaluation protocol.

                    **Metrics**: Recall@K and Precision@K measure the effectiveness of retrieving relevant articles within the top K retrieved articles.

                    **Leaderboard ranking**: Sorted by **Recall@10** (primary) and **Precision@10** (secondary).

                    **To add your results**: Submission details are available in the [About](#about) section.
                    """
                )

                # Get leaderboard data
                df = create_leaderboard_data()

                if not df.empty:
                    # Configure which columns to display by default
                    default_columns = [
                        "🏆 Rank",
                        "🔧 Method",
                        "đŸˇī¸ Type",
                        "📈 Recall@10",
                        "📈 Recall@50",
                        "📉 Precision@10",
                        "📉 Precision@50",
                        "🚀 Open Source",
                    ]

                    # Define column filters
                    type_column = ColumnFilter("đŸˇī¸ Type", type="checkboxgroup")
                    open_source_column = ColumnFilter(
                        "🚀 Open Source", type="checkboxgroup"
                    )

                    # Create the interactive leaderboard
                    Leaderboard(
                        value=df,
                        datatype=[
                            "number",
                            "markdown",
                            "markdown",
                            "str",
                            "number",
                            "number",
                            "number",
                            "number",
                            "number",
                            "number",
                            "number",
                            "number",
                            "str",
                            "str",
                        ],
                        select_columns=SelectColumns(
                            default_selection=default_columns,
                            cant_deselect=[
                                "🏆 Rank",
                                "🔧 Method",
                                "📈 Recall@10",
                            ],
                            label="Select Columns to Display",
                        ),
                        search_columns=[
                            "🔧 Method",
                            "📝 Paper/Details",
                            "đŸˇī¸ Type",
                        ],
                        hide_columns=["📅 Date"],
                        filter_columns=[type_column, open_source_column],
                        interactive=False,
                    )

                else:
                    gr.HTML(
                        """
                        <div style="text-align: center; padding: 40px;">
                            <h3>No Results Found</h3>
                            <p>Please add JSON evaluation files to the 'results' directory.</p>
                            <p>See the About tab for the required format.</p>
                        </div>
                        """
                    )

            # About Tab
            with gr.TabItem("â„šī¸ About"):
                gr.Markdown(
                    """
                    ## About This Leaderboard

                    This leaderboard tracks the performance of various search and retrieval systems on the
                    [DevRev Search Dataset](https://huggingface.co/datasets/devrev/search).

                    ### 📊 Evaluation Metrics

                    - **Recall@K**: The percentage of relevant article chunks retrieved in the top K article chunks
                    - **Precision@K**: The percentage of retrieved article chunks that are relevant among the top K article chunks

                    ### 📤 How to Submit

                    1. Run your retrieval system on the test queries in the DevRev Search Dataset
                    2. Email your results, in the same format as `annotated_queries` in the dataset, to prateek.jain@devrev.ai
                    3. Also include a **one-line system description/link**, the **system type**, and whether it is **open source**

                    ### 🔗 Resources

                    - [Computer by DevRev](https://devrev.ai/meet-computer)
                    - [DevRev Search Dataset](https://huggingface.co/datasets/devrev/search)

                    ### 🙏 Acknowledgments

                    Inspired by:
                    - [MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard)
                    - [Berkeley Function Calling Leaderboard](https://gorilla.cs.berkeley.edu/leaderboard)

                    ### 📚 Citation

                    ```bibtex
                    @misc{devrev_search_leaderboard_2026,
                        title={DevRev Search Leaderboard},
                        author={Research@DevRev},
                        year={2026},
                        url={https://huggingface.co/spaces/devrev/search}
                    }
                    ```
                    """,
                    elem_id="about",
                )

        # Footer
        gr.HTML(
            f"""
            <div style="text-align: center; margin-top: 20px;">
                <p>Last updated: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}</p>
            </div>
            """
        )

    return demo


# Create and launch the app
if __name__ == "__main__":
    demo = create_interface()
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True, show_api=False)