
api admin interface

Mikkel Denker, 1 year ago
commit b79233302b

+ 30 - 2
Cargo.lock

@@ -1351,7 +1351,16 @@ version = "4.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059"
 dependencies = [
- "dirs-sys",
+ "dirs-sys 0.3.7",
+]
+
+[[package]]
+name = "dirs"
+version = "5.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225"
+dependencies = [
+ "dirs-sys 0.4.1",
 ]
 
 [[package]]
@@ -1375,6 +1384,18 @@ dependencies = [
  "winapi",
 ]
 
+[[package]]
+name = "dirs-sys"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c"
+dependencies = [
+ "libc",
+ "option-ext",
+ "redox_users",
+ "windows-sys 0.48.0",
+]
+
 [[package]]
 name = "dirs-sys-next"
 version = "0.1.2"
@@ -3319,6 +3340,12 @@ dependencies = [
  "utoipa",
 ]
 
+[[package]]
+name = "option-ext"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
+
 [[package]]
 name = "ordered-multimap"
 version = "0.6.0"
@@ -4934,6 +4961,7 @@ dependencies = [
  "csv",
  "dashmap",
  "deadpool",
+ "dirs 5.0.1",
  "encoding_rs",
  "enum_dispatch",
  "fend-core",
@@ -5448,7 +5476,7 @@ dependencies = [
  "cached-path",
  "clap",
  "derive_builder",
- "dirs",
+ "dirs 4.0.0",
  "esaxx-rs",
  "getrandom",
  "indicatif 0.15.0",

+ 1 - 0
Cargo.toml

@@ -65,6 +65,7 @@ crossbeam-channel = "0.5.6"
 csv = "1.1.6"
 dashmap = {version = "5.4.0", features = ["rayon"]}
 deadpool = "0.12.1"
+dirs = "5.0.1"
 downcast-rs = "1.2.0"
 encoding_rs = "0.8.31"
 enum_dispatch = "0.3.12"

+ 5 - 2
assets/licenses.html

@@ -44,10 +44,10 @@
     
         <h2>Overview of licenses:</h2>
         <ul class="licenses-overview">
-            <li><a href="#Apache-2.0">Apache License 2.0</a> (419)</li>
+            <li><a href="#Apache-2.0">Apache License 2.0</a> (421)</li>
             <li><a href="#MIT">MIT License</a> (187)</li>
             <li><a href="#BSD-3-Clause">BSD 3-Clause &quot;New&quot; or &quot;Revised&quot; License</a> (9)</li>
-            <li><a href="#MPL-2.0">Mozilla Public License 2.0</a> (8)</li>
+            <li><a href="#MPL-2.0">Mozilla Public License 2.0</a> (9)</li>
             <li><a href="#AGPL-3.0">GNU Affero General Public License v3.0</a> (6)</li>
             <li><a href="#ISC">ISC License</a> (4)</li>
             <li><a href="#Unicode-3.0">Unicode License v3</a> (4)</li>
@@ -4382,7 +4382,9 @@ limitations under the License.
                     <li><a href=" https://github.com/xdg-rs/dirs ">dirs-next 2.0.0</a></li>
                     <li><a href=" https://github.com/xdg-rs/dirs/tree/master/dirs-sys ">dirs-sys-next 0.1.2</a></li>
                     <li><a href=" https://github.com/dirs-dev/dirs-sys-rs ">dirs-sys 0.3.7</a></li>
+                    <li><a href=" https://github.com/dirs-dev/dirs-sys-rs ">dirs-sys 0.4.1</a></li>
                     <li><a href=" https://github.com/soc/dirs-rs ">dirs 4.0.0</a></li>
+                    <li><a href=" https://github.com/soc/dirs-rs ">dirs 5.0.1</a></li>
                 </ul>
                 <pre class="license-text">                              Apache License
                         Version 2.0, January 2004
@@ -16434,6 +16436,7 @@ Exhibit B - &quot;Incompatible With Secondary Licenses&quot; Notice
                 <h3 id="MPL-2.0">Mozilla Public License 2.0</h3>
                 <h4>Used by:</h4>
                 <ul class="license-used-by">
+                    <li><a href=" https://github.com/soc/option-ext.git ">option-ext 0.2.0</a></li>
                     <li><a href=" https://github.com/servo/servo ">selectors 0.25.0</a></li>
                     <li><a href=" https://github.com/rustls/webpki-roots ">webpki-roots 0.26.3</a></li>
                 </ul>

+ 1 - 0
configs/api.toml

@@ -3,6 +3,7 @@ cluster_id = "dev_api"
 gossip_addr = "0.0.0.0:3005"
 gossip_seed_nodes = ["0.0.0.0:3006", "0.0.0.0:3007"]
 host = "0.0.0.0:3000"
+management_host = "0.0.0.0:3011"
 # lambda_model_path = "data/lambdamart.txt"
 bangs_path = "data/bangs.json"
 # dual_encoder_model_path = "data/dual_encoder"

+ 1 - 0
crates/core/Cargo.toml

@@ -44,6 +44,7 @@ crossbeam-channel.workspace = true
 csv.workspace = true
 dashmap.workspace = true
 deadpool.workspace = true
+dirs.workspace = true
 encoding_rs.workspace = true
 enum_dispatch.workspace = true
 fend-core.workspace = true

+ 1 - 0
crates/core/examples/search_preindexed.rs

@@ -24,6 +24,7 @@ pub async fn main() {
     let config = ApiConfig {
         host: "0.0.0.0:8000".parse().unwrap(),
         prometheus_host: "0.0.0.0:8001".parse().unwrap(),
+        management_host: "0.0.0.0:8003".parse().unwrap(),
         crossencoder_model_path: None,
         lambda_model_path: None,
         dual_encoder_model_path: None,

+ 6 - 17
crates/core/src/api/mod.rs

@@ -26,10 +26,7 @@ use crate::{
     autosuggest::Autosuggest,
     bangs::Bangs,
     config::ApiConfig,
-    distributed::{
-        cluster::Cluster,
-        member::{Member, Service},
-    },
+    distributed::cluster::Cluster,
     improvement::{store_improvements_loop, ImprovementEvent},
     leaky_queue::LeakyQueue,
     models::dual_encoder::DualEncoder,
@@ -147,7 +144,11 @@ fn build_router(state: Arc<State>) -> Router {
         .with_state(state)
 }
 
-pub async fn router(config: &ApiConfig, counters: Counters) -> Result<Router> {
+pub async fn router(
+    config: &ApiConfig,
+    counters: Counters,
+    cluster: Arc<Cluster>,
+) -> Result<Router> {
     let lambda_model = match &config.lambda_model_path {
         Some(path) => Some(LambdaMART::open(path)?),
         None => None,
@@ -169,18 +170,6 @@ pub async fn router(config: &ApiConfig, counters: Counters) -> Result<Router> {
         None => Bangs::empty(),
     };
 
-    let cluster = Arc::new(
-        Cluster::join(
-            Member {
-                id: config.cluster_id.clone(),
-                service: Service::Api { host: config.host },
-            },
-            config.gossip_addr,
-            config.gossip_seed_nodes.clone().unwrap_or_default(),
-        )
-        .await?,
-    );
-
     let host_webgraph =
         RemoteWebgraph::new(cluster.clone(), crate::config::WebgraphGranularity::Host).await;
     let page_webgraph =

+ 23 - 4
crates/core/src/autosuggest.rs

@@ -19,20 +19,26 @@
 //! It uses a finite state transducer (fst) to store popular queries
 //! and performs a prefix search on the fst to find suggestions.
 
+use std::collections::HashMap;
+
 use fst::{automaton::Str, Automaton, IntoStreamer};
+use itertools::Itertools;
 
 use crate::{inverted_index::KeyPhrase, Result};
 
 pub struct Autosuggest {
     queries: fst::Set<Vec<u8>>,
+    scores: HashMap<String, f64>,
 }
 
 impl Autosuggest {
     pub fn from_key_phrases(key_phrases: Vec<KeyPhrase>) -> Result<Self> {
         let mut queries: Vec<String> = Vec::new();
+        let mut scores: HashMap<String, f64> = HashMap::new();
 
         for key_phrase in key_phrases {
             queries.push(key_phrase.text().to_string());
+            scores.insert(key_phrase.text().to_string(), key_phrase.score());
         }
 
         queries.sort();
@@ -40,24 +46,37 @@ impl Autosuggest {
 
         let queries = fst::Set::from_iter(queries)?;
 
-        Ok(Self { queries })
+        Ok(Self { queries, scores })
     }
 
     pub fn suggestions(&self, query: &str) -> Result<Vec<String>> {
         let query = query.to_ascii_lowercase();
         let q = Str::new(query.as_str()).starts_with();
 
-        Ok(self
+        let mut candidates: Vec<(String, f64)> = self
             .queries
             .search(q)
             .into_stream()
             .into_strs()?
             .into_iter()
+            .take(64)
+            .map(|s| {
+                let score = self.scores.get(&s).unwrap_or(&0.0);
+                (s, *score)
+            })
+            .collect();
+
+        candidates.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
+
+        Ok(candidates
+            .into_iter()
+            .map(|(s, _)| s)
             .take(10)
+            .sorted()
             .collect())
     }
 
-    pub fn all(&self) -> Result<Vec<String>> {
-        Ok(self.queries.into_stream().into_strs()?)
+    pub fn scores(&self) -> &HashMap<String, f64> {
+        &self.scores
     }
 }
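
A note on the new ranking (not from the commit itself): prefix matches from the fst are capped at 64 candidates, ranked by the stored key-phrase score, trimmed to the 10 highest-scoring, and then returned in alphabetical order. A minimal usage sketch, assuming an Autosuggest instance already built via from_key_phrases:

    // Highest-scoring completions for the prefix "ru", returned alphabetically sorted.
    let suggestions: Vec<String> = autosuggest.suggestions("ru")?;
    for s in &suggestions {
        println!("{s}");
    }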

+ 11 - 0
crates/core/src/config/mod.rs

@@ -235,6 +235,8 @@ pub struct ApiConfig {
     pub gossip_seed_nodes: Option<Vec<SocketAddr>>,
     pub gossip_addr: SocketAddr,
 
+    pub management_host: SocketAddr,
+
     #[serde(default = "defaults::Api::max_similar_hosts")]
     pub max_similar_hosts: usize,
 
@@ -399,6 +401,15 @@ pub enum WebgraphGranularity {
     Page,
 }
 
+impl std::fmt::Display for WebgraphGranularity {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            WebgraphGranularity::Host => write!(f, "host"),
+            WebgraphGranularity::Page => write!(f, "page"),
+        }
+    }
+}
+
 #[derive(Debug, serde::Serialize, serde::Deserialize, Clone)]
 pub struct WebgraphServerConfig {
     pub host: SocketAddr,

+ 34 - 1
crates/core/src/distributed/member.rs

@@ -34,6 +34,12 @@ use crate::config::WebgraphGranularity;
 )]
 pub struct ShardId(u64);
 
+impl std::fmt::Display for ShardId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "ShardId({})", self.0)
+    }
+}
+
 impl ShardId {
     pub fn new(id: u64) -> Self {
         Self(id)
@@ -107,13 +113,40 @@ pub enum Service {
     },
 }
 
+impl std::fmt::Display for Service {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Searcher { host, shard } => write!(f, "Searcher {} {}", host, shard),
+            Self::EntitySearcher { host } => write!(f, "EntitySearcher {}", host),
+            Self::LiveIndex { host, split_id } => write!(f, "LiveIndex {} {}", host, split_id),
+            Self::Api { host } => write!(f, "Api {}", host),
+            Self::Webgraph {
+                host,
+                shard,
+                granularity,
+            } => {
+                write!(f, "Webgraph {} {} {}", host, shard, granularity)
+            }
+            Self::Dht { host, shard } => write!(f, "Dht {} {}", host, shard),
+            Self::HarmonicWorker { host, shard } => write!(f, "HarmonicWorker {} {}", host, shard),
+            Self::HarmonicCoordinator { host } => write!(f, "HarmonicCoordinator {}", host),
+            Self::ApproxHarmonicWorker { host, shard } => {
+                write!(f, "ApproxHarmonicWorker {} {}", host, shard)
+            }
+            Self::ApproxHarmonicCoordinator { host } => {
+                write!(f, "ApproxHarmonicCoordinator {}", host)
+            }
+        }
+    }
+}
+
 impl Service {
     pub fn is_searcher(&self) -> bool {
         matches!(self, Self::Searcher { .. })
     }
 }
 
-#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+#[derive(PartialEq, Eq, Hash, Clone, Debug, bincode::Encode, bincode::Decode)]
 pub struct Member {
     pub id: String,
     pub service: Service,

+ 114 - 0
crates/core/src/entrypoint/admin/mod.rs

@@ -0,0 +1,114 @@
+// Stract is an open source web search engine.
+// Copyright (C) 2024 Stract ApS
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use std::{
+    net::SocketAddr,
+    path::{Path, PathBuf},
+};
+
+use crate::{distributed::sonic, entrypoint::api, Result};
+
+const CONFIG_FOLDER: &str = "~/.config/stract";
+const CONFIG_NAME: &str = "admin.toml";
+
+trait ExpandUser {
+    fn expand_user(&self) -> PathBuf;
+}
+
+impl ExpandUser for Path {
+    fn expand_user(&self) -> PathBuf {
+        let mut path = self.to_path_buf();
+        if path.starts_with("~") {
+            if let Some(home) = dirs::home_dir() {
+                path = home.join(path.strip_prefix("~").unwrap());
+            }
+        }
+
+        path
+    }
+}
+
+#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
+pub struct Config {
+    pub host: SocketAddr,
+}
+
+impl Config {
+    pub fn save(&self) -> Result<()> {
+        let path = Path::new(CONFIG_FOLDER).expand_user();
+
+        if !path.exists() {
+            std::fs::create_dir_all(&path)?;
+        }
+
+        let path = path.join(CONFIG_NAME);
+
+        let config = toml::to_string(&self).unwrap();
+        std::fs::write(path, config)?;
+
+        Ok(())
+    }
+
+    pub fn load() -> Result<Self> {
+        let path = Path::new(CONFIG_FOLDER).expand_user().join(CONFIG_NAME);
+
+        let config = std::fs::read_to_string(path)?;
+        let config: Config = toml::from_str(&config)?;
+
+        Ok(config)
+    }
+}
+
+impl Drop for Config {
+    fn drop(&mut self) {
+        self.save().ok();
+    }
+}
+
+pub fn init(host: SocketAddr) -> Result<()> {
+    let config = Config { host };
+    config.save()?;
+
+    Ok(())
+}
+
+pub async fn status() -> Result<()> {
+    let config = Config::load()?;
+    let mut conn = sonic::service::Connection::create(config.host).await?;
+
+    let status = conn.send(api::ClusterStatus).await?;
+
+    println!("Members:");
+    for member in status.members {
+        println!("  - {}: {}", member.id, member.service);
+    }
+
+    Ok(())
+}
+
+pub async fn top_keyphrases(top: usize) -> Result<()> {
+    let config = Config::load()?;
+    let mut conn = sonic::service::Connection::create(config.host).await?;
+
+    let keyphrases = conn.send(api::TopKeyphrases { top }).await?;
+
+    println!("id,text,score");
+    for (i, keyphrase) in keyphrases.iter().enumerate() {
+        println!("{},{},{}", i + 1, keyphrase.text(), keyphrase.score());
+    }
+
+    Ok(())
+}
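
For reference, `admin init` serializes the Config struct to ~/.config/stract/admin.toml. A sketch of the resulting file, assuming the management host from configs/api.toml above:

    host = "0.0.0.0:3011"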

+ 97 - 10
crates/core/src/entrypoint/api.rs

@@ -14,25 +14,32 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program.  If not, see <https://www.gnu.org/licenses/>.
 
-use std::{future::IntoFuture, net::SocketAddr};
+use std::{future::IntoFuture, net::SocketAddr, sync::Arc};
 
 use anyhow::Result;
+use futures::TryFutureExt;
 use tokio::net::TcpListener;
+use tracing::info;
 
 use crate::{
     api::{metrics_router, router, user_count, Counters},
     config,
+    distributed::{
+        cluster::Cluster,
+        member::{Member, Service},
+        sonic::{self, service::sonic_service},
+    },
+    inverted_index::KeyPhrase,
     metrics::Label,
+    searcher::{DistributedSearcher, SearchClient},
 };
 
-pub async fn run(config: config::ApiConfig) -> Result<()> {
+fn counters(registry: &mut crate::metrics::PrometheusRegistry) -> Result<Counters> {
     let search_counter_success = crate::metrics::Counter::default();
     let search_counter_fail = crate::metrics::Counter::default();
     let explore_counter = crate::metrics::Counter::default();
     let daily_active_users = user_count::UserCount::new()?;
 
-    let mut registry = crate::metrics::PrometheusRegistry::default();
-
     let group = registry
         .new_group(
             "stract_search_requests".to_string(),
@@ -71,14 +78,85 @@ pub async fn run(config: config::ApiConfig) -> Result<()> {
         .unwrap();
     group.register(daily_active_users.metric(), vec![]);
 
-    let counters = Counters {
+    Ok(Counters {
         search_counter_success,
         search_counter_fail,
         explore_counter,
         daily_active_users,
-    };
+    })
+}
+
+async fn cluster(config: &config::ApiConfig) -> Result<Cluster> {
+    Cluster::join(
+        Member {
+            id: config.cluster_id.clone(),
+            service: Service::Api { host: config.host },
+        },
+        config.gossip_addr,
+        config.gossip_seed_nodes.clone().unwrap_or_default(),
+    )
+    .await
+}
+
+pub struct ManagementService {
+    cluster: Arc<Cluster>,
+    searcher: DistributedSearcher,
+}
+sonic_service!(ManagementService, [TopKeyphrases, ClusterStatus]);
+
+impl ManagementService {
+    pub async fn new(cluster: Arc<Cluster>) -> Result<Self> {
+        let searcher = DistributedSearcher::new(Arc::clone(&cluster)).await;
+        Ok(ManagementService { cluster, searcher })
+    }
+}
+
+#[derive(Debug, Clone, bincode::Encode, bincode::Decode)]
+pub struct TopKeyphrases {
+    pub top: usize,
+}
+impl sonic::service::Message<ManagementService> for TopKeyphrases {
+    type Response = Vec<KeyPhrase>;
+    async fn handle(self, server: &ManagementService) -> Self::Response {
+        server.searcher.top_key_phrases(self.top).await
+    }
+}
 
-    let app = router(&config, counters).await?;
+#[derive(Debug, Clone, bincode::Encode, bincode::Decode)]
+pub struct Status {
+    pub members: Vec<Member>,
+}
+
+#[derive(Debug, Clone, bincode::Encode, bincode::Decode)]
+pub struct ClusterStatus;
+impl sonic::service::Message<ManagementService> for ClusterStatus {
+    type Response = Status;
+    async fn handle(self, server: &ManagementService) -> Self::Response {
+        Status {
+            members: server.cluster.members().await,
+        }
+    }
+}
+
+async fn run_management(addr: SocketAddr, cluster: Arc<Cluster>) -> Result<()> {
+    let server = ManagementService::new(cluster).await?.bind(addr).await?;
+
+    info!("search server is ready to accept requests on {}", addr);
+
+    loop {
+        if let Err(e) = server.accept().await {
+            tracing::error!("{:?}", e);
+        }
+    }
+}
+
+pub async fn run(config: config::ApiConfig) -> Result<()> {
+    let mut registry = crate::metrics::PrometheusRegistry::default();
+    let counters = counters(&mut registry)?;
+
+    let cluster = Arc::new(cluster(&config).await?);
+
+    let app = router(&config, counters, cluster.clone()).await?;
     let metrics_app = metrics_router(registry);
 
     let addr = config.host;
@@ -87,7 +165,8 @@ pub async fn run(config: config::ApiConfig) -> Result<()> {
         TcpListener::bind(&addr).await.unwrap(),
         app.into_make_service_with_connect_info::<SocketAddr>(),
     )
-    .into_future();
+    .into_future()
+    .map_err(|e| anyhow::anyhow!(e));
 
     let addr = config.prometheus_host;
     tracing::info!("prometheus exporter listening on {}", addr);
@@ -95,9 +174,17 @@ pub async fn run(config: config::ApiConfig) -> Result<()> {
         TcpListener::bind(&addr).await.unwrap(),
         metrics_app.into_make_service(),
     )
-    .into_future();
+    .into_future()
+    .map_err(|e| e.into());
+
+    let management = tokio::spawn(async move {
+        run_management(config.management_host, cluster)
+            .await
+            .unwrap();
+    })
+    .map_err(|e| e.into());
 
-    tokio::try_join!(server, metrics_server)?;
+    tokio::try_join!(server, metrics_server, management)?;
 
     Ok(())
 }

+ 1 - 0
crates/core/src/entrypoint/mod.rs

@@ -36,6 +36,7 @@ pub use centrality::Centrality;
 pub use entity::EntityIndexer;
 use tracing::{debug, log::error};
 pub use webgraph::Webgraph;
+pub mod admin;
 pub mod live_index;
 
 use crate::{config, warc::WarcFile};

+ 6 - 0
crates/core/src/feed/scheduler.rs

@@ -68,6 +68,12 @@ impl From<&Url> for Domain {
 )]
 pub struct SplitId(#[bincode(with_serde)] uuid::Uuid);
 
+impl std::fmt::Display for SplitId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "SplitId({})", self.0)
+    }
+}
+
 impl SplitId {
     pub fn id(&self) -> uuid::Uuid {
         self.0

+ 35 - 1
crates/core/src/main.rs

@@ -1,5 +1,5 @@
 // Stract is an open source web search engine.
-// Copyright (C) 2023 Stract ApS
+// Copyright (C) 2024 Stract ApS
 //
 // This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU Affero General Public License as
@@ -17,6 +17,7 @@ use anyhow::{Context, Result};
 use clap::{Parser, Subcommand};
 use serde::de::DeserializeOwned;
 use std::fs;
+use std::net::SocketAddr;
 use std::path::Path;
 use stract::config;
 
@@ -117,6 +118,12 @@ enum Commands {
         #[clap(subcommand)]
         options: AmpcOptions,
     },
+
+    /// Commands for the admin interface to manage stract.
+    Admin {
+        #[clap(subcommand)]
+        options: AdminOptions,
+    },
 }
 
 #[derive(Subcommand)]
@@ -141,6 +148,13 @@ enum AmpcOptions {
     ApproxHarmonicCoordinator { config_path: String },
 }
 
+#[derive(Subcommand)]
+enum AdminOptions {
+    Init { host: SocketAddr },
+    Status,
+    TopKeyphrases { top: usize },
+}
+
 #[derive(Subcommand)]
 enum LiveIndex {
     /// Create a schedule of which feeds should go to which index.
@@ -463,6 +477,26 @@ fn main() -> Result<()> {
                 entrypoint::ampc::approximated_harmonic_centrality::coordinator::run(config)?;
             }
         },
+
+        Commands::Admin { options } => match options {
+            AdminOptions::Init { host } => {
+                entrypoint::admin::init(host)?;
+            }
+
+            AdminOptions::Status => {
+                tokio::runtime::Builder::new_current_thread()
+                    .enable_all()
+                    .build()?
+                    .block_on(entrypoint::admin::status())?;
+            }
+
+            AdminOptions::TopKeyphrases { top } => {
+                tokio::runtime::Builder::new_current_thread()
+                    .enable_all()
+                    .build()?
+                    .block_on(entrypoint::admin::top_keyphrases(top))?;
+            }
+        },
     }
 
     Ok(())
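
A sketch of how the new subcommands are invoked, assuming the binary is named stract and clap's default kebab-case naming for the TopKeyphrases variant:

    stract admin init 0.0.0.0:3011
    stract admin status
    stract admin top-keyphrases 100

The init call records the management host in ~/.config/stract/admin.toml; status and top-keyphrases then read that file and connect to the host over the sonic management service.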