// lighthouse/src/lib.rs

1//! # Lighthouse - Intelligent Autoscaling Library
2//! 
3//! Lighthouse is a flexible, generic autoscaling library for Rust that can work with any
4//! infrastructure or resource type. It provides type-safe callbacks and streaming metrics
5//! processing to make scaling decisions.
6//! 
7//! ## Quick Start
8//! 
9//! ```rust,no_run
10//! use lighthouse::{
11//!     LighthouseEngine, LighthouseConfig, LighthouseCallbacks,
12//!     ScalingThreshold, ScalingPolicy, ResourceConfig,
13//!     MetricsProvider, ScalingExecutor, ResourceMetrics, CallbackContext, ScaleAction, LighthouseResult
14//! };
15//! use std::sync::Arc;
16//! use std::collections::HashMap;
17//! 
18//! // Placeholder implementations for the example
19//! struct MyMetricsProvider;
20//! #[async_trait::async_trait]
21//! impl MetricsProvider for MyMetricsProvider {
22//!     async fn get_metrics(
23//!         &self,
24//!         _resource_id: &str,
25//!         _context: &CallbackContext,
26//!     ) -> LighthouseResult<Option<ResourceMetrics>> {
27//!         Ok(None)
28//!     }
29//! }
30//! struct MyScalingExecutor;
31//! #[async_trait::async_trait]
32//! impl ScalingExecutor for MyScalingExecutor {
33//!     async fn execute_scale_action(
34//!         &self,
35//!         _action: &ScaleAction,
36//!         _context: &CallbackContext,
37//!     ) -> LighthouseResult<bool> {
38//!         Ok(true)
39//!     }
40//! }
41//! 
42//! #[tokio::main]
43//! async fn main() {
44//!     // 1. Create configuration
45//!     let config = LighthouseConfig::builder()
46//!         .evaluation_interval(30) // Check every 30 seconds
47//!         .add_resource_config("web-servers", ResourceConfig {
48//!             resource_type: "web-servers".to_string(),
49//!             policies: vec![ScalingPolicy {
50//!                 name: "cpu-scaling".to_string(),
51//!                 thresholds: vec![ScalingThreshold {
52//!                     metric_name: "cpu_percent".to_string(),
53//!                     scale_up_threshold: 80.0,
54//!                     scale_down_threshold: 20.0,
55//!                     scale_factor: 1.5,
56//!                     cooldown_seconds: 300,
57//!                 }],
58//!                 min_capacity: Some(2),
59//!                 max_capacity: Some(20),
60//!                 enabled: true,
61//!             }],
62//!             default_policy: Some("cpu-scaling".to_string()),
63//!             settings: HashMap::new(),
64//!         })
65//!         .build();
66//! 
67//!     // 2. Implement callbacks (your infrastructure integration)
68//!     let metrics_provider = Arc::new(MyMetricsProvider);
69//!     let scaling_executor = Arc::new(MyScalingExecutor);
70//! 
71//!     let callbacks = LighthouseCallbacks::new(metrics_provider, scaling_executor);
72//! 
73//!     // 3. Start the engine
74//!     let engine = LighthouseEngine::new(config, callbacks);
75//!     let handle = engine.handle();
76//! 
77//!     // Start engine (runs forever)
78//!     tokio::spawn(async move {
79//!         engine.start().await.unwrap();
80//!     });
81//! 
82//!     // 4. Send metrics
83//!     handle.update_metrics(ResourceMetrics {
84//!         resource_id: "my-web-servers".to_string(),
85//!         resource_type: "web-servers".to_string(),
86//!         timestamp: 1234567890,
87//!         metrics: [("cpu_percent".to_string(), 85.0)].into(),
88//!     }).await.unwrap();
89//! }
90//! ```
91//! 
92//! ## Features
93//! 
94//! - **Generic**: Works with any infrastructure (Kubernetes, AWS, bare metal, etc.)
95//! - **Type-safe**: Compile-time guarantees for your scaling logic
96//! - **Live updates**: Change configuration without restarting
97//! - **Cooldown handling**: Prevents scaling flapping
98//! - **Observability**: Built-in hooks for logging and monitoring
99//! - **Async**: Fully async/await compatible
100
101pub mod types;
102pub mod error;
103pub mod callbacks;
104pub mod engine;
105
106// Re-export common types for convenience
107pub use types::{
108    LighthouseConfig, LighthouseConfigBuilder,
109    ResourceMetrics, ScaleAction, ScaleDirection,
110    ScalingThreshold, ScalingPolicy, ResourceConfig,
111    ResourceId, MetricValue, Timestamp,
112};
113
114pub use error::{LighthouseError, LighthouseResult};
115
116pub use callbacks::{
117    CallbackContext, LighthouseCallbacks,
118    MetricsProvider, ScalingExecutor, ScalingObserver,
119};
120
121pub use engine::{
122    LighthouseEngine, LighthouseHandle, EngineStatus,
123};
124
125/// Convenience builder for creating common scaling policies
126pub mod policies {
127    use crate::types::{ScalingThreshold, ScalingPolicy};
128
129    /// Create a simple CPU-based scaling policy
130    pub fn cpu_scaling_policy(
131        scale_up_threshold: f64,
132        scale_down_threshold: f64,
133        scale_factor: f64,
134        cooldown_seconds: u64,
135    ) -> ScalingPolicy {
136        ScalingPolicy {
137            name: "cpu-scaling".to_string(),
138            thresholds: vec![ScalingThreshold {
139                metric_name: "cpu_percent".to_string(),
140                scale_up_threshold,
141                scale_down_threshold,
142                scale_factor,
143                cooldown_seconds,
144            }],
145            min_capacity: None,
146            max_capacity: None,
147            enabled: true,
148        }
149    }
150
151    /// Create a memory-based scaling policy
152    pub fn memory_scaling_policy(
153        scale_up_threshold: f64,
154        scale_down_threshold: f64,
155        scale_factor: f64,
156        cooldown_seconds: u64,
157    ) -> ScalingPolicy {
158        ScalingPolicy {
159            name: "memory-scaling".to_string(),
160            thresholds: vec![ScalingThreshold {
161                metric_name: "memory_percent".to_string(),
162                scale_up_threshold,
163                scale_down_threshold,
164                scale_factor,
165                cooldown_seconds,
166            }],
167            min_capacity: None,
168            max_capacity: None,
169            enabled: true,
170        }
171    }
172
173    /// Create a request-rate based scaling policy
174    pub fn request_rate_scaling_policy(
175        scale_up_threshold: f64,
176        scale_down_threshold: f64,
177        scale_factor: f64,
178        cooldown_seconds: u64,
179    ) -> ScalingPolicy {
180        ScalingPolicy {
181            name: "request-rate-scaling".to_string(),
182            thresholds: vec![ScalingThreshold {
183                metric_name: "requests_per_second".to_string(),
184                scale_up_threshold,
185                scale_down_threshold,
186                scale_factor,
187                cooldown_seconds,
188            }],
189            min_capacity: None,
190            max_capacity: None,
191            enabled: true,
192        }
193    }
194
195    /// Create a multi-metric scaling policy
196    pub fn multi_metric_policy(
197        name: &str,
198        cpu_threshold: (f64, f64),
199        memory_threshold: (f64, f64),
200        scale_factor: f64,
201        cooldown_seconds: u64,
202    ) -> ScalingPolicy {
203        ScalingPolicy {
204            name: name.to_string(),
205            thresholds: vec![
206                ScalingThreshold {
207                    metric_name: "cpu_percent".to_string(),
208                    scale_up_threshold: cpu_threshold.0,
209                    scale_down_threshold: cpu_threshold.1,
210                    scale_factor,
211                    cooldown_seconds,
212                },
213                ScalingThreshold {
214                    metric_name: "memory_percent".to_string(),
215                    scale_up_threshold: memory_threshold.0,
216                    scale_down_threshold: memory_threshold.1,
217                    scale_factor,
218                    cooldown_seconds,
219                },
220            ],
221            min_capacity: None,
222            max_capacity: None,
223            enabled: true,
224        }
225    }
226}
227
228/// Utility functions for common operations
229pub mod utils {
230    use crate::types::{ResourceMetrics, MetricValue};
231    use std::collections::HashMap;
232
233    /// Create ResourceMetrics with a single metric
234    pub fn single_metric(
235        resource_id: &str,
236        resource_type: &str,
237        metric_name: &str,
238        value: MetricValue,
239    ) -> ResourceMetrics {
240        let mut metrics = HashMap::new();
241        metrics.insert(metric_name.to_string(), value);
242        
243        ResourceMetrics {
244            resource_id: resource_id.to_string(),
245            resource_type: resource_type.to_string(),
246            timestamp: current_timestamp(),
247            metrics,
248        }
249    }
250
251    /// Create ResourceMetrics with multiple metrics
252    pub fn multi_metrics(
253        resource_id: &str,
254        resource_type: &str,
255        metrics: Vec<(&str, MetricValue)>,
256    ) -> ResourceMetrics {
257        let metrics_map: HashMap<String, MetricValue> = metrics
258            .into_iter()
259            .map(|(k, v)| (k.to_string(), v))
260            .collect();
261
262        ResourceMetrics {
263            resource_id: resource_id.to_string(),
264            resource_type: resource_type.to_string(),
265            timestamp: current_timestamp(),
266            metrics: metrics_map,
267        }
268    }
269
270    /// Get current Unix timestamp
271    pub fn current_timestamp() -> u64 {
272        std::time::SystemTime::now()
273            .duration_since(std::time::UNIX_EPOCH)
274            .unwrap_or_default()
275            .as_secs()
276    }
277}
278
#[cfg(test)]
mod tests {
    use super::*;
    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::Mutex;

    // Mock implementations for testing

    /// Metrics provider backed by an in-memory map keyed by resource id.
    struct MockMetricsProvider {
        // Async-locked shared store; `get_metrics` returns a clone of the
        // entry for the requested resource id, or None when absent.
        metrics: Arc<Mutex<HashMap<String, ResourceMetrics>>>,
    }

    #[async_trait::async_trait]
    impl MetricsProvider for MockMetricsProvider {
        async fn get_metrics(
            &self,
            resource_id: &str,
            _context: &CallbackContext,
        ) -> LighthouseResult<Option<ResourceMetrics>> {
            let metrics = self.metrics.lock().await;
            Ok(metrics.get(resource_id).cloned())
        }
    }

    /// Executor stub that reports every scale action as successfully applied.
    struct MockScalingExecutor;

    #[async_trait::async_trait]
    impl ScalingExecutor for MockScalingExecutor {
        async fn execute_scale_action(
            &self,
            _action: &ScaleAction,
            _context: &CallbackContext,
        ) -> LighthouseResult<bool> {
            Ok(true) // Always succeed
        }
    }

    /// Smoke test: an engine built from the default config starts, reports
    /// itself as running via its handle, and shuts down cleanly.
    #[tokio::test]
    async fn test_basic_engine_creation() {
        let config = LighthouseConfig::default();
        let callbacks = LighthouseCallbacks::new(
            Arc::new(MockMetricsProvider {
                metrics: Arc::new(Mutex::new(HashMap::new())),
            }),
            Arc::new(MockScalingExecutor),
        );

        let engine = LighthouseEngine::new(config, callbacks);
        let handle = engine.handle();

        // Test that we can get status
        // `start()` runs until shutdown, so drive it on a background task
        // while this test interacts with the engine through `handle`.
        tokio::spawn(async move {
            let _ = engine.start().await;
        });

        // Give engine time to start
        // NOTE(review): a fixed 100ms sleep is a potential flake on a slow or
        // loaded CI machine; a readiness signal from the engine would be more
        // robust — confirm whether the engine API offers one.
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;

        let status = handle.get_status().await.unwrap();
        assert!(status.is_running);

        handle.shutdown().await.unwrap();
    }
}