libomni/types/volume.rs
//! This file defines the `Volume` enum and its associated methods and supporting types for managing volumes in a cluster.
//! A `Volume` represents a storage volume in the cluster, including its ID, size, and status.
use uuid::Uuid;
use std::collections::HashMap;
use chrono;

/// Volume metadata for tracking volume details
pub struct VolumeMetadata {
    creation_time: chrono::DateTime<chrono::Utc>,
    last_modified: chrono::DateTime<chrono::Utc>,
    status: VolumeStatus,
    labels: HashMap<String, String>, // For organization/selection
}
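
// Illustrative sketch (not part of the original file): one plausible way to
// initialize VolumeMetadata, stamping both timestamps with the current time
// and starting the volume in the `Available` state. The choice of initial
// status is an assumption.
impl VolumeMetadata {
    /// Creates metadata for a freshly provisioned volume (hypothetical helper).
    pub fn new(labels: HashMap<String, String>) -> Self {
        let now = chrono::Utc::now();
        VolumeMetadata {
            creation_time: now,
            last_modified: now,
            status: VolumeStatus::Available,
            labels,
        }
    }
}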

/// QoS configuration for controlling volume performance
pub struct QoSConfig {
    iops_limit: Option<u32>,
    throughput_limit: Option<u64>, // bytes per second
    iops_guarantee: Option<u32>,
    throughput_guarantee: Option<u64>,
    burstable: Option<BurstConfig>,
}

/// Configuration for burstable QoS performance
pub struct BurstConfig {
    duration: chrono::Duration,
    iops_multiplier: f32,
    throughput_multiplier: f32,
}
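
// Illustrative sketch (assumption, not part of the original API): how the burst
// multiplier above might be applied to a base limit to derive the effective
// limit during a burst window.
impl QoSConfig {
    /// Effective IOPS limit while a burst window is active, if both a base
    /// limit and a burst configuration are present (hypothetical helper).
    pub fn burst_iops_limit(&self) -> Option<u32> {
        match (&self.iops_limit, &self.burstable) {
            (Some(limit), Some(burst)) => Some((*limit as f32 * burst.iops_multiplier) as u32),
            _ => None,
        }
    }
}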

/// Security configuration for volumes
pub struct SecurityConfig {
    encryption_enabled: bool,
    encryption_algorithm: Option<String>,
    key_management: Option<KeyManagementType>,
    access_policies: Vec<AccessPolicy>,
}

/// Key management types for volume encryption
pub enum KeyManagementType {
    Internal,
    External { provider: String, config: HashMap<String, String> },
    CustomerManaged,
    HardwareSecurityModule { hsm_id: String },
}

/// Access policy for controlling volume operations
pub struct AccessPolicy {
    allowed_users: Vec<String>,
    allowed_groups: Vec<String>,
    allowed_operations: Vec<VolumeOperation>,
}

/// Possible operations that can be performed on a volume
pub enum VolumeOperation {
    Read,
    Write,
    Snapshot,
    Delete,
    Expand,
    Clone,
}
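
// Illustrative sketch (assumption, not part of the original API): how an access
// check against an AccessPolicy might look. Group membership resolution is out
// of scope here, and discriminant comparison is used only because
// VolumeOperation does not derive PartialEq in this file.
impl AccessPolicy {
    /// Returns true if `user` is explicitly allowed to perform `op` under this policy.
    pub fn permits(&self, user: &str, op: &VolumeOperation) -> bool {
        let user_allowed = self.allowed_users.iter().any(|u| u.as_str() == user);
        let op_allowed = self
            .allowed_operations
            .iter()
            .any(|o| std::mem::discriminant(o) == std::mem::discriminant(op));
        user_allowed && op_allowed
    }
}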

/// Backup policy configuration
pub struct BackupPolicy {
    schedule: String, // cron format
    retention: RetentionPolicy,
    consistency_type: ConsistencyType,
    target_location: String,
}

/// Types of consistency for backup operations
pub enum ConsistencyType {
    Crash,
    Filesystem,
    Application { pre_backup_hook: String, post_backup_hook: String },
}

/// Policy for retaining backups
pub struct RetentionPolicy {
    daily: u32,
    weekly: u32,
    monthly: u32,
    yearly: u32,
}

/// Enum to represent the different types of volumes in OmniCloud.
///
/// This enum defines the various volume types that can be used in OmniCloud,
/// allowing for flexibility in how data is stored and accessed.
///
/// The possible volume types include:
/// - Ephemeral: A temporary volume that is destroyed when its app instance terminates,
///   used for ephemeral storage within a single app instance.
/// - Persistent: A persistent volume that can be shared across nodes in the cluster,
///   used for applications that require data to persist across app restarts or need
///   to maintain state beyond the lifecycle of a single app instance.
/// - Shared: A network-shared volume that allows for data consistency and state management
///   across app instances running on different nodes in the cluster.
///
/// # WARNING
/// Each volume type has its own characteristics and limitations,
/// and it is important to choose the right type based on the application's
/// requirements for data persistence, availability, and performance.
pub enum Volume {
    /// Represents a temporary volume that is destroyed when its app instance terminates,
    /// used for ephemeral storage within a single app instance.
    ///
    /// If multiple app instances are running on the same node,
    /// each instance gets its own copy of the ephemeral volume, which
    /// is not shared with the other instances.
    ///
    /// This allows for isolated storage for each app instance, ensuring that
    /// data is not inadvertently shared or corrupted between instances.
    /// This is useful for caching, temporary files, or any data that does not
    /// need to persist beyond the lifecycle of the app instance.
    Ephemeral(EphemeralVolume),
    /// Represents a persistent volume that can be shared across nodes in the cluster.
    ///
    /// This volume type is used for applications that require data to persist across
    /// app restarts or need to maintain state beyond the lifecycle of a single app
    /// instance. It is also used for applications that require data to be shared across
    /// multiple app instances running on different nodes in the cluster.
    ///
    /// This volume type has a few different modes, each with its own characteristics:
    ///
    /// Local Persistent Volumes are stored on worker node local disks but managed
    /// in a way that preserves the data even if the container using them is removed.
    /// These volumes offer excellent performance due to their local nature but are
    /// tied to a specific node. If that node fails, the volume becomes unavailable
    /// until the node recovers. This approach is suitable for workloads that
    /// prioritize performance over availability, or in clusters where node failures
    /// are rare.
    ///
    /// Network-Attached Volumes are implemented via network storage protocols such
    /// as NFS, iSCSI, or specialized storage vendor APIs. These volumes can be
    /// accessed from any node in the cluster, allowing containers to be rescheduled
    /// freely without losing access to their data. The tradeoff is increased latency
    /// due to network communication, though modern networks and storage protocols can
    /// minimize this impact. Network-attached volumes are ideal for workloads that
    /// require flexibility in placement and moderate performance.
    ///
    /// Distributed Volumes are spread across multiple physical nodes for redundancy
    /// and improved availability. Technologies like Ceph, GlusterFS, or Longhorn
    /// underpin these volumes, storing multiple copies of the data or using erasure
    /// coding to protect against node failures. Distributed volumes offer the best
    /// combination of availability and performance, though they typically require
    /// more resources due to the replication overhead. They're well-suited for
    /// mission-critical applications where both performance and reliability are
    /// important.
    Persistent(PersistentVolume),
    /// Represents a network-shared volume.
    ///
    /// This volume type is used for applications that require data to be shared
    /// across app instances running on different nodes in the cluster, and for
    /// applications that require data integrity in the event of a node failure.
    ///
    /// Multiple app instances running on different nodes can share this volume,
    /// allowing for data consistency and state management across those instances.
    ///
    /// This is useful for distributed databases, shared logs, or any data that needs
    /// to be consistent and available across the cluster.
    ///
    /// # WARNING
    ///
    /// This volume type is shared across nodes, which means that if multiple nodes are
    /// running the same app, they will all share the same shared volume.
    /// This can lead to data inconsistency if not managed properly, especially if a
    /// node fails mid-write or the network partitions unexpectedly.
    Shared(SharedVolume),
}
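
// Illustrative sketch (assumption, not part of the original API): a small helper
// showing how calling code might branch on the variants documented above, for
// example when labelling volumes in logs or metrics.
impl Volume {
    /// Returns a short, human-readable label for this volume's variant.
    pub fn kind(&self) -> &'static str {
        match self {
            Volume::Ephemeral(_) => "ephemeral",
            Volume::Persistent(PersistentVolume::Local { .. }) => "persistent-local",
            Volume::Persistent(PersistentVolume::NetworkAttached { .. }) => "persistent-network-attached",
            Volume::Persistent(PersistentVolume::Distributed { .. }) => "persistent-distributed",
            Volume::Shared(_) => "shared",
        }
    }
}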

/// A temporary, instance-local volume (see `Volume::Ephemeral`).
pub struct EphemeralVolume {
    id: Uuid, // Unique identifier for the volume
    size: u64, // Size in bytes
    name: String, // Name of the volume
    metadata: VolumeMetadata, // Metadata for tracking volume status and details
    qos: Option<QoSConfig>, // QoS settings for performance
    security: Option<SecurityConfig>, // Security settings
}

/// A network-shared volume mountable by app instances on multiple nodes (see `Volume::Shared`).
pub struct SharedVolume {
    id: Uuid, // Unique identifier for the volume
    size: u64, // Size in bytes
    name: String, // Name of the volume
    status: String, // Status of the volume (e.g., "available", "in-use")
    nodes: Vec<String>, // List of nodes sharing this volume
    access_mode: AccessMode, // Access mode (RWO, ROX, RWX)
    metadata: VolumeMetadata, // Metadata for tracking volume status and details
    qos: Option<QoSConfig>, // QoS settings for performance
    security: Option<SecurityConfig>, // Security settings
    backup_policy: Option<BackupPolicy>, // Backup settings
}

/// Enum to represent persistent volumes in the cluster.
///
/// This enum defines the different types of persistent volumes that can be used
/// in the cluster, allowing for flexibility in how data is stored and accessed.
///
/// The possible persistent volume types include:
/// - Local: A volume that is stored on the local node, typically used for applications
///   that require data to persist across restarts or need to maintain state beyond
///   the lifecycle of a single app instance.
/// - NetworkAttached: A volume that is shared across nodes in the cluster, allowing for
///   data consistency and state management across app instances running on different nodes.
/// - Distributed: A volume that is distributed across multiple nodes in the cluster,
///   providing high availability and fault tolerance for applications that require
///   data to be available even in the event of a node failure.
///
/// This enum is used in the `Volume` enum to define the type of persistent volume
/// that is being used in the cluster.
///
/// # WARNING
/// Each persistent volume type has its own characteristics and limitations,
/// and it is important to choose the right type based on the application's
/// requirements for data persistence, availability, and performance.
/// Additionally, care should be taken to manage the lifecycle of persistent volumes
/// to avoid data loss or inconsistency, especially in the event of node failures
/// or network partitions.
pub enum PersistentVolume {
    Local {
        id: Uuid, // Unique identifier for the volume
        size: u64, // Size in bytes
        name: String, // Name of the volume
        status: String, // Status of the volume (e.g., "available", "in-use")
        host_mount_path: String, // Path where the volume is mounted
        metadata: VolumeMetadata, // Metadata for tracking volume status and details
        qos: Option<QoSConfig>, // QoS settings for performance
        security: Option<SecurityConfig>, // Security settings
        backup_policy: Option<BackupPolicy>, // Backup settings
    },
    NetworkAttached {
        id: Uuid, // Unique identifier for the volume
        size: u64, // Size in bytes
        name: String, // Name of the volume
        status: String, // Status of the volume (e.g., "available", "in-use")
        network_path: String, // Network path to the volume
        metadata: VolumeMetadata, // Metadata for tracking volume status and details
        qos: Option<QoSConfig>, // QoS settings for performance
        security: Option<SecurityConfig>, // Security settings
        backup_policy: Option<BackupPolicy>, // Backup settings
    },
    Distributed {
        id: Uuid, // Unique identifier for the volume
        size: u64, // Size in bytes
        name: String, // Name of the volume
        status: String, // Status of the volume (e.g., "available", "in-use")
        nodes: Vec<String>, // List of nodes sharing this volume
        metadata: VolumeMetadata, // Metadata for tracking volume status and details
        qos: Option<QoSConfig>, // QoS settings for performance
        security: Option<SecurityConfig>, // Security settings
        backup_policy: Option<BackupPolicy>, // Backup settings
    },
}
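
// Illustrative sketch (assumption, not part of the original API): captures the
// availability trade-off described above. Local volumes are tied to a single
// node's disk, while NetworkAttached and Distributed volumes do not depend on
// any one node being healthy.
impl PersistentVolume {
    /// Returns true if this volume's data lives on a single node's local disk.
    pub fn is_node_local(&self) -> bool {
        matches!(self, PersistentVolume::Local { .. })
    }
}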

/// Enum to represent the status of a volume.
///
/// This enum defines the different states a volume can be in, allowing for
/// better management and monitoring of the volume's lifecycle.
///
/// The possible statuses include:
/// - Available: The volume is ready for use and not currently in use by any node.
/// - InUse: The volume is currently being used by a node, identified by its node ID.
/// - Offline: The volume is not currently accessible, with a timestamp indicating the last time it was seen online.
/// - Blocked: The volume is blocked and cannot be used, possibly due to a failure or misconfiguration.
/// - Error: The volume is in an error state, indicating a problem with the volume or its configuration.
pub enum VolumeStatus {
    Available,
    InUse {
        node_id: Uuid, // ID of the node using the volume
    },
    Offline {
        last_seen: chrono::DateTime<chrono::Utc>, // Last time the volume was seen online
    },
    Blocked,
    Error,
}
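
// Illustrative sketch (assumption, not part of the original API): a predicate an
// attach workflow might use to decide whether a volume in a given state can be
// handed to a node. Treating only Available as attachable is an assumption.
impl VolumeStatus {
    /// Returns true if the volume is free to be attached to a node.
    pub fn is_attachable(&self) -> bool {
        matches!(self, VolumeStatus::Available)
    }
}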

/// Enum to represent access modes for shared volumes.
///
/// This enum defines the different access modes that can be applied to shared volumes,
/// allowing for flexibility in how volumes are accessed by different nodes.
///
/// The access modes include:
/// - ReadWriteOnce (RWO): The volume can be mounted as read-write by a single node.
/// - ReadOnlyMany (ROX): The volume can be mounted as read-only by many nodes.
/// - ReadWriteMany (RWX): The volume can be mounted as read-write by many nodes.
///
/// This enum is used in the `SharedVolume` struct to define how the volume can be accessed
/// by different nodes in the cluster.
pub enum AccessMode {
    ReadWriteOnce,
    ReadOnlyMany,
    ReadWriteMany,
}
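
// Illustrative sketch (assumption, not part of the original API): translates the
// RWO/ROX/RWX semantics documented above into the two questions a scheduler
// typically asks, namely whether more than one node may mount the volume and
// whether mounts may be writable.
impl AccessMode {
    /// Returns true if more than one node may mount the volume at the same time.
    pub fn allows_multiple_nodes(&self) -> bool {
        matches!(self, AccessMode::ReadOnlyMany | AccessMode::ReadWriteMany)
    }

    /// Returns true if the volume may be mounted read-write.
    pub fn allows_write(&self) -> bool {
        matches!(self, AccessMode::ReadWriteOnce | AccessMode::ReadWriteMany)
    }
}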

/// Snapshot of a volume at a point in time
pub struct VolumeSnapshot {
    id: Uuid,
    source_volume_id: Uuid,
    name: String,
    creation_time: chrono::DateTime<chrono::Utc>,
    size: u64,
    consistency_type: ConsistencyType,
}

/// Error type for volume operations
pub enum VolumeError {
    NotFound,
    AlreadyExists,
    InsufficientCapacity,
    AccessDenied,
    InvalidState,
    ValidationFailed(String),
    DriverFailed(String),
    Timeout,
    Internal(String),
}
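
// Illustrative sketch (assumption, not part of the original API): a Display
// implementation so VolumeError values can be rendered in logs and error
// messages. A std::error::Error impl would additionally require Debug.
impl std::fmt::Display for VolumeError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            VolumeError::NotFound => write!(f, "volume not found"),
            VolumeError::AlreadyExists => write!(f, "volume already exists"),
            VolumeError::InsufficientCapacity => write!(f, "insufficient capacity"),
            VolumeError::AccessDenied => write!(f, "access denied"),
            VolumeError::InvalidState => write!(f, "volume is in an invalid state for this operation"),
            VolumeError::ValidationFailed(msg) => write!(f, "validation failed: {}", msg),
            VolumeError::DriverFailed(msg) => write!(f, "storage driver failed: {}", msg),
            VolumeError::Timeout => write!(f, "operation timed out"),
            VolumeError::Internal(msg) => write!(f, "internal error: {}", msg),
        }
    }
}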

/// Configuration for creating a new volume
pub struct VolumeConfig {
    name: String,
    size: u64,
    volume_type: String,
    access_mode: Option<AccessMode>,
    qos: Option<QoSConfig>,
    security: Option<SecurityConfig>,
    backup_policy: Option<BackupPolicy>,
    labels: HashMap<String, String>,
}
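
// Illustrative sketch (assumption, not part of the original API): a Default impl
// showing how a caller inside this module might assemble a VolumeConfig before
// handing it to Volume::create. The "ephemeral" type string is a placeholder,
// not a value defined elsewhere in this file.
impl Default for VolumeConfig {
    fn default() -> Self {
        VolumeConfig {
            name: String::new(),
            size: 0,
            volume_type: "ephemeral".to_string(), // hypothetical type identifier
            access_mode: None,
            qos: None,
            security: None,
            backup_policy: None,
            labels: HashMap::new(),
        }
    }
}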

impl Volume {
    /// Creates a new volume based on the provided configuration
    pub fn create(config: VolumeConfig) -> Result<Self, VolumeError> {
        // Implementation would go here
        unimplemented!("Volume creation not yet implemented")
    }

    /// Deletes this volume
    pub fn delete(&self) -> Result<(), VolumeError> {
        // Implementation would go here
        unimplemented!("Volume deletion not yet implemented")
    }

    /// Attaches this volume to a specified node
    pub fn attach(&mut self, node_id: &str) -> Result<(), VolumeError> {
        // Implementation would go here
        unimplemented!("Volume attachment not yet implemented")
    }

    /// Detaches this volume from its current node
    pub fn detach(&mut self) -> Result<(), VolumeError> {
        // Implementation would go here
        unimplemented!("Volume detachment not yet implemented")
    }

    /// Expands this volume to a new size
    pub fn expand(&mut self, new_size: u64) -> Result<(), VolumeError> {
        // Implementation would go here
        unimplemented!("Volume expansion not yet implemented")
    }

    /// Creates a snapshot of this volume
    pub fn snapshot(&self, name: &str) -> Result<VolumeSnapshot, VolumeError> {
        // Implementation would go here
        unimplemented!("Volume snapshot not yet implemented")
    }

    /// Restores this volume from a snapshot
    pub fn restore_from_snapshot(&mut self, snapshot: &VolumeSnapshot) -> Result<(), VolumeError> {
        // Implementation would go here
        unimplemented!("Volume restore not yet implemented")
    }

    /// Creates a clone of this volume
    pub fn clone(&self, name: &str) -> Result<Self, VolumeError> {
        // Implementation would go here
        unimplemented!("Volume cloning not yet implemented")
    }

    /// Transforms this volume to a different type
    pub fn transform(&self, to_type: String) -> Result<Self, VolumeError> {
        // Implementation would go here
        unimplemented!("Volume transformation not yet implemented")
    }

    /// Checks the integrity of this volume
    pub fn check_integrity(&self) -> Result<bool, VolumeError> {
        // Implementation would go here
        unimplemented!("Volume integrity checking not yet implemented")
    }

    /// Repairs this volume if possible
    pub fn repair(&mut self) -> Result<(), VolumeError> {
        // Implementation would go here
        unimplemented!("Volume repair not yet implemented")
    }

    /// Updates the QoS configuration for this volume
    pub fn update_qos(&mut self, qos: QoSConfig) -> Result<(), VolumeError> {
        // Implementation would go here
        unimplemented!("QoS update not yet implemented")
    }

    /// Updates the security configuration for this volume
    pub fn update_security(&mut self, security: SecurityConfig) -> Result<(), VolumeError> {
        // Implementation would go here
        unimplemented!("Security update not yet implemented")
    }

    /// Updates the backup policy for this volume
    pub fn update_backup_policy(&mut self, policy: BackupPolicy) -> Result<(), VolumeError> {
        // Implementation would go here
        unimplemented!("Backup policy update not yet implemented")
    }
}