4 changes: 4 additions & 0 deletions aggregation_mode/Cargo.lock

Some generated files are not rendered by default.

6 changes: 3 additions & 3 deletions aggregation_mode/db/Cargo.toml
@@ -4,10 +4,10 @@ version = "0.1.0"
edition = "2021"

[dependencies]
serde = { workspace = true }
tokio = { version = "1"}
# TODO: enable tls
sqlx = { version = "0.8", features = [ "runtime-tokio", "postgres", "migrate" ] }

sqlx = { version = "0.8", features = [ "runtime-tokio", "postgres", "migrate" , "uuid", "bigdecimal"] }
tracing = { version = "0.1", features = ["log"] }

[[bin]]
name = "migrate"
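The newly enabled "uuid" and "bigdecimal" sqlx features map Postgres UUID and NUMERIC columns to sqlx::types::Uuid and sqlx::types::BigDecimal. A minimal sketch of what that enables (the fetch_fee function, proofs table, and fee column are hypothetical, not from this PR):

use sqlx::types::{BigDecimal, Uuid};
use sqlx::Row;

// Hypothetical helper: the "proofs" table and "fee" column are illustrative only.
async fn fetch_fee(pool: &sqlx::PgPool, task_id: Uuid) -> Result<BigDecimal, sqlx::Error> {
    let row = sqlx::query("SELECT fee FROM proofs WHERE id = $1")
        .bind(task_id) // Uuid encodes to a Postgres UUID via the "uuid" feature
        .fetch_one(pool)
        .await?;
    row.try_get("fee") // NUMERIC decodes to BigDecimal via the "bigdecimal" feature
}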
2 changes: 2 additions & 0 deletions aggregation_mode/db/src/lib.rs
@@ -1 +1,3 @@
pub mod orchestrator;
pub mod retry;
pub mod types;
253 changes: 253 additions & 0 deletions aggregation_mode/db/src/orchestrator.rs
@@ -0,0 +1,253 @@
use std::{
    future::Future,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
    time::Duration,
};

use sqlx::{postgres::PgPoolOptions, Pool, Postgres};

use crate::retry::{RetryConfig, RetryError};

#[derive(Debug, Clone, Copy)]
enum Operation {
    Read,
    Write,
}

/// A single DB node: connection pool plus shared health flags (used to prioritize nodes).
#[derive(Debug)]
struct DbNode {
    pool: Pool<Postgres>,
    last_read_failed: AtomicBool,
    last_write_failed: AtomicBool,
}

/// Database orchestrator for running reads/writes across multiple PostgreSQL nodes with retry/backoff.
///
/// `DbOrchestartor` holds a list of database nodes (connection pools) and will:
/// - try nodes in a preferred order (healthy nodes first, then recently-failed nodes),
/// - mark nodes as failed on connection-type errors,
/// - retry transient failures with exponential backoff based on `retry_config`.
///
/// ## Thread-safe `Clone`
/// This type is cheap and thread-safe to clone:
/// - `nodes` is `Vec<Arc<DbNode>>`, so cloning only increments `Arc` ref-counts and shares the same pools/nodes,
/// - `sqlx::Pool<Postgres>` is internally reference-counted and designed to be cloned and used concurrently,
/// - the node health flags are `AtomicBool`, so updates are safe from multiple threads/tasks.
///
/// Clones share health state (the atomics) and the underlying pools, so all clones observe and influence
/// the same “preferred node” ordering decisions.
#[derive(Debug, Clone)]
pub struct DbOrchestartor {
Review comment (Collaborator): This should be called DbOrchestrator.
    nodes: Vec<Arc<DbNode>>,
    retry_config: RetryConfig,
}

#[derive(Debug)]
pub enum DbOrchestartorError {
    InvalidNumberOfConnectionUrls,
    Sqlx(sqlx::Error),
}

impl std::fmt::Display for DbOrchestartorError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::InvalidNumberOfConnectionUrls => {
                write!(f, "invalid number of connection URLs")
            }
            Self::Sqlx(e) => write!(f, "{e}"),
        }
    }
}

impl DbOrchestartor {
    pub fn try_new(
        connection_urls: &[String],
        retry_config: RetryConfig,
    ) -> Result<Self, DbOrchestartorError> {
        if connection_urls.is_empty() {
            return Err(DbOrchestartorError::InvalidNumberOfConnectionUrls);
        }

        let nodes = connection_urls
            .iter()
            .map(|url| {
                let pool = PgPoolOptions::new().max_connections(5).connect_lazy(url)?;

                Ok(Arc::new(DbNode {
                    pool,
                    last_read_failed: AtomicBool::new(false),
                    last_write_failed: AtomicBool::new(false),
                }))
            })
            .collect::<Result<Vec<_>, sqlx::Error>>()
            .map_err(DbOrchestartorError::Sqlx)?;

        Ok(Self {
            nodes,
            retry_config,
        })
    }

    pub async fn write<T, Q, Fut>(&self, query: Q) -> Result<T, sqlx::Error>
    where
        Q: Fn(Pool<Postgres>) -> Fut,
        Fut: Future<Output = Result<T, sqlx::Error>>,
    {
        self.query::<T, Q, Fut>(query, Operation::Write).await
    }

    pub async fn read<T, Q, Fut>(&self, query: Q) -> Result<T, sqlx::Error>
    where
        Q: Fn(Pool<Postgres>) -> Fut,
        Fut: Future<Output = Result<T, sqlx::Error>>,
    {
        self.query::<T, Q, Fut>(query, Operation::Read).await
    }

    async fn query<T, Q, Fut>(&self, query_fn: Q, operation: Operation) -> Result<T, sqlx::Error>
    where
        Q: Fn(Pool<Postgres>) -> Fut,
        Fut: Future<Output = Result<T, sqlx::Error>>,
    {
        let mut attempts = 0;
        let mut delay = Duration::from_millis(self.retry_config.min_delay_millis);

        loop {
            match self.execute_once(&query_fn, operation).await {
                Ok(value) => return Ok(value),
                Err(RetryError::Permanent(err)) => return Err(err),
                Err(RetryError::Transient(err)) => {
                    if attempts >= self.retry_config.max_delay_seconds {
Review comment (Collaborator): I don't understand this comparison between the number of attempts and the max delay in seconds.
                        return Err(err);
                    }

                    tracing::warn!(attempt = attempts, delay_milis = delay.as_millis(), error = ?err, "retrying after backoff");
Review comment (Collaborator): Suggested change, fixing the delay_milis -> delay_millis typo:
                    tracing::warn!(attempt = attempts, delay_millis = delay.as_millis(), error = ?err, "retrying after backoff");

                    tokio::time::sleep(delay).await;
                    delay = self.next_backoff_delay(delay);
                    attempts += 1;
                }
            }
        }
    }

    // Exponential backoff with a hard cap.
    //
    // Each retry multiplies the previous delay by `retry_config.factor`,
    // then clamps it to `max_delay_seconds`. This yields:
    //
    // d_{n+1} = min(max, d_n * factor) => d_n = min(max, d_initial * factor^n)
    //
    // Example starting at 500ms with factor = 2.0 (no jitter):
    // retry 0: 0.5s
    // retry 1: 1.0s
    // retry 2: 2.0s
    // retry 3: 4.0s
    // retry 4: 8.0s
    // ...
    // until the delay reaches `max_delay_seconds`, after which it stays at that max.
    // see reference: https://en.wikipedia.org/wiki/Exponential_backoff
    // and here: https://docs.aws.amazon.com/prescriptive-guidance/latest/cloud-design-patterns/retry-backoff.html
    fn next_backoff_delay(&self, current_delay: Duration) -> Duration {
Review comment (Collaborator): Maybe we can move this function to aggregation_mode/db/src/retry.rs, as it's related to the retry functionality.

        let max: Duration = Duration::from_secs(self.retry_config.max_delay_seconds);
        // Defensive: factor should be >= 1.0 for backoff, we clamp it to avoid shrinking/NaN.
        let factor = f64::from(self.retry_config.factor).max(1.0);

        let scaled_secs = current_delay.as_secs_f64() * factor;
        let scaled_secs = if scaled_secs.is_finite() {
            scaled_secs
        } else {
            max.as_secs_f64()
        };

        let scaled = Duration::from_secs_f64(scaled_secs);
        if scaled > max {
            max
        } else {
            scaled
        }
    }

    async fn execute_once<T, Q, Fut>(
        &self,
        query_fn: &Q,
        operation: Operation,
    ) -> Result<T, RetryError<sqlx::Error>>
    where
        Q: Fn(Pool<Postgres>) -> Fut,
        Fut: Future<Output = Result<T, sqlx::Error>>,
    {
        let mut last_error = None;

        for idx in self.preferred_order(operation) {
            let node = &self.nodes[idx];
            let pool = node.pool.clone();

            match query_fn(pool).await {
                Ok(res) => {
                    match operation {
                        Operation::Read => node.last_read_failed.store(false, Ordering::Relaxed),
                        Operation::Write => node.last_write_failed.store(false, Ordering::Relaxed),
                    };
                    return Ok(res);
                }
                Err(err) => {
                    if Self::is_connection_error(&err) {
                        tracing::warn!(node_index = idx, error = ?err, "database query failed");
                        match operation {
                            Operation::Read => node.last_read_failed.store(true, Ordering::Relaxed),
                            Operation::Write => {
                                node.last_write_failed.store(true, Ordering::Relaxed)
                            }
                        };
                        last_error = Some(err);
                    } else {
                        return Err(RetryError::Permanent(err));
                    }
                }
            };
        }

        Err(RetryError::Transient(
            last_error.expect("write_op attempted without database nodes"),
        ))
    }

    fn preferred_order(&self, operation: Operation) -> Vec<usize> {
        let mut preferred = Vec::with_capacity(self.nodes.len());
        let mut fallback = Vec::new();

        for (idx, node) in self.nodes.iter().enumerate() {
            let failed = match operation {
                Operation::Read => node.last_read_failed.load(Ordering::Relaxed),
                Operation::Write => node.last_write_failed.load(Ordering::Relaxed),
            };

            if failed {
                fallback.push(idx);
            } else {
                preferred.push(idx);
            }
        }

        preferred.extend(fallback);
        preferred
    }

    fn is_connection_error(error: &sqlx::Error) -> bool {
        matches!(
            error,
            sqlx::Error::Io(_)
                | sqlx::Error::Tls(_)
                | sqlx::Error::Protocol(_)
                | sqlx::Error::PoolTimedOut
                | sqlx::Error::PoolClosed
                | sqlx::Error::WorkerCrashed
                | sqlx::Error::BeginFailed
                | sqlx::Error::Database(_)
        )
    }
}
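A usage sketch of the orchestrator (not part of this diff): build it from a list of connection URLs and run one read and one write through it. The RetryConfig values, SQL, and table names are illustrative only.

use db::{orchestrator::DbOrchestartor, retry::RetryConfig};

async fn example(urls: &[String]) -> Result<(), Box<dyn std::error::Error>> {
    let orchestrator = DbOrchestartor::try_new(
        urls,
        RetryConfig {
            min_delay_millis: 500,
            factor: 2.0,
            max_times: 5,
            max_delay_seconds: 8,
        },
    )
    .map_err(|e| e.to_string())?;

    // Queries are closures over a cloned pool; the orchestrator tries healthy
    // nodes first and retries transient (connection-type) failures with backoff.
    let one: i32 = orchestrator
        .read(|pool| async move {
            sqlx::query_scalar::<_, i32>("SELECT 1").fetch_one(&pool).await
        })
        .await?;
    assert_eq!(one, 1);

    orchestrator
        .write(|pool| async move {
            sqlx::query("UPDATE proofs SET verified = TRUE WHERE id = $1")
                .bind(1_i64)
                .execute(&pool)
                .await
                .map(|_| ())
        })
        .await?;

    Ok(())
}

Because clones share the node pools and health flags, the same orchestrator value can be handed to multiple tasks and all clones will agree on which nodes to prefer.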
28 changes: 28 additions & 0 deletions aggregation_mode/db/src/retry.rs
@@ -0,0 +1,28 @@
#[derive(Debug)]
pub(super) enum RetryError<E> {
    Transient(E),
    Permanent(E),
}

impl<E: std::fmt::Display> std::fmt::Display for RetryError<E> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            RetryError::Transient(e) => write!(f, "{e}"),
            RetryError::Permanent(e) => write!(f, "{e}"),
Review comment (Collaborator): This could be simplified, as both arms have the same behavior.

        }
    }
}

impl<E: std::fmt::Display> std::error::Error for RetryError<E> where E: std::fmt::Debug {}

#[derive(Debug, Clone)]
pub struct RetryConfig {
    /// * `min_delay_millis` - Initial delay before first retry attempt (in milliseconds)
    pub min_delay_millis: u64,
    /// * `factor` - Exponential backoff multiplier for retry delays
    pub factor: f32,
    /// * `max_times` - Maximum number of retry attempts
    pub max_times: usize,
    /// * `max_delay_seconds` - Maximum delay between retry attempts (in seconds)
    pub max_delay_seconds: u64,
}
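Following the review comment above about moving the backoff step into retry.rs, one possible shape is a method on RetryConfig (a sketch, not part of the PR; the initial_delay/next_delay names are hypothetical):

use std::time::Duration;

impl RetryConfig {
    /// Delay before the first retry.
    pub fn initial_delay(&self) -> Duration {
        Duration::from_millis(self.min_delay_millis)
    }

    /// Next exponential-backoff delay, capped at `max_delay_seconds`.
    pub fn next_delay(&self, current: Duration) -> Duration {
        let max_secs = self.max_delay_seconds as f64;
        // Clamp the factor so the delay never shrinks (and NaN cannot sneak in).
        let factor = f64::from(self.factor).max(1.0);
        let scaled = current.as_secs_f64() * factor;
        let capped = if scaled.is_finite() { scaled.min(max_secs) } else { max_secs };
        Duration::from_secs_f64(capped)
    }
}

The orchestrator's query loop would then start from retry_config.initial_delay() and call retry_config.next_delay(delay) in place of next_backoff_delay(delay).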
2 changes: 1 addition & 1 deletion aggregation_mode/db/src/types.rs
@@ -4,7 +4,7 @@ use sqlx::{
    Type,
};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Type)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Type, serde::Serialize)]
#[sqlx(type_name = "task_status", rename_all = "lowercase")]
pub enum TaskStatus {
    Pending,
1 change: 1 addition & 0 deletions aggregation_mode/gateway/Cargo.toml
@@ -10,6 +10,7 @@ serde_yaml = { workspace = true }
agg_mode_sdk = { path = "../sdk"}
aligned-sdk = { workspace = true }
sp1-sdk = { workspace = true }
db = { workspace = true }
tracing = { version = "0.1", features = ["log"] }
tracing-subscriber = { version = "0.3.0", features = ["env-filter"] }
bincode = "1.3.3"
2 changes: 1 addition & 1 deletion aggregation_mode/gateway/src/config.rs
@@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Config {
    pub port: u16,
    pub db_connection_url: String,
    pub db_connection_urls: Vec<String>,
    pub network: String,
    pub max_daily_proofs_per_user: i64,
}
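Since db_connection_urls is now a list, the gateway's YAML config provides one entry per database node. A hedged sketch of deserializing it with serde_yaml (already a gateway dependency); the URLs and other values below are made up:

// Hypothetical example config; only the field names come from the Config struct above.
fn parse_example() -> Result<Config, serde_yaml::Error> {
    let yaml = r#"
port: 8080
db_connection_urls:
  - "postgres://user:pass@db-primary:5432/aligned"
  - "postgres://user:pass@db-replica:5432/aligned"
network: "holesky"
max_daily_proofs_per_user: 100
"#;
    serde_yaml::from_str(yaml)
}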