From 41e0ca0237bf22b3cb7cd920e7f12b3d20853284 Mon Sep 17 00:00:00 2001 From: Mateusz Jasiuk Date: Thu, 5 Dec 2024 10:03:29 +0100 Subject: [PATCH 01/29] refactor: insert bonds to use col count macro --- chain/src/main.rs | 10 ++------- chain/src/repository/pos.rs | 44 ++++++++++--------------------------- 2 files changed, 14 insertions(+), 40 deletions(-) diff --git a/chain/src/main.rs b/chain/src/main.rs index cd81f2fd0..5ef4d183e 100644 --- a/chain/src/main.rs +++ b/chain/src/main.rs @@ -462,14 +462,8 @@ async fn try_initial_query( validator_set, )?; - repository::pos::insert_bonds_in_chunks( - transaction_conn, - bonds, - )?; - repository::pos::insert_unbonds_in_chunks( - transaction_conn, - unbonds, - )?; + repository::pos::insert_bonds(transaction_conn, bonds)?; + repository::pos::insert_unbonds(transaction_conn, unbonds)?; repository::crawler_state::upsert_crawler_state( transaction_conn, diff --git a/chain/src/repository/pos.rs b/chain/src/repository/pos.rs index 83322f6b9..8bdea462c 100644 --- a/chain/src/repository/pos.rs +++ b/chain/src/repository/pos.rs @@ -17,6 +17,7 @@ use orm::validators::{ use shared::block::Epoch; use shared::bond::Bonds; use shared::id::Id; +use shared::tuple_len::TupleLen; use shared::unbond::{UnbondAddresses, Unbonds}; use shared::validator::{ValidatorMetadataChange, ValidatorSet}; @@ -28,12 +29,6 @@ struct UnbondsColCount { count: i64, } -#[derive(QueryableByName)] -struct BondsColCount { - #[diesel(sql_type = BigInt)] - count: i64, -} - pub fn clear_bonds( transaction_conn: &mut PgConnection, addresses: Vec<(Id, Id)>, @@ -66,31 +61,22 @@ pub fn clear_bonds( anyhow::Ok(()) } -pub fn insert_bonds_in_chunks( +pub fn insert_bonds( transaction_conn: &mut PgConnection, bonds: Bonds, ) -> anyhow::Result<()> { - let bonds_col_count = sql_query( - "SELECT COUNT(*) - FROM information_schema.columns - WHERE table_schema = 'public' - AND table_name = 'bonds';", - ) - .get_result::(transaction_conn)?; + let bonds_col_count = bonds::all_columns.len() as i64; - for chunk in bonds - // We have to divide MAX_PARAM_SIZE by the number of columns in the - // balances table to get the correct number of rows in the - // chunk. - .chunks((MAX_PARAM_SIZE as i64 / bonds_col_count.count) as usize) + for chunk in + bonds.chunks((MAX_PARAM_SIZE as i64 / bonds_col_count) as usize) { - insert_bonds(transaction_conn, chunk.to_vec())? + insert_bonds_chunk(transaction_conn, chunk.to_vec())? } anyhow::Ok(()) } -pub fn insert_bonds( +fn insert_bonds_chunk( transaction_conn: &mut PgConnection, bonds: Bonds, ) -> anyhow::Result<()> { @@ -127,31 +113,25 @@ pub fn insert_bonds( anyhow::Ok(()) } -pub fn insert_unbonds_in_chunks( +pub fn insert_unbonds( transaction_conn: &mut PgConnection, unbonds: Unbonds, ) -> anyhow::Result<()> { - let unbonds_col_count = sql_query( - "SELECT COUNT(*) - FROM information_schema.columns - WHERE table_schema = 'public' - AND table_name = 'unbonds';", - ) - .get_result::(transaction_conn)?; + let unbonds_col_count = unbonds::all_columns.len() as i64; for chunk in unbonds // We have to divide MAX_PARAM_SIZE by the number of columns in the // balances table to get the correct number of rows in the // chunk. - .chunks((MAX_PARAM_SIZE as i64 / unbonds_col_count.count) as usize) + .chunks((MAX_PARAM_SIZE as i64 / unbonds_col_count) as usize) { - insert_unbonds(transaction_conn, chunk.to_vec())? + insert_unbonds_chunk(transaction_conn, chunk.to_vec())? 
} anyhow::Ok(()) } -pub fn insert_unbonds( +fn insert_unbonds_chunk( transaction_conn: &mut PgConnection, unbonds: Unbonds, ) -> anyhow::Result<()> { From 2b8fcf5d644b6ea8a354ef3b5fcc808ca145faad Mon Sep 17 00:00:00 2001 From: Mateusz Jasiuk Date: Thu, 5 Dec 2024 10:18:59 +0100 Subject: [PATCH 02/29] feat: insert votes in chunks --- chain/src/repository/balance.rs | 10 ++++------ chain/src/repository/gov.rs | 20 ++++++++++++++++++++ chain/src/repository/mod.rs | 1 + chain/src/repository/pos.rs | 21 +++++---------------- chain/src/repository/utils.rs | 4 ++++ shared/src/tuple_len.rs | 10 +++++----- 6 files changed, 39 insertions(+), 27 deletions(-) create mode 100644 chain/src/repository/utils.rs diff --git a/chain/src/repository/balance.rs b/chain/src/repository/balance.rs index 3269765d0..101d08722 100644 --- a/chain/src/repository/balance.rs +++ b/chain/src/repository/balance.rs @@ -6,7 +6,8 @@ use orm::token::{IbcTokenInsertDb, TokenInsertDb}; use shared::balance::Balances; use shared::token::Token; use shared::tuple_len::TupleLen; -pub const MAX_PARAM_SIZE: u16 = u16::MAX; + +use super::utils::MAX_PARAM_SIZE; pub fn insert_balances( transaction_conn: &mut PgConnection, @@ -14,11 +15,8 @@ pub fn insert_balances( ) -> anyhow::Result<()> { let balances_col_count = balance_changes::all_columns.len() as i64; - for chunk in balances - // We have to divide MAX_PARAM_SIZE by the number of columns in the - // balances table to get the correct number of rows in the - // chunk. - .chunks((MAX_PARAM_SIZE as i64 / balances_col_count) as usize) + for chunk in + balances.chunks((MAX_PARAM_SIZE as i64 / balances_col_count) as usize) { diesel::insert_into(balance_changes::table) .values::<&Vec>( diff --git a/chain/src/repository/gov.rs b/chain/src/repository/gov.rs index ac62cfcde..7882ed6cb 100644 --- a/chain/src/repository/gov.rs +++ b/chain/src/repository/gov.rs @@ -7,8 +7,11 @@ use orm::governance_proposal::GovernanceProposalInsertDb; use orm::governance_votes::GovernanceProposalVoteInsertDb; use orm::schema::{governance_proposals, governance_votes}; use shared::proposal::{GovernanceProposal, TallyType}; +use shared::tuple_len::TupleLen; use shared::vote::GovernanceVote; +use super::utils::MAX_PARAM_SIZE; + pub fn insert_proposals( transaction_conn: &mut PgConnection, proposals: Vec<(GovernanceProposal, TallyType)>, @@ -45,6 +48,23 @@ pub fn insert_proposals( pub fn insert_votes( transaction_conn: &mut PgConnection, proposals_votes: HashSet, +) -> anyhow::Result<()> { + let votes_col_count = governance_votes::all_columns.len() as i64; + + for chunk in proposals_votes + .into_iter() + .collect::>() + .chunks((MAX_PARAM_SIZE as i64 / votes_col_count) as usize) + { + insert_votes_chunk(transaction_conn, chunk.to_vec())? 
+ } + + anyhow::Ok(()) +} + +fn insert_votes_chunk( + transaction_conn: &mut PgConnection, + proposals_votes: Vec, ) -> anyhow::Result<()> { diesel::insert_into(governance_votes::table) .values::<&Vec>( diff --git a/chain/src/repository/mod.rs b/chain/src/repository/mod.rs index 611cf592a..efdb8fcdc 100644 --- a/chain/src/repository/mod.rs +++ b/chain/src/repository/mod.rs @@ -3,3 +3,4 @@ pub mod crawler_state; pub mod gov; pub mod pos; pub mod revealed_pk; +mod utils; diff --git a/chain/src/repository/pos.rs b/chain/src/repository/pos.rs index 8bdea462c..0960c229e 100644 --- a/chain/src/repository/pos.rs +++ b/chain/src/repository/pos.rs @@ -1,12 +1,10 @@ use std::collections::HashSet; use anyhow::Context; -use diesel::sql_types::BigInt; use diesel::upsert::excluded; use diesel::{ - sql_query, BoolExpressionMethods, ExpressionMethods, - OptionalEmptyChangesetExtension, PgConnection, QueryDsl, QueryableByName, - RunQueryDsl, SelectableHelper, + BoolExpressionMethods, ExpressionMethods, OptionalEmptyChangesetExtension, + PgConnection, QueryDsl, RunQueryDsl, SelectableHelper, }; use orm::bond::BondInsertDb; use orm::schema::{bonds, pos_rewards, unbonds, validators}; @@ -21,13 +19,7 @@ use shared::tuple_len::TupleLen; use shared::unbond::{UnbondAddresses, Unbonds}; use shared::validator::{ValidatorMetadataChange, ValidatorSet}; -pub const MAX_PARAM_SIZE: u16 = u16::MAX; - -#[derive(QueryableByName)] -struct UnbondsColCount { - #[diesel(sql_type = BigInt)] - count: i64, -} +use super::utils::MAX_PARAM_SIZE; pub fn clear_bonds( transaction_conn: &mut PgConnection, @@ -119,11 +111,8 @@ pub fn insert_unbonds( ) -> anyhow::Result<()> { let unbonds_col_count = unbonds::all_columns.len() as i64; - for chunk in unbonds - // We have to divide MAX_PARAM_SIZE by the number of columns in the - // balances table to get the correct number of rows in the - // chunk. - .chunks((MAX_PARAM_SIZE as i64 / unbonds_col_count) as usize) + for chunk in + unbonds.chunks((MAX_PARAM_SIZE as i64 / unbonds_col_count) as usize) { insert_unbonds_chunk(transaction_conn, chunk.to_vec())? } diff --git a/chain/src/repository/utils.rs b/chain/src/repository/utils.rs new file mode 100644 index 000000000..bd4b8ce6d --- /dev/null +++ b/chain/src/repository/utils.rs @@ -0,0 +1,4 @@ +// Represents maximum number of parameters that we can insert into postgres in +// one go. To get the number of rows that we can insert in one chunk, we have to +// divide MAX_PARAM_SIZE by the number of columns in the given table. +pub const MAX_PARAM_SIZE: u16 = u16::MAX; diff --git a/shared/src/tuple_len.rs b/shared/src/tuple_len.rs index 2410ac952..a4f51c73f 100644 --- a/shared/src/tuple_len.rs +++ b/shared/src/tuple_len.rs @@ -1,13 +1,13 @@ -// -// The TupleLen trait allows compile-time checking of the length of a tuple. This is useful for -// statically determining the number of columns in a diesel schema table. +// The TupleLen trait allows compile-time checking of the length of a tuple. +// This is useful for statically determining the number of columns in a diesel +// schema table. // // Use it like this: // // let num_columns = orm::schema::(table_name)::all_columns.len(); // -// If you need to support tuples with more than 12 elements, you can add more type parameters to -// the tuple! macro invocation at the bottom of this file. +// If you need to support tuples with more than 12 elements, you can add more +// type parameters to the tuple! macro invocation at the bottom of this file. 
// pub trait TupleLen { From fec6b0e928141852508af58d0274b99de8aaac44 Mon Sep 17 00:00:00 2001 From: Mateusz Jasiuk Date: Fri, 13 Dec 2024 09:20:21 +0100 Subject: [PATCH 03/29] feat: add active at query param to bonds and unbonds (#198) --- swagger.yml | 12 ++++++++++++ webserver/src/dto/pos.rs | 6 ++++++ webserver/src/handler/pos.rs | 4 ++-- webserver/src/repository/pos.rs | 24 ++++++++++++++++++++---- webserver/src/service/pos.rs | 6 ++++-- 5 files changed, 44 insertions(+), 8 deletions(-) diff --git a/swagger.yml b/swagger.yml index 6888fed45..27bc4e8b6 100644 --- a/swagger.yml +++ b/swagger.yml @@ -113,6 +113,12 @@ paths: type: integer minimum: 1 description: Pagination parameter + - in: query + name: activeAt + schema: + type: integer + minimum: 0 + description: Get all bonds that are active at this epoch responses: '200': description: A list of bonds. @@ -175,6 +181,12 @@ paths: type: integer minimum: 1 description: Pagination parameter + - in: query + name: activeAt + schema: + type: integer + minimum: 0 + description: Get all unbonds that are active at this epoch( < ) responses: '200': description: A list of unbonds. diff --git a/webserver/src/dto/pos.rs b/webserver/src/dto/pos.rs index 9a72bec3e..4a1a06978 100644 --- a/webserver/src/dto/pos.rs +++ b/webserver/src/dto/pos.rs @@ -64,15 +64,21 @@ pub enum MyValidatorKindDto { } #[derive(Clone, Serialize, Deserialize, Validate)] +#[serde(rename_all = "camelCase")] pub struct BondsDto { #[validate(range(min = 1, max = 10000))] pub page: Option, + #[validate(range(min = 0))] + pub active_at: Option, } #[derive(Clone, Serialize, Deserialize, Validate)] +#[serde(rename_all = "camelCase")] pub struct UnbondsDto { #[validate(range(min = 1, max = 10000))] pub page: Option, + #[validate(range(min = 0))] + pub active_at: Option, } #[derive(Clone, Serialize, Deserialize, Validate)] diff --git a/webserver/src/handler/pos.rs b/webserver/src/handler/pos.rs index d589c8ed1..a6b8e4c37 100644 --- a/webserver/src/handler/pos.rs +++ b/webserver/src/handler/pos.rs @@ -57,7 +57,7 @@ pub async fn get_bonds( let (bonds, total_pages, total_bonds) = state .pos_service - .get_bonds_by_address(address, page) + .get_bonds_by_address(address, page, query.active_at) .await?; let response = @@ -97,7 +97,7 @@ pub async fn get_unbonds( let (unbonds, total_pages, total_unbonds) = state .pos_service - .get_unbonds_by_address(address, page) + .get_unbonds_by_address(address, page, query.active_at) .await?; let response = diff --git a/webserver/src/repository/pos.rs b/webserver/src/repository/pos.rs index 327592369..e623b045f 100644 --- a/webserver/src/repository/pos.rs +++ b/webserver/src/repository/pos.rs @@ -60,12 +60,14 @@ pub trait PosRepositoryTrait { &self, address: String, page: i64, + active_at: Option, ) -> Result, String>; async fn find_unbonds_by_address( &self, address: String, page: i64, + active_at: Option, ) -> Result, String>; async fn find_merged_unbonds_by_address( @@ -183,12 +185,19 @@ impl PosRepositoryTrait for PosRepository { &self, address: String, page: i64, + active_at: Option, ) -> Result, String> { let conn = self.app_state.get_db_connection().await; conn.interact(move |conn| { - validators::table - .inner_join(bonds::table) + let mut query = + validators::table.inner_join(bonds::table).into_boxed(); + + if let Some(at) = active_at { + query = query.filter(bonds::dsl::start.le(at)); + } + + query .filter(bonds::dsl::address.eq(address)) .select((validators::all_columns, bonds::all_columns)) .paginate(page) @@ -235,12 +244,19 @@ impl 
PosRepositoryTrait for PosRepository { &self, address: String, page: i64, + active_at: Option, ) -> Result, String> { let conn = self.app_state.get_db_connection().await; conn.interact(move |conn| { - validators::table - .inner_join(unbonds::table) + let mut query = + validators::table.inner_join(unbonds::table).into_boxed(); + + if let Some(at) = active_at { + query = query.filter(unbonds::dsl::withdraw_epoch.lt(at)); + } + + query .filter(unbonds::dsl::address.eq(address)) .select((validators::all_columns, unbonds::all_columns)) .paginate(page) diff --git a/webserver/src/service/pos.rs b/webserver/src/service/pos.rs index 636dad679..a2c30cbad 100644 --- a/webserver/src/service/pos.rs +++ b/webserver/src/service/pos.rs @@ -102,6 +102,7 @@ impl PosService { &self, address: String, page: u64, + active_at: Option, ) -> Result<(Vec, u64, u64), PoSError> { let pos_state = self .pos_repo @@ -111,7 +112,7 @@ impl PosService { let (db_bonds, total_pages, total_items) = self .pos_repo - .find_bonds_by_address(address, page as i64) + .find_bonds_by_address(address, page as i64, active_at) .await .map_err(PoSError::Database)?; @@ -154,10 +155,11 @@ impl PosService { &self, address: String, page: u64, + active_at: Option, ) -> Result<(Vec, u64, u64), PoSError> { let (db_unbonds, total_pages, total_items) = self .pos_repo - .find_unbonds_by_address(address, page as i64) + .find_unbonds_by_address(address, page as i64, active_at) .await .map_err(PoSError::Database)?; From 68215a7d5acc65f03e3761768f5381bd59e1f582 Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Mon, 9 Dec 2024 12:27:00 +0100 Subject: [PATCH 04/29] fetch validators out of consensus set --- .../down.sql | 1 + .../up.sql | 3 + orm/src/schema.rs | 62 +++++---------- orm/src/transactions.rs | 4 + pos/src/main.rs | 24 +++++- pos/src/repository/pos.rs | 32 +++++++- pos/src/services/namada.rs | 75 +++++++++++++++++++ shared/src/transaction.rs | 19 +++++ shared/src/validator.rs | 13 ++++ 9 files changed, 184 insertions(+), 49 deletions(-) create mode 100644 orm/migrations/2024-12-10-104502_transaction_types/down.sql create mode 100644 orm/migrations/2024-12-10-104502_transaction_types/up.sql diff --git a/orm/migrations/2024-12-10-104502_transaction_types/down.sql b/orm/migrations/2024-12-10-104502_transaction_types/down.sql new file mode 100644 index 000000000..d9a93fe9a --- /dev/null +++ b/orm/migrations/2024-12-10-104502_transaction_types/down.sql @@ -0,0 +1 @@ +-- This file should undo anything in `up.sql` diff --git a/orm/migrations/2024-12-10-104502_transaction_types/up.sql b/orm/migrations/2024-12-10-104502_transaction_types/up.sql new file mode 100644 index 000000000..8adc085d4 --- /dev/null +++ b/orm/migrations/2024-12-10-104502_transaction_types/up.sql @@ -0,0 +1,3 @@ +-- Your SQL goes here +ALTER TYPE TRANSACTION_KIND ADD VALUE 'reactivate_validator'; +ALTER TYPE TRANSACTION_KIND ADD VALUE 'deactivate_validator'; \ No newline at end of file diff --git a/orm/src/schema.rs b/orm/src/schema.rs index cf18897c1..942414874 100644 --- a/orm/src/schema.rs +++ b/orm/src/schema.rs @@ -1,75 +1,47 @@ // @generated automatically by Diesel CLI. 
pub mod sql_types { - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "crawler_name"))] pub struct CrawlerName; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_kind"))] pub struct GovernanceKind; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_result"))] pub struct GovernanceResult; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_tally_type"))] pub struct GovernanceTallyType; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[diesel(postgres_type(name = "payment_kind"))] + pub struct PaymentKind; + + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[diesel(postgres_type(name = "payment_recurrence"))] + pub struct PaymentRecurrence; + + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "token_type"))] pub struct TokenType; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "transaction_kind"))] pub struct TransactionKind; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "transaction_result"))] pub struct TransactionResult; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "validator_state"))] pub struct ValidatorState; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "vote_kind"))] pub struct VoteKind; } diff --git a/orm/src/transactions.rs b/orm/src/transactions.rs index 29a06182c..ca0f89d9e 100644 --- a/orm/src/transactions.rs +++ b/orm/src/transactions.rs @@ -26,6 +26,8 @@ pub enum TransactionKindDb { ChangeCommission, RevealPk, BecomeValidator, + ReactivateValidator, + DeactivateValidator, Unknown, } @@ -54,6 +56,8 @@ impl From for TransactionKindDb { TransactionKind::CommissionChange(_) => { TransactionKindDb::ChangeCommission } + TransactionKind::DeactivateValidator(_) => TransactionKindDb::DeactivateValidator, + TransactionKind::ReactivateValidator(_) => TransactionKindDb::ReactivateValidator, TransactionKind::RevealPk(_) => TransactionKindDb::RevealPk, TransactionKind::BecomeValidator(_) => { TransactionKindDb::BecomeValidator diff --git a/pos/src/main.rs 
b/pos/src/main.rs index 76f7842c1..f85c4f5a0 100644 --- a/pos/src/main.rs +++ b/pos/src/main.rs @@ -72,10 +72,30 @@ async fn crawling_fn( .await .into_rpc_error()?; + let missing_validators = + namada_service::get_missing_validators_state_from_db( + &conn, + validators_set.validators.clone(), + ) + .await; + let missing_validator_set = namada_service::get_validators_state( + &client, + missing_validators, + epoch_to_process, + ) + .await + .into_rpc_error()?; + + let complete_validators_set = validators_set.union(&missing_validator_set); + tracing::info!( - "Processing epoch {} with {} validators...", + "Processing epoch {} with {} validators in the consensus set and {} \ + missing (total {})...", epoch_to_process, + validators_set.validators.len(), + missing_validator_set.validators.len(), validators_set.validators.len() + + missing_validator_set.validators.len() ); let timestamp = DateTimeUtc::now().0.timestamp(); @@ -90,7 +110,7 @@ async fn crawling_fn( conn.build_transaction() .read_write() .run(|transaction_conn| { - let validators_dbo = &validators_set + let validators_dbo = &complete_validators_set .validators .into_iter() .map(ValidatorInsertDb::from_validator) diff --git a/pos/src/repository/pos.rs b/pos/src/repository/pos.rs index 3efaddd37..dd4a5ee86 100644 --- a/pos/src/repository/pos.rs +++ b/pos/src/repository/pos.rs @@ -1,8 +1,16 @@ +use std::collections::HashSet; + use anyhow::Context; +use deadpool_diesel::postgres::Object; +use diesel::dsl::not; use diesel::upsert::excluded; -use diesel::{ExpressionMethods, PgConnection, RunQueryDsl}; +use diesel::{ + ExpressionMethods, PgConnection, QueryDsl, RunQueryDsl, SelectableHelper, +}; use orm::schema::validators; -use orm::validators::ValidatorInsertDb; +use orm::validators::{ValidatorDb, ValidatorInsertDb}; +use shared::error::ContextDbInteractError; +use shared::validator::Validator; pub fn upsert_validators( transaction_conn: &mut PgConnection, @@ -25,3 +33,23 @@ pub fn upsert_validators( Ok(()) } + +pub async fn get_missing_validators( + conn: &Object, + validators: HashSet, +) -> anyhow::Result> { + conn.interact(move |conn| { + validators::table + .filter(not(validators::dsl::namada_address.eq_any( + validators + .into_iter() + .map(|validator| validator.address.to_owned().to_string()) + .collect::>(), + ))) + .select(ValidatorDb::as_select()) + .load(conn) + }) + .await + .context_db_interact_error()? 
+ .context("Failed to read validator state from the db") +} diff --git a/pos/src/services/namada.rs b/pos/src/services/namada.rs index 9261648a8..b112ee1b8 100644 --- a/pos/src/services/namada.rs +++ b/pos/src/services/namada.rs @@ -1,14 +1,19 @@ use std::collections::HashSet; use anyhow::Context; +use deadpool_diesel::postgres::Object; use futures::{StreamExt, TryStreamExt}; use namada_core::chain::Epoch as NamadaSdkEpoch; +use namada_sdk::address::Address; use namada_sdk::rpc; +use orm::validators::ValidatorStateDb; use shared::block::Epoch; use shared::id::Id; use shared::validator::{Validator, ValidatorSet, ValidatorState}; use tendermint_rpc::HttpClient; +use crate::repository::pos::get_missing_validators; + pub async fn get_validator_set_at_epoch( client: &HttpClient, epoch: Epoch, @@ -91,6 +96,41 @@ pub async fn get_validator_set_at_epoch( Ok(ValidatorSet { validators, epoch }) } +pub async fn get_validators_state( + client: &HttpClient, + validators: Vec, + epoch: Epoch, +) -> anyhow::Result { + let namada_epoch = to_epoch(epoch); + + let validators = futures::stream::iter(validators) + .map(|mut validator| async move { + let validator_address = Address::from(validator.address.clone()); + let validator_state = rpc::get_validator_state( + client, + &validator_address, + Some(namada_epoch), + ) + .await + .with_context(|| { + format!("Failed to query validator {validator_address} state") + })?; + let validator_state = validator_state + .0 + .map(ValidatorState::from) + .unwrap_or(ValidatorState::Unknown); + + validator.state = validator_state; + + anyhow::Ok(validator) + }) + .buffer_unordered(100) + .try_collect::>() + .await?; + + Ok(ValidatorSet { validators, epoch }) +} + pub async fn get_current_epoch(client: &HttpClient) -> anyhow::Result { let epoch = rpc::query_epoch(client) .await @@ -99,6 +139,41 @@ pub async fn get_current_epoch(client: &HttpClient) -> anyhow::Result { Ok(epoch.0 as Epoch) } +pub async fn get_missing_validators_state_from_db( + conn: &Object, + validators: HashSet, +) -> Vec { + get_missing_validators(conn, validators) + .await + .unwrap_or_default() + .into_iter() + .map(|validator| Validator { + address: Id::Account(validator.namada_address), + voting_power: validator.voting_power.to_string(), + max_commission: validator.max_commission, + commission: validator.commission, + name: validator.name, + email: validator.email, + description: validator.description, + website: validator.website, + discord_handler: validator.discord_handle, + avatar: validator.avatar, + state: match validator.state { + ValidatorStateDb::Consensus => ValidatorState::Consensus, + ValidatorStateDb::BelowCapacity => { + ValidatorState::BelowCapacity + } + ValidatorStateDb::BelowThreshold => { + ValidatorState::BelowThreshold + } + ValidatorStateDb::Inactive => ValidatorState::Inactive, + ValidatorStateDb::Jailed => ValidatorState::Jailed, + ValidatorStateDb::Unknown => ValidatorState::Unknown, + }, + }) + .collect() +} + fn to_epoch(epoch: u32) -> NamadaSdkEpoch { NamadaSdkEpoch::from(epoch as u64) } diff --git a/shared/src/transaction.rs b/shared/src/transaction.rs index f8d4200c8..ebc5b5239 100644 --- a/shared/src/transaction.rs +++ b/shared/src/transaction.rs @@ -2,6 +2,7 @@ use std::collections::HashMap; use std::fmt::Display; use namada_governance::{InitProposalData, VoteProposalData}; +use namada_sdk::address::Address; use namada_sdk::borsh::BorshDeserialize; use namada_sdk::key::common::PublicKey; use namada_sdk::masp::ShieldedTransfer; @@ -48,6 +49,8 @@ pub enum 
TransactionKind {
     CommissionChange(Option),
     RevealPk(Option),
     BecomeValidator(Option<Box<BecomeValidator>>),
+    ReactivateValidator(Option<Address>),
+    DeactivateValidator(Option<Address>
), Unknown, } @@ -152,6 +155,22 @@ impl TransactionKind { }; TransactionKind::RevealPk(data) } + "tx_deactivate_validator" => { + let data = if let Ok(data) = Address::try_from_slice(data) { + Some(data) + } else { + None + }; + TransactionKind::DeactivateValidator(data) + } + "tx_reactivate_validator" => { + let data = if let Ok(data) = Address::try_from_slice(data) { + Some(data) + } else { + None + }; + TransactionKind::ReactivateValidator(data) + } "tx_ibc" => { let data = if let Ok(data) = namada_ibc::decode_message::(data) diff --git a/shared/src/validator.rs b/shared/src/validator.rs index eead4e117..816190421 100644 --- a/shared/src/validator.rs +++ b/shared/src/validator.rs @@ -43,6 +43,19 @@ pub struct ValidatorSet { pub epoch: Epoch, } +impl ValidatorSet { + pub fn union(&self, validator_set: &ValidatorSet) -> Self { + ValidatorSet { + validators: self + .validators + .union(&validator_set.validators) + .cloned() + .collect::>(), + epoch: self.epoch, + } + } +} + #[derive(Debug, Clone, Hash, PartialEq, Eq)] pub struct Validator { pub address: Id, From dddd8a89915f2e8da81a2965ab2eb870f766339f Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Fri, 13 Dec 2024 11:46:18 +0100 Subject: [PATCH 05/29] return cargo tag --- swagger.yml | 2 ++ webserver/src/app.rs | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/swagger.yml b/swagger.yml index 27bc4e8b6..7c5bebef8 100644 --- a/swagger.yml +++ b/swagger.yml @@ -8,6 +8,8 @@ info: url: https://github.com/anoma/namada-indexer servers: - url: http://localhost:5001 + - url: https://namada-rpc.mandragora.io + - url: https://indexer.namada.tududes.com:443 paths: /health: get: diff --git a/webserver/src/app.rs b/webserver/src/app.rs index 5e5ae6d42..734ba4c17 100644 --- a/webserver/src/app.rs +++ b/webserver/src/app.rs @@ -151,7 +151,7 @@ impl ApplicationServer { .nest("/api/v1", routes) .merge(Router::new().route( "/health", - get(|| async { env!("VERGEN_GIT_SHA").to_string() }), + get(|| async { json!({"commit": env!("VERGEN_GIT_SHA").to_string(), "version": env!("CARGO_PKG_VERSION") }).to_string() }), )) .layer( ServiceBuilder::new() From 365a92b89fea422ef72358d1b35c77e62101b88a Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Tue, 10 Dec 2024 13:41:33 +0100 Subject: [PATCH 06/29] improve logic --- chain/src/main.rs | 11 +++- chain/src/repository/pos.rs | 31 ++++++++- .../up.sql | 3 +- .../down.sql | 1 + .../2024-12-10-110059_validator_states/up.sql | 4 ++ orm/src/schema.rs | 66 +++++++++++++++---- orm/src/transactions.rs | 46 ++++++------- orm/src/validators.rs | 14 ++++ pos/src/main.rs | 24 +------ pos/src/repository/pos.rs | 33 +--------- pos/src/services/namada.rs | 64 +++++++----------- shared/src/block.rs | 34 +++++++++- shared/src/transaction.rs | 9 +++ shared/src/validator.rs | 9 +++ webserver/src/response/pos.rs | 6 ++ webserver/src/response/transaction.rs | 56 +++++++--------- 16 files changed, 244 insertions(+), 167 deletions(-) create mode 100644 orm/migrations/2024-12-10-110059_validator_states/down.sql create mode 100644 orm/migrations/2024-12-10-110059_validator_states/up.sql diff --git a/chain/src/main.rs b/chain/src/main.rs index 5ef4d183e..b4292517e 100644 --- a/chain/src/main.rs +++ b/chain/src/main.rs @@ -200,12 +200,15 @@ async fn crawling_fn( proposals_votes.len() ); - let validators = block.validators(); + let validators = block.new_validators(); let validator_set = ValidatorSet { validators: validators.clone(), epoch, }; + let validators_state_change = block.update_validators_state(); 
+ tracing::debug!("Updating {} validators state", validators_state_change.len()); + let addresses = block.bond_addresses(); let bonds = query_bonds(&client, addresses).await.into_rpc_error()?; tracing::debug!( @@ -270,6 +273,7 @@ async fn crawling_fn( withdraws = withdraw_addreses.len(), claimed_rewards = reward_claimers.len(), revealed_pks = revealed_pks.len(), + validator_state = validators_state_change.len(), epoch = epoch, first_block_in_epoch = first_block_in_epoch, block = block_height, @@ -304,6 +308,11 @@ async fn crawling_fn( validator_set, )?; + repository::pos::upsert_validator_state( + transaction_conn, + validators_state_change, + )?; + // We first remove all the bonds and then insert the new ones repository::pos::clear_bonds( transaction_conn, diff --git a/chain/src/repository/pos.rs b/chain/src/repository/pos.rs index 0960c229e..f85824e05 100644 --- a/chain/src/repository/pos.rs +++ b/chain/src/repository/pos.rs @@ -10,14 +10,17 @@ use orm::bond::BondInsertDb; use orm::schema::{bonds, pos_rewards, unbonds, validators}; use orm::unbond::UnbondInsertDb; use orm::validators::{ - ValidatorDb, ValidatorUpdateMetadataDb, ValidatorWithMetaInsertDb, + ValidatorDb, ValidatorStateDb, ValidatorUpdateMetadataDb, + ValidatorWithMetaInsertDb, }; use shared::block::Epoch; use shared::bond::Bonds; use shared::id::Id; use shared::tuple_len::TupleLen; use shared::unbond::{UnbondAddresses, Unbonds}; -use shared::validator::{ValidatorMetadataChange, ValidatorSet}; +use shared::validator::{ + ValidatorMetadataChange, ValidatorSet, ValidatorStateChange, +}; use super::utils::MAX_PARAM_SIZE; @@ -250,6 +253,30 @@ pub fn update_validator_metadata( anyhow::Ok(()) } +pub fn upsert_validator_state( + transaction_conn: &mut PgConnection, + validators_states: HashSet, +) -> anyhow::Result<()> { + for change in validators_states { + let state = ValidatorStateDb::from(change.state); + let validator_address = change.address.to_string(); + + diesel::update( + validators::table.filter( + validators::columns::namada_address.eq(validator_address), + ), + ) + .set(validators::columns::state.eq(state)) + .execute(transaction_conn) + .context(format!( + "Failed to update validator state for {}", + change.address + ))?; + } + + Ok(()) +} + pub fn upsert_validators( transaction_conn: &mut PgConnection, validators_set: ValidatorSet, diff --git a/orm/migrations/2024-12-10-104502_transaction_types/up.sql b/orm/migrations/2024-12-10-104502_transaction_types/up.sql index 8adc085d4..6656ce277 100644 --- a/orm/migrations/2024-12-10-104502_transaction_types/up.sql +++ b/orm/migrations/2024-12-10-104502_transaction_types/up.sql @@ -1,3 +1,4 @@ -- Your SQL goes here ALTER TYPE TRANSACTION_KIND ADD VALUE 'reactivate_validator'; -ALTER TYPE TRANSACTION_KIND ADD VALUE 'deactivate_validator'; \ No newline at end of file +ALTER TYPE TRANSACTION_KIND ADD VALUE 'deactivate_validator'; +ALTER TYPE TRANSACTION_KIND ADD VALUE 'unjail_validator'; \ No newline at end of file diff --git a/orm/migrations/2024-12-10-110059_validator_states/down.sql b/orm/migrations/2024-12-10-110059_validator_states/down.sql new file mode 100644 index 000000000..d9a93fe9a --- /dev/null +++ b/orm/migrations/2024-12-10-110059_validator_states/down.sql @@ -0,0 +1 @@ +-- This file should undo anything in `up.sql` diff --git a/orm/migrations/2024-12-10-110059_validator_states/up.sql b/orm/migrations/2024-12-10-110059_validator_states/up.sql new file mode 100644 index 000000000..ef893141f --- /dev/null +++ 
b/orm/migrations/2024-12-10-110059_validator_states/up.sql @@ -0,0 +1,4 @@ +-- Your SQL goes here +ALTER TYPE VALIDATOR_STATE ADD VALUE 'deactivating'; +ALTER TYPE VALIDATOR_STATE ADD VALUE 'reactivating'; +ALTER TYPE VALIDATOR_STATE ADD VALUE 'unjailing'; \ No newline at end of file diff --git a/orm/src/schema.rs b/orm/src/schema.rs index 942414874..0fb2d3085 100644 --- a/orm/src/schema.rs +++ b/orm/src/schema.rs @@ -1,47 +1,91 @@ // @generated automatically by Diesel CLI. pub mod sql_types { - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "crawler_name"))] pub struct CrawlerName; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_kind"))] pub struct GovernanceKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_result"))] pub struct GovernanceResult; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_tally_type"))] pub struct GovernanceTallyType; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "payment_kind"))] pub struct PaymentKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "payment_recurrence"))] pub struct PaymentRecurrence; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "token_type"))] pub struct TokenType; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "transaction_kind"))] pub struct TransactionKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "transaction_result"))] pub struct TransactionResult; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "validator_state"))] pub struct ValidatorState; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "vote_kind"))] pub struct VoteKind; } diff --git a/orm/src/transactions.rs b/orm/src/transactions.rs index ca0f89d9e..db64448cc 100644 --- 
a/orm/src/transactions.rs +++ b/orm/src/transactions.rs @@ -28,6 +28,7 @@ pub enum TransactionKindDb { BecomeValidator, ReactivateValidator, DeactivateValidator, + UnjailValidator, Unknown, } @@ -35,34 +36,29 @@ impl From for TransactionKindDb { fn from(value: TransactionKind) -> Self { match value { TransactionKind::TransparentTransfer(_) => { - TransactionKindDb::TransparentTransfer + Self::TransparentTransfer } - TransactionKind::ShieldedTransfer(_) => { - TransactionKindDb::ShieldedTransfer + TransactionKind::ShieldedTransfer(_) => Self::ShieldedTransfer, + TransactionKind::IbcMsgTransfer(_) => Self::IbcMsgTransfer, + TransactionKind::Bond(_) => Self::Bond, + TransactionKind::Redelegation(_) => Self::Redelegation, + TransactionKind::Unbond(_) => Self::Unbond, + TransactionKind::Withdraw(_) => Self::Withdraw, + TransactionKind::ClaimRewards(_) => Self::ClaimRewards, + TransactionKind::ProposalVote(_) => Self::VoteProposal, + TransactionKind::InitProposal(_) => Self::InitProposal, + TransactionKind::MetadataChange(_) => Self::ChangeMetadata, + TransactionKind::CommissionChange(_) => Self::ChangeCommission, + TransactionKind::DeactivateValidator(_) => { + Self::DeactivateValidator } - TransactionKind::IbcMsgTransfer(_) => { - TransactionKindDb::IbcMsgTransfer + TransactionKind::ReactivateValidator(_) => { + Self::ReactivateValidator } - TransactionKind::Bond(_) => TransactionKindDb::Bond, - TransactionKind::Redelegation(_) => TransactionKindDb::Redelegation, - TransactionKind::Unbond(_) => TransactionKindDb::Unbond, - TransactionKind::Withdraw(_) => TransactionKindDb::Withdraw, - TransactionKind::ClaimRewards(_) => TransactionKindDb::ClaimRewards, - TransactionKind::ProposalVote(_) => TransactionKindDb::VoteProposal, - TransactionKind::InitProposal(_) => TransactionKindDb::InitProposal, - TransactionKind::MetadataChange(_) => { - TransactionKindDb::ChangeMetadata - } - TransactionKind::CommissionChange(_) => { - TransactionKindDb::ChangeCommission - } - TransactionKind::DeactivateValidator(_) => TransactionKindDb::DeactivateValidator, - TransactionKind::ReactivateValidator(_) => TransactionKindDb::ReactivateValidator, - TransactionKind::RevealPk(_) => TransactionKindDb::RevealPk, - TransactionKind::BecomeValidator(_) => { - TransactionKindDb::BecomeValidator - } - TransactionKind::Unknown => TransactionKindDb::Unknown, + TransactionKind::RevealPk(_) => Self::RevealPk, + TransactionKind::BecomeValidator(_) => Self::BecomeValidator, + TransactionKind::UnjailValidator(_) => Self::UnjailValidator, + TransactionKind::Unknown => Self::Unknown, } } } diff --git a/orm/src/validators.rs b/orm/src/validators.rs index 79027c0dc..395325a95 100644 --- a/orm/src/validators.rs +++ b/orm/src/validators.rs @@ -28,6 +28,9 @@ pub enum ValidatorStateDb { BelowThreshold, Inactive, Jailed, + Deactivating, + Reactivating, + Unjailing, Unknown, } @@ -39,6 +42,9 @@ impl From for ValidatorStateDb { ValidatorState::BelowThreshold => Self::BelowThreshold, ValidatorState::Inactive => Self::Inactive, ValidatorState::Jailed => Self::Jailed, + ValidatorState::Deactivating => Self::Deactivating, + ValidatorState::Reactivating => Self::Reactivating, + ValidatorState::Unjailing => Self::Unjailing, ValidatorState::Unknown => Self::Unknown, } } @@ -90,6 +96,14 @@ pub struct ValidatorWithMetaInsertDb { pub state: ValidatorStateDb, } +#[derive(Serialize, Insertable, Clone)] +#[diesel(table_name = validators)] +#[diesel(check_for_backend(diesel::pg::Pg))] +pub struct ValidatorStateChangeDb { + pub namada_address: String, + pub 
state: ValidatorStateDb, +} + #[derive(Serialize, AsChangeset, Clone)] #[diesel(table_name = validators)] #[diesel(check_for_backend(diesel::pg::Pg))] diff --git a/pos/src/main.rs b/pos/src/main.rs index f85c4f5a0..76f7842c1 100644 --- a/pos/src/main.rs +++ b/pos/src/main.rs @@ -72,30 +72,10 @@ async fn crawling_fn( .await .into_rpc_error()?; - let missing_validators = - namada_service::get_missing_validators_state_from_db( - &conn, - validators_set.validators.clone(), - ) - .await; - let missing_validator_set = namada_service::get_validators_state( - &client, - missing_validators, - epoch_to_process, - ) - .await - .into_rpc_error()?; - - let complete_validators_set = validators_set.union(&missing_validator_set); - tracing::info!( - "Processing epoch {} with {} validators in the consensus set and {} \ - missing (total {})...", + "Processing epoch {} with {} validators...", epoch_to_process, - validators_set.validators.len(), - missing_validator_set.validators.len(), validators_set.validators.len() - + missing_validator_set.validators.len() ); let timestamp = DateTimeUtc::now().0.timestamp(); @@ -110,7 +90,7 @@ async fn crawling_fn( conn.build_transaction() .read_write() .run(|transaction_conn| { - let validators_dbo = &complete_validators_set + let validators_dbo = &validators_set .validators .into_iter() .map(ValidatorInsertDb::from_validator) diff --git a/pos/src/repository/pos.rs b/pos/src/repository/pos.rs index dd4a5ee86..2a644ac2c 100644 --- a/pos/src/repository/pos.rs +++ b/pos/src/repository/pos.rs @@ -1,16 +1,8 @@ -use std::collections::HashSet; - use anyhow::Context; -use deadpool_diesel::postgres::Object; -use diesel::dsl::not; use diesel::upsert::excluded; -use diesel::{ - ExpressionMethods, PgConnection, QueryDsl, RunQueryDsl, SelectableHelper, -}; +use diesel::{ExpressionMethods, PgConnection, RunQueryDsl}; use orm::schema::validators; -use orm::validators::{ValidatorDb, ValidatorInsertDb}; -use shared::error::ContextDbInteractError; -use shared::validator::Validator; +use orm::validators::ValidatorInsertDb; pub fn upsert_validators( transaction_conn: &mut PgConnection, @@ -27,29 +19,10 @@ pub fn upsert_validators( .eq(excluded(validators::columns::max_commission)), validators::columns::commission .eq(excluded(validators::columns::commission)), + validators::columns::state.eq(excluded(validators::columns::state)), )) .execute(transaction_conn) .context("Failed to update validators in db")?; Ok(()) } - -pub async fn get_missing_validators( - conn: &Object, - validators: HashSet, -) -> anyhow::Result> { - conn.interact(move |conn| { - validators::table - .filter(not(validators::dsl::namada_address.eq_any( - validators - .into_iter() - .map(|validator| validator.address.to_owned().to_string()) - .collect::>(), - ))) - .select(ValidatorDb::as_select()) - .load(conn) - }) - .await - .context_db_interact_error()? 
- .context("Failed to read validator state from the db") -} diff --git a/pos/src/services/namada.rs b/pos/src/services/namada.rs index b112ee1b8..d3fa7af7c 100644 --- a/pos/src/services/namada.rs +++ b/pos/src/services/namada.rs @@ -1,19 +1,15 @@ use std::collections::HashSet; use anyhow::Context; -use deadpool_diesel::postgres::Object; use futures::{StreamExt, TryStreamExt}; use namada_core::chain::Epoch as NamadaSdkEpoch; use namada_sdk::address::Address; use namada_sdk::rpc; -use orm::validators::ValidatorStateDb; use shared::block::Epoch; use shared::id::Id; use shared::validator::{Validator, ValidatorSet, ValidatorState}; use tendermint_rpc::HttpClient; -use crate::repository::pos::get_missing_validators; - pub async fn get_validator_set_at_epoch( client: &HttpClient, epoch: Epoch, @@ -115,12 +111,35 @@ pub async fn get_validators_state( .with_context(|| { format!("Failed to query validator {validator_address} state") })?; + let validator_state = validator_state .0 .map(ValidatorState::from) .unwrap_or(ValidatorState::Unknown); - validator.state = validator_state; + let from_unjailing_state = + validator.state.eq(&ValidatorState::Unjailing) + && !validator_state.eq(&ValidatorState::Jailed); + let from_deactivating_state = + validator.state.eq(&ValidatorState::Deactivating) + && validator_state.eq(&ValidatorState::Inactive); + let from_reactivating_state = + validator.state.eq(&ValidatorState::Reactivating) + && !validator_state.eq(&ValidatorState::Inactive); + let from_concrete_state = ![ + ValidatorState::Deactivating, + ValidatorState::Reactivating, + ValidatorState::Unjailing, + ] + .contains(&validator.state); + + if from_unjailing_state + || from_deactivating_state + || from_reactivating_state + || from_concrete_state + { + validator.state = validator_state; + } anyhow::Ok(validator) }) @@ -139,41 +158,6 @@ pub async fn get_current_epoch(client: &HttpClient) -> anyhow::Result { Ok(epoch.0 as Epoch) } -pub async fn get_missing_validators_state_from_db( - conn: &Object, - validators: HashSet, -) -> Vec { - get_missing_validators(conn, validators) - .await - .unwrap_or_default() - .into_iter() - .map(|validator| Validator { - address: Id::Account(validator.namada_address), - voting_power: validator.voting_power.to_string(), - max_commission: validator.max_commission, - commission: validator.commission, - name: validator.name, - email: validator.email, - description: validator.description, - website: validator.website, - discord_handler: validator.discord_handle, - avatar: validator.avatar, - state: match validator.state { - ValidatorStateDb::Consensus => ValidatorState::Consensus, - ValidatorStateDb::BelowCapacity => { - ValidatorState::BelowCapacity - } - ValidatorStateDb::BelowThreshold => { - ValidatorState::BelowThreshold - } - ValidatorStateDb::Inactive => ValidatorState::Inactive, - ValidatorStateDb::Jailed => ValidatorState::Jailed, - ValidatorStateDb::Unknown => ValidatorState::Unknown, - }, - }) - .collect() -} - fn to_epoch(epoch: u32) -> NamadaSdkEpoch { NamadaSdkEpoch::from(epoch as u64) } diff --git a/shared/src/block.rs b/shared/src/block.rs index 3097c4348..d9a4ba09c 100644 --- a/shared/src/block.rs +++ b/shared/src/block.rs @@ -24,7 +24,9 @@ use crate::transaction::{ }; use crate::unbond::UnbondAddresses; use crate::utils::BalanceChange; -use crate::validator::{Validator, ValidatorMetadataChange, ValidatorState}; +use crate::validator::{ + Validator, ValidatorMetadataChange, ValidatorState, ValidatorStateChange, +}; use crate::vote::GovernanceVote; pub type Epoch = 
u32; @@ -506,7 +508,7 @@ impl Block { Some(recv_msg) } - pub fn validators(&self) -> HashSet { + pub fn new_validators(&self) -> HashSet { self.transactions .iter() .flat_map(|(_, txs)| txs) @@ -538,6 +540,34 @@ impl Block { .collect() } + pub fn update_validators_state(&self) -> HashSet { + self.transactions + .iter() + .flat_map(|(_, txs)| txs) + .filter(|tx| { + tx.data.is_some() + && tx.exit_code == TransactionExitStatus::Applied + }) + .filter_map(|tx| match &tx.kind { + TransactionKind::DeactivateValidator(data) => { + let data = data.clone().unwrap(); + Some(ValidatorStateChange { + address: Id::from(data), + state: ValidatorState::Deactivating, + }) + } + TransactionKind::ReactivateValidator(data) => { + let data = data.clone().unwrap(); + Some(ValidatorStateChange { + address: Id::from(data), + state: ValidatorState::Reactivating, + }) + } + _ => None, + }) + .collect() + } + pub fn bond_addresses(&self) -> HashSet { self.transactions .iter() diff --git a/shared/src/transaction.rs b/shared/src/transaction.rs index ebc5b5239..d86ca4ba7 100644 --- a/shared/src/transaction.rs +++ b/shared/src/transaction.rs @@ -51,6 +51,7 @@ pub enum TransactionKind { BecomeValidator(Option>), ReactivateValidator(Option
<Address>),
     DeactivateValidator(Option<Address>
),
+    UnjailValidator(Option<Address>
), Unknown, } @@ -182,6 +183,14 @@ impl TransactionKind { }; TransactionKind::IbcMsgTransfer(data.map(IbcMessage)) } + "tx_unjail_validator" => { + let data = if let Ok(data) = Address::try_from_slice(data) { + Some(data) + } else { + None + }; + TransactionKind::UnjailValidator(data) + } "tx_become_validator" => { let data = if let Ok(data) = BecomeValidator::try_from_slice(data) { diff --git a/shared/src/validator.rs b/shared/src/validator.rs index 816190421..da5e6a7d9 100644 --- a/shared/src/validator.rs +++ b/shared/src/validator.rs @@ -18,6 +18,9 @@ pub enum ValidatorState { BelowThreshold, Inactive, Jailed, + Deactivating, + Reactivating, + Unjailing, Unknown, } @@ -83,6 +86,12 @@ pub struct ValidatorMetadataChange { pub avatar: Option, } +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct ValidatorStateChange { + pub address: Id, + pub state: ValidatorState, +} + impl Validator { pub fn fake() -> Self { let address = diff --git a/webserver/src/response/pos.rs b/webserver/src/response/pos.rs index 6073ad177..a07eb8d53 100644 --- a/webserver/src/response/pos.rs +++ b/webserver/src/response/pos.rs @@ -16,6 +16,9 @@ pub enum ValidatorState { BelowThreshold, Inactive, Jailed, + Deactivating, + Reactivating, + Unjailing, Unknown, } @@ -27,6 +30,9 @@ impl From for ValidatorState { ValidatorStateDb::BelowThreshold => Self::BelowThreshold, ValidatorStateDb::Inactive => Self::Inactive, ValidatorStateDb::Jailed => Self::Jailed, + ValidatorStateDb::Deactivating => Self::Deactivating, + ValidatorStateDb::Reactivating => Self::Reactivating, + ValidatorStateDb::Unjailing => Self::Unjailing, ValidatorStateDb::Unknown => Self::Unknown, } } diff --git a/webserver/src/response/transaction.rs b/webserver/src/response/transaction.rs index 56d9b9b01..bcef64df0 100644 --- a/webserver/src/response/transaction.rs +++ b/webserver/src/response/transaction.rs @@ -29,6 +29,9 @@ pub enum TransactionKind { RevealPk, IbcMsgTransfer, BecomeValidator, + DeactivateValidator, + ReactivateValidator, + UnjailValidator, Unknown, } @@ -90,39 +93,26 @@ impl From for TransactionResult { impl From for TransactionKind { fn from(value: TransactionKindDb) -> Self { match value { - TransactionKindDb::TransparentTransfer => { - TransactionKind::TransparentTransfer - } - TransactionKindDb::ShieldedTransfer => { - TransactionKind::ShieldedTransfer - } - TransactionKindDb::ShieldingTransfer => { - TransactionKind::ShieldingTransfer - } - TransactionKindDb::UnshieldingTransfer => { - TransactionKind::UnshieldingTransfer - } - TransactionKindDb::Bond => TransactionKind::Bond, - TransactionKindDb::Redelegation => TransactionKind::Redelegation, - TransactionKindDb::Unbond => TransactionKind::Unbond, - TransactionKindDb::Withdraw => TransactionKind::Withdraw, - TransactionKindDb::ClaimRewards => TransactionKind::ClaimRewards, - TransactionKindDb::VoteProposal => TransactionKind::VoteProposal, - TransactionKindDb::InitProposal => TransactionKind::InitProposal, - TransactionKindDb::ChangeMetadata => { - TransactionKind::ChangeMetadata - } - TransactionKindDb::ChangeCommission => { - TransactionKind::ChangeCommission - } - TransactionKindDb::RevealPk => TransactionKind::RevealPk, - TransactionKindDb::Unknown => TransactionKind::Unknown, - TransactionKindDb::IbcMsgTransfer => { - TransactionKind::IbcMsgTransfer - } - TransactionKindDb::BecomeValidator => { - TransactionKind::BecomeValidator - } + TransactionKindDb::TransparentTransfer => Self::TransparentTransfer, + TransactionKindDb::ShieldedTransfer => Self::ShieldedTransfer, + 
TransactionKindDb::ShieldingTransfer => Self::ShieldingTransfer, + TransactionKindDb::UnshieldingTransfer => Self::UnshieldingTransfer, + TransactionKindDb::Bond => Self::Bond, + TransactionKindDb::Redelegation => Self::Redelegation, + TransactionKindDb::Unbond => Self::Unbond, + TransactionKindDb::Withdraw => Self::Withdraw, + TransactionKindDb::ClaimRewards => Self::ClaimRewards, + TransactionKindDb::VoteProposal => Self::VoteProposal, + TransactionKindDb::InitProposal => Self::InitProposal, + TransactionKindDb::ChangeMetadata => Self::ChangeMetadata, + TransactionKindDb::ChangeCommission => Self::ChangeCommission, + TransactionKindDb::RevealPk => Self::RevealPk, + TransactionKindDb::Unknown => Self::Unknown, + TransactionKindDb::IbcMsgTransfer => Self::IbcMsgTransfer, + TransactionKindDb::BecomeValidator => Self::BecomeValidator, + TransactionKindDb::ReactivateValidator => Self::ReactivateValidator, + TransactionKindDb::DeactivateValidator => Self::DeactivateValidator, + TransactionKindDb::UnjailValidator => Self::UnjailValidator, } } } From 838dff1dde64915f8c2eb1b3e29bfcc9718b3db6 Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Fri, 13 Dec 2024 14:17:58 +0100 Subject: [PATCH 07/29] update swagger --- swagger.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/swagger.yml b/swagger.yml index 27bc4e8b6..440c117e9 100644 --- a/swagger.yml +++ b/swagger.yml @@ -682,7 +682,7 @@ components: $ref: '#/components/schemas/ValidatorStatus' ValidatorStatus: type: string - enum: [consensus, belowCapacity, belowThreshold, inactive, jailed, unknown] + enum: [consensus, belowCapacity, belowThreshold, inactive, jailed, unknown, unjailing, deactivating, reactivating] Proposal: type: object required: [id, content, type, author, startEpoch, endEpoch, activationEpoch, startTime, endTime, currentTime, activationTime, status, yayVotes, nayVotes, abstainVotes, tallyType] @@ -922,7 +922,7 @@ components: type: string kind: type: string - enum: ["transparentTransfer", "shieldedTransfer", "shieldingTransfer", "unshieldingTransfer", "bond", "redelegation", "unbond", "withdraw", "claimRewards", "voteProposal", "initProposal", "changeMetadata", "changeCommission", "revealPk", "unknown"] + enum: ["transparentTransfer", "shieldedTransfer", "shieldingTransfer", "unshieldingTransfer", "bond", "redelegation", "unbond", "withdraw", "claimRewards", "voteProposal", "initProposal", "changeMetadata", "changeCommission", "revealPk", "deactivateValidator", "reactivateValidator", "unjailValidator", "unknown"] exitCode: type: string enum: [applied, rejected] From fb3108398e8ca1888b27ede07e684df3b0024208 Mon Sep 17 00:00:00 2001 From: Joel Nordell Date: Thu, 12 Dec 2024 16:32:36 -0600 Subject: [PATCH 08/29] feat: add block proposer to addresses_with_balance_change --- chain/src/main.rs | 33 +++++++++++++++++++++++++++------ chain/src/services/namada.rs | 36 +++++++++++++++++++++++++++++++++++- shared/src/block.rs | 4 ++-- 3 files changed, 64 insertions(+), 9 deletions(-) diff --git a/chain/src/main.rs b/chain/src/main.rs index 5ef4d183e..2e0d92081 100644 --- a/chain/src/main.rs +++ b/chain/src/main.rs @@ -1,3 +1,4 @@ +use std::collections::HashSet; use std::convert::identity; use std::sync::Arc; @@ -164,16 +165,36 @@ async fn crawling_fn( .map(Token::Ibc) .collect::>(); - let addresses = block.addresses_with_balance_change(native_token); + let addresses = block.addresses_with_balance_change(&native_token); + + let block_proposer_address = namada_service::get_block_proposer_address( + &client, + 
&block, + &native_token, + ) + .await + .into_rpc_error()?; + + let all_balance_changed_addresses = addresses + .iter() + .chain(block_proposer_address.iter()) + .cloned() + .collect::>(); + + let balances = namada_service::query_balance( + &client, + &all_balance_changed_addresses, + block_height, + ) + .await + .into_rpc_error()?; - let balances = - namada_service::query_balance(&client, &addresses, block_height) - .await - .into_rpc_error()?; tracing::debug!( block = block_height, + addresses = addresses.len(), + block_proposer_address = block_proposer_address.len(), "Updating balance for {} addresses...", - addresses.len() + all_balance_changed_addresses.len() ); let next_governance_proposal_id = diff --git a/chain/src/services/namada.rs b/chain/src/services/namada.rs index 1ef4774c8..05b6a9795 100644 --- a/chain/src/services/namada.rs +++ b/chain/src/services/namada.rs @@ -19,7 +19,7 @@ use namada_sdk::state::Key; use namada_sdk::token::Amount as NamadaSdkAmount; use namada_sdk::{borsh, rpc, token}; use shared::balance::{Amount, Balance, Balances}; -use shared::block::{BlockHeight, Epoch}; +use shared::block::{Block, BlockHeight, Epoch}; use shared::bond::{Bond, BondAddresses, Bonds}; use shared::id::Id; use shared::proposal::{GovernanceProposal, TallyType}; @@ -693,6 +693,40 @@ pub async fn get_validator_set_at_epoch( Ok(ValidatorSet { validators, epoch }) } +pub async fn get_block_proposer_address( + client: &HttpClient, + block: &Block, + native_token: &Id, +) -> anyhow::Result> { + let validator = RPC + .vp() + .pos() + .validator_by_tm_addr( + client, + &block.header.proposer_address.to_uppercase(), + ) + .await?; + + tracing::debug!( + block = block.header.height, + native_token = native_token.to_string(), + proposer_address = block.header.proposer_address, + namada_address = ?validator, + "Got block proposer address" + ); + + match validator { + Some(validator) => { + let balance_change = BalanceChange { + address: Id::from(validator), + token: Token::Native(native_token.clone()), + }; + Ok(std::iter::once(balance_change).collect()) + } + None => Ok(HashSet::new()), + } +} + pub async fn query_pipeline_length(client: &HttpClient) -> anyhow::Result { let pos_parameters = rpc::get_pos_params(client) .await diff --git a/shared/src/block.rs b/shared/src/block.rs index 3097c4348..7827455e2 100644 --- a/shared/src/block.rs +++ b/shared/src/block.rs @@ -339,7 +339,7 @@ impl Block { // TODO: move this and process_inner_tx_for_balance to a separate module pub fn addresses_with_balance_change( &self, - native_token: Id, + native_token: &Id, ) -> HashSet { self.transactions .iter() @@ -347,7 +347,7 @@ impl Block { let mut balance_changes: Vec = inners_txs .iter() .filter_map(|tx| { - self.process_inner_tx_for_balance(tx, &native_token) + self.process_inner_tx_for_balance(tx, native_token) }) .flatten() .collect(); From 8e7893f604b082e127b62e3a5ae3d7758e45817b Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Mon, 16 Dec 2024 13:33:04 +0100 Subject: [PATCH 09/29] fetch rewards in batches --- chain/src/services/namada.rs | 12 ++-- governance/src/services/namada.rs | 2 +- pos/src/services/namada.rs | 4 +- rewards/src/main.rs | 2 +- rewards/src/services/namada.rs | 96 +++++++++++++++++++++++++++---- 5 files changed, 94 insertions(+), 22 deletions(-) diff --git a/chain/src/services/namada.rs b/chain/src/services/namada.rs index 1ef4774c8..1c125bcda 100644 --- a/chain/src/services/namada.rs +++ b/chain/src/services/namada.rs @@ -110,7 +110,7 @@ pub async fn query_balance( }) }) 
.map(futures::future::ready) - .buffer_unordered(20) + .buffer_unordered(32) .collect::>() .await) } @@ -440,7 +440,7 @@ pub async fn query_bonds( Some(bonds) }) .map(futures::future::ready) - .buffer_unordered(20) + .buffer_unordered(32) .collect::>() .await; @@ -513,7 +513,7 @@ pub async fn query_unbonds( } }) .map(futures::future::ready) - .buffer_unordered(20) + .buffer_unordered(32) .collect::>() .await; @@ -573,7 +573,7 @@ pub async fn query_tallies( Some((proposal, tally_type)) }) .map(futures::future::ready) - .buffer_unordered(20) + .buffer_unordered(32) .collect::>() .await; @@ -603,7 +603,7 @@ pub async fn query_all_votes( Some(votes) }) .map(futures::future::ready) - .buffer_unordered(20) + .buffer_unordered(32) .collect::>() .await; @@ -686,7 +686,7 @@ pub async fn get_validator_set_at_epoch( state: validator_state }) }) - .buffer_unordered(100) + .buffer_unordered(32) .try_collect::>() .await?; diff --git a/governance/src/services/namada.rs b/governance/src/services/namada.rs index d3849b214..3a9d48401 100644 --- a/governance/src/services/namada.rs +++ b/governance/src/services/namada.rs @@ -63,7 +63,7 @@ pub async fn get_governance_proposals_updates( } }) .map(futures::future::ready) - .buffer_unordered(20) + .buffer_unordered(32) .collect::>() .await) } diff --git a/pos/src/services/namada.rs b/pos/src/services/namada.rs index d3fa7af7c..fa18e9e4a 100644 --- a/pos/src/services/namada.rs +++ b/pos/src/services/namada.rs @@ -85,7 +85,7 @@ pub async fn get_validator_set_at_epoch( state: validator_state }) }) - .buffer_unordered(100) + .buffer_unordered(32) .try_collect::>() .await?; @@ -143,7 +143,7 @@ pub async fn get_validators_state( anyhow::Ok(validator) }) - .buffer_unordered(100) + .buffer_unordered(32) .try_collect::>() .await?; diff --git a/rewards/src/main.rs b/rewards/src/main.rs index a28e09f54..74a108c0c 100644 --- a/rewards/src/main.rs +++ b/rewards/src/main.rs @@ -81,7 +81,7 @@ async fn crawling_fn( return Err(MainError::NoAction); } - tracing::info!("Starting to update proposals..."); + tracing::info!("Starting to update pos rewards..."); // TODO: change this by querying all the pairs in the database let delegations_pairs = namada_service::query_delegation_pairs(&client) diff --git a/rewards/src/services/namada.rs b/rewards/src/services/namada.rs index af020d3fd..197e580b4 100644 --- a/rewards/src/services/namada.rs +++ b/rewards/src/services/namada.rs @@ -1,4 +1,5 @@ use std::collections::HashSet; +use std::time::Duration; use anyhow::Context; use futures::StreamExt; @@ -37,13 +38,92 @@ pub async fn query_rewards( client: &HttpClient, delegation_pairs: HashSet, ) -> anyhow::Result> { - Ok(futures::stream::iter(delegation_pairs) + let mut all_rewards: Vec = Vec::new(); + + let batches: Vec> = delegation_pairs + .clone() + .into_iter() + .collect::>() + .chunks(32) + .map(|chunk| chunk.to_vec()) + .collect(); + + tracing::info!( + "Got {} batches with a total of {} rewards to query...", + batches.len(), + delegation_pairs.len() + ); + + let results = futures::stream::iter(batches) + .map(|batch| process_batch_with_retries(client, batch)) + .buffer_unordered(3) + .collect::>() + .await; + + for result in results { + match result { + Ok(mut rewards) => all_rewards.append(&mut rewards), + Err(err) => return Err(err) + } + } + + Ok(all_rewards) +} + +pub async fn get_current_epoch(client: &HttpClient) -> anyhow::Result { + let epoch = rpc::query_epoch(client) + .await + .context("Failed to query Namada's current epoch")?; + + Ok(epoch.0 as Epoch) +} + +async fn 
process_batch_with_retries( + client: &HttpClient, + batch: Vec, +) -> anyhow::Result<Vec<Reward>> { + let mut retries = 0; + + loop { + let result = process_batch(client, batch.clone()).await; + + tracing::info!("Done batch..."); + + match result { + Ok(rewards) => return Ok(rewards), + Err(err) => { + retries += 1; + tracing::warn!( + "Batch reward failed (attempt {}/{}) - Error: {:?}", + retries, + 3, + err + ); + + if retries >= 3 { + tracing::error!( + "Batch reward failed after maximum retries." + ); + return Err(err); + } + tokio::time::sleep(Duration::from_secs(2)).await; + } + } + } +} + +async fn process_batch( + client: &HttpClient, + batch: Vec, +) -> anyhow::Result<Vec<Reward>> { + Ok(futures::stream::iter(batch) .filter_map(|delegation| async move { - tracing::info!( + tracing::debug!( "Fetching rewards {} -> {} ...", delegation.validator_address, delegation.delegator_address ); + let reward = RPC .vp() .pos() .rewards( client, &delegation.validator_address, &Some(delegation.delegator_address.clone()), ) .await .ok()?; - tracing::info!( + tracing::debug!( "Done fetching reward for {} -> {}!", delegation.validator_address, delegation.delegator_address ); Some(Reward { delegation_pair: delegation.clone(), amount: Amount::from(reward), }) }) .map(futures::future::ready) - .buffer_unordered(20) + .buffer_unordered(32) .collect::<Vec<_>>() .await) } - -pub async fn get_current_epoch(client: &HttpClient) -> anyhow::Result<Epoch> { - let epoch = rpc::query_epoch(client) - .await - .context("Failed to query Namada's current epoch")?; - - Ok(epoch.0 as Epoch) -} From 051b57a0b5a4eb0617a1da3f535c5820d0433ed6 Mon Sep 17 00:00:00 2001 From: ruslanglaznyov Date: Mon, 16 Dec 2024 16:08:41 +0300 Subject: [PATCH 10/29] fix: insert rewards in batch --- rewards/src/repository/mod.rs | 1 + rewards/src/repository/pos_rewards.rs | 24 +++++++++++++++++++++++- rewards/src/repository/utils.rs | 4 ++++ 3 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 rewards/src/repository/utils.rs diff --git a/rewards/src/repository/mod.rs b/rewards/src/repository/mod.rs index 9c4ae7b13..399a24fdb 100644 --- a/rewards/src/repository/mod.rs +++ b/rewards/src/repository/mod.rs @@ -1,2 +1,3 @@ pub mod crawler_state; pub mod pos_rewards; +mod utils; diff --git a/rewards/src/repository/pos_rewards.rs b/rewards/src/repository/pos_rewards.rs index 1987926f5..255715c44 100644 --- a/rewards/src/repository/pos_rewards.rs +++ b/rewards/src/repository/pos_rewards.rs @@ -1,12 +1,33 @@ +use anyhow::Context; use diesel::upsert::excluded; use diesel::{ExpressionMethods, PgConnection, QueryDsl, RunQueryDsl}; use orm::pos_rewards::PosRewardInsertDb; use orm::schema::{pos_rewards, validators}; use shared::rewards::Reward; +use shared::tuple_len::TupleLen; + +use super::utils::MAX_PARAM_SIZE; pub fn upsert_rewards( transaction_conn: &mut PgConnection, rewards: Vec<Reward>, +) -> anyhow::Result<()> { + let rewards_col_count = pos_rewards::all_columns.len() as i64; + + for chunk in rewards + .into_iter() + .collect::<Vec<_>>() + .chunks((MAX_PARAM_SIZE as i64 / rewards_col_count) as usize) + { + upsert_rewards_chunk(transaction_conn, chunk.to_vec())?; + } + + anyhow::Ok(()) +} + +fn upsert_rewards_chunk( + transaction_conn: &mut PgConnection, + rewards: Vec<Reward>, ) -> anyhow::Result<()> { diesel::insert_into(pos_rewards::table) .values::<Vec<PosRewardInsertDb>>( rewards .into_iter() .map(PosRewardInsertDb::from) .collect::<Vec<_>>(), ) .on_conflict(( pos_rewards::columns::owner, pos_rewards::columns::validator_id, pos_rewards::columns::epoch, )) .do_update() .set( pos_rewards::columns::raw_amount .eq(excluded(pos_rewards::columns::raw_amount)), ) - .execute(transaction_conn)?; + .execute(transaction_conn) + .context("Failed to upsert rewards in db")?; Ok(()) } diff --git a/rewards/src/repository/utils.rs b/rewards/src/repository/utils.rs new
file mode 100644 index 000000000..bd4b8ce6d --- /dev/null +++ b/rewards/src/repository/utils.rs @@ -0,0 +1,4 @@ +// Represents maximum number of parameters that we can insert into postgres in +// one go. To get the number of rows that we can insert in one chunk, we have to +// divide MAX_PARAM_SIZE by the number of columns in the given table. +pub const MAX_PARAM_SIZE: u16 = u16::MAX; From a99a85e14b65c2916ffdf6d7bfac247006952f6a Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Mon, 16 Dec 2024 14:21:23 +0100 Subject: [PATCH 11/29] minors --- rewards/src/services/namada.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rewards/src/services/namada.rs b/rewards/src/services/namada.rs index 197e580b4..ab9a42d04 100644 --- a/rewards/src/services/namada.rs +++ b/rewards/src/services/namada.rs @@ -60,6 +60,8 @@ pub async fn query_rewards( .collect::>() .await; + tracing::info!("Done fetching rewards!"); + for result in results { match result { Ok(mut rewards) => all_rewards.append(&mut rewards), @@ -87,8 +89,6 @@ async fn process_batch_with_retries( loop { let result = process_batch(client, batch.clone()).await; - tracing::info!("Done batch..."); - match result { Ok(rewards) => return Ok(rewards), Err(err) => { From 9128561a1184c9e12b91ac59e4a6a806a3f17b53 Mon Sep 17 00:00:00 2001 From: Mateusz Jasiuk Date: Mon, 16 Dec 2024 14:45:44 +0100 Subject: [PATCH 12/29] feat: print processed rewards batch --- rewards/src/services/namada.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/rewards/src/services/namada.rs b/rewards/src/services/namada.rs index ab9a42d04..06a5c225d 100644 --- a/rewards/src/services/namada.rs +++ b/rewards/src/services/namada.rs @@ -40,12 +40,13 @@ pub async fn query_rewards( ) -> anyhow::Result> { let mut all_rewards: Vec = Vec::new(); - let batches: Vec> = delegation_pairs + let batches: Vec<(usize, Vec)> = delegation_pairs .clone() .into_iter() .collect::>() .chunks(32) - .map(|chunk| chunk.to_vec()) + .enumerate() + .map(|(i, chunk)| (i, chunk.to_vec())) .collect(); tracing::info!( @@ -65,7 +66,7 @@ pub async fn query_rewards( for result in results { match result { Ok(mut rewards) => all_rewards.append(&mut rewards), - Err(err) => return Err(err) + Err(err) => return Err(err), } } @@ -82,15 +83,19 @@ pub async fn get_current_epoch(client: &HttpClient) -> anyhow::Result { async fn process_batch_with_retries( client: &HttpClient, - batch: Vec, + batch: (usize, Vec), ) -> anyhow::Result> { let mut retries = 0; + tracing::info!("Processing batch {}", batch.0); loop { - let result = process_batch(client, batch.clone()).await; + let result = process_batch(client, batch.1.clone()).await; match result { - Ok(rewards) => return Ok(rewards), + Ok(rewards) => { + tracing::info!("Batch {} done!", batch.0); + return Ok(rewards); + } Err(err) => { retries += 1; tracing::warn!( From 6588900a5781c1660ae1b3b06e66548764b81336 Mon Sep 17 00:00:00 2001 From: Joel Nordell Date: Mon, 9 Dec 2024 17:02:47 -0600 Subject: [PATCH 13/29] enhancement: store blocks while crawling transactions and chain --- chain/src/main.rs | 8 ++- chain/src/repository/balance.rs | 34 +++++++++++ chain/src/repository/block.rs | 32 +++++++++++ chain/src/repository/mod.rs | 1 + orm/Cargo.toml | 1 + .../2024-12-09-225148_init_blocks/down.sql | 9 +++ .../2024-12-09-225148_init_blocks/up.sql | 40 +++++++++++++ orm/src/blocks.rs | 56 +++++++++++++++++++ orm/src/lib.rs | 1 + orm/src/schema.rs | 14 +++++ shared/src/balance.rs | 2 +- shared/src/block.rs | 4 +- 
shared/src/id.rs | 4 +- transactions/src/main.rs | 8 ++- transactions/src/repository/block.rs | 32 +++++++++++ transactions/src/repository/mod.rs | 1 + 16 files changed, 240 insertions(+), 7 deletions(-) create mode 100644 chain/src/repository/block.rs create mode 100644 orm/migrations/2024-12-09-225148_init_blocks/down.sql create mode 100644 orm/migrations/2024-12-09-225148_init_blocks/up.sql create mode 100644 orm/src/blocks.rs create mode 100644 transactions/src/repository/block.rs diff --git a/chain/src/main.rs b/chain/src/main.rs index 4fb5f071f..6734277ed 100644 --- a/chain/src/main.rs +++ b/chain/src/main.rs @@ -142,7 +142,7 @@ async fn crawling_fn( .into_rpc_error()?; let block = Block::from( - tm_block_response, + &tm_block_response, &block_results, checksums, epoch, @@ -310,6 +310,12 @@ async fn crawling_fn( ibc_tokens, )?; + repository::block::upsert_block( + transaction_conn, + block, + tm_block_response, + )?; + repository::balance::insert_balances( transaction_conn, balances, diff --git a/chain/src/repository/balance.rs b/chain/src/repository/balance.rs index 101d08722..c09bbdb5c 100644 --- a/chain/src/repository/balance.rs +++ b/chain/src/repository/balance.rs @@ -80,10 +80,13 @@ mod tests { use namada_sdk::token::Amount as NamadaAmount; use namada_sdk::uint::MAX_SIGNED_VALUE; use orm::balances::BalanceDb; + use orm::blocks::BlockInsertDb; + use orm::schema::blocks; use orm::views::balances; use shared::balance::{Amount, Balance}; use shared::id::Id; use shared::token::IbcToken; + use std::collections::HashSet; use test_helpers::db::TestDb; use super::*; @@ -130,6 +133,8 @@ mod tests { insert_tokens(conn, vec![token.clone()])?; + seed_blocks_from_balances(conn, &vec![balance.clone()])?; + insert_balances(conn, vec![balance.clone()])?; let queried_balance = query_balance_by_address(conn, owner, token)?; @@ -175,6 +180,7 @@ mod tests { ..(balance.clone()) }; + seed_blocks_from_balances(conn, &vec![new_balance.clone()])?; insert_balances(conn, vec![new_balance])?; let queried_balance = @@ -376,6 +382,8 @@ mod tests { seed_tokens_from_balance(conn, fake_balances.clone())?; + seed_blocks_from_balances(conn, &fake_balances)?; + insert_balances(conn, fake_balances.clone())?; assert_eq!(query_all_balances(conn)?.len(), fake_balances.len()); @@ -410,6 +418,7 @@ mod tests { insert_tokens(conn, vec![token.clone()])?; + seed_blocks_from_balances(conn, &vec![balance.clone()])?; insert_balances(conn, vec![balance.clone()])?; let queried_balance = query_balance_by_address(conn, owner, token)?; @@ -442,6 +451,8 @@ mod tests { insert_tokens(conn, vec![token])?; + seed_blocks_from_balances(conn, &balances)?; + let res = insert_balances(conn, balances); assert!(res.is_ok()); @@ -475,6 +486,8 @@ mod tests { seed_tokens_from_balance(conn, balances.clone())?; + seed_blocks_from_balances(conn, &balances)?; + let res = insert_balances(conn, balances); assert!(res.is_ok()); @@ -500,12 +513,33 @@ mod tests { anyhow::Ok(()) } + fn seed_blocks_from_balances( + conn: &mut PgConnection, + balances: &Vec, + ) -> anyhow::Result<()> { + for height in balances + .into_iter() + .map(|balance| balance.height as i32) + .collect::>() + { + diesel::insert_into(blocks::table) + .values::<&BlockInsertDb>(&BlockInsertDb::fake(height)) + .on_conflict_do_nothing() + .execute(conn) + .context("Failed to insert block in db")?; + } + + anyhow::Ok(()) + } + fn seed_balance( conn: &mut PgConnection, balances: Vec, ) -> anyhow::Result<()> { seed_tokens_from_balance(conn, balances.clone())?; + seed_blocks_from_balances(conn, 
&balances)?; + diesel::insert_into(balance_changes::table) .values::<&Vec>( &balances diff --git a/chain/src/repository/block.rs b/chain/src/repository/block.rs new file mode 100644 index 000000000..5420dd939 --- /dev/null +++ b/chain/src/repository/block.rs @@ -0,0 +1,32 @@ +use anyhow::Context; +use diesel::upsert::excluded; +use diesel::{ExpressionMethods, PgConnection, RunQueryDsl}; +use orm::blocks::BlockInsertDb; +use orm::schema::blocks; +use shared::block::Block; +use tendermint_rpc::endpoint::block::Response as TendermintBlockResponse; + +pub fn upsert_block( + transaction_conn: &mut PgConnection, + block: Block, + tm_block_response: TendermintBlockResponse, +) -> anyhow::Result<()> { + diesel::insert_into(blocks::table) + .values::<&BlockInsertDb>(&BlockInsertDb::from(( + block, + tm_block_response, + ))) + .on_conflict(blocks::height) + .do_update() + .set(( + blocks::hash.eq(excluded(blocks::hash)), + blocks::app_hash.eq(excluded(blocks::app_hash)), + blocks::timestamp.eq(excluded(blocks::timestamp)), + blocks::proposer.eq(excluded(blocks::proposer)), + blocks::epoch.eq(excluded(blocks::epoch)), + )) + .execute(transaction_conn) + .context("Failed to insert block in db")?; + + anyhow::Ok(()) +} diff --git a/chain/src/repository/mod.rs b/chain/src/repository/mod.rs index efdb8fcdc..c5b0567b4 100644 --- a/chain/src/repository/mod.rs +++ b/chain/src/repository/mod.rs @@ -1,4 +1,5 @@ pub mod balance; +pub mod block; pub mod crawler_state; pub mod gov; pub mod pos; diff --git a/orm/Cargo.toml b/orm/Cargo.toml index 892fbd590..3d0234cf1 100644 --- a/orm/Cargo.toml +++ b/orm/Cargo.toml @@ -24,3 +24,4 @@ shared.workspace = true bigdecimal.workspace = true chrono.workspace = true serde_json.workspace = true +tendermint-rpc.workspace = true diff --git a/orm/migrations/2024-12-09-225148_init_blocks/down.sql b/orm/migrations/2024-12-09-225148_init_blocks/down.sql new file mode 100644 index 000000000..a57d19684 --- /dev/null +++ b/orm/migrations/2024-12-09-225148_init_blocks/down.sql @@ -0,0 +1,9 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE balance_changes + DROP CONSTRAINT fk_balance_changes_height; + +ALTER TABLE wrapper_transactions + DROP CONSTRAINT fk_wrapper_transactions_height; + +DROP TABLE IF EXISTS blocks; + diff --git a/orm/migrations/2024-12-09-225148_init_blocks/up.sql b/orm/migrations/2024-12-09-225148_init_blocks/up.sql new file mode 100644 index 000000000..2c519cce3 --- /dev/null +++ b/orm/migrations/2024-12-09-225148_init_blocks/up.sql @@ -0,0 +1,40 @@ +-- Your SQL goes here +CREATE TABLE blocks ( + height integer PRIMARY KEY, + hash VARCHAR(64), + app_hash varchar(64), + timestamp timestamp, + proposer varchar, + epoch int +); + +ALTER TABLE blocks + ADD UNIQUE (hash); + +CREATE INDEX index_blocks_epoch ON blocks (epoch); + +-- Populate null blocks for all existing wrapper_transactions and balance_changes to satisfy foreign key constraints +INSERT INTO blocks ( SELECT DISTINCT + height, + NULL::varchar AS hash, + NULL::varchar AS app_hash, + NULL::timestamp AS timestamp, + NULL::varchar AS proposer, + NULL::int AS epoch + FROM ( SELECT DISTINCT + block_height AS height + FROM + wrapper_transactions + UNION + SELECT DISTINCT + height + FROM + balance_changes)); + +-- Create foreign key constraints for wrapper_transactions and balance_changes +ALTER TABLE wrapper_transactions + ADD CONSTRAINT fk_wrapper_transactions_height FOREIGN KEY (block_height) REFERENCES blocks (height) ON DELETE RESTRICT; + +ALTER TABLE balance_changes + ADD CONSTRAINT 
fk_balance_changes_height FOREIGN KEY (height) REFERENCES blocks (height) ON DELETE RESTRICT; + diff --git a/orm/src/blocks.rs b/orm/src/blocks.rs new file mode 100644 index 000000000..e141ad175 --- /dev/null +++ b/orm/src/blocks.rs @@ -0,0 +1,56 @@ +use diesel::{Insertable, Queryable, Selectable}; +use shared::block::Block; +use tendermint_rpc::endpoint::block::Response as TendermintBlockResponse; + +use crate::schema::blocks; + +#[derive(Insertable, Clone, Queryable, Selectable, Debug)] +#[diesel(table_name = blocks)] +#[diesel(check_for_backend(diesel::pg::Pg))] +pub struct BlockInsertDb { + pub height: i32, + pub hash: String, + pub app_hash: String, + pub timestamp: chrono::NaiveDateTime, + pub proposer: String, + pub epoch: i32, +} + +pub type BlockDb = BlockInsertDb; + +impl From<(Block, TendermintBlockResponse)> for BlockInsertDb { + fn from( + (block, tm_block_response): (Block, TendermintBlockResponse), + ) -> Self { + let timestamp = chrono::DateTime::from_timestamp( + tm_block_response.block.header.time.unix_timestamp(), + 0, + ) + .expect("Invalid timestamp") + .naive_utc(); + + Self { + height: block.header.height as i32, + hash: block.hash.to_string(), + app_hash: block.header.app_hash.to_string(), + timestamp, + proposer: block.header.proposer_address, + epoch: block.epoch as i32, + } + } +} + +impl BlockInsertDb { + pub fn fake(height: i32) -> Self { + Self { + height, + hash: height.to_string(), // fake hash but ensures uniqueness with height + app_hash: "fake_app_hash".to_string(), // doesn't require uniqueness + timestamp: chrono::DateTime::from_timestamp(0, 0) + .unwrap() + .naive_utc(), + proposer: "fake_proposer".to_string(), + epoch: 0, + } + } +} diff --git a/orm/src/lib.rs b/orm/src/lib.rs index 24d01b10b..340f5ae82 100644 --- a/orm/src/lib.rs +++ b/orm/src/lib.rs @@ -1,4 +1,5 @@ pub mod balances; +pub mod blocks; pub mod bond; pub mod crawler_state; pub mod gas; diff --git a/orm/src/schema.rs b/orm/src/schema.rs index 0fb2d3085..58c232d9b 100644 --- a/orm/src/schema.rs +++ b/orm/src/schema.rs @@ -101,6 +101,19 @@ diesel::table! { } } +diesel::table! { + blocks (height) { + height -> Int4, + #[max_length = 64] + hash -> Varchar, + #[max_length = 64] + app_hash -> Varchar, + timestamp -> Timestamp, + proposer -> Varchar, + epoch -> Int4, + } +} + diesel::table! 
{ bonds (id) { id -> Int4, @@ -305,6 +318,7 @@ diesel::joinable!(unbonds -> validators (validator_id)); diesel::allow_tables_to_appear_in_same_query!( balance_changes, + blocks, bonds, chain_parameters, crawler_state, diff --git a/shared/src/balance.rs b/shared/src/balance.rs index 9912f5f95..9c96d9ba9 100644 --- a/shared/src/balance.rs +++ b/shared/src/balance.rs @@ -110,7 +110,7 @@ impl Balance { owner: Id::Account(address.to_string()), token, amount: Amount::fake(), - height: (0..10000).fake::(), + height: 0, } } } diff --git a/shared/src/block.rs b/shared/src/block.rs index 8d22d430e..2af89ec43 100644 --- a/shared/src/block.rs +++ b/shared/src/block.rs @@ -109,7 +109,7 @@ pub struct Block { impl Block { pub fn from( - block_response: TendermintBlockResponse, + block_response: &TendermintBlockResponse, block_results: &BlockResult, checksums: Checksums, epoch: Epoch, @@ -147,7 +147,7 @@ impl Block { .to_string() .to_lowercase(), timestamp: block_response.block.header.time.to_string(), - app_hash: Id::from(block_response.block.header.app_hash), + app_hash: Id::from(&block_response.block.header.app_hash), }, transactions, epoch, diff --git a/shared/src/id.rs b/shared/src/id.rs index 556cee83f..f598ca70c 100644 --- a/shared/src/id.rs +++ b/shared/src/id.rs @@ -46,8 +46,8 @@ impl From for Id { } } -impl From for Id { - fn from(value: TendermintAppHash) -> Self { +impl From<&TendermintAppHash> for Id { + fn from(value: &TendermintAppHash) -> Self { Self::Hash(value.to_string()) } } diff --git a/transactions/src/main.rs b/transactions/src/main.rs index f6bbfc65e..a3c1e3bda 100644 --- a/transactions/src/main.rs +++ b/transactions/src/main.rs @@ -15,6 +15,7 @@ use shared::error::{AsDbError, AsRpcError, ContextDbInteractError, MainError}; use tendermint_rpc::HttpClient; use transactions::app_state::AppState; use transactions::config::AppConfig; +use transactions::repository::block as block_repo; use transactions::repository::transactions as transaction_repo; use transactions::services::{ db as db_service, namada as namada_service, @@ -115,7 +116,7 @@ async fn crawling_fn( let block_results = BlockResult::from(tm_block_results_response); let block = Block::from( - tm_block_response.clone(), + &tm_block_response, &block_results, checksums, 1_u32, @@ -151,6 +152,11 @@ async fn crawling_fn( conn.build_transaction() .read_write() .run(|transaction_conn| { + block_repo::upsert_block( + transaction_conn, + block, + tm_block_response, + )?; transaction_repo::insert_wrapper_transactions( transaction_conn, wrapper_txs, diff --git a/transactions/src/repository/block.rs b/transactions/src/repository/block.rs new file mode 100644 index 000000000..5420dd939 --- /dev/null +++ b/transactions/src/repository/block.rs @@ -0,0 +1,32 @@ +use anyhow::Context; +use diesel::upsert::excluded; +use diesel::{ExpressionMethods, PgConnection, RunQueryDsl}; +use orm::blocks::BlockInsertDb; +use orm::schema::blocks; +use shared::block::Block; +use tendermint_rpc::endpoint::block::Response as TendermintBlockResponse; + +pub fn upsert_block( + transaction_conn: &mut PgConnection, + block: Block, + tm_block_response: TendermintBlockResponse, +) -> anyhow::Result<()> { + diesel::insert_into(blocks::table) + .values::<&BlockInsertDb>(&BlockInsertDb::from(( + block, + tm_block_response, + ))) + .on_conflict(blocks::height) + .do_update() + .set(( + blocks::hash.eq(excluded(blocks::hash)), + blocks::app_hash.eq(excluded(blocks::app_hash)), + blocks::timestamp.eq(excluded(blocks::timestamp)), + 
blocks::proposer.eq(excluded(blocks::proposer)), + blocks::epoch.eq(excluded(blocks::epoch)), + )) + .execute(transaction_conn) + .context("Failed to insert block in db")?; + + anyhow::Ok(()) +} diff --git a/transactions/src/repository/mod.rs b/transactions/src/repository/mod.rs index 0824d7a9c..5ae69f54d 100644 --- a/transactions/src/repository/mod.rs +++ b/transactions/src/repository/mod.rs @@ -1 +1,2 @@ +pub mod block; pub mod transactions; From daac8191e76fe8c060b97f9021ba961205dafa5e Mon Sep 17 00:00:00 2001 From: Joel Nordell Date: Mon, 16 Dec 2024 14:12:51 -0600 Subject: [PATCH 14/29] update diesel schema for blocks --- orm/src/blocks.rs | 35 ++++++++++++++++++----------------- orm/src/schema.rs | 12 +++++++----- 2 files changed, 25 insertions(+), 22 deletions(-) diff --git a/orm/src/blocks.rs b/orm/src/blocks.rs index e141ad175..7d40fae8c 100644 --- a/orm/src/blocks.rs +++ b/orm/src/blocks.rs @@ -9,11 +9,11 @@ use crate::schema::blocks; #[diesel(check_for_backend(diesel::pg::Pg))] pub struct BlockInsertDb { pub height: i32, - pub hash: String, - pub app_hash: String, - pub timestamp: chrono::NaiveDateTime, - pub proposer: String, - pub epoch: i32, + pub hash: Option<String>, + pub app_hash: Option<String>, + pub timestamp: Option<chrono::NaiveDateTime>, + pub proposer: Option<String>, + pub epoch: Option<i32>, } pub type BlockDb = BlockInsertDb; @@ -31,11 +31,11 @@ impl From<(Block, TendermintBlockResponse)> for BlockInsertDb { Self { height: block.header.height as i32, - hash: block.hash.to_string(), - app_hash: block.header.app_hash.to_string(), - timestamp, - proposer: block.header.proposer_address, - epoch: block.epoch as i32, + hash: Some(block.hash.to_string()), + app_hash: Some(block.header.app_hash.to_string()), + timestamp: Some(timestamp), + proposer: Some(block.header.proposer_address), + epoch: Some(block.epoch as i32), } } } @@ -44,13 +44,14 @@ impl BlockInsertDb { pub fn fake(height: i32) -> Self { Self { height, - hash: height.to_string(), // fake hash but ensures uniqueness with height - app_hash: "fake_app_hash".to_string(), // doesn't require uniqueness - timestamp: chrono::DateTime::from_timestamp(0, 0) - .unwrap() - .naive_utc(), - proposer: "fake_proposer".to_string(), - epoch: 0, + hash: Some(height.to_string()), /* fake hash but ensures uniqueness * with height */ + app_hash: Some("fake_app_hash".to_string()), // doesn't require uniqueness + timestamp: Some( + chrono::DateTime::from_timestamp(0, 0).unwrap().naive_utc(), + ), + proposer: Some("fake_proposer".to_string()), + epoch: Some(0), } } } diff --git a/orm/src/schema.rs b/orm/src/schema.rs index 58c232d9b..796a8d6ae 100644 --- a/orm/src/schema.rs +++ b/orm/src/schema.rs @@ -105,12 +105,12 @@ diesel::table! { blocks (height) { height -> Int4, #[max_length = 64] - hash -> Varchar, + hash -> Nullable<Varchar>, #[max_length = 64] - app_hash -> Varchar, + app_hash -> Nullable<Varchar>, - timestamp -> Timestamp, - proposer -> Varchar, - epoch -> Int4, + timestamp -> Nullable<Timestamp>, + proposer -> Nullable<Varchar>, + epoch -> Nullable<Int4>, } } @@ -308,6 +308,7 @@ diesel::table!
{ } } +diesel::joinable!(balance_changes -> blocks (height)); diesel::joinable!(balance_changes -> token (token)); diesel::joinable!(bonds -> validators (validator_id)); diesel::joinable!(governance_votes -> governance_proposals (proposal_id)); @@ -315,6 +316,7 @@ diesel::joinable!(ibc_token -> token (address)); diesel::joinable!(inner_transactions -> wrapper_transactions (wrapper_id)); diesel::joinable!(pos_rewards -> validators (validator_id)); diesel::joinable!(unbonds -> validators (validator_id)); +diesel::joinable!(wrapper_transactions -> blocks (block_height)); diesel::allow_tables_to_appear_in_same_query!( balance_changes, From 64d042a2c263d5f5002dc5d5404febb0581d1b64 Mon Sep 17 00:00:00 2001 From: Joel Nordell Date: Mon, 16 Dec 2024 14:35:42 -0600 Subject: [PATCH 15/29] upsert block before balances during initial_query --- chain/src/main.rs | 106 +++++++++++++++++++++++++-------------- transactions/src/main.rs | 5 +- 2 files changed, 70 insertions(+), 41 deletions(-) diff --git a/chain/src/main.rs b/chain/src/main.rs index 6734277ed..51a9b96c7 100644 --- a/chain/src/main.rs +++ b/chain/src/main.rs @@ -28,6 +28,7 @@ use shared::error::{AsDbError, AsRpcError, ContextDbInteractError, MainError}; use shared::id::Id; use shared::token::Token; use shared::validator::ValidatorSet; +use tendermint_rpc::endpoint::block::Response as TendermintBlockResponse; use tendermint_rpc::HttpClient; use tokio_retry::strategy::{jitter, ExponentialBackoff}; use tokio_retry::Retry; @@ -64,6 +65,7 @@ async fn main() -> Result<(), MainError> { initial_query( &client, &conn, + checksums.clone(), config.initial_query_retry_time, config.initial_query_retry_attempts, ) @@ -108,46 +110,15 @@ async fn crawling_fn( return Err(MainError::NoAction); } - tracing::debug!(block = block_height, "Query block..."); - let tm_block_response = - tendermint_service::query_raw_block_at_height(&client, block_height) - .await - .into_rpc_error()?; - tracing::debug!( - block = block_height, - "Raw block contains {} txs...", - tm_block_response.block.data.len() - ); - - tracing::debug!(block = block_height, "Query block results..."); - let tm_block_results_response = - tendermint_service::query_raw_block_results_at_height( - &client, - block_height, - ) - .await - .into_rpc_error()?; - let block_results = BlockResult::from(tm_block_results_response); - - tracing::debug!(block = block_height, "Query epoch..."); - let epoch = - namada_service::get_epoch_at_block_height(&client, block_height) - .await - .into_rpc_error()?; - tracing::debug!(block = block_height, "Query first block in epoch..."); let first_block_in_epoch = namada_service::get_first_block_in_epoch(&client) .await .into_rpc_error()?; - let block = Block::from( - &tm_block_response, - &block_results, - checksums, - epoch, - block_height, - ); + let (block, tm_block_response, epoch) = + get_block(block_height, &client, checksums).await?; + tracing::debug!( block = block_height, txs = block.transactions.len(), @@ -228,7 +199,10 @@ async fn crawling_fn( }; let validators_state_change = block.update_validators_state(); - tracing::debug!("Updating {} validators state", validators_state_change.len()); + tracing::debug!( + "Updating {} validators state", + validators_state_change.len() + ); let addresses = block.bond_addresses(); let bonds = query_bonds(&client, addresses).await.into_rpc_error()?; @@ -391,29 +365,35 @@ async fn crawling_fn( async fn initial_query( client: &HttpClient, conn: &Object, + checksums: Checksums, retry_time: u64, retry_attempts: usize, ) -> 
Result<(), MainError> { let retry_strategy = ExponentialBackoff::from_millis(retry_time) .map(jitter) .take(retry_attempts); - Retry::spawn(retry_strategy, || try_initial_query(client, conn)).await + Retry::spawn(retry_strategy, || { + try_initial_query(client, conn, checksums.clone()) + }) + .await } async fn try_initial_query( client: &HttpClient, conn: &Object, + checksums: Checksums, ) -> Result<(), MainError> { tracing::debug!("Querying initial data..."); let block_height = query_last_block_height(client).await.into_rpc_error()?; - let epoch = namada_service::get_epoch_at_block_height(client, block_height) - .await - .into_rpc_error()?; + let first_block_in_epoch = namada_service::get_first_block_in_epoch(client) .await .into_rpc_error()?; + let (block, tm_block_response, epoch) = + get_block(block_height, client, checksums.clone()).await?; + let tokens = query_tokens(client).await.into_rpc_error()?; // This can sometimes fail if the last block height in the node has moved @@ -473,6 +453,12 @@ async fn try_initial_query( .run(|transaction_conn| { repository::balance::insert_tokens(transaction_conn, tokens)?; + repository::block::upsert_block( + transaction_conn, + block, + tm_block_response, + )?; + tracing::debug!( block = block_height, "Inserting {} balances...", @@ -549,3 +535,45 @@ async fn update_crawler_timestamp( .and_then(identity) .into_db_error() } + +async fn get_block( + block_height: u32, + client: &HttpClient, + checksums: Checksums, +) -> Result<(Block, TendermintBlockResponse, u32), MainError> { + tracing::debug!(block = block_height, "Query block..."); + let tm_block_response = + tendermint_service::query_raw_block_at_height(client, block_height) + .await + .into_rpc_error()?; + tracing::debug!( + block = block_height, + "Raw block contains {} txs...", + tm_block_response.block.data.len() + ); + + tracing::debug!(block = block_height, "Query block results..."); + let tm_block_results_response = + tendermint_service::query_raw_block_results_at_height( + client, + block_height, + ) + .await + .into_rpc_error()?; + let block_results = BlockResult::from(tm_block_results_response); + + tracing::debug!(block = block_height, "Query epoch..."); + let epoch = namada_service::get_epoch_at_block_height(client, block_height) + .await + .into_rpc_error()?; + + let block = Block::from( + &tm_block_response, + &block_results, + checksums, + epoch, + block_height, + ); + + Ok((block, tm_block_response, epoch)) +} diff --git a/transactions/src/main.rs b/transactions/src/main.rs index a3c1e3bda..8d3e491fc 100644 --- a/transactions/src/main.rs +++ b/transactions/src/main.rs @@ -15,8 +15,9 @@ use shared::error::{AsDbError, AsRpcError, ContextDbInteractError, MainError}; use tendermint_rpc::HttpClient; use transactions::app_state::AppState; use transactions::config::AppConfig; -use transactions::repository::block as block_repo; -use transactions::repository::transactions as transaction_repo; +use transactions::repository::{ + block as block_repo, transactions as transaction_repo, +}; use transactions::services::{ db as db_service, namada as namada_service, tendermint as tendermint_service, From 593ef204ab7f3545f44aea818091d36dc4ff52b3 Mon Sep 17 00:00:00 2001 From: Joel Nordell Date: Mon, 16 Dec 2024 17:22:43 -0600 Subject: [PATCH 16/29] Store the validator namada address on blocks instead of the tendermint address --- chain/src/main.rs | 34 +++++++++++++++++++++-------- chain/src/services/namada.rs | 33 +++++----------------------- orm/src/blocks.rs | 2 +- shared/src/block.rs | 6 ++++- 
shared/src/header.rs | 3 ++- transactions/src/main.rs | 16 ++++++++++++++ transactions/src/services/namada.rs | 13 +++++++++++ 7 files changed, 68 insertions(+), 39 deletions(-) diff --git a/chain/src/main.rs b/chain/src/main.rs index 51a9b96c7..c56fcf0cc 100644 --- a/chain/src/main.rs +++ b/chain/src/main.rs @@ -27,6 +27,7 @@ use shared::crawler_state::ChainCrawlerState; use shared::error::{AsDbError, AsRpcError, ContextDbInteractError, MainError}; use shared::id::Id; use shared::token::Token; +use shared::utils::BalanceChange; use shared::validator::ValidatorSet; use tendermint_rpc::endpoint::block::Response as TendermintBlockResponse; use tendermint_rpc::HttpClient; @@ -138,13 +139,14 @@ async fn crawling_fn( let addresses = block.addresses_with_balance_change(&native_token); - let block_proposer_address = namada_service::get_block_proposer_address( - &client, - &block, - &native_token, - ) - .await - .into_rpc_error()?; + let block_proposer_address = block + .header + .proposer_address_namada + .as_ref() + .map(|address| BalanceChange { + address: Id::Account(address.clone()), + token: Token::Native(native_token.clone()), + }); let all_balance_changed_addresses = addresses .iter() @@ -162,8 +164,7 @@ async fn crawling_fn( tracing::debug!( block = block_height, - addresses = addresses.len(), - block_proposer_address = block_proposer_address.len(), + addresses = all_balance_changed_addresses.len(), "Updating balance for {} addresses...", all_balance_changed_addresses.len() ); @@ -567,9 +568,24 @@ async fn get_block( .await .into_rpc_error()?; + let proposer_address_namada = namada_service::get_validator_namada_address( + client, + &Id::from(&tm_block_response.block.header.proposer_address), + ) + .await + .into_rpc_error()?; + + tracing::info!( + block = block_height, + tm_address = tm_block_response.block.header.proposer_address.to_string(), + namada_address = ?proposer_address_namada, + "Got block proposer address" + ); + let block = Block::from( &tm_block_response, &block_results, + &proposer_address_namada, checksums, epoch, block_height, diff --git a/chain/src/services/namada.rs b/chain/src/services/namada.rs index 95479b5bc..6912c1fbc 100644 --- a/chain/src/services/namada.rs +++ b/chain/src/services/namada.rs @@ -19,7 +19,7 @@ use namada_sdk::state::Key; use namada_sdk::token::Amount as NamadaSdkAmount; use namada_sdk::{borsh, rpc, token}; use shared::balance::{Amount, Balance, Balances}; -use shared::block::{Block, BlockHeight, Epoch}; +use shared::block::{BlockHeight, Epoch}; use shared::bond::{Bond, BondAddresses, Bonds}; use shared::id::Id; use shared::proposal::{GovernanceProposal, TallyType}; @@ -693,38 +693,17 @@ pub async fn get_validator_set_at_epoch( Ok(ValidatorSet { validators, epoch }) } -pub async fn get_block_proposer_address( +pub async fn get_validator_namada_address( client: &HttpClient, - block: &Block, - native_token: &Id, -) -> anyhow::Result> { + tm_addr: &Id, +) -> anyhow::Result> { let validator = RPC .vp() .pos() - .validator_by_tm_addr( - client, - &block.header.proposer_address.to_uppercase(), - ) + .validator_by_tm_addr(client, &tm_addr.to_string().to_uppercase()) .await?; - tracing::debug!( - block = block.header.height, - native_token = native_token.to_string(), - proposer_address = block.header.proposer_address, - namada_address = ?validator, - "Got block proposer address" - ); - - match validator { - Some(validator) => { - let balance_change = BalanceChange { - address: Id::from(validator), - token: Token::Native(native_token.clone()), - }; - 
Ok(std::iter::once(balance_change).collect()) - } - None => Ok(HashSet::new()), - } + Ok(validator.map(Id::from)) } pub async fn query_pipeline_length(client: &HttpClient) -> anyhow::Result { diff --git a/orm/src/blocks.rs b/orm/src/blocks.rs index 7d40fae8c..379c398bb 100644 --- a/orm/src/blocks.rs +++ b/orm/src/blocks.rs @@ -34,7 +34,7 @@ impl From<(Block, TendermintBlockResponse)> for BlockInsertDb { hash: Some(block.hash.to_string()), app_hash: Some(block.header.app_hash.to_string()), timestamp: Some(timestamp), - proposer: Some(block.header.proposer_address), + proposer: block.header.proposer_address_namada, epoch: Some(block.epoch as i32), } } diff --git a/shared/src/block.rs b/shared/src/block.rs index 2af89ec43..465ad40fe 100644 --- a/shared/src/block.rs +++ b/shared/src/block.rs @@ -111,6 +111,7 @@ impl Block { pub fn from( block_response: &TendermintBlockResponse, block_results: &BlockResult, + proposer_address_namada: &Option, // Provide the namada address of the proposer, if available checksums: Checksums, epoch: Epoch, block_height: BlockHeight, @@ -140,12 +141,15 @@ impl Block { header: BlockHeader { height: block_response.block.header.height.value() as BlockHeight, - proposer_address: block_response + proposer_address_tm: block_response .block .header .proposer_address .to_string() .to_lowercase(), + proposer_address_namada: proposer_address_namada + .as_ref() + .map(Id::to_string), timestamp: block_response.block.header.time.to_string(), app_hash: Id::from(&block_response.block.header.app_hash), }, diff --git a/shared/src/header.rs b/shared/src/header.rs index 792618895..decf3a9f9 100644 --- a/shared/src/header.rs +++ b/shared/src/header.rs @@ -4,7 +4,8 @@ use crate::block::BlockHeight; #[derive(Debug, Clone, Default)] pub struct BlockHeader { pub height: BlockHeight, - pub proposer_address: String, + pub proposer_address_tm: String, + pub proposer_address_namada: Option, pub timestamp: String, pub app_hash: Id, } diff --git a/transactions/src/main.rs b/transactions/src/main.rs index 8d3e491fc..36c9bc56d 100644 --- a/transactions/src/main.rs +++ b/transactions/src/main.rs @@ -12,6 +12,7 @@ use shared::checksums::Checksums; use shared::crawler::crawl; use shared::crawler_state::BlockCrawlerState; use shared::error::{AsDbError, AsRpcError, ContextDbInteractError, MainError}; +use shared::id::Id; use tendermint_rpc::HttpClient; use transactions::app_state::AppState; use transactions::config::AppConfig; @@ -116,9 +117,24 @@ async fn crawling_fn( .into_rpc_error()?; let block_results = BlockResult::from(tm_block_results_response); + let proposer_address_namada = namada_service::get_validator_namada_address( + &client, + &Id::from(&tm_block_response.block.header.proposer_address), + ) + .await + .into_rpc_error()?; + + tracing::debug!( + block = block_height, + tm_address = tm_block_response.block.header.proposer_address.to_string(), + namada_address = ?proposer_address_namada, + "Got block proposer address" + ); + let block = Block::from( &tm_block_response, &block_results, + &proposer_address_namada, checksums, 1_u32, block_height, diff --git a/transactions/src/services/namada.rs b/transactions/src/services/namada.rs index 51d412202..cde1d642e 100644 --- a/transactions/src/services/namada.rs +++ b/transactions/src/services/namada.rs @@ -55,3 +55,16 @@ pub async fn query_tx_code_hash( None } } + +pub async fn get_validator_namada_address( + client: &HttpClient, + tm_addr: &Id, +) -> anyhow::Result> { + let validator = RPC + .vp() + .pos() + .validator_by_tm_addr(client, 
&tm_addr.to_string().to_uppercase()) + .await?; + + Ok(validator.map(Id::from)) +} From fe544d564aa0cf0646cf6ff8a5f1514553490dfd Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Mon, 2 Dec 2024 12:48:13 +0100 Subject: [PATCH 17/29] parsing ack tx --- .../2024-07-04-103941_crawler_state/down.sql | 1 + .../2024-12-01-170248_ibc_ack/up.sql | 9 ++ .../down.sql | 3 + orm/src/schema.rs | 100 ++++++++---------- shared/src/block_result.rs | 73 +++++++++++-- shared/src/transaction.rs | 34 ++++++ transactions/Cargo.toml | 2 + transactions/src/services/tx.rs | 91 ++++++++++++++++ 8 files changed, 248 insertions(+), 65 deletions(-) create mode 100644 orm/migrations/2024-12-01-170248_ibc_ack/up.sql create mode 100644 transactions/src/services/tx.rs diff --git a/orm/migrations/2024-07-04-103941_crawler_state/down.sql b/orm/migrations/2024-07-04-103941_crawler_state/down.sql index 9122f91e4..23de38c6d 100644 --- a/orm/migrations/2024-07-04-103941_crawler_state/down.sql +++ b/orm/migrations/2024-07-04-103941_crawler_state/down.sql @@ -1,4 +1,5 @@ -- This file should undo anything in `up.sql` DROP TABLE crawler_state; + DROP TYPE CRAWLER_NAME; diff --git a/orm/migrations/2024-12-01-170248_ibc_ack/up.sql b/orm/migrations/2024-12-01-170248_ibc_ack/up.sql new file mode 100644 index 000000000..f8ba62400 --- /dev/null +++ b/orm/migrations/2024-12-01-170248_ibc_ack/up.sql @@ -0,0 +1,9 @@ +-- Your SQL goes here +CREATE TYPE IBC_STATUS AS ENUM ('fail', 'success', 'timeout', 'unknown'); + +CREATE TABLE ibc_ack ( + id VARCHAR PRIMARY KEY, + tx_hash VARCHAR NOT NULL, + timeout INT NOT NULL, + status IBC_STATUS +); diff --git a/orm/migrations/2024-12-10-104502_transaction_types/down.sql b/orm/migrations/2024-12-10-104502_transaction_types/down.sql index d9a93fe9a..566ff9fd1 100644 --- a/orm/migrations/2024-12-10-104502_transaction_types/down.sql +++ b/orm/migrations/2024-12-10-104502_transaction_types/down.sql @@ -1 +1,4 @@ -- This file should undo anything in `up.sql` +DROP TABLE ibc_ack; + +DROP TYPE IBC_STATUS; \ No newline at end of file diff --git a/orm/src/schema.rs b/orm/src/schema.rs index 796a8d6ae..320a7c8d4 100644 --- a/orm/src/schema.rs +++ b/orm/src/schema.rs @@ -1,91 +1,51 @@ // @generated automatically by Diesel CLI. 
pub mod sql_types { - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "crawler_name"))] pub struct CrawlerName; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_kind"))] pub struct GovernanceKind; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_result"))] pub struct GovernanceResult; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_tally_type"))] pub struct GovernanceTallyType; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[diesel(postgres_type(name = "ibc_status"))] + pub struct IbcStatus; + + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "payment_kind"))] pub struct PaymentKind; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "payment_recurrence"))] pub struct PaymentRecurrence; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "token_type"))] pub struct TokenType; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "transaction_kind"))] pub struct TransactionKind; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "transaction_result"))] pub struct TransactionResult; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "validator_state"))] pub struct ValidatorState; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "vote_kind"))] pub struct VoteKind; } @@ -209,6 +169,18 @@ diesel::table! { } } +diesel::table! { + use diesel::sql_types::*; + use super::sql_types::IbcStatus; + + ibc_ack (id) { + id -> Varchar, + tx_hash -> Varchar, + timeout -> Int4, + status -> Nullable, + } +} + diesel::table! { ibc_token (address) { #[max_length = 45] @@ -243,6 +215,21 @@ diesel::table! { } } +diesel::table! 
{ + use diesel::sql_types::*; + use super::sql_types::PaymentRecurrence; + use super::sql_types::PaymentKind; + + public_good_funding (id) { + id -> Int4, + proposal_id -> Int4, + payment_recurrence -> PaymentRecurrence, + payment_kind -> PaymentKind, + receipient -> Varchar, + amount -> Numeric, + } +} + diesel::table! { revealed_pk (id) { id -> Int4, @@ -315,6 +302,7 @@ diesel::joinable!(governance_votes -> governance_proposals (proposal_id)); diesel::joinable!(ibc_token -> token (address)); diesel::joinable!(inner_transactions -> wrapper_transactions (wrapper_id)); diesel::joinable!(pos_rewards -> validators (validator_id)); +diesel::joinable!(public_good_funding -> governance_proposals (proposal_id)); diesel::joinable!(unbonds -> validators (validator_id)); diesel::joinable!(wrapper_transactions -> blocks (block_height)); @@ -328,9 +316,11 @@ diesel::allow_tables_to_appear_in_same_query!( gas_price, governance_proposals, governance_votes, + ibc_ack, ibc_token, inner_transactions, pos_rewards, + public_good_funding, revealed_pk, token, unbonds, diff --git a/shared/src/block_result.rs b/shared/src/block_result.rs index d0e7f08d8..f24145693 100644 --- a/shared/src/block_result.rs +++ b/shared/src/block_result.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use std::str::FromStr; +use namada_ibc::apps::transfer::types::events::AckEvent; use namada_tx::data::TxResult; use tendermint_rpc::endpoint::block_results::Response as TendermintBlockResultResponse; @@ -10,6 +11,7 @@ use crate::transaction::TransactionExitStatus; #[derive(Debug, Clone)] pub enum EventKind { Applied, + SendPacket, Unknown, } @@ -17,6 +19,7 @@ impl From<&String> for EventKind { fn from(value: &String) -> Self { match value.as_str() { "tx/applied" => Self::Applied, + "send_packet" => Self::SendPacket, _ => Self::Unknown, } } @@ -32,7 +35,7 @@ pub struct BlockResult { #[derive(Debug, Clone)] pub struct Event { pub kind: EventKind, - pub attributes: Option, + pub attributes: Option, } #[derive(Debug, Clone, Default, Copy)] @@ -107,7 +110,7 @@ impl BatchResults { } #[derive(Debug, Clone, Default)] -pub struct TxAttributes { +pub struct TxApplied { pub code: TxEventStatusCode, pub gas: u64, pub hash: Id, @@ -116,14 +119,48 @@ pub struct TxAttributes { pub info: String, } -impl TxAttributes { +#[derive(Debug, Clone, Default)] +pub struct SendPacket { + pub source_port: String, + pub dest_port: String, + pub source_channel: String, + pub dest_channel: String, + pub sequence: String, +} + +#[derive(Debug, Clone)] +pub enum TxAttributesType { + TxApplied(TxApplied), + SendPacket(SendPacket) +} + +impl TxAttributesType { pub fn deserialize( event_kind: &EventKind, attributes: &BTreeMap, ) -> Option { match event_kind { EventKind::Unknown => None, - EventKind::Applied => Some(Self { + EventKind::SendPacket => { + let source_port = + attributes.get("packet_src_port").unwrap().to_owned(); + let dest_port = + attributes.get("packet_dst_port").unwrap().to_owned(); + let source_channel = + attributes.get("packet_src_channel").unwrap().to_owned(); + let dest_channel = + attributes.get("packet_dst_channel").unwrap().to_owned(); + let sequence = + attributes.get("packet_sequence").unwrap().to_owned(); + Some(Self::SendPacket(SendPacket { + source_port, + dest_port, + source_channel, + dest_channel, + sequence, + })) + } + EventKind::Applied => Some(Self::TxApplied(TxApplied { code: attributes .get("code") .map(|code| TxEventStatusCode::from(code.as_str())) @@ -153,7 +190,7 @@ impl TxAttributes { }) .unwrap(), info: 
attributes.get("info").unwrap().to_owned(), - }), + })), } } } @@ -177,7 +214,7 @@ impl From for BlockResult { }, ); let attributes = - TxAttributes::deserialize(&kind, &raw_attributes); + TxAttributesType::deserialize(&kind, &raw_attributes); Event { kind, attributes } }) .collect::>(); @@ -198,7 +235,7 @@ impl From for BlockResult { }, ); let attributes = - TxAttributes::deserialize(&kind, &raw_attributes); + TxAttributesType::deserialize(&kind, &raw_attributes); Event { kind, attributes } }) .collect::>(); @@ -221,7 +258,15 @@ impl BlockResult { let exit_status = self .end_events .iter() - .filter_map(|event| event.attributes.clone()) + .filter_map(|event| { + if let Some(TxAttributesType::TxApplied(data)) = + &event.attributes + { + Some(data.clone()) + } else { + None + } + }) .find(|attributes| attributes.hash.eq(tx_hash)) .map(|attributes| attributes.clone().code) .map(TransactionExitStatus::from); @@ -237,7 +282,15 @@ impl BlockResult { let exit_status = self .end_events .iter() - .filter_map(|event| event.attributes.clone()) + .filter_map(|event| { + if let Some(TxAttributesType::TxApplied(data)) = + &event.attributes + { + Some(data.clone()) + } else { + None + } + }) .find(|attributes| attributes.hash.eq(wrapper_hash)) .map(|attributes| attributes.batch.is_successful(inner_hash)) .map(|successful| match successful { @@ -246,4 +299,4 @@ impl BlockResult { }); exit_status.unwrap_or(TransactionExitStatus::Rejected) } -} +} \ No newline at end of file diff --git a/shared/src/transaction.rs b/shared/src/transaction.rs index d86ca4ba7..7dbbb0307 100644 --- a/shared/src/transaction.rs +++ b/shared/src/transaction.rs @@ -423,3 +423,37 @@ impl Transaction { self.extra_sections.get(§ion_id).cloned() } } + +#[derive(Debug, Clone)] +pub enum IbcAckStatus { + Success, + Fail, + Timeout, + Unknown, +} + +#[derive(Debug, Clone)] +pub struct IbcAck { + pub sequence_number: String, + pub source_port: String, + pub dest_port: String, + pub source_channel: String, + pub dest_channel: String, + pub status: IbcAckStatus, +} + +impl IbcAck { + pub fn id_source(&self) -> String { + format!( + "{}/{}/{}", + self.source_port, self.source_channel, self.sequence_number + ) + } + + pub fn id_dest(&self) -> String { + format!( + "{}/{}/{}", + self.dest_port, self.dest_channel, self.sequence_number + ) + } +} diff --git a/transactions/Cargo.toml b/transactions/Cargo.toml index 57ff1f28d..7967dc86b 100644 --- a/transactions/Cargo.toml +++ b/transactions/Cargo.toml @@ -27,6 +27,8 @@ deadpool-diesel.workspace = true diesel.workspace = true diesel_migrations.workspace = true orm.workspace = true +clap-verbosity-flag.workspace = true +serde_json.workspace = true [build-dependencies] vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] } diff --git a/transactions/src/services/tx.rs b/transactions/src/services/tx.rs new file mode 100644 index 000000000..7b1928bc5 --- /dev/null +++ b/transactions/src/services/tx.rs @@ -0,0 +1,91 @@ +use namada_sdk::ibc::core::{ + channel::types::{acknowledgement::AcknowledgementStatus, msgs::PacketMsg}, + handler::types::msgs::MsgEnvelope, +}; +use shared::{ + block_result::{BlockResult, SendPacket, TxAttributesType}, + transaction::{IbcAck, IbcAckStatus, InnerTransaction, TransactionKind}, +}; + +pub fn get_ibc_packets(block_results: &BlockResult) -> Vec { + block_results + .end_events + .iter() + .filter_map(|event| { + if let Some(attributes) = &event.attributes { + match attributes { + TxAttributesType::SendPacket(packet) => { + Some(packet.to_owned()) + } + _ => 
None, + } + } else { + None + } + }) + .collect::>() +} + +pub fn get_ibc_recv_ack(inner_txs: &Vec) -> Vec { + inner_txs.iter().filter_map(|tx| match tx.kind.clone() { + TransactionKind::IbcMsgTransfer(ibc_message) => match ibc_message { + Some(ibc_message) => match ibc_message.0 { + namada_sdk::ibc::IbcMessage::Envelope(msg_envelope) => { + match *msg_envelope { + MsgEnvelope::Packet(packet_msg) => match packet_msg { + PacketMsg::Recv(_) => None, + PacketMsg::Ack(msg) => { + let ack = match serde_json::from_slice::< + AcknowledgementStatus, + >( + msg.acknowledgement.as_bytes() + ) { + Ok(status) => IbcAck { + sequence_number: msg.packet.seq_on_a.to_string(), + source_port: msg.packet.port_id_on_a.to_string(), + dest_port: msg.packet.port_id_on_b.to_string(), + source_channel: msg.packet.chan_id_on_a.to_string(), + dest_channel: msg.packet.chan_id_on_b.to_string(), + status: match status { + AcknowledgementStatus::Success(_) => IbcAckStatus::Success, + AcknowledgementStatus::Error(_) => IbcAckStatus::Fail, + }, + }, + Err(_) => IbcAck { + sequence_number: msg.packet.seq_on_a.to_string(), + source_port: msg.packet.port_id_on_a.to_string(), + dest_port: msg.packet.port_id_on_b.to_string(), + source_channel: msg.packet.chan_id_on_a.to_string(), + dest_channel: msg.packet.chan_id_on_b.to_string(), + status: IbcAckStatus::Unknown, + }, + }; + Some(ack) + } + PacketMsg::Timeout(msg) => Some(IbcAck { + sequence_number: msg.packet.seq_on_a.to_string(), + source_port: msg.packet.port_id_on_a.to_string(), + dest_port: msg.packet.port_id_on_b.to_string(), + source_channel: msg.packet.chan_id_on_a.to_string(), + dest_channel: msg.packet.chan_id_on_b.to_string(), + status: IbcAckStatus::Timeout, + }), + PacketMsg::TimeoutOnClose(msg) => Some(IbcAck { + sequence_number: msg.packet.seq_on_a.to_string(), + source_port: msg.packet.port_id_on_a.to_string(), + dest_port: msg.packet.port_id_on_b.to_string(), + source_channel: msg.packet.chan_id_on_a.to_string(), + dest_channel: msg.packet.chan_id_on_b.to_string(), + status: IbcAckStatus::Timeout, + }), + }, + _ => None, + } + } + _ => None, + }, + None => None, + }, + _ => None, + }).collect() +} From e7900849c220c82a7ce084bf473732b83b1e533c Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Mon, 2 Dec 2024 16:00:16 +0100 Subject: [PATCH 18/29] parse sequence + ack + tx id --- orm/src/ibc.rs | 55 +++++++ orm/src/lib.rs | 1 + orm/src/schema.rs | 72 +++++++-- shared/src/block_result.rs | 9 +- shared/src/transaction.rs | 35 +++++ transactions/src/main.rs | 28 +++- transactions/src/repository/transactions.rs | 49 +++++- transactions/src/services/mod.rs | 1 + transactions/src/services/tx.rs | 156 +++++++++++--------- 9 files changed, 318 insertions(+), 88 deletions(-) create mode 100644 orm/src/ibc.rs diff --git a/orm/src/ibc.rs b/orm/src/ibc.rs new file mode 100644 index 000000000..e939b9cbe --- /dev/null +++ b/orm/src/ibc.rs @@ -0,0 +1,55 @@ +use diesel::{AsChangeset, Insertable}; +use serde::{Deserialize, Serialize}; +use shared::transaction::{IbcAckStatus, IbcSequence}; + +use crate::schema::ibc_ack; + +#[derive(Debug, Clone, Serialize, Deserialize, diesel_derive_enum::DbEnum)] +#[ExistingTypePath = "crate::schema::sql_types::IbcStatus"] +pub enum IbcAckStatusDb { + Unknown, + Timeout, + Fail, + Success, +} + +impl From for IbcAckStatusDb { + fn from(value: IbcAckStatus) -> Self { + match value { + IbcAckStatus::Success => Self::Success, + IbcAckStatus::Fail => Self::Fail, + IbcAckStatus::Timeout => Self::Timeout, + IbcAckStatus::Unknown => 
Self::Unknown, + } + } +} + +#[derive(Serialize, Insertable, AsChangeset, Clone)] +#[diesel(table_name = ibc_ack)] +#[diesel(check_for_backend(diesel::pg::Pg))] +pub struct IbcAckDb { + pub id: String, + pub tx_hash: String, + pub timeout: i32, + pub status: IbcAckStatusDb, +} + +pub type IbcAckInsertDb = IbcAckDb; + +impl From for IbcAckInsertDb { + fn from(value: IbcSequence) -> Self { + Self { + id: value.id(), + tx_hash: value.tx_id.to_string(), + timeout: value.timeout as i32, + status: IbcAckStatusDb::Unknown, + } + } +} + +#[derive(Serialize, AsChangeset, Clone)] +#[diesel(table_name = ibc_ack)] +#[diesel(check_for_backend(diesel::pg::Pg))] +pub struct IbcSequencekStatusUpdateDb { + pub status: IbcAckStatusDb, +} diff --git a/orm/src/lib.rs b/orm/src/lib.rs index 340f5ae82..4cce8ef60 100644 --- a/orm/src/lib.rs +++ b/orm/src/lib.rs @@ -7,6 +7,7 @@ pub mod governance_proposal; pub mod governance_votes; pub mod group_by_macros; pub mod helpers; +pub mod ibc; pub mod migrations; pub mod parameters; pub mod pos_rewards; diff --git a/orm/src/schema.rs b/orm/src/schema.rs index 320a7c8d4..c373c02c9 100644 --- a/orm/src/schema.rs +++ b/orm/src/schema.rs @@ -1,51 +1,99 @@ // @generated automatically by Diesel CLI. pub mod sql_types { - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "crawler_name"))] pub struct CrawlerName; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_kind"))] pub struct GovernanceKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_result"))] pub struct GovernanceResult; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_tally_type"))] pub struct GovernanceTallyType; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "ibc_status"))] pub struct IbcStatus; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "payment_kind"))] pub struct PaymentKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "payment_recurrence"))] pub struct PaymentRecurrence; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "token_type"))] pub struct TokenType; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + 
diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "transaction_kind"))] pub struct TransactionKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "transaction_result"))] pub struct TransactionResult; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "validator_state"))] pub struct ValidatorState; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "vote_kind"))] pub struct VoteKind; } diff --git a/shared/src/block_result.rs b/shared/src/block_result.rs index f24145693..b6899b663 100644 --- a/shared/src/block_result.rs +++ b/shared/src/block_result.rs @@ -1,7 +1,6 @@ use std::collections::BTreeMap; use std::str::FromStr; -use namada_ibc::apps::transfer::types::events::AckEvent; use namada_tx::data::TxResult; use tendermint_rpc::endpoint::block_results::Response as TendermintBlockResultResponse; @@ -125,6 +124,7 @@ pub struct SendPacket { pub dest_port: String, pub source_channel: String, pub dest_channel: String, + pub timeout_timestamp: u64, pub sequence: String, } @@ -152,11 +152,18 @@ impl TxAttributesType { attributes.get("packet_dst_channel").unwrap().to_owned(); let sequence = attributes.get("packet_sequence").unwrap().to_owned(); + let timeout_timestamp = attributes + .get("packet_timeout_timestamp") + .unwrap_or(&"0".to_string()) + .parse::() + .unwrap_or_default() + .to_owned(); Some(Self::SendPacket(SendPacket { source_port, dest_port, source_channel, dest_channel, + timeout_timestamp, sequence, })) } diff --git a/shared/src/transaction.rs b/shared/src/transaction.rs index 7dbbb0307..8dfa64909 100644 --- a/shared/src/transaction.rs +++ b/shared/src/transaction.rs @@ -424,6 +424,30 @@ impl Transaction { } } +#[derive(Debug, Clone)] +pub struct IbcSequence { + pub sequence_number: String, + pub source_port: String, + pub dest_port: String, + pub source_channel: String, + pub dest_channel: String, + pub timeout: u64, + pub tx_id: Id, +} + +impl IbcSequence { + pub fn id(&self) -> String { + format!( + "{}/{}/{}/{}/{}", + self.dest_port, + self.dest_channel, + self.source_port, + self.source_channel, + self.sequence_number + ) + } +} + #[derive(Debug, Clone)] pub enum IbcAckStatus { Success, @@ -456,4 +480,15 @@ impl IbcAck { self.dest_port, self.dest_channel, self.sequence_number ) } + + pub fn id(&self) -> String { + format!( + "{}/{}/{}/{}/{}", + self.dest_port, + self.dest_channel, + self.source_port, + self.source_channel, + self.sequence_number + ) + } } diff --git a/transactions/src/main.rs b/transactions/src/main.rs index 36c9bc56d..32243bea1 100644 --- a/transactions/src/main.rs +++ b/transactions/src/main.rs @@ -21,7 +21,7 @@ use transactions::repository::{ }; use transactions::services::{ db as db_service, namada as namada_service, - tendermint as tendermint_service, + tendermint as tendermint_service, tx as tx_service, }; #[tokio::main] @@ -143,11 +143,17 @@ async fn crawling_fn( let inner_txs = block.inner_txs(); let wrapper_txs = block.wrapper_txs(); - tracing::debug!( - block = block_height, - txs = inner_txs.len(), - "Deserialized {} txs...", - 
wrapper_txs.len() + inner_txs.len()
+    let ibc_sequence_packet =
+        tx_service::get_ibc_packets(&block_results, &inner_txs);
+    let ibc_ack_packet = tx_service::get_ibc_ack_packet(&inner_txs);
+
+    tracing::info!(
+        "Deserialized {} wrappers, {} inners, {} ibc sequence numbers and {} ibc acks \
+         events...",
+        wrapper_txs.len(),
+        inner_txs.len(),
+        ibc_sequence_packet.len(),
+        ibc_ack_packet.len()
     );
 
     // Because transaction crawler starts from block 1 we read timestamp from
@@ -187,6 +193,16 @@ async fn crawling_fn(
                 crawler_state,
             )?;
 
+            transaction_repo::insert_ibc_sequence(
+                transaction_conn,
+                ibc_sequence_packet,
+            )?;
+
+            transaction_repo::update_ibc_sequence(
+                transaction_conn,
+                ibc_ack_packet,
+            )?;
+
             anyhow::Ok(())
         })
     })
diff --git a/transactions/src/repository/transactions.rs b/transactions/src/repository/transactions.rs
index 8fb982179..4b5d403b3 100644
--- a/transactions/src/repository/transactions.rs
+++ b/transactions/src/repository/transactions.rs
@@ -1,12 +1,20 @@
 use anyhow::Context;
 use chrono::NaiveDateTime;
 use diesel::upsert::excluded;
-use diesel::{ExpressionMethods, PgConnection, RunQueryDsl};
+use diesel::{
+    ExpressionMethods, OptionalEmptyChangesetExtension, PgConnection,
+    RunQueryDsl,
+};
 use orm::crawler_state::{BlockStateInsertDb, CrawlerNameDb};
-use orm::schema::{crawler_state, inner_transactions, wrapper_transactions};
+use orm::ibc::{IbcAckInsertDb, IbcAckStatusDb, IbcSequencekStatusUpdateDb};
+use orm::schema::{
+    crawler_state, ibc_ack, inner_transactions, wrapper_transactions,
+};
 use orm::transactions::{InnerTransactionInsertDb, WrapperTransactionInsertDb};
 use shared::crawler_state::{BlockCrawlerState, CrawlerName};
-use shared::transaction::{InnerTransaction, WrapperTransaction};
+use shared::transaction::{
+    IbcAck, IbcSequence, InnerTransaction, WrapperTransaction,
+};
 
 pub fn insert_inner_transactions(
     transaction_conn: &mut PgConnection,
@@ -76,3 +84,38 @@ pub fn update_crawler_timestamp(
 
     anyhow::Ok(())
 }
+
+pub fn insert_ibc_sequence(
+    transaction_conn: &mut PgConnection,
+    ibc_sequences: Vec<IbcSequence>,
+) -> anyhow::Result<()> {
+    diesel::insert_into(ibc_ack::table)
+        .values::<Vec<IbcAckInsertDb>>(
+            ibc_sequences
+                .into_iter()
+                .map(IbcAckInsertDb::from)
+                .collect(),
+        )
+        .execute(transaction_conn)
+        .context("Failed to insert ibc sequences in db")?;
+
+    anyhow::Ok(())
+}
+
+pub fn update_ibc_sequence(
+    transaction_conn: &mut PgConnection,
+    ibc_acks: Vec<IbcAck>,
+) -> anyhow::Result<()> {
+    for ack in ibc_acks {
+        let ack_update = IbcSequencekStatusUpdateDb {
+            status: IbcAckStatusDb::from(ack.status.clone()),
+        };
+        diesel::update(ibc_ack::table)
+            .set(ack_update)
+            .filter(ibc_ack::dsl::id.eq(ack.id()))
+            .execute(transaction_conn)
+            .optional_empty_changeset()
+            .context("Failed to update ibc ack status in db")?;
+    }
+    anyhow::Ok(())
+}
diff --git a/transactions/src/services/mod.rs b/transactions/src/services/mod.rs
index a9dfa39f9..5c0b9ddc2 100644
--- a/transactions/src/services/mod.rs
+++ b/transactions/src/services/mod.rs
@@ -1,3 +1,4 @@
 pub mod db;
 pub mod namada;
 pub mod tendermint;
+pub mod tx;
\ No newline at end of file
diff --git a/transactions/src/services/tx.rs b/transactions/src/services/tx.rs
index 7b1928bc5..833421442 100644
--- a/transactions/src/services/tx.rs
+++ b/transactions/src/services/tx.rs
@@ -1,22 +1,49 @@
-use namada_sdk::ibc::core::{
-    channel::types::{acknowledgement::AcknowledgementStatus, msgs::PacketMsg},
-    handler::types::msgs::MsgEnvelope,
-};
-use shared::{
-    block_result::{BlockResult, SendPacket, TxAttributesType},
-    
transaction::{IbcAck, IbcAckStatus, InnerTransaction, TransactionKind}, +use namada_sdk::ibc::core::channel::types::acknowledgement::AcknowledgementStatus; +use namada_sdk::ibc::core::channel::types::msgs::PacketMsg; +use namada_sdk::ibc::core::handler::types::msgs::MsgEnvelope; +use shared::block_result::{BlockResult, TxAttributesType}; +use shared::ser::IbcMessage; +use shared::transaction::{ + IbcAck, IbcAckStatus, IbcSequence, InnerTransaction, TransactionExitStatus, + TransactionKind, }; -pub fn get_ibc_packets(block_results: &BlockResult) -> Vec { +pub fn get_ibc_packets( + block_results: &BlockResult, + inner_txs: &[InnerTransaction], +) -> Vec { + let mut ibc_txs = inner_txs + .iter() + .filter_map(|tx| { + if matches!( + tx.kind, + TransactionKind::IbcMsgTransfer(Some(IbcMessage(_))) + ) && matches!(tx.exit_code, TransactionExitStatus::Applied) + { + Some(tx.tx_id.clone()) + } else { + None + } + }) + .collect::>(); + + ibc_txs.reverse(); + block_results .end_events .iter() .filter_map(|event| { if let Some(attributes) = &event.attributes { match attributes { - TxAttributesType::SendPacket(packet) => { - Some(packet.to_owned()) - } + TxAttributesType::SendPacket(packet) => Some(IbcSequence { + sequence_number: packet.sequence.clone(), + source_port: packet.source_port.clone(), + dest_port: packet.dest_port.clone(), + source_channel: packet.source_channel.clone(), + dest_channel: packet.dest_channel.clone(), + timeout: packet.timeout_timestamp, + tx_id: ibc_txs.pop().unwrap(), + }), _ => None, } } else { @@ -26,65 +53,62 @@ pub fn get_ibc_packets(block_results: &BlockResult) -> Vec { .collect::>() } -pub fn get_ibc_recv_ack(inner_txs: &Vec) -> Vec { +pub fn get_ibc_ack_packet(inner_txs: &[InnerTransaction]) -> Vec { inner_txs.iter().filter_map(|tx| match tx.kind.clone() { - TransactionKind::IbcMsgTransfer(ibc_message) => match ibc_message { - Some(ibc_message) => match ibc_message.0 { - namada_sdk::ibc::IbcMessage::Envelope(msg_envelope) => { - match *msg_envelope { - MsgEnvelope::Packet(packet_msg) => match packet_msg { - PacketMsg::Recv(_) => None, - PacketMsg::Ack(msg) => { - let ack = match serde_json::from_slice::< - AcknowledgementStatus, - >( - msg.acknowledgement.as_bytes() - ) { - Ok(status) => IbcAck { - sequence_number: msg.packet.seq_on_a.to_string(), - source_port: msg.packet.port_id_on_a.to_string(), - dest_port: msg.packet.port_id_on_b.to_string(), - source_channel: msg.packet.chan_id_on_a.to_string(), - dest_channel: msg.packet.chan_id_on_b.to_string(), - status: match status { - AcknowledgementStatus::Success(_) => IbcAckStatus::Success, - AcknowledgementStatus::Error(_) => IbcAckStatus::Fail, - }, + TransactionKind::IbcMsgTransfer(Some(ibc_message)) => match ibc_message.0 { + namada_sdk::ibc::IbcMessage::Envelope(msg_envelope) => { + match *msg_envelope { + MsgEnvelope::Packet(packet_msg) => match packet_msg { + PacketMsg::Recv(_) => None, + PacketMsg::Ack(msg) => { + let ack = match serde_json::from_slice::< + AcknowledgementStatus, + >( + msg.acknowledgement.as_bytes() + ) { + Ok(status) => IbcAck { + sequence_number: msg.packet.seq_on_a.to_string(), + source_port: msg.packet.port_id_on_a.to_string(), + dest_port: msg.packet.port_id_on_b.to_string(), + source_channel: msg.packet.chan_id_on_a.to_string(), + dest_channel: msg.packet.chan_id_on_b.to_string(), + status: match status { + AcknowledgementStatus::Success(_) => IbcAckStatus::Success, + AcknowledgementStatus::Error(_) => IbcAckStatus::Fail, }, - Err(_) => IbcAck { - sequence_number: 
msg.packet.seq_on_a.to_string(), - source_port: msg.packet.port_id_on_a.to_string(), - dest_port: msg.packet.port_id_on_b.to_string(), - source_channel: msg.packet.chan_id_on_a.to_string(), - dest_channel: msg.packet.chan_id_on_b.to_string(), - status: IbcAckStatus::Unknown, - }, - }; - Some(ack) - } - PacketMsg::Timeout(msg) => Some(IbcAck { - sequence_number: msg.packet.seq_on_a.to_string(), - source_port: msg.packet.port_id_on_a.to_string(), - dest_port: msg.packet.port_id_on_b.to_string(), - source_channel: msg.packet.chan_id_on_a.to_string(), - dest_channel: msg.packet.chan_id_on_b.to_string(), - status: IbcAckStatus::Timeout, - }), - PacketMsg::TimeoutOnClose(msg) => Some(IbcAck { - sequence_number: msg.packet.seq_on_a.to_string(), - source_port: msg.packet.port_id_on_a.to_string(), - dest_port: msg.packet.port_id_on_b.to_string(), - source_channel: msg.packet.chan_id_on_a.to_string(), - dest_channel: msg.packet.chan_id_on_b.to_string(), - status: IbcAckStatus::Timeout, - }), - }, - _ => None, - } + }, + Err(_) => IbcAck { + sequence_number: msg.packet.seq_on_a.to_string(), + source_port: msg.packet.port_id_on_a.to_string(), + dest_port: msg.packet.port_id_on_b.to_string(), + source_channel: msg.packet.chan_id_on_a.to_string(), + dest_channel: msg.packet.chan_id_on_b.to_string(), + status: IbcAckStatus::Unknown, + }, + }; + Some(ack) + } + PacketMsg::Timeout(msg) => Some(IbcAck { + sequence_number: msg.packet.seq_on_a.to_string(), + source_port: msg.packet.port_id_on_a.to_string(), + dest_port: msg.packet.port_id_on_b.to_string(), + source_channel: msg.packet.chan_id_on_a.to_string(), + dest_channel: msg.packet.chan_id_on_b.to_string(), + status: IbcAckStatus::Timeout, + }), + PacketMsg::TimeoutOnClose(msg) => Some(IbcAck { + sequence_number: msg.packet.seq_on_a.to_string(), + source_port: msg.packet.port_id_on_a.to_string(), + dest_port: msg.packet.port_id_on_b.to_string(), + source_channel: msg.packet.chan_id_on_a.to_string(), + dest_channel: msg.packet.chan_id_on_b.to_string(), + status: IbcAckStatus::Timeout, + }), + }, + _ => None, } - _ => None, }, - None => None, + _ => None }, _ => None, }).collect() From 4d5886fd0aa728295d2d7f29e6ff88d62579d914 Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Tue, 3 Dec 2024 09:20:19 +0100 Subject: [PATCH 19/29] added handler --- .../2024-12-01-170248_ibc_ack/up.sql | 4 +- orm/src/ibc.rs | 9 ++- orm/src/schema.rs | 76 ++++--------------- shared/src/block.rs | 3 - shared/src/block_result.rs | 3 + shared/src/ser.rs | 17 ++++- transactions/src/main.rs | 1 - webserver/src/app.rs | 5 +- webserver/src/error/api.rs | 4 + webserver/src/error/ibc.rs | 28 +++++++ webserver/src/error/mod.rs | 1 + webserver/src/handler/ibc.rs | 19 +++++ webserver/src/handler/mod.rs | 1 + webserver/src/repository/ibc.rs | 45 +++++++++++ webserver/src/repository/mod.rs | 1 + webserver/src/response/ibc.rs | 16 ++++ webserver/src/response/mod.rs | 1 + webserver/src/service/ibc.rs | 42 ++++++++++ webserver/src/service/mod.rs | 1 + webserver/src/state/common.rs | 3 + 20 files changed, 203 insertions(+), 77 deletions(-) create mode 100644 webserver/src/error/ibc.rs create mode 100644 webserver/src/handler/ibc.rs create mode 100644 webserver/src/repository/ibc.rs create mode 100644 webserver/src/response/ibc.rs create mode 100644 webserver/src/service/ibc.rs diff --git a/orm/migrations/2024-12-01-170248_ibc_ack/up.sql b/orm/migrations/2024-12-01-170248_ibc_ack/up.sql index f8ba62400..652a3e23e 100644 --- a/orm/migrations/2024-12-01-170248_ibc_ack/up.sql +++ 
b/orm/migrations/2024-12-01-170248_ibc_ack/up.sql @@ -4,6 +4,6 @@ CREATE TYPE IBC_STATUS AS ENUM ('fail', 'success', 'timeout', 'unknown'); CREATE TABLE ibc_ack ( id VARCHAR PRIMARY KEY, tx_hash VARCHAR NOT NULL, - timeout INT NOT NULL, - status IBC_STATUS + timeout BIGINT NOT NULL, + status IBC_STATUS NOT NULL ); diff --git a/orm/src/ibc.rs b/orm/src/ibc.rs index e939b9cbe..0583f47e5 100644 --- a/orm/src/ibc.rs +++ b/orm/src/ibc.rs @@ -1,4 +1,5 @@ -use diesel::{AsChangeset, Insertable}; +use diesel::prelude::Queryable; +use diesel::{AsChangeset, Insertable, Selectable}; use serde::{Deserialize, Serialize}; use shared::transaction::{IbcAckStatus, IbcSequence}; @@ -24,13 +25,13 @@ impl From for IbcAckStatusDb { } } -#[derive(Serialize, Insertable, AsChangeset, Clone)] +#[derive(Serialize, Queryable, Insertable, Selectable, Clone, Debug)] #[diesel(table_name = ibc_ack)] #[diesel(check_for_backend(diesel::pg::Pg))] pub struct IbcAckDb { pub id: String, pub tx_hash: String, - pub timeout: i32, + pub timeout: i64, pub status: IbcAckStatusDb, } @@ -41,7 +42,7 @@ impl From for IbcAckInsertDb { Self { id: value.id(), tx_hash: value.tx_id.to_string(), - timeout: value.timeout as i32, + timeout: value.timeout as i64, status: IbcAckStatusDb::Unknown, } } diff --git a/orm/src/schema.rs b/orm/src/schema.rs index c373c02c9..9ff237292 100644 --- a/orm/src/schema.rs +++ b/orm/src/schema.rs @@ -1,99 +1,51 @@ // @generated automatically by Diesel CLI. pub mod sql_types { - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "crawler_name"))] pub struct CrawlerName; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_kind"))] pub struct GovernanceKind; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_result"))] pub struct GovernanceResult; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_tally_type"))] pub struct GovernanceTallyType; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "ibc_status"))] pub struct IbcStatus; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "payment_kind"))] pub struct PaymentKind; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "payment_recurrence"))] pub struct PaymentRecurrence; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] 
#[diesel(postgres_type(name = "token_type"))] pub struct TokenType; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "transaction_kind"))] pub struct TransactionKind; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "transaction_result"))] pub struct TransactionResult; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "validator_state"))] pub struct ValidatorState; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "vote_kind"))] pub struct VoteKind; } @@ -224,8 +176,8 @@ diesel::table! { ibc_ack (id) { id -> Varchar, tx_hash -> Varchar, - timeout -> Int4, - status -> Nullable, + timeout -> Int8, + status -> IbcStatus, } } diff --git a/shared/src/block.rs b/shared/src/block.rs index 465ad40fe..ccfb0c563 100644 --- a/shared/src/block.rs +++ b/shared/src/block.rs @@ -104,7 +104,6 @@ pub struct Block { pub hash: Id, pub header: BlockHeader, pub transactions: Vec<(WrapperTransaction, Vec)>, - pub epoch: Epoch, } impl Block { @@ -113,7 +112,6 @@ impl Block { block_results: &BlockResult, proposer_address_namada: &Option, // Provide the namada address of the proposer, if available checksums: Checksums, - epoch: Epoch, block_height: BlockHeight, ) -> Self { let transactions = block_response @@ -154,7 +152,6 @@ impl Block { app_hash: Id::from(&block_response.block.header.app_hash), }, transactions, - epoch, } } diff --git a/shared/src/block_result.rs b/shared/src/block_result.rs index b6899b663..ac5f91a69 100644 --- a/shared/src/block_result.rs +++ b/shared/src/block_result.rs @@ -158,6 +158,9 @@ impl TxAttributesType { .parse::() .unwrap_or_default() .to_owned(); + + tracing::error!("{}", timeout_timestamp); + Some(Self::SendPacket(SendPacket { source_port, dest_port, diff --git a/shared/src/ser.rs b/shared/src/ser.rs index a776b783f..a9e9ee609 100644 --- a/shared/src/ser.rs +++ b/shared/src/ser.rs @@ -3,6 +3,7 @@ use std::str::FromStr; use namada_core::address::Address; use namada_core::masp::MaspTxId; +use namada_sdk::borsh::BorshSerializeExt; use namada_sdk::ibc::IbcMessage as NamadaIbcMessage; use namada_sdk::token::{ Account as NamadaAccount, DenominatedAmount as NamadaDenominatedAmount, @@ -10,6 +11,7 @@ use namada_sdk::token::{ }; use serde::ser::SerializeStruct; use serde::{Deserialize, Serialize}; +use subtle_encoding::hex; #[derive(Debug, Clone)] pub struct AccountsMap(pub BTreeMap); @@ -120,10 +122,19 @@ impl Serialize for IbcMessage { state.end() } - NamadaIbcMessage::Envelope(_) => { - let state = serializer.serialize_struct("IbcEnvelope", 0)?; + NamadaIbcMessage::Envelope(data) => { + let mut state = + serializer.serialize_struct("IbcEnvelope", 1)?; + + // todo: implement this bs :( - // TODO: serialize envelope message correctly + state.serialize_field( + "data", + &String::from_utf8_lossy(&hex::encode( + data.serialize_to_vec(), + )) + .into_owned(), + )?; state.end() } diff --git a/transactions/src/main.rs 
b/transactions/src/main.rs
index 32243bea1..5249deb98 100644
--- a/transactions/src/main.rs
+++ b/transactions/src/main.rs
@@ -136,7 +136,6 @@ async fn crawling_fn(
         &block_results,
         &proposer_address_namada,
         checksums,
-        1_u32,
         block_height,
     );
 
diff --git a/webserver/src/app.rs b/webserver/src/app.rs
index 734ba4c17..935eb0d57 100644
--- a/webserver/src/app.rs
+++ b/webserver/src/app.rs
@@ -21,8 +21,8 @@ use crate::config::AppConfig;
 use crate::handler::{
     balance as balance_handlers, chain as chain_handlers,
     crawler_state as crawler_state_handlers, gas as gas_handlers,
-    governance as gov_handlers, pk as pk_handlers, pos as pos_handlers,
-    transaction as transaction_handlers,
+    governance as gov_handlers, ibc as ibc_handler, pk as pk_handlers,
+    pos as pos_handlers, transaction as transaction_handlers
 };
 use crate::state::common::CommonState;
 
@@ -129,6 +129,7 @@ impl ApplicationServer {
                 "/chain/epoch/latest",
                 get(chain_handlers::get_last_processed_epoch),
             )
+            .route("/ibc/:tx_id/status", get(ibc_handler::get_ibc_status))
             .route(
                 "/crawlers/timestamps",
                 get(crawler_state_handlers::get_crawlers_timestamps),
diff --git a/webserver/src/error/api.rs b/webserver/src/error/api.rs
index 1db8ed5f3..1d0045a7c 100644
--- a/webserver/src/error/api.rs
+++ b/webserver/src/error/api.rs
@@ -6,6 +6,7 @@ use super::chain::ChainError;
 use super::crawler_state::CrawlerStateError;
 use super::gas::GasError;
 use super::governance::GovernanceError;
+use super::ibc::IbcError;
 use super::pos::PoSError;
 use super::revealed_pk::RevealedPkError;
 use super::transaction::TransactionError;
@@ -27,6 +28,8 @@ pub enum ApiError {
     #[error(transparent)]
     GasError(#[from] GasError),
     #[error(transparent)]
+    IbcError(#[from] IbcError),
+    #[error(transparent)]
     CrawlerStateError(#[from] CrawlerStateError),
 }
 
@@ -40,6 +43,7 @@ impl IntoResponse for ApiError {
             ApiError::GovernanceError(error) => error.into_response(),
             ApiError::RevealedPkError(error) => error.into_response(),
             ApiError::GasError(error) => error.into_response(),
+            ApiError::IbcError(error) => error.into_response(),
             ApiError::CrawlerStateError(error) => error.into_response(),
         }
     }
diff --git a/webserver/src/error/ibc.rs b/webserver/src/error/ibc.rs
new file mode 100644
index 000000000..869d738ce
--- /dev/null
+++ b/webserver/src/error/ibc.rs
@@ -0,0 +1,28 @@
+use axum::http::StatusCode;
+use axum::response::IntoResponse;
+use thiserror::Error;
+
+use crate::response::api::ApiErrorResponse;
+
+#[derive(Error, Debug)]
+pub enum IbcError {
+    #[error("IBC acknowledgement for transaction {0} not found")]
+    NotFound(u64),
+    #[error("Database error: {0}")]
+    Database(String),
+    #[error("Unknown error: {0}")]
+    Unknown(String),
+}
+
+impl IntoResponse for IbcError {
+    fn into_response(self) -> axum::response::Response {
+        let status_code = match self {
+            IbcError::NotFound(_) => StatusCode::NOT_FOUND,
+            IbcError::Unknown(_) | IbcError::Database(_) => {
+                StatusCode::INTERNAL_SERVER_ERROR
+            }
+        };
+
+        ApiErrorResponse::send(status_code.as_u16(), Some(self.to_string()))
+    }
+}
diff --git a/webserver/src/error/mod.rs b/webserver/src/error/mod.rs
index 94f2c2724..67b033aa3 100644
--- a/webserver/src/error/mod.rs
+++ b/webserver/src/error/mod.rs
@@ -4,6 +4,7 @@ pub mod chain;
 pub mod crawler_state;
 pub mod gas;
 pub mod governance;
+pub mod ibc;
 pub mod pos;
 pub mod revealed_pk;
 pub mod transaction;
diff --git a/webserver/src/handler/ibc.rs b/webserver/src/handler/ibc.rs
new file mode 100644
index 000000000..ecf8ff83d
--- /dev/null
+++ b/webserver/src/handler/ibc.rs
@@ -0,0 +1,19 @@
+use
axum::extract::{Path, State}; +use axum::http::HeaderMap; +use axum::Json; +use axum_macros::debug_handler; + +use crate::error::api::ApiError; +use crate::response::ibc::IbcAck; +use crate::state::common::CommonState; + +#[debug_handler] +pub async fn get_ibc_status( + _headers: HeaderMap, + Path(tx_id): Path, + State(state): State, +) -> Result, ApiError> { + let ibc_ack_status = state.ibc_service.get_ack_by_tx_id(tx_id).await?; + + Ok(Json(ibc_ack_status)) +} diff --git a/webserver/src/handler/mod.rs b/webserver/src/handler/mod.rs index be4fc91e8..87f96ddbc 100644 --- a/webserver/src/handler/mod.rs +++ b/webserver/src/handler/mod.rs @@ -3,6 +3,7 @@ pub mod chain; pub mod crawler_state; pub mod gas; pub mod governance; +pub mod ibc; pub mod pk; pub mod pos; pub mod transaction; diff --git a/webserver/src/repository/ibc.rs b/webserver/src/repository/ibc.rs new file mode 100644 index 000000000..e55ef4a60 --- /dev/null +++ b/webserver/src/repository/ibc.rs @@ -0,0 +1,45 @@ +use axum::async_trait; +use diesel::{ExpressionMethods, QueryDsl, RunQueryDsl, SelectableHelper}; +use orm::ibc::IbcAckDb; +use orm::schema::ibc_ack; + +use crate::appstate::AppState; + +#[derive(Clone)] +pub struct IbcRepository { + pub(crate) app_state: AppState, +} + +#[async_trait] +pub trait IbcRepositoryTrait { + fn new(app_state: AppState) -> Self; + + async fn find_ibc_ack( + &self, + id: String, + ) -> Result, String>; +} + +#[async_trait] +impl IbcRepositoryTrait for IbcRepository { + fn new(app_state: AppState) -> Self { + Self { app_state } + } + + async fn find_ibc_ack( + &self, + id: String, + ) -> Result, String> { + let conn = self.app_state.get_db_connection().await; + + conn.interact(move |conn| { + ibc_ack::table + .filter(ibc_ack::dsl::tx_hash.eq(id)) + .select(IbcAckDb::as_select()) + .first(conn) + .ok() + }) + .await + .map_err(|e| e.to_string()) + } +} diff --git a/webserver/src/repository/mod.rs b/webserver/src/repository/mod.rs index 0ffcdf615..0db518011 100644 --- a/webserver/src/repository/mod.rs +++ b/webserver/src/repository/mod.rs @@ -2,6 +2,7 @@ pub mod balance; pub mod chain; pub mod gas; pub mod governance; +pub mod ibc; pub mod pos; pub mod revealed_pk; pub mod tranasaction; diff --git a/webserver/src/response/ibc.rs b/webserver/src/response/ibc.rs new file mode 100644 index 000000000..2277c4c42 --- /dev/null +++ b/webserver/src/response/ibc.rs @@ -0,0 +1,16 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub enum IbcAckStatus { + Success, + Fail, + Timeout, + Unknown, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct IbcAck { + pub status: IbcAckStatus, +} diff --git a/webserver/src/response/mod.rs b/webserver/src/response/mod.rs index d5201da87..676be021e 100644 --- a/webserver/src/response/mod.rs +++ b/webserver/src/response/mod.rs @@ -4,6 +4,7 @@ pub mod chain; pub mod crawler_state; pub mod gas; pub mod governance; +pub mod ibc; pub mod pos; pub mod revealed_pk; pub mod transaction; diff --git a/webserver/src/service/ibc.rs b/webserver/src/service/ibc.rs new file mode 100644 index 000000000..a322b3601 --- /dev/null +++ b/webserver/src/service/ibc.rs @@ -0,0 +1,42 @@ +use orm::ibc::IbcAckStatusDb; + +use crate::appstate::AppState; +use crate::error::ibc::IbcError; +use crate::repository::ibc::{IbcRepository, IbcRepositoryTrait}; +use crate::response::ibc::{IbcAck, IbcAckStatus}; + +#[derive(Clone)] +pub struct IbcService { + pub ibc_repo: 
IbcRepository, +} + +impl IbcService { + pub fn new(app_state: AppState) -> Self { + Self { + ibc_repo: IbcRepository::new(app_state), + } + } + + pub async fn get_ack_by_tx_id( + &self, + tx_id: String, + ) -> Result { + self.ibc_repo + .find_ibc_ack(tx_id) + .await + .map_err(IbcError::Database) + .map(|ack| match ack { + Some(ack) => IbcAck { + status: match ack.status { + IbcAckStatusDb::Unknown => IbcAckStatus::Unknown, + IbcAckStatusDb::Timeout => IbcAckStatus::Timeout, + IbcAckStatusDb::Fail => IbcAckStatus::Fail, + IbcAckStatusDb::Success => IbcAckStatus::Success, + }, + }, + None => IbcAck { + status: IbcAckStatus::Unknown, + }, + }) + } +} diff --git a/webserver/src/service/mod.rs b/webserver/src/service/mod.rs index 3dda95f0c..a5566429a 100644 --- a/webserver/src/service/mod.rs +++ b/webserver/src/service/mod.rs @@ -3,6 +3,7 @@ pub mod chain; pub mod crawler_state; pub mod gas; pub mod governance; +pub mod ibc; pub mod pos; pub mod revealed_pk; pub mod transaction; diff --git a/webserver/src/state/common.rs b/webserver/src/state/common.rs index f50fafb06..8c2ff5703 100644 --- a/webserver/src/state/common.rs +++ b/webserver/src/state/common.rs @@ -7,6 +7,7 @@ use crate::service::chain::ChainService; use crate::service::crawler_state::CrawlerStateService; use crate::service::gas::GasService; use crate::service::governance::GovernanceService; +use crate::service::ibc::IbcService; use crate::service::pos::PosService; use crate::service::revealed_pk::RevealedPkService; use crate::service::transaction::TransactionService; @@ -21,6 +22,7 @@ pub struct CommonState { pub gas_service: GasService, pub transaction_service: TransactionService, pub crawler_state_service: CrawlerStateService, + pub ibc_service: IbcService, pub client: HttpClient, pub config: AppConfig, } @@ -36,6 +38,7 @@ impl CommonState { gas_service: GasService::new(data.clone()), transaction_service: TransactionService::new(data.clone()), crawler_state_service: CrawlerStateService::new(data.clone()), + ibc_service: IbcService::new(data.clone()), client, config, } From 1e15f3bb582feaf36c78281a66caec357349dc1c Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Fri, 13 Dec 2024 16:56:05 +0100 Subject: [PATCH 20/29] update swagger --- swagger.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/swagger.yml b/swagger.yml index ec99692d1..99d7f7164 100644 --- a/swagger.yml +++ b/swagger.yml @@ -621,6 +621,30 @@ paths: application/json: schema: $ref: '#/components/schemas/InnerTransaction' + /api/v1/ibc/{tx_id}/status: + get: + summary: Get the status of an IBC transfer by tx id + parameters: + - in: path + name: tx_id + schema: + type: string + required: true + description: Tx id hash + responses: + '200': + description: Status of the IBC transfer + content: + application/json: + schema: + type: array + items: + type: object + required: [status] + properties: + name: + type: string + enum: [unknown, timeout, success, fail] /api/v1/crawlers/timestamps: get: summary: Get timestamps of the last activity of the crawlers From 510b829aa01304075887acfc0848e2bd0c192030 Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Mon, 16 Dec 2024 14:38:38 +0100 Subject: [PATCH 21/29] minors --- orm/src/schema.rs | 72 ++++++++++++++++++++++++++------ rustfmt.toml | 1 - shared/src/block_result.rs | 4 +- transactions/src/main.rs | 4 +- transactions/src/services/mod.rs | 2 +- webserver/src/app.rs | 2 +- 6 files changed, 66 insertions(+), 19 deletions(-) diff --git a/orm/src/schema.rs b/orm/src/schema.rs index 
9ff237292..7437f41fe 100644 --- a/orm/src/schema.rs +++ b/orm/src/schema.rs @@ -1,51 +1,99 @@ // @generated automatically by Diesel CLI. pub mod sql_types { - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "crawler_name"))] pub struct CrawlerName; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_kind"))] pub struct GovernanceKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_result"))] pub struct GovernanceResult; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_tally_type"))] pub struct GovernanceTallyType; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "ibc_status"))] pub struct IbcStatus; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "payment_kind"))] pub struct PaymentKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "payment_recurrence"))] pub struct PaymentRecurrence; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "token_type"))] pub struct TokenType; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "transaction_kind"))] pub struct TransactionKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "transaction_result"))] pub struct TransactionResult; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "validator_state"))] pub struct ValidatorState; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "vote_kind"))] pub struct VoteKind; } diff --git a/rustfmt.toml b/rustfmt.toml index f7a0911e7..6bfc04d34 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -24,7 +24,6 @@ format_macro_matchers = true format_strings = true group_imports = 
"StdExternalCrate" hard_tabs = false -show_parse_errors = true ignore = [] imports_granularity = "Module" imports_indent = "Block" diff --git a/shared/src/block_result.rs b/shared/src/block_result.rs index ac5f91a69..e6c17483b 100644 --- a/shared/src/block_result.rs +++ b/shared/src/block_result.rs @@ -131,7 +131,7 @@ pub struct SendPacket { #[derive(Debug, Clone)] pub enum TxAttributesType { TxApplied(TxApplied), - SendPacket(SendPacket) + SendPacket(SendPacket), } impl TxAttributesType { @@ -309,4 +309,4 @@ impl BlockResult { }); exit_status.unwrap_or(TransactionExitStatus::Rejected) } -} \ No newline at end of file +} diff --git a/transactions/src/main.rs b/transactions/src/main.rs index 5249deb98..abd728c7d 100644 --- a/transactions/src/main.rs +++ b/transactions/src/main.rs @@ -147,8 +147,8 @@ async fn crawling_fn( let ibc_ack_packet = tx_service::get_ibc_ack_packet(&inner_txs); tracing::info!( - "Deserialized {} wrappers, {} inners, {} ibc sequence numbers and {} ibc acks \ - events...", + "Deserialized {} wrappers, {} inners, {} ibc sequence numbers and {} \ + ibc acks events...", wrapper_txs.len(), inner_txs.len(), ibc_sequence_packet.len(), diff --git a/transactions/src/services/mod.rs b/transactions/src/services/mod.rs index 5c0b9ddc2..233652f55 100644 --- a/transactions/src/services/mod.rs +++ b/transactions/src/services/mod.rs @@ -1,4 +1,4 @@ pub mod db; pub mod namada; pub mod tendermint; -pub mod tx; \ No newline at end of file +pub mod tx; diff --git a/webserver/src/app.rs b/webserver/src/app.rs index 935eb0d57..fdbda7c5c 100644 --- a/webserver/src/app.rs +++ b/webserver/src/app.rs @@ -22,7 +22,7 @@ use crate::handler::{ balance as balance_handlers, chain as chain_handlers, crawler_state as crawler_state_handlers, gas as gas_handlers, governance as gov_handlers, ibc as ibc_handler, pk as pk_handlers, - pos as pos_handlers, transaction as transaction_handlers + pos as pos_handlers, transaction as transaction_handlers, }; use crate::state::common::CommonState; From 1b9153930633f6f11d10900dfcda7f3936213be3 Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Tue, 17 Dec 2024 10:23:06 +0100 Subject: [PATCH 22/29] minors --- chain/src/repository/balance.rs | 10 +++++----- shared/src/block.rs | 3 +++ transactions/src/main.rs | 1 + 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/chain/src/repository/balance.rs b/chain/src/repository/balance.rs index c09bbdb5c..a9ee0af01 100644 --- a/chain/src/repository/balance.rs +++ b/chain/src/repository/balance.rs @@ -133,7 +133,7 @@ mod tests { insert_tokens(conn, vec![token.clone()])?; - seed_blocks_from_balances(conn, &vec![balance.clone()])?; + seed_blocks_from_balances(conn, &[balance.clone()])?; insert_balances(conn, vec![balance.clone()])?; @@ -180,7 +180,7 @@ mod tests { ..(balance.clone()) }; - seed_blocks_from_balances(conn, &vec![new_balance.clone()])?; + seed_blocks_from_balances(conn, &[new_balance.clone()])?; insert_balances(conn, vec![new_balance])?; let queried_balance = @@ -418,7 +418,7 @@ mod tests { insert_tokens(conn, vec![token.clone()])?; - seed_blocks_from_balances(conn, &vec![balance.clone()])?; + seed_blocks_from_balances(conn, &[balance.clone()])?; insert_balances(conn, vec![balance.clone()])?; let queried_balance = query_balance_by_address(conn, owner, token)?; @@ -515,10 +515,10 @@ mod tests { fn seed_blocks_from_balances( conn: &mut PgConnection, - balances: &Vec, + balances: &[Balance], ) -> anyhow::Result<()> { for height in balances - .into_iter() + .iter() .map(|balance| balance.height as i32) 
.collect::>() { diff --git a/shared/src/block.rs b/shared/src/block.rs index ccfb0c563..efd3fb4be 100644 --- a/shared/src/block.rs +++ b/shared/src/block.rs @@ -104,6 +104,7 @@ pub struct Block { pub hash: Id, pub header: BlockHeader, pub transactions: Vec<(WrapperTransaction, Vec)>, + pub epoch: Epoch } impl Block { @@ -112,6 +113,7 @@ impl Block { block_results: &BlockResult, proposer_address_namada: &Option, // Provide the namada address of the proposer, if available checksums: Checksums, + epoch: Epoch, block_height: BlockHeight, ) -> Self { let transactions = block_response @@ -152,6 +154,7 @@ impl Block { app_hash: Id::from(&block_response.block.header.app_hash), }, transactions, + epoch } } diff --git a/transactions/src/main.rs b/transactions/src/main.rs index abd728c7d..fbaf35a5e 100644 --- a/transactions/src/main.rs +++ b/transactions/src/main.rs @@ -136,6 +136,7 @@ async fn crawling_fn( &block_results, &proposer_address_namada, checksums, + 1_u32, // placeholder, we dont need the epoch here block_height, ); From 679b9d71fd6553d9224e35c6e8429d90490c34a0 Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Tue, 17 Dec 2024 11:00:06 +0100 Subject: [PATCH 23/29] added gas_used to thw wrapper tx --- .../down.sql | 2 + .../up.sql | 2 + orm/src/schema.rs | 94 +++---------------- orm/src/transactions.rs | 2 + shared/src/block_result.rs | 17 ++++ shared/src/transaction.rs | 3 + 6 files changed, 37 insertions(+), 83 deletions(-) create mode 100644 orm/migrations/2024-12-17-095036_transaction_gas_used/down.sql create mode 100644 orm/migrations/2024-12-17-095036_transaction_gas_used/up.sql diff --git a/orm/migrations/2024-12-17-095036_transaction_gas_used/down.sql b/orm/migrations/2024-12-17-095036_transaction_gas_used/down.sql new file mode 100644 index 000000000..0e2cb6fad --- /dev/null +++ b/orm/migrations/2024-12-17-095036_transaction_gas_used/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE wrapper_transactions DROP COLUMN gas_used; \ No newline at end of file diff --git a/orm/migrations/2024-12-17-095036_transaction_gas_used/up.sql b/orm/migrations/2024-12-17-095036_transaction_gas_used/up.sql new file mode 100644 index 000000000..2ace24652 --- /dev/null +++ b/orm/migrations/2024-12-17-095036_transaction_gas_used/up.sql @@ -0,0 +1,2 @@ +-- Your SQL goes here +ALTER TABLE wrapper_transactions ADD COLUMN gas_used VARCHAR; \ No newline at end of file diff --git a/orm/src/schema.rs b/orm/src/schema.rs index 7437f41fe..00eb0f2a7 100644 --- a/orm/src/schema.rs +++ b/orm/src/schema.rs @@ -1,99 +1,43 @@ // @generated automatically by Diesel CLI. 
pub mod sql_types { - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "crawler_name"))] pub struct CrawlerName; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_kind"))] pub struct GovernanceKind; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_result"))] pub struct GovernanceResult; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "governance_tally_type"))] pub struct GovernanceTallyType; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "ibc_status"))] pub struct IbcStatus; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] - #[diesel(postgres_type(name = "payment_kind"))] - pub struct PaymentKind; - - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] - #[diesel(postgres_type(name = "payment_recurrence"))] - pub struct PaymentRecurrence; - - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "token_type"))] pub struct TokenType; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "transaction_kind"))] pub struct TransactionKind; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "transaction_result"))] pub struct TransactionResult; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "validator_state"))] pub struct ValidatorState; - #[derive( - diesel::query_builder::QueryId, - std::fmt::Debug, - diesel::sql_types::SqlType, - )] + #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] #[diesel(postgres_type(name = "vote_kind"))] pub struct VoteKind; } @@ -263,21 +207,6 @@ diesel::table! { } } -diesel::table! { - use diesel::sql_types::*; - use super::sql_types::PaymentRecurrence; - use super::sql_types::PaymentKind; - - public_good_funding (id) { - id -> Int4, - proposal_id -> Int4, - payment_recurrence -> PaymentRecurrence, - payment_kind -> PaymentKind, - receipient -> Varchar, - amount -> Numeric, - } -} - diesel::table! { revealed_pk (id) { id -> Int4, @@ -340,6 +269,7 @@ diesel::table! 
{ block_height -> Int4, exit_code -> TransactionResult, atomic -> Bool, + gas_used -> Nullable, } } @@ -350,7 +280,6 @@ diesel::joinable!(governance_votes -> governance_proposals (proposal_id)); diesel::joinable!(ibc_token -> token (address)); diesel::joinable!(inner_transactions -> wrapper_transactions (wrapper_id)); diesel::joinable!(pos_rewards -> validators (validator_id)); -diesel::joinable!(public_good_funding -> governance_proposals (proposal_id)); diesel::joinable!(unbonds -> validators (validator_id)); diesel::joinable!(wrapper_transactions -> blocks (block_height)); @@ -368,7 +297,6 @@ diesel::allow_tables_to_appear_in_same_query!( ibc_token, inner_transactions, pos_rewards, - public_good_funding, revealed_pk, token, unbonds, diff --git a/orm/src/transactions.rs b/orm/src/transactions.rs index db64448cc..db980d28b 100644 --- a/orm/src/transactions.rs +++ b/orm/src/transactions.rs @@ -114,6 +114,7 @@ pub struct WrapperTransactionInsertDb { pub fee_payer: String, pub fee_token: String, pub gas_limit: String, + pub gas_used: Option, pub block_height: i32, pub exit_code: TransactionResultDb, pub atomic: bool, @@ -128,6 +129,7 @@ impl WrapperTransactionInsertDb { fee_payer: tx.fee.gas_payer.to_string(), fee_token: tx.fee.gas_token.to_string(), gas_limit: tx.fee.gas, + gas_used: tx.fee.gas_used, block_height: tx.block_height as i32, exit_code: TransactionResultDb::from(tx.exit_code), atomic: tx.atomic, diff --git a/shared/src/block_result.rs b/shared/src/block_result.rs index e6c17483b..4f6a9e261 100644 --- a/shared/src/block_result.rs +++ b/shared/src/block_result.rs @@ -284,6 +284,23 @@ impl BlockResult { exit_status.unwrap_or(TransactionExitStatus::Rejected) } + pub fn gas_used(&self, tx_hash: &Id) -> Option { + self + .end_events + .iter() + .filter_map(|event| { + if let Some(TxAttributesType::TxApplied(data)) = + &event.attributes + { + Some(data.clone()) + } else { + None + } + }) + .find(|attributes| attributes.hash.eq(tx_hash)) + .map(|attributes| attributes.gas.to_string()) + } + pub fn is_inner_tx_accepted( &self, wrapper_hash: &Id, diff --git a/shared/src/transaction.rs b/shared/src/transaction.rs index 8dfa64909..3f0e27f6b 100644 --- a/shared/src/transaction.rs +++ b/shared/src/transaction.rs @@ -280,6 +280,7 @@ impl InnerTransaction { #[derive(Debug, Clone)] pub struct Fee { pub gas: String, + pub gas_used: Option, pub amount_per_gas_unit: String, pub gas_payer: Id, pub gas_token: Id, @@ -301,9 +302,11 @@ impl Transaction { let wrapper_tx_id = Id::from(transaction.header_hash()); let wrapper_tx_status = block_results.is_wrapper_tx_applied(&wrapper_tx_id); + let gas_used = block_results.gas_used(&wrapper_tx_id); let fee = Fee { gas: Uint::from(wrapper.gas_limit).to_string(), + gas_used, amount_per_gas_unit: wrapper .fee .amount_per_gas_unit From 321ca546daf265b259869df6587a8202f155247e Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Tue, 17 Dec 2024 11:08:01 +0100 Subject: [PATCH 24/29] update endpoint and swagger --- swagger.yml | 2 ++ webserver/src/response/transaction.rs | 2 ++ 2 files changed, 4 insertions(+) diff --git a/swagger.yml b/swagger.yml index 99d7f7164..f49110da6 100644 --- a/swagger.yml +++ b/swagger.yml @@ -936,6 +936,8 @@ components: type: string gasLimit: type: string + gasUsed: + type: string blockHeight: type: string innerTransactions: diff --git a/webserver/src/response/transaction.rs b/webserver/src/response/transaction.rs index bcef64df0..46dfc6c1d 100644 --- a/webserver/src/response/transaction.rs +++ 
b/webserver/src/response/transaction.rs @@ -42,6 +42,7 @@ pub struct WrapperTransaction { pub fee_payer: String, pub fee_token: String, pub gas_limit: String, + pub gas_used: Option, pub block_height: u64, pub inner_transactions: Vec, pub exit_code: TransactionResult, @@ -124,6 +125,7 @@ impl From for WrapperTransaction { fee_payer: value.fee_payer, fee_token: value.fee_token, gas_limit: value.gas_limit, + gas_used: value.gas_used, block_height: value.block_height as u64, inner_transactions: vec![], exit_code: TransactionResult::from(value.exit_code), From 5b30e97ec2b0f5b0ba0146cf6bd065b5a595c0e9 Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Tue, 17 Dec 2024 12:33:44 +0100 Subject: [PATCH 25/29] fix ibc_ack migration --- orm/migrations/2024-12-01-170248_ibc_ack/down.sql | 1 + 1 file changed, 1 insertion(+) create mode 100644 orm/migrations/2024-12-01-170248_ibc_ack/down.sql diff --git a/orm/migrations/2024-12-01-170248_ibc_ack/down.sql b/orm/migrations/2024-12-01-170248_ibc_ack/down.sql new file mode 100644 index 000000000..0e3dc5ef1 --- /dev/null +++ b/orm/migrations/2024-12-01-170248_ibc_ack/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS ibc_ack; \ No newline at end of file From 509c992b86409641c8f347b2a737a4392cf73413 Mon Sep 17 00:00:00 2001 From: Gianmarco Fraccaroli Date: Tue, 17 Dec 2024 12:33:50 +0100 Subject: [PATCH 26/29] fix ibc_ack migration --- orm/migrations/2024-12-10-104502_transaction_types/down.sql | 2 -- 1 file changed, 2 deletions(-) diff --git a/orm/migrations/2024-12-10-104502_transaction_types/down.sql b/orm/migrations/2024-12-10-104502_transaction_types/down.sql index 566ff9fd1..0a1dceb69 100644 --- a/orm/migrations/2024-12-10-104502_transaction_types/down.sql +++ b/orm/migrations/2024-12-10-104502_transaction_types/down.sql @@ -1,4 +1,2 @@ -- This file should undo anything in `up.sql` -DROP TABLE ibc_ack; - DROP TYPE IBC_STATUS; \ No newline at end of file From 001e1e05628fff832294cc814817369594543f0e Mon Sep 17 00:00:00 2001 From: Mateusz Jasiuk Date: Tue, 17 Dec 2024 12:21:49 +0100 Subject: [PATCH 27/29] feat: return blocks by timestamp or height --- chain/src/repository/balance.rs | 3 +- orm/src/blocks.rs | 6 ++- shared/src/block.rs | 8 ++-- swagger.yml | 50 +++++++++++++++++++++ webserver/Cargo.toml | 1 + webserver/src/app.rs | 17 ++++++-- webserver/src/error/api.rs | 4 ++ webserver/src/error/block.rs | 28 ++++++++++++ webserver/src/error/mod.rs | 1 + webserver/src/handler/block.rs | 30 +++++++++++++ webserver/src/handler/mod.rs | 1 + webserver/src/repository/block.rs | 72 +++++++++++++++++++++++++++++++ webserver/src/repository/mod.rs | 1 + webserver/src/response/block.rs | 28 ++++++++++++ webserver/src/response/mod.rs | 1 + webserver/src/service/block.rs | 52 ++++++++++++++++++++++ webserver/src/service/mod.rs | 1 + webserver/src/state/common.rs | 3 ++ 18 files changed, 297 insertions(+), 10 deletions(-) create mode 100644 webserver/src/error/block.rs create mode 100644 webserver/src/handler/block.rs create mode 100644 webserver/src/repository/block.rs create mode 100644 webserver/src/response/block.rs create mode 100644 webserver/src/service/block.rs diff --git a/chain/src/repository/balance.rs b/chain/src/repository/balance.rs index a9ee0af01..253e8c3ef 100644 --- a/chain/src/repository/balance.rs +++ b/chain/src/repository/balance.rs @@ -73,6 +73,8 @@ pub fn insert_tokens( #[cfg(test)] mod tests { + use std::collections::HashSet; + use anyhow::Context; use diesel::{ BoolExpressionMethods, ExpressionMethods, QueryDsl, SelectableHelper, @@ -86,7 
+88,6 @@ mod tests { use shared::balance::{Amount, Balance}; use shared::id::Id; use shared::token::IbcToken; - use std::collections::HashSet; use test_helpers::db::TestDb; use super::*; diff --git a/orm/src/blocks.rs b/orm/src/blocks.rs index 379c398bb..09a03a616 100644 --- a/orm/src/blocks.rs +++ b/orm/src/blocks.rs @@ -44,9 +44,11 @@ impl BlockInsertDb { pub fn fake(height: i32) -> Self { Self { height, - hash: Some(height.to_string()), /* fake hash but ensures uniqueness + hash: Some(height.to_string()), /* fake hash but ensures + * uniqueness * with height */ - app_hash: Some("fake_app_hash".to_string()), // doesn't require uniqueness + app_hash: Some("fake_app_hash".to_string()), /* doesn't require + * uniqueness */ timestamp: Some( chrono::DateTime::from_timestamp(0, 0).unwrap().naive_utc(), ), diff --git a/shared/src/block.rs b/shared/src/block.rs index efd3fb4be..44b812327 100644 --- a/shared/src/block.rs +++ b/shared/src/block.rs @@ -104,14 +104,16 @@ pub struct Block { pub hash: Id, pub header: BlockHeader, pub transactions: Vec<(WrapperTransaction, Vec)>, - pub epoch: Epoch + pub epoch: Epoch, } impl Block { pub fn from( block_response: &TendermintBlockResponse, block_results: &BlockResult, - proposer_address_namada: &Option, // Provide the namada address of the proposer, if available + proposer_address_namada: &Option, /* Provide the namada address + * of the proposer, if + * available */ checksums: Checksums, epoch: Epoch, block_height: BlockHeight, @@ -154,7 +156,7 @@ impl Block { app_hash: Id::from(&block_response.block.header.app_hash), }, transactions, - epoch + epoch, } } diff --git a/swagger.yml b/swagger.yml index 99d7f7164..a4444e3a7 100644 --- a/swagger.yml +++ b/swagger.yml @@ -645,6 +645,40 @@ paths: name: type: string enum: [unknown, timeout, success, fail] + /api/v1/block/height/{value}: + get: + summary: Get the block by height + parameters: + - in: path + name: value + schema: + type: number + required: true + description: Block height + responses: + '200': + description: Block info + content: + application/json: + schema: + $ref: '#/components/schemas/Block' + /api/v1/block/timestamp/{value}: + get: + summary: Get the block by timestamp + parameters: + - in: path + name: value + schema: + type: number + required: true + description: Block timestamp + responses: + '200': + description: Block info + content: + application/json: + schema: + $ref: '#/components/schemas/Block' /api/v1/crawlers/timestamps: get: summary: Get timestamps of the last activity of the crawlers @@ -979,3 +1013,19 @@ components: type: string data: type: string + Block: + type: object + required: [height] + properties: + height: + type: string + hash: + type: string + appHash: + type: string + timestamp: + type: string + proposer: + type: string + epoch: + type: string diff --git a/webserver/Cargo.toml b/webserver/Cargo.toml index 39fcb69fa..5968f2f1a 100644 --- a/webserver/Cargo.toml +++ b/webserver/Cargo.toml @@ -22,6 +22,7 @@ production = [] [dependencies] axum.workspace = true +chrono.workspace = true tokio.workspace = true tower.workspace = true tower-http.workspace = true diff --git a/webserver/src/app.rs b/webserver/src/app.rs index fdbda7c5c..1dc8357ff 100644 --- a/webserver/src/app.rs +++ b/webserver/src/app.rs @@ -19,10 +19,11 @@ use tower_http::trace::TraceLayer; use crate::appstate::AppState; use crate::config::AppConfig; use crate::handler::{ - balance as balance_handlers, chain as chain_handlers, - crawler_state as crawler_state_handlers, gas as gas_handlers, - governance as 
gov_handlers, ibc as ibc_handler, pk as pk_handlers, - pos as pos_handlers, transaction as transaction_handlers, + balance as balance_handlers, block as block_handlers, + chain as chain_handlers, crawler_state as crawler_state_handlers, + gas as gas_handlers, governance as gov_handlers, ibc as ibc_handler, + pk as pk_handlers, pos as pos_handlers, + transaction as transaction_handlers, }; use crate::state::common::CommonState; @@ -136,6 +137,14 @@ impl ApplicationServer { ) // Server sent events endpoints .route("/chain/status", get(chain_handlers::chain_status)) + .route( + "/block/height/:value", + get(block_handlers::get_block_by_height), + ) + .route( + "/block/timestamp/:value", + get(block_handlers::get_block_by_timestamp), + ) .route( "/metrics", get(|| async move { metric_handle.render() }), diff --git a/webserver/src/error/api.rs b/webserver/src/error/api.rs index 1d0045a7c..4a826564f 100644 --- a/webserver/src/error/api.rs +++ b/webserver/src/error/api.rs @@ -2,6 +2,7 @@ use axum::response::{IntoResponse, Response}; use thiserror::Error; use super::balance::BalanceError; +use super::block::BlockError; use super::chain::ChainError; use super::crawler_state::CrawlerStateError; use super::gas::GasError; @@ -13,6 +14,8 @@ use super::transaction::TransactionError; #[derive(Error, Debug)] pub enum ApiError { + #[error(transparent)] + BlockError(#[from] BlockError), #[error(transparent)] TransactionError(#[from] TransactionError), #[error(transparent)] @@ -36,6 +39,7 @@ pub enum ApiError { impl IntoResponse for ApiError { fn into_response(self) -> Response { match self { + ApiError::BlockError(error) => error.into_response(), ApiError::TransactionError(error) => error.into_response(), ApiError::ChainError(error) => error.into_response(), ApiError::PoSError(error) => error.into_response(), diff --git a/webserver/src/error/block.rs b/webserver/src/error/block.rs new file mode 100644 index 000000000..43648a432 --- /dev/null +++ b/webserver/src/error/block.rs @@ -0,0 +1,28 @@ +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use thiserror::Error; + +use crate::response::api::ApiErrorResponse; + +#[derive(Error, Debug)] +pub enum BlockError { + #[error("Block not found error at {0}: {1}")] + NotFound(String, String), + #[error("Database error: {0}")] + Database(String), + #[error("Unknown error: {0}")] + Unknown(String), +} + +impl IntoResponse for BlockError { + fn into_response(self) -> Response { + let status_code = match self { + BlockError::Unknown(_) | BlockError::Database(_) => { + StatusCode::INTERNAL_SERVER_ERROR + } + BlockError::NotFound(_, _) => StatusCode::NOT_FOUND, + }; + + ApiErrorResponse::send(status_code.as_u16(), Some(self.to_string())) + } +} diff --git a/webserver/src/error/mod.rs b/webserver/src/error/mod.rs index 67b033aa3..e41db3de2 100644 --- a/webserver/src/error/mod.rs +++ b/webserver/src/error/mod.rs @@ -1,5 +1,6 @@ pub mod api; pub mod balance; +pub mod block; pub mod chain; pub mod crawler_state; pub mod gas; diff --git a/webserver/src/handler/block.rs b/webserver/src/handler/block.rs new file mode 100644 index 000000000..ed4bb01b3 --- /dev/null +++ b/webserver/src/handler/block.rs @@ -0,0 +1,30 @@ +use axum::extract::{Path, State}; +use axum::http::HeaderMap; +use axum::Json; +use axum_macros::debug_handler; + +use crate::error::api::ApiError; +use crate::response::block::Block; +use crate::state::common::CommonState; + +#[debug_handler] +pub async fn get_block_by_height( + _headers: HeaderMap, + Path(value): Path, + State(state): 
State<CommonState>,
+) -> Result<Json<Block>, ApiError> {
+    let block = state.block_service.get_block_by_height(value).await?;
+
+    Ok(Json(block))
+}
+
+#[debug_handler]
+pub async fn get_block_by_timestamp(
+    _headers: HeaderMap,
+    Path(value): Path<i64>,
+    State(state): State<CommonState>,
+) -> Result<Json<Block>, ApiError> {
+    let block = state.block_service.get_block_by_timestamp(value).await?;
+
+    Ok(Json(block))
+}
diff --git a/webserver/src/handler/mod.rs b/webserver/src/handler/mod.rs
index 87f96ddbc..f48188102 100644
--- a/webserver/src/handler/mod.rs
+++ b/webserver/src/handler/mod.rs
@@ -1,4 +1,5 @@
 pub mod balance;
+pub mod block;
 pub mod chain;
 pub mod crawler_state;
 pub mod gas;
diff --git a/webserver/src/repository/block.rs b/webserver/src/repository/block.rs
new file mode 100644
index 000000000..cb13f4ee1
--- /dev/null
+++ b/webserver/src/repository/block.rs
@@ -0,0 +1,72 @@
+use axum::async_trait;
+use diesel::{ExpressionMethods, QueryDsl, RunQueryDsl, SelectableHelper};
+use orm::blocks::BlockDb;
+use orm::schema::blocks;
+
+use crate::appstate::AppState;
+
+#[derive(Clone)]
+pub struct BlockRepository {
+    pub(crate) app_state: AppState,
+}
+
+#[async_trait]
+pub trait BlockRepositoryTrait {
+    fn new(app_state: AppState) -> Self;
+
+    async fn find_block_by_height(
+        &self,
+        height: i32,
+    ) -> Result<Option<BlockDb>, String>;
+
+    async fn find_block_by_timestamp(
+        &self,
+        timestamp: i64,
+    ) -> Result<Option<BlockDb>, String>;
+}
+
+#[async_trait]
+impl BlockRepositoryTrait for BlockRepository {
+    fn new(app_state: AppState) -> Self {
+        Self { app_state }
+    }
+
+    async fn find_block_by_height(
+        &self,
+        height: i32,
+    ) -> Result<Option<BlockDb>, String> {
+        let conn = self.app_state.get_db_connection().await;
+
+        conn.interact(move |conn| {
+            blocks::table
+                .filter(blocks::dsl::height.eq(height))
+                .select(BlockDb::as_select())
+                .first(conn)
+                .ok()
+        })
+        .await
+        .map_err(|e| e.to_string())
+    }
+
+    /// Gets the last block preceding the given timestamp
+    async fn find_block_by_timestamp(
+        &self,
+        timestamp: i64,
+    ) -> Result<Option<BlockDb>, String> {
+        let conn = self.app_state.get_db_connection().await;
+        let timestamp = chrono::DateTime::from_timestamp(timestamp, 0)
+            .expect("Invalid timestamp")
+            .naive_utc();
+
+        conn.interact(move |conn| {
+            blocks::table
+                .filter(blocks::timestamp.le(timestamp))
+                .order(blocks::timestamp.desc())
+                .select(BlockDb::as_select())
+                .first(conn)
+                .ok()
+        })
+        .await
+        .map_err(|e| e.to_string())
+    }
+}
diff --git a/webserver/src/repository/mod.rs b/webserver/src/repository/mod.rs
index 0db518011..448aefb63 100644
--- a/webserver/src/repository/mod.rs
+++ b/webserver/src/repository/mod.rs
@@ -1,4 +1,5 @@
 pub mod balance;
+pub mod block;
 pub mod chain;
 pub mod gas;
 pub mod governance;
diff --git a/webserver/src/response/block.rs b/webserver/src/response/block.rs
new file mode 100644
index 000000000..3ab9b51c0
--- /dev/null
+++ b/webserver/src/response/block.rs
@@ -0,0 +1,28 @@
+use orm::blocks::BlockDb;
+use serde::{Deserialize, Serialize};
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Block {
+    pub height: i32,
+    pub hash: Option<String>,
+    pub app_hash: Option<String>,
+    pub timestamp: Option<String>,
+    pub proposer: Option<String>,
+    pub epoch: Option<String>,
+}
+
+impl From<BlockDb> for Block {
+    fn from(block_db: BlockDb) -> Self {
+        Self {
+            height: block_db.height,
+            hash: block_db.hash,
+            app_hash: block_db.app_hash,
+            timestamp: block_db
+                .timestamp
+                .map(|t| t.and_utc().timestamp().to_string()),
+            proposer: block_db.proposer,
+            epoch: block_db.epoch.map(|e| e.to_string()),
+        }
+    }
+}
diff --git 
a/webserver/src/response/mod.rs b/webserver/src/response/mod.rs
index 676be021e..980eb325d 100644
--- a/webserver/src/response/mod.rs
+++ b/webserver/src/response/mod.rs
@@ -1,5 +1,6 @@
 pub mod api;
 pub mod balance;
+pub mod block;
 pub mod chain;
 pub mod crawler_state;
 pub mod gas;
diff --git a/webserver/src/service/block.rs b/webserver/src/service/block.rs
new file mode 100644
index 000000000..54c7d73e9
--- /dev/null
+++ b/webserver/src/service/block.rs
@@ -0,0 +1,52 @@
+use crate::appstate::AppState;
+use crate::error::block::BlockError;
+use crate::repository::block::{BlockRepository, BlockRepositoryTrait};
+use crate::response::block::Block;
+
+#[derive(Clone)]
+pub struct BlockService {
+    block_repo: BlockRepository,
+}
+
+impl BlockService {
+    pub fn new(app_state: AppState) -> Self {
+        Self {
+            block_repo: BlockRepository::new(app_state),
+        }
+    }
+
+    pub async fn get_block_by_height(
+        &self,
+        height: i32,
+    ) -> Result<Block, BlockError> {
+        let block = self
+            .block_repo
+            .find_block_by_height(height)
+            .await
+            .map_err(BlockError::Database)?;
+        let block = block.ok_or(BlockError::NotFound(
+            "height".to_string(),
+            height.to_string(),
+        ))?;
+
+        Ok(Block::from(block))
+    }
+
+    pub async fn get_block_by_timestamp(
+        &self,
+        timestamp: i64,
+    ) -> Result<Block, BlockError> {
+        let block = self
+            .block_repo
+            .find_block_by_timestamp(timestamp)
+            .await
+            .map_err(BlockError::Database)?;
+
+        let block = block.ok_or(BlockError::NotFound(
+            "timestamp".to_string(),
+            timestamp.to_string(),
+        ))?;
+
+        Ok(Block::from(block))
+    }
+}
diff --git a/webserver/src/service/mod.rs b/webserver/src/service/mod.rs
index a5566429a..1178a89f7 100644
--- a/webserver/src/service/mod.rs
+++ b/webserver/src/service/mod.rs
@@ -1,4 +1,5 @@
 pub mod balance;
+pub mod block;
 pub mod chain;
 pub mod crawler_state;
 pub mod gas;
diff --git a/webserver/src/state/common.rs b/webserver/src/state/common.rs
index 8c2ff5703..f4a9363bd 100644
--- a/webserver/src/state/common.rs
+++ b/webserver/src/state/common.rs
@@ -3,6 +3,7 @@ use namada_sdk::tendermint_rpc::HttpClient;
 use crate::appstate::AppState;
 use crate::config::AppConfig;
 use crate::service::balance::BalanceService;
+use crate::service::block::BlockService;
 use crate::service::chain::ChainService;
 use crate::service::crawler_state::CrawlerStateService;
 use crate::service::gas::GasService;
@@ -15,6 +16,7 @@ use crate::service::transaction::TransactionService;
 #[derive(Clone)]
 pub struct CommonState {
     pub pos_service: PosService,
+    pub block_service: BlockService,
     pub gov_service: GovernanceService,
     pub balance_service: BalanceService,
     pub chain_service: ChainService,
@@ -30,6 +32,7 @@ pub struct CommonState {
 impl CommonState {
     pub fn new(client: HttpClient, config: AppConfig, data: AppState) -> Self {
         Self {
+            block_service: BlockService::new(data.clone()),
             pos_service: PosService::new(data.clone()),
             gov_service: GovernanceService::new(data.clone()),
             balance_service: BalanceService::new(data.clone()),
From fbbd3bd0e05a5b51ffc53751e9102ade8a17bc76 Mon Sep 17 00:00:00 2001
From: Gianmarco Fraccaroli 
Date: Tue, 17 Dec 2024 12:37:18 +0100
Subject: [PATCH 28/29] fmt

---
 chain/src/repository/balance.rs |  3 +-
 orm/src/blocks.rs               |  6 ++-
 orm/src/schema.rs               | 60 +++++++++++++++++++++++++++------
 shared/src/block.rs             |  8 +++--
 shared/src/block_result.rs      |  3 +-
 5 files changed, 62 insertions(+), 18 deletions(-)

diff --git a/chain/src/repository/balance.rs b/chain/src/repository/balance.rs
index a9ee0af01..253e8c3ef 100644
--- a/chain/src/repository/balance.rs
+++ b/chain/src/repository/balance.rs
@@ -73,6 
+73,8 @@ pub fn insert_tokens( #[cfg(test)] mod tests { + use std::collections::HashSet; + use anyhow::Context; use diesel::{ BoolExpressionMethods, ExpressionMethods, QueryDsl, SelectableHelper, @@ -86,7 +88,6 @@ mod tests { use shared::balance::{Amount, Balance}; use shared::id::Id; use shared::token::IbcToken; - use std::collections::HashSet; use test_helpers::db::TestDb; use super::*; diff --git a/orm/src/blocks.rs b/orm/src/blocks.rs index 379c398bb..09a03a616 100644 --- a/orm/src/blocks.rs +++ b/orm/src/blocks.rs @@ -44,9 +44,11 @@ impl BlockInsertDb { pub fn fake(height: i32) -> Self { Self { height, - hash: Some(height.to_string()), /* fake hash but ensures uniqueness + hash: Some(height.to_string()), /* fake hash but ensures + * uniqueness * with height */ - app_hash: Some("fake_app_hash".to_string()), // doesn't require uniqueness + app_hash: Some("fake_app_hash".to_string()), /* doesn't require + * uniqueness */ timestamp: Some( chrono::DateTime::from_timestamp(0, 0).unwrap().naive_utc(), ), diff --git a/orm/src/schema.rs b/orm/src/schema.rs index 00eb0f2a7..5698805aa 100644 --- a/orm/src/schema.rs +++ b/orm/src/schema.rs @@ -1,43 +1,83 @@ // @generated automatically by Diesel CLI. pub mod sql_types { - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "crawler_name"))] pub struct CrawlerName; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_kind"))] pub struct GovernanceKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_result"))] pub struct GovernanceResult; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "governance_tally_type"))] pub struct GovernanceTallyType; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "ibc_status"))] pub struct IbcStatus; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "token_type"))] pub struct TokenType; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "transaction_kind"))] pub struct TransactionKind; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "transaction_result"))] pub struct TransactionResult; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] 
#[diesel(postgres_type(name = "validator_state"))] pub struct ValidatorState; - #[derive(diesel::query_builder::QueryId, std::fmt::Debug, diesel::sql_types::SqlType)] + #[derive( + diesel::query_builder::QueryId, + std::fmt::Debug, + diesel::sql_types::SqlType, + )] #[diesel(postgres_type(name = "vote_kind"))] pub struct VoteKind; } diff --git a/shared/src/block.rs b/shared/src/block.rs index efd3fb4be..44b812327 100644 --- a/shared/src/block.rs +++ b/shared/src/block.rs @@ -104,14 +104,16 @@ pub struct Block { pub hash: Id, pub header: BlockHeader, pub transactions: Vec<(WrapperTransaction, Vec)>, - pub epoch: Epoch + pub epoch: Epoch, } impl Block { pub fn from( block_response: &TendermintBlockResponse, block_results: &BlockResult, - proposer_address_namada: &Option, // Provide the namada address of the proposer, if available + proposer_address_namada: &Option, /* Provide the namada address + * of the proposer, if + * available */ checksums: Checksums, epoch: Epoch, block_height: BlockHeight, @@ -154,7 +156,7 @@ impl Block { app_hash: Id::from(&block_response.block.header.app_hash), }, transactions, - epoch + epoch, } } diff --git a/shared/src/block_result.rs b/shared/src/block_result.rs index 4f6a9e261..a140abe9d 100644 --- a/shared/src/block_result.rs +++ b/shared/src/block_result.rs @@ -285,8 +285,7 @@ impl BlockResult { } pub fn gas_used(&self, tx_hash: &Id) -> Option { - self - .end_events + self.end_events .iter() .filter_map(|event| { if let Some(TxAttributesType::TxApplied(data)) = From ccc9c618da02877c33a2e1456a66530074d37bc0 Mon Sep 17 00:00:00 2001 From: Mateusz Jasiuk Date: Tue, 17 Dec 2024 13:29:37 +0100 Subject: [PATCH 29/29] chore: update version in cargo toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 098fe6ad6..a184ebe5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,7 @@ authors = ["Heliax "] edition = "2021" license = "GPL-3.0" readme = "README.md" -version = "1.0.0" +version = "1.1.4" [workspace.dependencies] clokwerk = "0.4.0"
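
For reference, the lookup rule introduced in PATCH 27 resolves a timestamp query to the most recent block whose timestamp is less than or equal to the requested value, and the service maps a miss to BlockError::NotFound (HTTP 404). Below is a minimal, self-contained Rust sketch of that selection rule over an in-memory list; `SimpleBlock` and the sample data are hypothetical stand-ins for the Diesel-backed `blocks` table and are not part of the patches above.

// Illustrative model of the timestamp lookup: pick the most recent block
// whose timestamp is <= the queried timestamp, or None if every known
// block is newer. `SimpleBlock` is a hypothetical stand-in for BlockDb.
#[derive(Debug, Clone)]
struct SimpleBlock {
    height: i32,
    timestamp: i64, // unix seconds, like the API path parameter
}

fn find_block_by_timestamp(blocks: &[SimpleBlock], ts: i64) -> Option<SimpleBlock> {
    blocks
        .iter()
        .filter(|b| b.timestamp <= ts) // mirrors `blocks::timestamp.le(timestamp)`
        .max_by_key(|b| b.timestamp) // mirrors `ORDER BY timestamp DESC LIMIT 1`
        .cloned()
}

fn main() {
    let blocks = vec![
        SimpleBlock { height: 1, timestamp: 100 },
        SimpleBlock { height: 2, timestamp: 160 },
        SimpleBlock { height: 3, timestamp: 220 },
    ];

    // A query that falls between blocks 2 and 3 resolves to block 2.
    assert_eq!(find_block_by_timestamp(&blocks, 200).map(|b| b.height), Some(2));
    // A query older than every known block yields None, which the webserver
    // surfaces as a 404 via BlockError::NotFound.
    assert!(find_block_by_timestamp(&blocks, 50).is_none());
    println!("timestamp lookup sketch ok");
}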