From 2faf12e310c5485fb5beb756cdb90459afad1fff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ale=C5=A1=20Bizjak?= Date: Tue, 5 Apr 2022 11:42:28 +0200 Subject: [PATCH 01/15] Use the existing Cargo.lock file when building static libraries. --- scripts/static-libraries/build-static-libraries.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/scripts/static-libraries/build-static-libraries.sh b/scripts/static-libraries/build-static-libraries.sh index 853545503e..ebbaacf25b 100755 --- a/scripts/static-libraries/build-static-libraries.sh +++ b/scripts/static-libraries/build-static-libraries.sh @@ -21,9 +21,6 @@ cd /build ############################################################################################################################# ## Build the project -cargo update --manifest-path /build/concordium-base/rust-src/Cargo.toml -cargo check --manifest-path /build/concordium-base/rust-src/Cargo.toml - stack build --profile --flag "concordium-consensus:-dynamic" --stack-yaml /build/concordium-consensus/stack.static.yaml for lib in $(find /build/concordium-consensus/.stack-work -type f -name "*.a" ! -name "*_p.a"); do From cf6eeb363a4757252fba34a735f238f31f12ae49 Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Tue, 5 Apr 2022 13:50:56 +0200 Subject: [PATCH 02/15] A probable fix for the issue #245: Collector hangs after error Sets the timeout for gRPC calls to 30secs. --- concordium-node/src/bin/collector.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/concordium-node/src/bin/collector.rs b/concordium-node/src/bin/collector.rs index d25d2d39c3..c47be05080 100644 --- a/concordium-node/src/bin/collector.rs +++ b/concordium-node/src/bin/collector.rs @@ -110,6 +110,13 @@ struct ConfigCli { env = "CONCORDIUM_NODE_COLLECTOR_ARTIFICIAL_START_DELAY" )] pub artificial_start_delay: u64, + #[structopt( + long = "grpc-timeout", + help = "Time (in seconds) for gRPC request timeouts", + default_value = "30", + env = "CONCORDIUM_NODE_COLLECTOR_GRPC_TIMEOUT" + )] + pub grpc_timeout: u64, #[structopt( long = "max-grpc-failures-allowed", help = "Maximum allowed times a gRPC call can fail before terminating the program", @@ -169,8 +176,7 @@ async fn main() { trace!("Failure count is {}/{}", grpc_failure_count, conf.max_grpc_failures_allowed); for (node_name, grpc_host) in conf.node_names.iter().zip(conf.grpc_hosts.iter()) { trace!("Processing node {}/{}", node_name, grpc_host); - match collect_data(node_name.clone(), grpc_host.to_owned(), &conf.grpc_auth_token).await - { + match collect_data(node_name.clone(), grpc_host.to_owned(), &conf).await { Ok(node_info) => { trace!("Node data collected successfully from {}/{}", node_name, grpc_host); match rmp_serde::encode::to_vec(&node_info) { @@ -210,14 +216,19 @@ async fn main() { async fn collect_data<'a>( node_name: NodeName, grpc_host: String, - grpc_auth_token: &str, + conf: &ConfigCli, ) -> anyhow::Result { + let grpc_auth_token = &conf.grpc_auth_token; + let grpc_timeout = conf.grpc_timeout; info!( "Collecting node information via gRPC from {}/{}/{}", node_name, grpc_host, grpc_auth_token ); - - let channel = Channel::from_shared(grpc_host).unwrap().connect().await?; + let channel = Channel::from_shared(grpc_host) + .unwrap() + .timeout(Duration::from_secs(grpc_timeout)) + .connect() + .await?; let mut client = grpc_api::p2p_client::P2pClient::new(channel); let empty_req = || req_with_auth!(grpc_api::Empty {}, grpc_auth_token); From 4be87583b1723391dc91527715503ebbd9fbd1be Mon Sep 17 00:00:00 2001 From: Cem 
Eliguzel Date: Tue, 5 Apr 2022 10:14:35 +0200 Subject: [PATCH 03/15] Fixes issue #244: Collector to keep querying Removes the parameter for maximum allowed times a gRPC call can fail and keeps collector querying forever. --- concordium-node/src/bin/collector.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/concordium-node/src/bin/collector.rs b/concordium-node/src/bin/collector.rs index c47be05080..331ac1983f 100644 --- a/concordium-node/src/bin/collector.rs +++ b/concordium-node/src/bin/collector.rs @@ -5,11 +5,9 @@ use concordium_node::{common::grpc_api, req_with_auth, utils::setup_logger}; use serde_json::Value; use std::{ borrow::ToOwned, - default::Default, fmt, process::exit, str::FromStr, - sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}, thread, time::Duration, }; @@ -117,13 +115,6 @@ struct ConfigCli { env = "CONCORDIUM_NODE_COLLECTOR_GRPC_TIMEOUT" )] pub grpc_timeout: u64, - #[structopt( - long = "max-grpc-failures-allowed", - help = "Maximum allowed times a gRPC call can fail before terminating the program", - default_value = "50", - env = "CONCORDIUM_NODE_COLLECTOR_MAX_GRPC_FAILURES_ALLOWED" - )] - pub max_grpc_failures_allowed: u64, #[cfg(target_os = "macos")] #[structopt( long = "use-mac-log", @@ -169,11 +160,8 @@ async fn main() { thread::sleep(Duration::from_millis(conf.artificial_start_delay)); } - let atomic_counter: AtomicUsize = Default::default(); #[allow(unreachable_code)] loop { - let grpc_failure_count = atomic_counter.load(AtomicOrdering::Relaxed); - trace!("Failure count is {}/{}", grpc_failure_count, conf.max_grpc_failures_allowed); for (node_name, grpc_host) in conf.node_names.iter().zip(conf.grpc_hosts.iter()) { trace!("Processing node {}/{}", node_name, grpc_host); match collect_data(node_name.clone(), grpc_host.to_owned(), &conf).await { @@ -194,18 +182,12 @@ async fn main() { } } Err(e) => { - let _ = atomic_counter.fetch_add(1, AtomicOrdering::SeqCst); error!( "gRPC failed with \"{}\" for {}, sleeping for {} ms", e, &grpc_host, conf.collector_interval ); } } - - if grpc_failure_count + 1 >= conf.max_grpc_failures_allowed as usize { - error!("Too many gRPC failures, exiting!"); - exit(1); - } } trace!("Sleeping for {} ms", conf.collector_interval); thread::sleep(Duration::from_millis(conf.collector_interval)); From 8834079cbd024366e9fbb0af36f4621dae1e7f9d Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Tue, 5 Apr 2022 10:32:40 +0200 Subject: [PATCH 04/15] Fixes source formatting. 
--- concordium-node/src/bin/collector.rs | 31 +++++++++++----------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/concordium-node/src/bin/collector.rs b/concordium-node/src/bin/collector.rs index 331ac1983f..084c020158 100644 --- a/concordium-node/src/bin/collector.rs +++ b/concordium-node/src/bin/collector.rs @@ -3,14 +3,7 @@ use collector_backend::{IsInBakingCommittee, NodeInfo}; use concordium_node::utils::setup_macos_logger; use concordium_node::{common::grpc_api, req_with_auth, utils::setup_logger}; use serde_json::Value; -use std::{ - borrow::ToOwned, - fmt, - process::exit, - str::FromStr, - thread, - time::Duration, -}; +use std::{borrow::ToOwned, fmt, process::exit, str::FromStr, thread, time::Duration}; use structopt::StructOpt; use tonic::{metadata::MetadataValue, transport::channel::Channel, Request}; #[macro_use] @@ -48,7 +41,7 @@ struct ConfigCli { env = "CONCORDIUM_NODE_COLLECTOR_GRPC_AUTHENTICATION_TOKEN", hide_env_values = true )] - pub grpc_auth_token: String, + pub grpc_auth_token: String, #[structopt( long = "grpc-host", help = "gRPC host to collect from", @@ -56,51 +49,51 @@ struct ConfigCli { env = "CONCORDIUM_NODE_COLLECTOR_GRPC_HOST", use_delimiter = true // default delimiter is a comma )] - pub grpc_hosts: Vec, + pub grpc_hosts: Vec, #[structopt( long = "node-name", help = "Node name", env = "CONCORDIUM_NODE_COLLECTOR_NODE_NAME", use_delimiter = true // default delimiter is a comma )] - pub node_names: Vec, + pub node_names: Vec, #[structopt( long = "collector-url", help = "Alias submitted of the node collected from", default_value = "http://localhost:3000/post/nodes", env = "CONCORDIUM_NODE_COLLECTOR_URL" )] - pub collector_url: String, + pub collector_url: String, #[structopt( long = "print-config", help = "Print out config struct", env = "CONCORDIUM_NODE_COLLECTOR_PRINT_CONFIG" )] - pub print_config: bool, + pub print_config: bool, #[structopt( long = "debug", short = "d", help = "Debug mode", env = "CONCORDIUM_NODE_COLLECTOR_DEBUG" )] - pub debug: bool, + pub debug: bool, #[structopt(long = "trace", help = "Trace mode", env = "CONCORDIUM_NODE_COLLECTOR_TRACE")] - pub trace: bool, + pub trace: bool, #[structopt(long = "info", help = "Info mode", env = "CONCORDIUM_NODE_COLLECTOR_INFO")] - pub info: bool, + pub info: bool, #[structopt( long = "no-log-timestamp", help = "Do not output timestamp in log output", env = "CONCORDIUM_NODE_COLLECTOR_NO_LOG_TIMESTAMP" )] - pub no_log_timestamp: bool, + pub no_log_timestamp: bool, #[structopt( long = "collect-interval", help = "Interval in miliseconds to sleep between runs of the collector", default_value = "5000", env = "CONCORDIUM_NODE_COLLECTOR_COLLECT_INTERVAL" )] - pub collector_interval: u64, + pub collector_interval: u64, #[structopt( long = "artificial-start-delay", help = "Time (in ms) to delay when the first gRPC request is sent to the node", @@ -125,7 +118,7 @@ struct ConfigCli { env = "CONCORDIUM_NODE_COLLECTOR_USE_MAC_LOG", conflicts_with = "log-config" )] - pub use_mac_log: Option, + pub use_mac_log: Option, } #[tokio::main] From 2a26496c7d869acbd5791deba42257b018d0a627 Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Tue, 5 Apr 2022 12:54:07 +0200 Subject: [PATCH 05/15] Replaces `thread::sleep` with corresponding tokio artifacts. 
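`tokio::time::interval` produces ticks at the configured period (the first tick
resolves immediately), so awaiting the tick at the end of each iteration paces
the loop without blocking an executor thread the way `thread::sleep` did. A
minimal sketch of the pattern, assuming `conf.collector_interval` holds the
period in milliseconds as in the collector configuration below:

    let mut interval = tokio::time::interval(Duration::from_millis(conf.collector_interval));
    loop {
        // collect and publish node data for each configured host here
        interval.tick().await; // first tick resolves immediately; later ticks pace the loop
    }
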
--- concordium-node/src/bin/collector.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/concordium-node/src/bin/collector.rs b/concordium-node/src/bin/collector.rs index 084c020158..a7bb390c49 100644 --- a/concordium-node/src/bin/collector.rs +++ b/concordium-node/src/bin/collector.rs @@ -3,7 +3,7 @@ use collector_backend::{IsInBakingCommittee, NodeInfo}; use concordium_node::utils::setup_macos_logger; use concordium_node::{common::grpc_api, req_with_auth, utils::setup_logger}; use serde_json::Value; -use std::{borrow::ToOwned, fmt, process::exit, str::FromStr, thread, time::Duration}; +use std::{borrow::ToOwned, fmt, process::exit, str::FromStr, time::Duration}; use structopt::StructOpt; use tonic::{metadata::MetadataValue, transport::channel::Channel, Request}; #[macro_use] @@ -150,9 +150,10 @@ async fn main() { if conf.artificial_start_delay > 0 { info!("Delaying first collection from the node for {} ms", conf.artificial_start_delay); - thread::sleep(Duration::from_millis(conf.artificial_start_delay)); + tokio::time::sleep(Duration::from_millis(conf.artificial_start_delay)).await; } + let mut interval = tokio::time::interval(Duration::from_millis(conf.collector_interval)); #[allow(unreachable_code)] loop { for (node_name, grpc_host) in conf.node_names.iter().zip(conf.grpc_hosts.iter()) { @@ -183,7 +184,7 @@ async fn main() { } } trace!("Sleeping for {} ms", conf.collector_interval); - thread::sleep(Duration::from_millis(conf.collector_interval)); + interval.tick().await; } } From b28161f710d85239dd123a534bb269bf8846857b Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Tue, 5 Apr 2022 21:02:50 +0200 Subject: [PATCH 06/15] Updates changelog. --- CHANGELOG.md | 4 +++- concordium-node/src/bin/collector.rs | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 67b394e0ef..dc12297111 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,7 +22,9 @@ This affects all queries whose input was a block or transaction hash. These queries now return `InvalidArgument` error, as opposed to `Unknown` which they returned previously. - +- Fix issue #244: Collector to keep querying. Remove the parameter for maximum allowed + times a gRPC call can fail and keeps `node-collector` querying forever. + ## concordium-node 3.0.1 - Fix a starvation bug in some cases of parallel node queries. diff --git a/concordium-node/src/bin/collector.rs b/concordium-node/src/bin/collector.rs index a7bb390c49..ee93954ca2 100644 --- a/concordium-node/src/bin/collector.rs +++ b/concordium-node/src/bin/collector.rs @@ -107,7 +107,7 @@ struct ConfigCli { default_value = "30", env = "CONCORDIUM_NODE_COLLECTOR_GRPC_TIMEOUT" )] - pub grpc_timeout: u64, + pub grpc_timeout: u64, #[cfg(target_os = "macos")] #[structopt( long = "use-mac-log", From e89aa1a435af71011fc043f291c998d03a55e4ef Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Wed, 6 Apr 2022 17:09:32 +0200 Subject: [PATCH 07/15] Fixes the issue #259: Expose foundation account address Adds the option of querying AccountInfo by using account index value. 
--- concordium-consensus/src/Concordium/External.hs | 11 +++++++---- .../src/Concordium/GlobalState/Basic/BlockState.hs | 4 ++++ .../src/Concordium/GlobalState/BlockState.hs | 5 +++++ .../src/Concordium/GlobalState/Paired.hs | 11 +++++++++++ .../Concordium/GlobalState/Persistent/BlockState.hs | 5 +++++ concordium-consensus/src/Concordium/Queries.hs | 8 +++++--- 6 files changed, 37 insertions(+), 7 deletions(-) diff --git a/concordium-consensus/src/Concordium/External.hs b/concordium-consensus/src/Concordium/External.hs index e8281246c6..cb03a4c425 100644 --- a/concordium-consensus/src/Concordium/External.hs +++ b/concordium-consensus/src/Concordium/External.hs @@ -891,14 +891,17 @@ decodeBlockHash blockcstr = readMaybe <$> peekCString blockcstr decodeAccountAddress :: CString -> IO (Either String AccountAddress) decodeAccountAddress acctstr = addressFromBytes <$> BS.packCString acctstr --- |Decode a null-terminated string as either an account address (base-58) or a +-- |Decode a null-terminated string as either an account address (base-58), account index (Int) or a -- credential registration ID (base-16). -decodeAccountAddressOrCredId :: CString -> IO (Maybe (Either CredentialRegistrationID AccountAddress)) +decodeAccountAddressOrCredId :: CString -> IO (Maybe AccountIdentifier) decodeAccountAddressOrCredId str = do bs <- BS.packCString str return $ case addressFromBytes bs of - Left _ -> Left <$> bsDeserializeBase16 bs - Right acc -> Just $ Right acc + Left _ -> Just $ + case bsDeserializeBase16 bs of + Nothing -> AI $ read (BS.unpack bs) + Just cid -> CID cid + Right acc -> Just $ AA acc -- |Decode an instance address from a null-terminated JSON-encoded string. decodeInstanceAddress :: CString -> IO (Maybe ContractAddress) diff --git a/concordium-consensus/src/Concordium/GlobalState/Basic/BlockState.hs b/concordium-consensus/src/Concordium/GlobalState/Basic/BlockState.hs index fda9973b83..0c2e16376d 100644 --- a/concordium-consensus/src/Concordium/GlobalState/Basic/BlockState.hs +++ b/concordium-consensus/src/Concordium/GlobalState/Basic/BlockState.hs @@ -397,6 +397,10 @@ instance (IsProtocolVersion pv, Monad m) => BS.BlockStateQuery (PureBlockStateMo Nothing -> return Nothing Just ai -> return $ (ai, ) <$> bs ^? blockAccounts . Accounts.indexedAccount ai + {-# INLINE getAccountByIndex #-} + getAccountByIndex bs ai = + return $ (ai, ) <$> bs ^? blockAccounts . Accounts.indexedAccount ai + {-# INLINE getBakerAccount #-} getBakerAccount bs (BakerId ai) = return $ bs ^? blockAccounts . Accounts.indexedAccount ai diff --git a/concordium-consensus/src/Concordium/GlobalState/BlockState.hs b/concordium-consensus/src/Concordium/GlobalState/BlockState.hs index d497e5f852..f2492c2a04 100644 --- a/concordium-consensus/src/Concordium/GlobalState/BlockState.hs +++ b/concordium-consensus/src/Concordium/GlobalState/BlockState.hs @@ -203,6 +203,9 @@ class AccountOperations m => BlockStateQuery m where -- |Query an account by the id of the credential that belonged to it. getAccountByCredId :: BlockState m -> CredentialRegistrationID -> m (Maybe (AccountIndex, Account m)) + -- |Query an account by the account index that belonged to it. + getAccountByIndex :: BlockState m -> AccountIndex -> m (Maybe (AccountIndex, Account m)) + -- |Get the contract state from the contract table of the state instance. getContractInstance :: BlockState m -> ContractAddress -> m (Maybe Instance) @@ -644,6 +647,7 @@ instance (Monad (t m), MonadTrans t, BlockStateQuery m) => BlockStateQuery (MGST getAccount s = lift . 
getAccount s accountExists s = lift . accountExists s getAccountByCredId s = lift . getAccountByCredId s + getAccountByIndex s = lift . getAccountByIndex s getBakerAccount s = lift . getBakerAccount s getContractInstance s = lift . getContractInstance s getModuleList = lift . getModuleList @@ -674,6 +678,7 @@ instance (Monad (t m), MonadTrans t, BlockStateQuery m) => BlockStateQuery (MGST {-# INLINE getAccount #-} {-# INLINE accountExists #-} {-# INLINE getAccountByCredId #-} + {-# INLINE getAccountByIndex #-} {-# INLINE getBakerAccount #-} {-# INLINE getContractInstance #-} {-# INLINE getModuleList #-} diff --git a/concordium-consensus/src/Concordium/GlobalState/Paired.hs b/concordium-consensus/src/Concordium/GlobalState/Paired.hs index f46a583e51..5db3ca89e0 100644 --- a/concordium-consensus/src/Concordium/GlobalState/Paired.hs +++ b/concordium-consensus/src/Concordium/GlobalState/Paired.hs @@ -209,6 +209,17 @@ instance (Monad m, C.HasGlobalStateContext (PairGSContext lc rc) r, BlockStateQu return Nothing (Nothing, _) -> error $ "Cannot get account with credid " ++ show cid ++ " in left implementation" (_, Nothing) -> error $ "Cannot get account with credid " ++ show cid ++ " in right implementation" + getAccountByIndex (ls, rs) idx = do + a1 <- coerceBSML (getAccountByIndex ls idx) + a2 <- coerceBSMR (getAccountByIndex rs idx) + case (a1, a2) of + (Just (ai1, a1'), Just (ai2, a2')) -> + assert ((getHash a1' :: H.Hash) == getHash a2' && ai1 == ai2) $ + return $ Just (ai1, (a1', a2')) + (Nothing, Nothing) -> + return Nothing + (Nothing, _) -> error $ "Cannot get account by index " ++ show idx ++ " in left implementation" + (_, Nothing) -> error $ "Cannot get account by index " ++ show idx ++ " in right implementation" getBakerAccount (ls, rs) bid = do a1 <- coerceBSML (getBakerAccount ls bid) a2 <- coerceBSMR (getBakerAccount rs bid) diff --git a/concordium-consensus/src/Concordium/GlobalState/Persistent/BlockState.hs b/concordium-consensus/src/Concordium/GlobalState/Persistent/BlockState.hs index e864e9b837..f776e7870d 100644 --- a/concordium-consensus/src/Concordium/GlobalState/Persistent/BlockState.hs +++ b/concordium-consensus/src/Concordium/GlobalState/Persistent/BlockState.hs @@ -889,6 +889,10 @@ doGetAccountByCredId pbs cid = do bsp <- loadPBS pbs Accounts.getAccountByCredId cid (bspAccounts bsp) +doGetAccountByIndex :: (IsProtocolVersion pv, MonadBlobStore m) => PersistentBlockState pv -> AccountIndex -> m (Maybe (AccountIndex, PersistentAccount pv)) +doGetAccountByIndex pbs idx = do + bsp <- loadPBS pbs + fmap (idx, ) <$> Accounts.indexedAccount idx (bspAccounts bsp) doGetAccountIndex :: (IsProtocolVersion pv, MonadBlobStore m) => PersistentBlockState pv -> AccountAddress -> m (Maybe AccountIndex) doGetAccountIndex pbs addr = do @@ -1276,6 +1280,7 @@ instance (IsProtocolVersion pv, PersistentState r m) => BlockStateQuery (Persist getAccount = doGetAccount . hpbsPointers accountExists = doGetAccountExists . hpbsPointers getAccountByCredId = doGetAccountByCredId . hpbsPointers + getAccountByIndex = doGetAccountByIndex . hpbsPointers getContractInstance = doGetInstance . hpbsPointers getModuleList = doGetModuleList . hpbsPointers getAccountList = doAccountList . 
hpbsPointers diff --git a/concordium-consensus/src/Concordium/Queries.hs b/concordium-consensus/src/Concordium/Queries.hs index 795f84e67c..9afa885db1 100644 --- a/concordium-consensus/src/Concordium/Queries.hs +++ b/concordium-consensus/src/Concordium/Queries.hs @@ -459,19 +459,21 @@ getModuleList :: BlockHash -> MVR gsconf finconf (Maybe [ModuleRef]) getModuleList = liftSkovQueryBlock $ BS.getModuleList <=< blockState -- |Get the details of an account in the block state. --- The account can be given either via an address, or via a credential registration id. +-- The account can be given via an address, an account index or a credential registration id. -- In the latter case we lookup the account the credential is associated with, even if it was -- removed from the account. getAccountInfo :: BlockHash -> - Either CredentialRegistrationID AccountAddress -> + AccountIdentifier -> MVR gsconf finconf (Maybe AccountInfo) getAccountInfo blockHash acct = join <$> liftSkovQueryBlock ( \bp -> do bs <- blockState bp - macc <- either (BS.getAccountByCredId bs) (BS.getAccount bs) acct + macc <- case acct of AA addr -> BS.getAccount bs addr + AI idx -> BS.getAccountByIndex bs idx + CID crid -> BS.getAccountByCredId bs crid forM macc $ \(aiAccountIndex, acc) -> do aiAccountNonce <- BS.getAccountNonce acc aiAccountAmount <- BS.getAccountAmount acc From 1f54519b7768e35f7dfe79b64be13f3dde954dd7 Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Thu, 7 Apr 2022 08:04:34 +0200 Subject: [PATCH 08/15] Update concordium-consensus/src/Concordium/Queries.hs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Aleš Bizjak --- concordium-consensus/src/Concordium/Queries.hs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/concordium-consensus/src/Concordium/Queries.hs b/concordium-consensus/src/Concordium/Queries.hs index 9afa885db1..d9b80bfcb0 100644 --- a/concordium-consensus/src/Concordium/Queries.hs +++ b/concordium-consensus/src/Concordium/Queries.hs @@ -471,9 +471,10 @@ getAccountInfo blockHash acct = <$> liftSkovQueryBlock ( \bp -> do bs <- blockState bp - macc <- case acct of AA addr -> BS.getAccount bs addr - AI idx -> BS.getAccountByIndex bs idx - CID crid -> BS.getAccountByCredId bs crid + macc <- case acct of + AA addr -> BS.getAccount bs addr + AI idx -> BS.getAccountByIndex bs idx + CID crid -> BS.getAccountByCredId bs crid forM macc $ \(aiAccountIndex, acc) -> do aiAccountNonce <- BS.getAccountNonce acc aiAccountAmount <- BS.getAccountAmount acc From 397dcd31f31ddae342c712e14ce3737966cc7e49 Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Thu, 7 Apr 2022 08:18:01 +0200 Subject: [PATCH 09/15] Fixes after review comments. 
--- concordium-consensus/src/Concordium/External.hs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/concordium-consensus/src/Concordium/External.hs b/concordium-consensus/src/Concordium/External.hs index cb03a4c425..44af30e5eb 100644 --- a/concordium-consensus/src/Concordium/External.hs +++ b/concordium-consensus/src/Concordium/External.hs @@ -891,16 +891,16 @@ decodeBlockHash blockcstr = readMaybe <$> peekCString blockcstr decodeAccountAddress :: CString -> IO (Either String AccountAddress) decodeAccountAddress acctstr = addressFromBytes <$> BS.packCString acctstr --- |Decode a null-terminated string as either an account address (base-58), account index (Int) or a +-- |Decode a null-terminated string as either an account address (base-58), account index (AccountIndex) or a -- credential registration ID (base-16). decodeAccountAddressOrCredId :: CString -> IO (Maybe AccountIdentifier) decodeAccountAddressOrCredId str = do bs <- BS.packCString str return $ case addressFromBytes bs of - Left _ -> Just $ + Left _ -> case bsDeserializeBase16 bs of - Nothing -> AI $ read (BS.unpack bs) - Just cid -> CID cid + Nothing -> AI <$> readMaybe (BS.unpack bs) + Just cid -> Just $ CID cid Right acc -> Just $ AA acc -- |Decode an instance address from a null-terminated JSON-encoded string. From f82d40ba97855518b88211e39bbd02090251f2e1 Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Thu, 7 Apr 2022 09:14:48 +0200 Subject: [PATCH 10/15] Updates the `concordium-base` reference. This is to include `AccountIdentifier` type from `concordium-base`. --- concordium-base | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/concordium-base b/concordium-base index 39b313c79f..62922b9e78 160000 --- a/concordium-base +++ b/concordium-base @@ -1 +1 @@ -Subproject commit 39b313c79fa55da9a9f861735321e55877f757ac +Subproject commit 62922b9e7829a97eb85aa92571e7a1a4dc2aa9f7 From a9cc3869ded9a2308be09714553d77f9bee09058 Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Thu, 7 Apr 2022 09:20:23 +0200 Subject: [PATCH 11/15] Updates changelog. --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dc12297111..f98c837027 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,8 @@ which they returned previously. - Fix issue #244: Collector to keep querying. Remove the parameter for maximum allowed times a gRPC call can fail and keeps `node-collector` querying forever. +- Fix the issue #259: Expose foundation account address. The account information now can + be queried by using an account index as well. ## concordium-node 3.0.1 From df847a77868741735bc51fa1bbc651473ae55a9c Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Fri, 8 Apr 2022 07:42:49 +0200 Subject: [PATCH 12/15] Update CHANGELOG.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Aleš Bizjak --- CHANGELOG.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f98c837027..dad4cc99f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,8 +24,7 @@ which they returned previously. - Fix issue #244: Collector to keep querying. Remove the parameter for maximum allowed times a gRPC call can fail and keeps `node-collector` querying forever. -- Fix the issue #259: Expose foundation account address. The account information now can - be queried by using an account index as well. +- `GetAccountInfo` endpoint supports querying the account via the account index. 
## concordium-node 3.0.1 From 4d6ab9eec83e0576ac037ebc74ba83edb238e004 Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Fri, 8 Apr 2022 07:47:30 +0200 Subject: [PATCH 13/15] Moves decodeAccountAddressOrCredId to concordium-base and renames to decodeAccountIdentifier --- concordium-base | 2 +- .../src/Concordium/External.hs | 18 +++--------------- concordium-consensus/src/Concordium/Queries.hs | 6 +++--- 3 files changed, 7 insertions(+), 19 deletions(-) diff --git a/concordium-base b/concordium-base index 62922b9e78..764e519b66 160000 --- a/concordium-base +++ b/concordium-base @@ -1 +1 @@ -Subproject commit 62922b9e7829a97eb85aa92571e7a1a4dc2aa9f7 +Subproject commit 764e519b669236e8aa0e8967c8aaec0913cc89dd diff --git a/concordium-consensus/src/Concordium/External.hs b/concordium-consensus/src/Concordium/External.hs index 44af30e5eb..6b50505a3e 100644 --- a/concordium-consensus/src/Concordium/External.hs +++ b/concordium-consensus/src/Concordium/External.hs @@ -29,7 +29,6 @@ import qualified Data.FixedByteString as FBS import Concordium.Afgjort.Finalize.Types (FinalizationInstance (FinalizationInstance)) import Concordium.Birk.Bake import Concordium.Constants.Time (defaultEarlyBlockThreshold, defaultMaxBakingDelay) -import Concordium.Crypto.ByteStringHelpers import Concordium.GlobalState import Concordium.GlobalState.Persistent.LMDB (addDatabaseVersion) import Concordium.GlobalState.Persistent.TreeState (InitException (..)) @@ -891,18 +890,6 @@ decodeBlockHash blockcstr = readMaybe <$> peekCString blockcstr decodeAccountAddress :: CString -> IO (Either String AccountAddress) decodeAccountAddress acctstr = addressFromBytes <$> BS.packCString acctstr --- |Decode a null-terminated string as either an account address (base-58), account index (AccountIndex) or a --- credential registration ID (base-16). -decodeAccountAddressOrCredId :: CString -> IO (Maybe AccountIdentifier) -decodeAccountAddressOrCredId str = do - bs <- BS.packCString str - return $ case addressFromBytes bs of - Left _ -> - case bsDeserializeBase16 bs of - Nothing -> AI <$> readMaybe (BS.unpack bs) - Just cid -> Just $ CID cid - Right acc -> Just $ AA acc - -- |Decode an instance address from a null-terminated JSON-encoded string. 
decodeInstanceAddress :: CString -> IO (Maybe ContractAddress) decodeInstanceAddress inststr = AE.decodeStrict <$> BS.packCString inststr @@ -1074,8 +1061,9 @@ getModuleList cptr blockcstr = do getAccountInfo :: StablePtr ConsensusRunner -> CString -> CString -> IO CString getAccountInfo cptr blockcstr acctcstr = do mblock <- decodeBlockHash blockcstr - maccount <- decodeAccountAddressOrCredId acctcstr - case (mblock, maccount) of + acctbs <- BS.packCString acctcstr + let account = decodeAccountIdentifier acctbs + case (mblock, account) of (Just bh, Just acct) -> jsonQuery cptr (Q.getAccountInfo bh acct) _ -> jsonCString AE.Null diff --git a/concordium-consensus/src/Concordium/Queries.hs b/concordium-consensus/src/Concordium/Queries.hs index d9b80bfcb0..7fd719cdea 100644 --- a/concordium-consensus/src/Concordium/Queries.hs +++ b/concordium-consensus/src/Concordium/Queries.hs @@ -472,9 +472,9 @@ getAccountInfo blockHash acct = ( \bp -> do bs <- blockState bp macc <- case acct of - AA addr -> BS.getAccount bs addr - AI idx -> BS.getAccountByIndex bs idx - CID crid -> BS.getAccountByCredId bs crid + AccAddress addr -> BS.getAccount bs addr + AccIndex idx -> BS.getAccountByIndex bs idx + CredRegID crid -> BS.getAccountByCredId bs crid forM macc $ \(aiAccountIndex, acc) -> do aiAccountNonce <- BS.getAccountNonce acc aiAccountAmount <- BS.getAccountAmount acc From 11097b6d1179251e2eda64b14e28d55a97af0bc3 Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Wed, 20 Apr 2022 09:28:35 +0300 Subject: [PATCH 14/15] Fixes #138:Removes `genesis_tester` feature. --- concordium-node/Cargo.toml | 6 ------ concordium-node/README.md | 1 - scripts/test-genesis.sh | 15 --------------- 3 files changed, 22 deletions(-) delete mode 100755 scripts/test-genesis.sh diff --git a/concordium-node/Cargo.toml b/concordium-node/Cargo.toml index f6ee53fc53..be2ef75b62 100644 --- a/concordium-node/Cargo.toml +++ b/concordium-node/Cargo.toml @@ -20,7 +20,6 @@ static = [ ] profiling = [ "static" ] collector = [ "reqwest/default-tls", "serde/derive", "rmp-serde", "collector-backend" ] database_emitter = [] -genesis_tester = [ "tempfile" ] [profile.release] codegen-units = 1 @@ -143,11 +142,6 @@ name = "database_emitter" path = "src/bin/database_emitter.rs" required-features = [ "database_emitter" ] -[[bin]] -name = "genesis_tester" -path = "src/bin/genesis_tester.rs" -required-features = [ "genesis_tester" ] - [[bench]] name = "p2p_lib_benchmark" required-features = [ "test_utils" ] diff --git a/concordium-node/README.md b/concordium-node/README.md index 89fe358523..d5f2951db1 100644 --- a/concordium-node/README.md +++ b/concordium-node/README.md @@ -21,7 +21,6 @@ * profiling - build against haskell libraries with profiling support enabled (Linux only) * collector - enables the build of the node-collector and backend * database_emitter - enables building the database emitter binary to inject a database exported to a set of nodes -* genesis_tester - a tool used by a CI to validate the genesis data * dedup_benchmarks - enable support in the benchmarks for deduplication queues ## Building the node diff --git a/scripts/test-genesis.sh b/scripts/test-genesis.sh deleted file mode 100755 index f0446d144f..0000000000 --- a/scripts/test-genesis.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash -set -e -GENESIS_TESTER_BIN="concordium-node/target/debug/genesis_tester" - -rm -rf genesis-data-test -mkdir genesis-data-test -tar xzf genesis-data/$1 -C genesis-data-test - -if [ -n "$2" ]; then - echo "Testing genesis $1 and private key $2" 
- $GENESIS_TESTER_BIN --genesis-file genesis-data-test/genesis_data/genesis.dat --private-key-file genesis-data-test/genesis_data/baker-$2-credentials.json -else - echo "Testing genesis $1" - $GENESIS_TESTER_BIN --genesis-file genesis-data-test/genesis_data/genesis.dat -fi From c0eac467d87cbdc61caa4fd9d5e4d81de4a0f0d7 Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Wed, 20 Apr 2022 10:58:15 +0300 Subject: [PATCH 15/15] Removes `network_stress_test` and `database_emitter` binaries. --- .github/workflows/build-test.yaml | 4 +- concordium-node/Cargo.toml | 11 -- concordium-node/README.md | 1 - concordium-node/src/bin/database_emitter.rs | 124 -------------------- concordium-node/src/configuration.rs | 51 +------- 5 files changed, 8 insertions(+), 183 deletions(-) delete mode 100644 concordium-node/src/bin/database_emitter.rs diff --git a/.github/workflows/build-test.yaml b/.github/workflows/build-test.yaml index 455474312d..31ef437375 100644 --- a/.github/workflows/build-test.yaml +++ b/.github/workflows/build-test.yaml @@ -174,9 +174,9 @@ jobs: - name: Run clippy (without extra features) run: | cargo clippy --manifest-path concordium-node/Cargo.toml --all -- -Dclippy::all - - name: Run clippy (with features 'instrumentation', 'collector', 'network_dump', 'database_emitter') + - name: Run clippy (with features 'instrumentation', 'collector', 'network_dump') run: | - cargo clippy --manifest-path concordium-node/Cargo.toml --features=instrumentation,collector,network_dump,database_emitter --all -- -Dclippy::all + cargo clippy --manifest-path concordium-node/Cargo.toml --features=instrumentation,collector,network_dump --all -- -Dclippy::all - name: Run clippy on collector backend run: | cargo clippy --manifest-path collector-backend/Cargo.toml -- -Dclippy::all diff --git a/concordium-node/Cargo.toml b/concordium-node/Cargo.toml index be2ef75b62..eec440aeeb 100644 --- a/concordium-node/Cargo.toml +++ b/concordium-node/Cargo.toml @@ -19,7 +19,6 @@ network_dump = [] static = [ ] profiling = [ "static" ] collector = [ "reqwest/default-tls", "serde/derive", "rmp-serde", "collector-backend" ] -database_emitter = [] [profile.release] codegen-units = 1 @@ -128,20 +127,10 @@ name = "node-collector" path = "src/bin/collector.rs" required-features = [ "collector" ] -[[bin]] -name = "network_stress_test" -path = "src/bin/network_stress_test.rs" -required-features = [ "test_utils" ] - [[bin]] name = "bootstrap_checker" path = "src/bin/bootstrap_checker.rs" -[[bin]] -name = "database_emitter" -path = "src/bin/database_emitter.rs" -required-features = [ "database_emitter" ] - [[bench]] name = "p2p_lib_benchmark" required-features = [ "test_utils" ] diff --git a/concordium-node/README.md b/concordium-node/README.md index d5f2951db1..f702de4455 100644 --- a/concordium-node/README.md +++ b/concordium-node/README.md @@ -20,7 +20,6 @@ * static - build against static haskell libraries (Linux only) * profiling - build against haskell libraries with profiling support enabled (Linux only) * collector - enables the build of the node-collector and backend -* database_emitter - enables building the database emitter binary to inject a database exported to a set of nodes * dedup_benchmarks - enable support in the benchmarks for deduplication queues ## Building the node diff --git a/concordium-node/src/bin/database_emitter.rs b/concordium-node/src/bin/database_emitter.rs deleted file mode 100644 index cb7ea1e304..0000000000 --- a/concordium-node/src/bin/database_emitter.rs +++ /dev/null @@ -1,124 +0,0 @@ 
-#![recursion_limit = "1024"] -#[macro_use] -extern crate log; - -// Force the system allocator on every platform -use std::alloc::System; -#[global_allocator] -static A: System = System; - -use anyhow::{bail, Context}; -use concordium_node::{ - common::PeerType, - consensus_ffi::helpers::PacketType, - network::NetworkId, - p2p::{ - connectivity::{connect, send_broadcast_message}, - maintenance::{spawn, P2PNode}, - }, - stats_export_service::instantiate_stats_export_engine, - utils, -}; -use crypto_common::Serial; -use std::{fs::File, io::prelude::*, net::ToSocketAddrs, sync::Arc, thread, time::Duration}; - -fn main() -> anyhow::Result<()> { - let (mut conf, _app_prefs) = utils::get_config_and_logging_setup()?; - - conf.connection.no_bootstrap_dns = true; - conf.connection.desired_nodes = conf.connection.connect_to.len() as u16; - - let stats_export_service = instantiate_stats_export_engine(&conf)?; - - let (node, server, poll) = P2PNode::new( - conf.common.id, - &conf, - PeerType::Node, - stats_export_service, - Arc::new(Default::default()), - ) - .context("Failed to create the node")?; - - spawn(&node, server, poll, None); - - conf.connection.connect_to.iter().for_each( - |host: &String| match ToSocketAddrs::to_socket_addrs(&host) { - Ok(addrs) => { - for addr in addrs { - let _ = connect(&node, PeerType::Node, addr, None, false) - .map_err(|e| error!("{}", e)); - } - } - Err(err) => error!("Can't parse configured addresses to connect to: {}", err), - }, - ); - - info!("Sleeping to let network connections settle"); - thread::sleep(Duration::from_millis(10000)); - if !(node.connections().read().unwrap()).is_empty() { - info!("Connected to network"); - - if let Ok(mut file) = File::open(&conf.database_emitter.import_file) { - let mut counter = 0; - loop { - let mut block_len_buffer = [0; 8]; - if let Ok(read_bytes) = file.read(&mut block_len_buffer) { - if read_bytes != block_len_buffer.len() { - if read_bytes == 0 { - info!("No more blocks to be read from file"); - } else { - error!("No enough bytes to read"); - } - break; - } - let block_size = u64::from_be_bytes(block_len_buffer); - info!( - "Block#{} - will read {} bytes for block from file {}", - counter, block_size, &conf.database_emitter.import_file - ); - let mut blocks_data_buffer = vec![0; block_size as usize]; - if let Ok(blocks_bytes_read) = file.read(&mut blocks_data_buffer[..]) { - if blocks_bytes_read != block_size as usize { - error!( - "The file didn't contain all the {} byte(s) needed to properly \ - read the block!", - block_size - ); - break; - } - if counter < conf.database_emitter.skip_first { - info!("- skipping as per request"); - counter += 1; - continue; - } - let mut data_out = vec![0; 0]; - (PacketType::Block as u8).serial(&mut data_out); - data_out.extend(blocks_data_buffer); - info!( - "- Sent {} byte(s)", - send_broadcast_message( - &node, - vec![], - NetworkId::from(conf.common.network_ids.clone()[0]), - Arc::from(data_out), - ) - ); - } else { - bail!("Error reading block!"); - } - } else { - bail!("Can't read size of next block from file!"); - } - if counter != 0 && counter % conf.database_emitter.batch_sizes == 0 { - info!("Will stall for {} ms", &conf.database_emitter.delay_between_batches); - thread::sleep(Duration::from_millis( - conf.database_emitter.delay_between_batches, - )); - } - counter += 1; - } - } - } - - node.close_and_join() -} diff --git a/concordium-node/src/configuration.rs b/concordium-node/src/configuration.rs index 9fd94cbff7..25dcf9d162 100644 --- 
a/concordium-node/src/configuration.rs +++ b/concordium-node/src/configuration.rs @@ -80,42 +80,6 @@ pub const DATABASE_SUB_DIRECTORY_NAME: &str = "database-v4"; /// being dropped prematurely. const KEEP_ALIVE_FACTOR: u8 = 3; -#[cfg(feature = "database_emitter")] -#[derive(StructOpt, Debug)] -// Parameters related to the database emitter. -pub struct DatabaseEmitterConfig { - #[structopt( - long = "import-file", - help = "File to import from", - env = "CONCORDIUM_NODE_DB_EMITTER_IMPORT_FILE" - )] - pub import_file: String, - - #[structopt( - long = "batches-delay", - help = "Delay between batches in miliseconds", - default_value = "2000", - env = "CONCORDIUM_NODE_DB_EMITTER_BATCHES_DELAY" - )] - pub delay_between_batches: u64, - - #[structopt( - long = "batch-size", - help = "Size of each batch to emit", - default_value = "40", - env = "CONCORDIUM_NODE_DB_EMITTER_BATCH_SIZES" - )] - pub batch_sizes: u64, - - #[structopt( - long = "skip-first", - help = "Amount of the initial blocks to skip", - default_value = "0", - env = "CONCORDIUM_NODE_DB_EMITTER_SKIP_FIRST" - )] - pub skip_first: u64, -} - #[cfg(feature = "instrumentation")] #[derive(StructOpt, Debug)] // Parameters related to Prometheus. @@ -778,22 +742,19 @@ pub struct MacOsConfig { #[structopt(about = "Concordium P2P node.")] pub struct Config { #[structopt(flatten)] - pub common: CommonConfig, + pub common: CommonConfig, #[cfg(feature = "instrumentation")] #[structopt(flatten)] - pub prometheus: PrometheusConfig, - #[structopt(flatten)] - pub connection: ConnectionConfig, + pub prometheus: PrometheusConfig, #[structopt(flatten)] - pub cli: CliConfig, + pub connection: ConnectionConfig, #[structopt(flatten)] - pub bootstrapper: BootstrapperConfig, - #[cfg(feature = "database_emitter")] + pub cli: CliConfig, #[structopt(flatten)] - pub database_emitter: DatabaseEmitterConfig, + pub bootstrapper: BootstrapperConfig, #[cfg(target_os = "macos")] #[structopt(flatten)] - pub macos: MacOsConfig, + pub macos: MacOsConfig, } impl Config {