From 11097b6d1179251e2eda64b14e28d55a97af0bc3 Mon Sep 17 00:00:00 2001
From: Cem Eliguzel
Date: Wed, 20 Apr 2022 09:28:35 +0300
Subject: [PATCH 1/2] Fixes #138: Removes `genesis_tester` feature.

---
 concordium-node/Cargo.toml |  6 ------
 concordium-node/README.md  |  1 -
 scripts/test-genesis.sh    | 15 ---------------
 3 files changed, 22 deletions(-)
 delete mode 100755 scripts/test-genesis.sh

diff --git a/concordium-node/Cargo.toml b/concordium-node/Cargo.toml
index f6ee53fc53..be2ef75b62 100644
--- a/concordium-node/Cargo.toml
+++ b/concordium-node/Cargo.toml
@@ -20,7 +20,6 @@ static = [ ]
 profiling = [ "static" ]
 collector = [ "reqwest/default-tls", "serde/derive", "rmp-serde", "collector-backend" ]
 database_emitter = []
-genesis_tester = [ "tempfile" ]
 
 [profile.release]
 codegen-units = 1
@@ -143,11 +142,6 @@ name = "database_emitter"
 path = "src/bin/database_emitter.rs"
 required-features = [ "database_emitter" ]
 
-[[bin]]
-name = "genesis_tester"
-path = "src/bin/genesis_tester.rs"
-required-features = [ "genesis_tester" ]
-
 [[bench]]
 name = "p2p_lib_benchmark"
 required-features = [ "test_utils" ]
diff --git a/concordium-node/README.md b/concordium-node/README.md
index 89fe358523..d5f2951db1 100644
--- a/concordium-node/README.md
+++ b/concordium-node/README.md
@@ -21,7 +21,6 @@
 * profiling - build against haskell libraries with profiling support enabled (Linux only)
 * collector - enables the build of the node-collector and backend
 * database_emitter - enables building the database emitter binary to inject a database exported to a set of nodes
-* genesis_tester - a tool used by a CI to validate the genesis data
 * dedup_benchmarks - enable support in the benchmarks for deduplication queues
 
 ## Building the node
diff --git a/scripts/test-genesis.sh b/scripts/test-genesis.sh
deleted file mode 100755
index f0446d144f..0000000000
--- a/scripts/test-genesis.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-set -e
-GENESIS_TESTER_BIN="concordium-node/target/debug/genesis_tester"
-
-rm -rf genesis-data-test
-mkdir genesis-data-test
-tar xzf genesis-data/$1 -C genesis-data-test
-
-if [ -n "$2" ]; then
-    echo "Testing genesis $1 and private key $2"
-    $GENESIS_TESTER_BIN --genesis-file genesis-data-test/genesis_data/genesis.dat --private-key-file genesis-data-test/genesis_data/baker-$2-credentials.json
-else
-    echo "Testing genesis $1"
-    $GENESIS_TESTER_BIN --genesis-file genesis-data-test/genesis_data/genesis.dat
-fi

From c0eac467d87cbdc61caa4fd9d5e4d81de4a0f0d7 Mon Sep 17 00:00:00 2001
From: Cem Eliguzel
Date: Wed, 20 Apr 2022 10:58:15 +0300
Subject: [PATCH 2/2] Removes `network_stress_test` and `database_emitter` binaries.
--- .github/workflows/build-test.yaml | 4 +- concordium-node/Cargo.toml | 11 -- concordium-node/README.md | 1 - concordium-node/src/bin/database_emitter.rs | 124 -------------------- concordium-node/src/configuration.rs | 51 +------- 5 files changed, 8 insertions(+), 183 deletions(-) delete mode 100644 concordium-node/src/bin/database_emitter.rs diff --git a/.github/workflows/build-test.yaml b/.github/workflows/build-test.yaml index 455474312d..31ef437375 100644 --- a/.github/workflows/build-test.yaml +++ b/.github/workflows/build-test.yaml @@ -174,9 +174,9 @@ jobs: - name: Run clippy (without extra features) run: | cargo clippy --manifest-path concordium-node/Cargo.toml --all -- -Dclippy::all - - name: Run clippy (with features 'instrumentation', 'collector', 'network_dump', 'database_emitter') + - name: Run clippy (with features 'instrumentation', 'collector', 'network_dump') run: | - cargo clippy --manifest-path concordium-node/Cargo.toml --features=instrumentation,collector,network_dump,database_emitter --all -- -Dclippy::all + cargo clippy --manifest-path concordium-node/Cargo.toml --features=instrumentation,collector,network_dump --all -- -Dclippy::all - name: Run clippy on collector backend run: | cargo clippy --manifest-path collector-backend/Cargo.toml -- -Dclippy::all diff --git a/concordium-node/Cargo.toml b/concordium-node/Cargo.toml index be2ef75b62..eec440aeeb 100644 --- a/concordium-node/Cargo.toml +++ b/concordium-node/Cargo.toml @@ -19,7 +19,6 @@ network_dump = [] static = [ ] profiling = [ "static" ] collector = [ "reqwest/default-tls", "serde/derive", "rmp-serde", "collector-backend" ] -database_emitter = [] [profile.release] codegen-units = 1 @@ -128,20 +127,10 @@ name = "node-collector" path = "src/bin/collector.rs" required-features = [ "collector" ] -[[bin]] -name = "network_stress_test" -path = "src/bin/network_stress_test.rs" -required-features = [ "test_utils" ] - [[bin]] name = "bootstrap_checker" path = "src/bin/bootstrap_checker.rs" -[[bin]] -name = "database_emitter" -path = "src/bin/database_emitter.rs" -required-features = [ "database_emitter" ] - [[bench]] name = "p2p_lib_benchmark" required-features = [ "test_utils" ] diff --git a/concordium-node/README.md b/concordium-node/README.md index d5f2951db1..f702de4455 100644 --- a/concordium-node/README.md +++ b/concordium-node/README.md @@ -20,7 +20,6 @@ * static - build against static haskell libraries (Linux only) * profiling - build against haskell libraries with profiling support enabled (Linux only) * collector - enables the build of the node-collector and backend -* database_emitter - enables building the database emitter binary to inject a database exported to a set of nodes * dedup_benchmarks - enable support in the benchmarks for deduplication queues ## Building the node diff --git a/concordium-node/src/bin/database_emitter.rs b/concordium-node/src/bin/database_emitter.rs deleted file mode 100644 index cb7ea1e304..0000000000 --- a/concordium-node/src/bin/database_emitter.rs +++ /dev/null @@ -1,124 +0,0 @@ -#![recursion_limit = "1024"] -#[macro_use] -extern crate log; - -// Force the system allocator on every platform -use std::alloc::System; -#[global_allocator] -static A: System = System; - -use anyhow::{bail, Context}; -use concordium_node::{ - common::PeerType, - consensus_ffi::helpers::PacketType, - network::NetworkId, - p2p::{ - connectivity::{connect, send_broadcast_message}, - maintenance::{spawn, P2PNode}, - }, - stats_export_service::instantiate_stats_export_engine, - utils, -}; -use 
crypto_common::Serial; -use std::{fs::File, io::prelude::*, net::ToSocketAddrs, sync::Arc, thread, time::Duration}; - -fn main() -> anyhow::Result<()> { - let (mut conf, _app_prefs) = utils::get_config_and_logging_setup()?; - - conf.connection.no_bootstrap_dns = true; - conf.connection.desired_nodes = conf.connection.connect_to.len() as u16; - - let stats_export_service = instantiate_stats_export_engine(&conf)?; - - let (node, server, poll) = P2PNode::new( - conf.common.id, - &conf, - PeerType::Node, - stats_export_service, - Arc::new(Default::default()), - ) - .context("Failed to create the node")?; - - spawn(&node, server, poll, None); - - conf.connection.connect_to.iter().for_each( - |host: &String| match ToSocketAddrs::to_socket_addrs(&host) { - Ok(addrs) => { - for addr in addrs { - let _ = connect(&node, PeerType::Node, addr, None, false) - .map_err(|e| error!("{}", e)); - } - } - Err(err) => error!("Can't parse configured addresses to connect to: {}", err), - }, - ); - - info!("Sleeping to let network connections settle"); - thread::sleep(Duration::from_millis(10000)); - if !(node.connections().read().unwrap()).is_empty() { - info!("Connected to network"); - - if let Ok(mut file) = File::open(&conf.database_emitter.import_file) { - let mut counter = 0; - loop { - let mut block_len_buffer = [0; 8]; - if let Ok(read_bytes) = file.read(&mut block_len_buffer) { - if read_bytes != block_len_buffer.len() { - if read_bytes == 0 { - info!("No more blocks to be read from file"); - } else { - error!("No enough bytes to read"); - } - break; - } - let block_size = u64::from_be_bytes(block_len_buffer); - info!( - "Block#{} - will read {} bytes for block from file {}", - counter, block_size, &conf.database_emitter.import_file - ); - let mut blocks_data_buffer = vec![0; block_size as usize]; - if let Ok(blocks_bytes_read) = file.read(&mut blocks_data_buffer[..]) { - if blocks_bytes_read != block_size as usize { - error!( - "The file didn't contain all the {} byte(s) needed to properly \ - read the block!", - block_size - ); - break; - } - if counter < conf.database_emitter.skip_first { - info!("- skipping as per request"); - counter += 1; - continue; - } - let mut data_out = vec![0; 0]; - (PacketType::Block as u8).serial(&mut data_out); - data_out.extend(blocks_data_buffer); - info!( - "- Sent {} byte(s)", - send_broadcast_message( - &node, - vec![], - NetworkId::from(conf.common.network_ids.clone()[0]), - Arc::from(data_out), - ) - ); - } else { - bail!("Error reading block!"); - } - } else { - bail!("Can't read size of next block from file!"); - } - if counter != 0 && counter % conf.database_emitter.batch_sizes == 0 { - info!("Will stall for {} ms", &conf.database_emitter.delay_between_batches); - thread::sleep(Duration::from_millis( - conf.database_emitter.delay_between_batches, - )); - } - counter += 1; - } - } - } - - node.close_and_join() -} diff --git a/concordium-node/src/configuration.rs b/concordium-node/src/configuration.rs index 9fd94cbff7..25dcf9d162 100644 --- a/concordium-node/src/configuration.rs +++ b/concordium-node/src/configuration.rs @@ -80,42 +80,6 @@ pub const DATABASE_SUB_DIRECTORY_NAME: &str = "database-v4"; /// being dropped prematurely. const KEEP_ALIVE_FACTOR: u8 = 3; -#[cfg(feature = "database_emitter")] -#[derive(StructOpt, Debug)] -// Parameters related to the database emitter. 
-pub struct DatabaseEmitterConfig { - #[structopt( - long = "import-file", - help = "File to import from", - env = "CONCORDIUM_NODE_DB_EMITTER_IMPORT_FILE" - )] - pub import_file: String, - - #[structopt( - long = "batches-delay", - help = "Delay between batches in miliseconds", - default_value = "2000", - env = "CONCORDIUM_NODE_DB_EMITTER_BATCHES_DELAY" - )] - pub delay_between_batches: u64, - - #[structopt( - long = "batch-size", - help = "Size of each batch to emit", - default_value = "40", - env = "CONCORDIUM_NODE_DB_EMITTER_BATCH_SIZES" - )] - pub batch_sizes: u64, - - #[structopt( - long = "skip-first", - help = "Amount of the initial blocks to skip", - default_value = "0", - env = "CONCORDIUM_NODE_DB_EMITTER_SKIP_FIRST" - )] - pub skip_first: u64, -} - #[cfg(feature = "instrumentation")] #[derive(StructOpt, Debug)] // Parameters related to Prometheus. @@ -778,22 +742,19 @@ pub struct MacOsConfig { #[structopt(about = "Concordium P2P node.")] pub struct Config { #[structopt(flatten)] - pub common: CommonConfig, + pub common: CommonConfig, #[cfg(feature = "instrumentation")] #[structopt(flatten)] - pub prometheus: PrometheusConfig, - #[structopt(flatten)] - pub connection: ConnectionConfig, + pub prometheus: PrometheusConfig, #[structopt(flatten)] - pub cli: CliConfig, + pub connection: ConnectionConfig, #[structopt(flatten)] - pub bootstrapper: BootstrapperConfig, - #[cfg(feature = "database_emitter")] + pub cli: CliConfig, #[structopt(flatten)] - pub database_emitter: DatabaseEmitterConfig, + pub bootstrapper: BootstrapperConfig, #[cfg(target_os = "macos")] #[structopt(flatten)] - pub macos: MacOsConfig, + pub macos: MacOsConfig, } impl Config {
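
A quick way to exercise the result of both patches locally is to mirror the updated CI step shown in the build-test.yaml hunk above. This is only a sketch: the feature list is copied verbatim from the modified workflow (note that `genesis_tester` and `database_emitter` no longer exist as features or binaries), and platform-specific features such as `static` or `profiling` are not covered here.

    # Lint without optional features (unchanged CI step)
    cargo clippy --manifest-path concordium-node/Cargo.toml --all -- -Dclippy::all
    # Lint with the remaining optional features exercised by CI after this patch set
    cargo clippy --manifest-path concordium-node/Cargo.toml --features=instrumentation,collector,network_dump --all -- -Dclippy::all
    # Lint the collector backend (unchanged CI step)
    cargo clippy --manifest-path collector-backend/Cargo.toml -- -Dclippy::all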