Diffstat (limited to 'src/garage/cli')
-rw-r--r--  src/garage/cli/layout.rs   | 129
-rw-r--r--  src/garage/cli/structs.rs  |  13
2 files changed, 113 insertions(+), 29 deletions(-)
diff --git a/src/garage/cli/layout.rs b/src/garage/cli/layout.rs
index 3884bb92..5056e57d 100644
--- a/src/garage/cli/layout.rs
+++ b/src/garage/cli/layout.rs
@@ -14,8 +14,8 @@ pub async fn cli_layout_command_dispatch(
rpc_host: NodeID,
) -> Result<(), Error> {
match cmd {
- LayoutOperation::Assign(configure_opt) => {
- cmd_assign_role(system_rpc_endpoint, rpc_host, configure_opt).await
+ LayoutOperation::Assign(assign_opt) => {
+ cmd_assign_role(system_rpc_endpoint, rpc_host, assign_opt).await
}
LayoutOperation::Remove(remove_opt) => {
cmd_remove_role(system_rpc_endpoint, rpc_host, remove_opt).await
@@ -27,6 +27,9 @@ pub async fn cli_layout_command_dispatch(
LayoutOperation::Revert(revert_opt) => {
cmd_revert_layout(system_rpc_endpoint, rpc_host, revert_opt).await
}
+ LayoutOperation::Config(config_opt) => {
+ cmd_config_layout(system_rpc_endpoint, rpc_host, config_opt).await
+ }
}
}
@@ -166,7 +169,7 @@ pub async fn cmd_show_layout(
rpc_cli: &Endpoint<SystemRpc, ()>,
rpc_host: NodeID,
) -> Result<(), Error> {
- let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
+ let layout = fetch_layout(rpc_cli, rpc_host).await?;
println!("==== CURRENT CLUSTER LAYOUT ====");
if !print_cluster_layout(&layout) {
@@ -176,30 +179,44 @@ pub async fn cmd_show_layout(
println!();
println!("Current cluster layout version: {}", layout.version);
- if print_staging_role_changes(&layout) {
- layout.roles.merge(&layout.staging);
-
- println!();
- println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====");
- if !print_cluster_layout(&layout) {
- println!("No nodes have a role in the new layout.");
- }
- println!();
+ let has_role_changes = print_staging_role_changes(&layout);
+ let has_param_changes = print_staging_parameters_changes(&layout);
+ if has_role_changes || has_param_changes {
+ let v = layout.version;
+ let res_apply = layout.apply_staged_changes(Some(v + 1));
// this will print the stats of what partitions
// will move around when we apply
- if layout.calculate_partition_assignation() {
- println!("To enact the staged role changes, type:");
- println!();
- println!(" garage layout apply --version {}", layout.version + 1);
- println!();
- println!(
- "You can also revert all proposed changes with: garage layout revert --version {}",
- layout.version + 1
- );
- } else {
- println!("Not enough nodes have an assigned role to maintain enough copies of data.");
- println!("This new layout cannot yet be applied.");
+ match res_apply {
+ Ok((layout, msg)) => {
+ println!();
+ println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====");
+ if !print_cluster_layout(&layout) {
+ println!("No nodes have a role in the new layout.");
+ }
+ println!();
+
+ for line in msg.iter() {
+ println!("{}", line);
+ }
+ println!("To enact the staged role changes, type:");
+ println!();
+ println!(" garage layout apply --version {}", v + 1);
+ println!();
+ println!(
+ "You can also revert all proposed changes with: garage layout revert --version {}",
+ v + 1)
+ }
+ Err(Error::Message(s)) => {
+ println!("Error while trying to compute the assignation: {}", s);
+ println!("This new layout cannot yet be applied.");
+ println!(
+ "You can also revert all proposed changes with: garage layout revert --version {}",
+ v + 1)
+ }
+ _ => {
+ println!("Unknown Error");
+ }
}
}
@@ -213,7 +230,10 @@ pub async fn cmd_apply_layout(
) -> Result<(), Error> {
let layout = fetch_layout(rpc_cli, rpc_host).await?;
- let layout = layout.apply_staged_changes(apply_opt.version)?;
+ let (layout, msg) = layout.apply_staged_changes(apply_opt.version)?;
+ for line in msg.iter() {
+ println!("{}", line);
+ }
send_layout(rpc_cli, rpc_host, layout).await?;
@@ -238,6 +258,37 @@ pub async fn cmd_revert_layout(
Ok(())
}
+pub async fn cmd_config_layout(
+ rpc_cli: &Endpoint<SystemRpc, ()>,
+ rpc_host: NodeID,
+ config_opt: ConfigLayoutOpt,
+) -> Result<(), Error> {
+ let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
+
+ match config_opt.redundancy {
+ None => (),
+ Some(r) => {
+ if r > layout.replication_factor {
+ println!(
+ "The zone redundancy must be smaller or equal to the \
+ replication factor ({}).",
+ layout.replication_factor
+ );
+ } else if r < 1 {
+ println!("The zone redundancy must be at least 1.");
+ } else {
+ layout
+ .staged_parameters
+ .update(LayoutParameters { zone_redundancy: r });
+ println!("The new zone redundancy has been saved ({}).", r);
+ }
+ }
+ }
+
+ send_layout(rpc_cli, rpc_host, layout).await?;
+ Ok(())
+}
+
// --- utility ---
pub async fn fetch_layout(
@@ -269,21 +320,29 @@ pub async fn send_layout(
}
pub fn print_cluster_layout(layout: &ClusterLayout) -> bool {
- let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
+ let mut table = vec!["ID\tTags\tZone\tCapacity\tUsable".to_string()];
for (id, _, role) in layout.roles.items().iter() {
let role = match &role.0 {
Some(r) => r,
_ => continue,
};
let tags = role.tags.join(",");
+ let usage = layout.get_node_usage(id).unwrap_or(0);
+ let capacity = layout.get_node_capacity(id).unwrap_or(1);
table.push(format!(
- "{:?}\t{}\t{}\t{}",
+ "{:?}\t{}\t{}\t{}\t{} ({:.1}%)",
id,
tags,
role.zone,
- role.capacity_string()
+ role.capacity_string(),
+ usage as u32 * layout.partition_size,
+ (100.0 * usage as f32 * layout.partition_size as f32) / (capacity as f32)
));
}
+ println!();
+ println!("Parameters of the layout computation:");
+ println!("Zone redundancy: {}", layout.parameters.zone_redundancy);
+ println!();
if table.len() == 1 {
false
} else {
@@ -292,6 +351,20 @@ pub fn print_cluster_layout(layout: &ClusterLayout) -> bool {
}
}
+pub fn print_staging_parameters_changes(layout: &ClusterLayout) -> bool {
+ let has_changes = layout.staged_parameters.get().clone() != layout.parameters;
+ if has_changes {
+ println!();
+ println!("==== NEW LAYOUT PARAMETERS ====");
+ println!(
+ "Zone redundancy: {}",
+ layout.staged_parameters.get().zone_redundancy
+ );
+ println!();
+ }
+ has_changes
+}
+
pub fn print_staging_role_changes(layout: &ClusterLayout) -> bool {
let has_changes = layout
.staging
diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs
index cb085813..64798952 100644
--- a/src/garage/cli/structs.rs
+++ b/src/garage/cli/structs.rs
@@ -87,6 +87,10 @@ pub enum LayoutOperation {
#[structopt(name = "remove", version = garage_version())]
Remove(RemoveRoleOpt),
+ /// Configure parameters of the layout computation
+ #[structopt(name = "config", version = garage_version())]
+ Config(ConfigLayoutOpt),
+
/// Show roles currently assigned to nodes and changes staged for commit
#[structopt(name = "show", version = garage_version())]
Show,
@@ -110,7 +114,7 @@ pub struct AssignRoleOpt {
#[structopt(short = "z", long = "zone")]
pub(crate) zone: Option<String>,
- /// Capacity (in relative terms, use 1 to represent your smallest server)
+ /// Capacity (in relative terms)
#[structopt(short = "c", long = "capacity")]
pub(crate) capacity: Option<u32>,
@@ -134,6 +138,13 @@ pub struct RemoveRoleOpt {
}
#[derive(StructOpt, Debug)]
+pub struct ConfigLayoutOpt {
+ /// Zone redundancy parameter
+ #[structopt(short = "r", long = "redundancy")]
+ pub(crate) redundancy: Option<usize>,
+}
+
+#[derive(StructOpt, Debug)]
pub struct ApplyLayoutOpt {
/// Version number of new configuration: this command will fail if
/// it is not exactly 1 + the previous configuration's version
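
Usage sketch (not part of this diff; the redundancy value and version numbers are illustrative and assume the current layout is at version 1): the new `config` subcommand slots into the existing stage/show/apply workflow:

  garage layout config --redundancy 2    # stage a zone redundancy between 1 and the replication factor
  garage layout show                     # review staged parameter/role changes and the recomputed layout
  garage layout apply --version 2        # enact staged changes (must be previous version + 1)
  garage layout revert --version 2       # or discard all proposed changes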