Sourcery refactored master branch #2

Open · wants to merge 1 commit into base: master
11 changes: 4 additions & 7 deletions deployer/billing/importers.py
@@ -37,7 +37,7 @@ def build_gcp_query(cluster: dict, service_id=None):
assert re.match(r"^[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}$", service_id, re.I)
by_service = f'AND service.id = "{service_id}"'

-    query = f"""
+    return f"""
Function build_gcp_query refactored with the following changes:

  • Inline variable that is immediately returned (inline-immediately-returned-variable)
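For readers unfamiliar with this rule: binding a value to a name only to return it on the next line adds nothing, so the expression moves into the return. A minimal sketch with toy names (not code from this repo):

    # Before: `result` exists only to be returned
    def total(prices):
        result = sum(prices)
        return result

    # After: return the expression directly
    def total(prices):
        return sum(prices)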

SELECT
invoice.month as month,
project.id as project,
@@ -54,7 +54,6 @@ def build_gcp_query(cluster: dict, service_id=None):
ORDER BY invoice.month ASC
;
"""
-    return query


class BigqueryGCPBillingCostImporter:
@@ -135,8 +134,7 @@ def _run_query(self, start_month, end_month):
start = start_month.timestamp()
end = end_month.timestamp()
step = "24h"
-        rows = p.query_range(prom_query, start, end, step)
-        return rows
+        return p.query_range(prom_query, start, end, step)
Comment on lines -138 to +137
Function PrometheusUtilizationImporter._run_query refactored with the following changes:

  • Inline variable that is immediately returned (inline-immediately-returned-variable)


def clean_query_dataframe(self, df):
df = self.clean_namespace_labels(df)
@@ -165,7 +163,7 @@ def clean_namespace_labels(self, df):
def combine_support(self, df):
df["support_combined"] = 0.0
if "support" in df:
df["support_combined"] = df["support_combined"] + df["support"]
df["support_combined"] += df["support"]
Comment on lines -168 to +166
Function PrometheusUtilizationImporter.combine_support refactored with the following changes:

  • Replace assignment with augmented assignment (aug-assign)

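The rewrite is behavior-preserving here because augmented assignment on a DataFrame column is elementwise, just like the long form. A quick sketch with toy data (assumed shape, not this repo's dataframes):

    import pandas as pd

    df = pd.DataFrame({"support_combined": [0.0, 0.0], "support": [1.5, 2.5]})
    df["support_combined"] += df["support"]  # elementwise, same as the replaced long form
    print(df["support_combined"].tolist())   # [1.5, 2.5]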

if "kube-system" in df:
df["support_combined"] = df["support_combined"] + df["kube-system"]
@@ -210,8 +208,7 @@ def get_dedicated_cluster_costs(cluster, start_month, end_month):
pandas.DataFrame of costs.
"""
bq_importer = BigqueryGCPBillingCostImporter(cluster)
-    result = bq_importer.get_costs(start_month, end_month)
-    return result
+    return bq_importer.get_costs(start_month, end_month)
Comment on lines -213 to +211
Function get_dedicated_cluster_costs refactored with the following changes:

  • Inline variable that is immediately returned (inline-immediately-returned-variable)



def get_shared_cluster_utilization(cluster, start_month, end_month):
46 changes: 19 additions & 27 deletions deployer/cilogon_app.py
@@ -51,10 +51,7 @@ def build_request_headers(admin_id, admin_secret):
def build_request_url(id=None):
url = "https://cilogon.org/oauth2/oidc-cm"

-    if id is None:
-        return url
-
-    return str(URL(url).with_query({"client_id": id}))
+    return url if id is None else str(URL(url).with_query({"client_id": id}))
Function build_request_url refactored with the following changes:

  • Lift code into else after jump in control flow (reintroduce-else)
  • Replace if statement with if expression (assign-if-exp)
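The conditional expression works because both outcomes are plain expressions. A self-contained sketch of the same shape (assuming yarl's URL, which matches the .with_query call in the diff):

    from yarl import URL

    def build_request_url(id=None):
        url = "https://cilogon.org/oauth2/oidc-cm"
        return url if id is None else str(URL(url).with_query({"client_id": id}))

    print(build_request_url())       # https://cilogon.org/oauth2/oidc-cm
    print(build_request_url("abc"))  # https://cilogon.org/oauth2/oidc-cm?client_id=abc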



def build_client_details(cluster_name, hub_name, callback_url):
@@ -125,10 +122,7 @@ def stored_client_id_same_with_cilogon_records(
return False

cilogon_client_id = cilogon_client.get("client_id", None)
-    if cilogon_client_id != stored_client_id:
-        return False
-
-    return True
+    return cilogon_client_id == stored_client_id
Comment on lines -128 to +125
Function stored_client_id_same_with_cilogon_records refactored with the following changes:
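The change itself is simple: `==` already produces the boolean the function wants, so the if/return-False/return-True pair collapses. Sketch with toy names:

    # Before
    def ids_match(a, b):
        if a != b:
            return False
        return True

    # After: the comparison is the return value
    def ids_match(a, b):
        return a == b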



def print_not_ok_request_message(response):
@@ -159,8 +153,7 @@ def create_client(

# Check if there's a client id already stored in the config file for this hub
if Path(config_filename).is_file():
-        client_id = load_client_id_from_file(config_filename)
-        if client_id:
+        if client_id := load_client_id_from_file(config_filename):
Comment on lines -162 to +156
Function create_client refactored with the following changes:

  • Use named expression to simplify assignment and conditional (use-named-expression)
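A note on the named expression (Python 3.8+): it binds and tests in one step, and the name stays usable inside the block. A small sketch with a stand-in loader (the real load_client_id_from_file reads a config file):

    def load_client_id(path):
        # Toy stand-in: returns None when nothing is stored
        return {"config/hub.values.yaml": "cilogon-client-123"}.get(path)

    if client_id := load_client_id("config/hub.values.yaml"):
        print(f"Found existing CILogon client app: {client_id}")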

print_colour(
f"Found existing CILogon client app in {config_filename}.", "yellow"
)
@@ -294,23 +287,7 @@ def delete_client(admin_id, admin_secret, cluster_name, hub_name, client_id=None
cluster_name, hub_name
)

-    if not client_id:
-        if Path(config_filename).is_file():
-            client_id = load_client_id_from_file(config_filename)
-            # Nothing to do if no client has been found
-            if not client_id:
-                print_colour(
-                    "No `client_id` to delete was provided and couldn't find any in `config_filename`",
-                    "red",
-                )
-                return
-        else:
-            print_colour(
-                f"No `client_id` to delete was provided and couldn't find any {config_filename} file",
-                "red",
-            )
-            return
-    else:
+    if client_id:
Comment on lines -297 to +290
Function delete_client refactored with the following changes:

if not stored_client_id_same_with_cilogon_records(
admin_id,
admin_secret,
@@ -324,6 +301,21 @@ def delete_client(admin_id, admin_secret, cluster_name, hub_name, client_id=None
)
return

+    elif Path(config_filename).is_file():
+        client_id = load_client_id_from_file(config_filename)
+        # Nothing to do if no client has been found
+        if not client_id:
+            print_colour(
+                "No `client_id` to delete was provided and couldn't find any in `config_filename`",
+                "red",
+            )
+            return
+    else:
+        print_colour(
+            f"No `client_id` to delete was provided and couldn't find any {config_filename} file",
+            "red",
+        )
+        return
print(f"Deleting the CILogon client details for {client_id}...")
headers = build_request_headers(admin_id, admin_secret)
response = requests.delete(build_request_url(client_id), headers=headers, timeout=5)
4 changes: 1 addition & 3 deletions deployer/cluster.py
@@ -84,9 +84,7 @@ def deploy_support(self, cert_manager_version):
str(support_dir),
]

-        for values_file in values_files:
-            cmd.append(f"--values={values_file}")
-
+        cmd.extend(f"--values={values_file}" for values_file in values_files)
Function Cluster.deploy_support refactored with the following changes:

  • Replace a for append loop with list extend (for-append-to-extend)
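list.extend accepts any iterable, including a generator expression, so the append loop collapses into one call without building a temporary list. Toy sketch:

    values_files = ["common.values.yaml", "prod.values.yaml"]
    cmd = ["helm", "upgrade"]
    cmd.extend(f"--values={f}" for f in values_files)
    print(cmd)  # ['helm', 'upgrade', '--values=common.values.yaml', '--values=prod.values.yaml']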

print_colour(f"Running {' '.join([str(c) for c in cmd])}")
subprocess.check_call(cmd)

10 changes: 5 additions & 5 deletions deployer/deployer.py
@@ -132,7 +132,7 @@ def deploy_grafana_dashboards(
# Add GRAFANA_TOKEN to the environment variables for
# jupyterhub/grafana-dashboards' deploy.py script
deploy_script_env = os.environ.copy()
-    deploy_script_env.update({"GRAFANA_TOKEN": grafana_token})
+    deploy_script_env["GRAFANA_TOKEN"] = grafana_token
Function deploy_grafana_dashboards refactored with the following changes:
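For a single key, subscript assignment is the direct form; .update({...}) builds a throwaway dict first. Sketch (dummy token value):

    import os

    deploy_script_env = os.environ.copy()
    deploy_script_env["GRAFANA_TOKEN"] = "dummy-token"  # same effect as .update({"GRAFANA_TOKEN": ...})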

try:
print_colour(f"Deploying grafana dashboards to {cluster_name}...")
subprocess.check_call(
@@ -239,9 +239,9 @@ def generate_helm_upgrade_jobs(
"reason_for_redeploy": "",
}

-            # Check if this cluster file has been modified. If so, set boolean flags to True
-            intersection = set(changed_filepaths).intersection([str(cluster_file)])
-            if intersection:
+            if intersection := set(changed_filepaths).intersection(
+                [str(cluster_file)]
+            ):
Comment on lines -242 to +244
Function generate_helm_upgrade_jobs refactored with the following changes:

  • Use named expression to simplify assignment and conditional (use-named-expression)

This removes the following comments ( why? ):

# Check if this cluster file has been modified. If so, set boolean flags to True

print_colour(
f"This cluster.yaml file has been modified. Generating jobs to upgrade all hubs and the support chart on THIS cluster: {cluster_name}"
)
@@ -348,7 +348,7 @@ def run_hub_health_check(
elif len(hub_indx) > 1:
print_colour("ERROR: More than one hub with this name found!")
sys.exit(1)
-    elif len(hub_indx) == 0:
+    elif not hub_indx:
Function run_hub_health_check refactored with the following changes:

  • Simplify sequence length comparison (simplify-len-comparison)

print_colour("ERROR: No hubs with this name found!")
sys.exit(1)

9 changes: 2 additions & 7 deletions deployer/file_acquisition.py
@@ -85,12 +85,10 @@ def build_absolute_path_to_hub_encrypted_config_file(cluster_name, hub_name):
Note that file doesn't need to exist.
"""
cluster_config_dir_path = find_absolute_path_to_cluster_file(cluster_name).parent
-    encrypted_file_path = cluster_config_dir_path.joinpath(
+    return cluster_config_dir_path.joinpath(
         f"enc-{hub_name}.secret.values.yaml"
     )

-    return encrypted_file_path
Comment on lines -88 to -92
Function build_absolute_path_to_hub_encrypted_config_file refactored with the following changes:

  • Inline variable that is immediately returned (inline-immediately-returned-variable)



def persist_config_in_encrypted_file(encrypted_file, new_config):
"""
@@ -196,10 +194,7 @@ def get_decrypted_file(original_filepath):
with open(original_filepath) as f:
# JSON files output by terraform have hard tabs, the *one* thing that is
# valid JSON but not valid YAML
-        if ext.endswith("json"):
-            loader_func = json.load
-        else:
-            loader_func = yaml.load
+        loader_func = json.load if ext.endswith("json") else yaml.load
Comment on lines -199 to +197
Function get_decrypted_file refactored with the following changes:

  • Replace if statement with if expression (assign-if-exp)
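Because Python functions are first-class values, the branch can select the parser itself and the call site stays uniform. A self-contained sketch; lines_load is a toy stand-in for the YAML loader used in the repo:

    import json

    def lines_load(f):
        # Toy fallback parser: one value per line
        return [line.strip() for line in f]

    def load_file(path):
        loader = json.load if path.endswith(".json") else lines_load
        with open(path) as f:
            return loader(f)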

try:
content = loader_func(f)
except ScannerError:
4 changes: 2 additions & 2 deletions deployer/generate/common.py
@@ -45,8 +45,8 @@ def generate_support_files(cluster_config_directory, vars):
print_colour("Generating the prometheus credentials encrypted file...", "yellow")
alphabet = string.ascii_letters + string.digits
credentials = {
"username": "".join(secrets.choice(alphabet) for i in range(64)),
"password": "".join(secrets.choice(alphabet) for i in range(64)),
"username": "".join(secrets.choice(alphabet) for _ in range(64)),
"password": "".join(secrets.choice(alphabet) for _ in range(64)),
Comment on lines -48 to +49
Function generate_support_files refactored with the following changes:

  • Replace unused for index with underscore (for-index-underscore)
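The underscore signals an unused loop variable; the surrounding idiom is also the standard recipe for random credentials with the secrets module. Sketch (64-character length kept from the diff):

    import secrets
    import string

    alphabet = string.ascii_letters + string.digits
    password = "".join(secrets.choice(alphabet) for _ in range(64))
    print(len(password))  # 64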

}
with open(
REPO_ROOT / "config/clusters/templates/common/support.secret.values.yaml"
10 changes: 2 additions & 8 deletions deployer/grafana/central_grafana.py
@@ -42,7 +42,7 @@ def build_datasource_details(cluster_name):
# Get the credentials of this prometheus instance
prometheus_creds = get_cluster_prometheus_creds(cluster_name)

-    datasource_details = {
+    return {
Function build_datasource_details refactored with the following changes:

  • Inline variable that is immediately returned (inline-immediately-returned-variable)

"name": cluster_name,
"type": "prometheus",
"access": "proxy",
@@ -52,8 +52,6 @@ def build_datasource_details(cluster_name):
"secureJsonData": {"basicAuthPassword": prometheus_creds["password"]},
}

-    return datasource_details


def build_datasource_request_headers(cluster_name):
"""
@@ -64,14 +62,12 @@ def build_datasource_request_headers(cluster_name):
"""
token = get_grafana_token(cluster_name)

-    headers = {
+    return {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {token}",
}

-    return headers
Comment on lines -67 to -73
Function build_datasource_request_headers refactored with the following changes:

  • Inline variable that is immediately returned (inline-immediately-returned-variable)



def get_clusters_used_as_datasources(cluster_name, datasource_endpoint):
"""
@@ -153,8 +149,6 @@ def update_central_grafana_datasources(
"yellow",
)
exceptions += 1
-            pass

Comment on lines -156 to -157
Function update_central_grafana_datasources refactored with the following changes:

  • Remove redundant pass statement (remove-redundant-pass)

if exceptions:
print_colour(
f"Failed to add {exceptions} clusters as datasources. See errors above!",
23 changes: 10 additions & 13 deletions deployer/grafana/grafana_utils.py
@@ -21,15 +21,14 @@ def get_grafana_url(cluster_name):
with open(config_file) as f:
support_config = yaml.load(f)

-    grafana_tls_config = (
+    if grafana_tls_config := (
         support_config.get("grafana", {}).get("ingress", {}).get("tls", [])
-    )
-    if not grafana_tls_config:
+    ):
+        # We only have one tls host right now. Modify this when things change.
+        return "https://" + grafana_tls_config[0]["hosts"][0]
+    else:
         raise ValueError(f"grafana.ingress.tls config for {cluster_name} missing!")
-
-    # We only have one tls host right now. Modify this when things change.
-    return "https://" + grafana_tls_config[0]["hosts"][0]
Comment on lines -24 to -31
Function get_grafana_url refactored with the following changes:

  • Use named expression to simplify assignment and conditional (use-named-expression)
  • Swap if/else branches (swap-if-else-branches)
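The rewrite folds bind-and-test into the if and moves the raise into the else. A runnable sketch of the same control flow, assuming the config shape shown in the diff:

    def first_tls_host(support_config):
        if tls := support_config.get("grafana", {}).get("ingress", {}).get("tls", []):
            # We only have one tls host right now
            return "https://" + tls[0]["hosts"][0]
        else:
            raise ValueError("grafana.ingress.tls config missing!")

    cfg = {"grafana": {"ingress": {"tls": [{"hosts": ["grafana.example.org"]}]}}}
    print(first_tls_host(cfg))  # https://grafana.example.org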



def get_cluster_prometheus_address(cluster_name):
"""
@@ -56,21 +55,19 @@ def get_cluster_prometheus_address(cluster_name):
f"`prometheusIngressAuthSecret` wasn't configured for {cluster_name}"
)

-    tls_config = (
+    if tls_config := (
         support_config.get("prometheus", {})
         .get("server", {})
         .get("ingress", {})
         .get("tls", [])
-    )
-
-    if not tls_config:
+    ):
+        # We only have one tls host right now. Modify this when things change.
+        return tls_config[0]["hosts"][0]
+    else:
         raise ValueError(
             f"No tls config was found for the prometheus instance of {cluster_name}"
         )
-
-    # We only have one tls host right now. Modify this when things change.
-    return tls_config[0]["hosts"][0]
Comment on lines -59 to -72
Function get_cluster_prometheus_address refactored with the following changes:

  • Use named expression to simplify assignment and conditional (use-named-expression)
  • Swap if/else branches (swap-if-else-branches)



def get_cluster_prometheus_creds(cluster_name):
"""
31 changes: 12 additions & 19 deletions deployer/helm_upgrade_decision.py
@@ -151,11 +151,9 @@ def generate_hub_matrix_jobs(
cluster_file.parent.joinpath(values_file)
for values_file in hub.get("helm_chart_values_files", {})
]
-            # Establish if any of this hub's helm chart values files have been
-            # modified
-            intersection = added_or_modified_files.intersection(values_files)
-
-            if intersection:
+            if intersection := added_or_modified_files.intersection(
+                values_files
+            ):
Comment on lines -154 to +156
Function generate_hub_matrix_jobs refactored with the following changes:

  • Use named expression to simplify assignment and conditional (use-named-expression)

This removes the following comments ( why? ):

# Establish if any of this hub's helm chart values files have been
# modified

# If at least one of the helm chart values files associated with
# this hub has been modified, add it to list of matrix jobs to be
# upgraded
@@ -228,9 +226,7 @@ def generate_support_matrix_jobs(
# Empty list to store the matrix definitions in
matrix_jobs = []

-    # Double-check that support is defined for this cluster.
-    support_config = cluster_config.get("support", {})
-    if support_config:
+    if support_config := cluster_config.get("support", {}):
Comment on lines -231 to +229
Function generate_support_matrix_jobs refactored with the following changes:

  • Use named expression to simplify assignment and conditional (use-named-expression)

This removes the following comments ( why? ):

# Double-check that support is defined for this cluster.

if upgrade_support_on_all_clusters or upgrade_support_on_this_cluster:
# We know we're upgrading support on all clusters, so just add the cluster
# name to the list of matrix jobs and move on
@@ -250,9 +246,9 @@ def generate_support_matrix_jobs(
cluster_file.parent.joinpath(values_file)
for values_file in support_config.get("helm_chart_values_files", {})
]
-            intersection = added_or_modified_files.intersection(values_files)
-
-            if intersection:
+            if intersection := added_or_modified_files.intersection(
+                values_files
+            ):
matrix_job = cluster_info.copy()
matrix_job["upgrade_support"] = True
matrix_job[
@@ -382,14 +378,11 @@ def ensure_support_staging_jobs_have_correct_keys(
# upgrade_staging key present, even if we just set it to False
for job in support_and_staging_matrix_jobs:
if "upgrade_staging" not in job.keys():
-            # Get a list of prod hubs running on the same cluster this staging job will
-            # run on
-            hubs_on_this_cluster = [
+            if hubs_on_this_cluster := [
                 hub["hub_name"]
                 for hub in prod_hub_matrix_jobs
                 if hub["cluster_name"] == job["cluster_name"]
-            ]
-            if hubs_on_this_cluster:
+            ]:
Function ensure_support_staging_jobs_have_correct_keys refactored with the following changes:

  • Use named expression to simplify assignment and conditional (use-named-expression)

This removes the following comments ( why? ):

# Get a list of prod hubs running on the same cluster this staging job will
# run on

# There are prod hubs on this cluster that require an upgrade, and so we
# also upgrade staging
job["upgrade_staging"] = True
@@ -431,9 +424,9 @@ def assign_staging_jobs_for_missing_clusters(
support_staging_clusters = {
job["cluster_name"] for job in support_and_staging_matrix_jobs
}
-    missing_clusters = prod_hub_clusters.difference(support_staging_clusters)
-
-    if missing_clusters:
+    if missing_clusters := prod_hub_clusters.difference(
+        support_staging_clusters
+    ):
Comment on lines -434 to +429
Function assign_staging_jobs_for_missing_clusters refactored with the following changes:

  • Use named expression to simplify assignment and conditional (use-named-expression)

# Generate support/staging jobs for clusters that don't have them but do have
# prod hub jobs. We assume they are missing because neither the support chart
# nor staging hub needed an upgrade. We set upgrade_support to False. However,
10 changes: 4 additions & 6 deletions deployer/hub.py
@@ -57,9 +57,9 @@ def deploy(self, dask_gateway_version, debug, dry_run):
subprocess.check_call(["kubectl", "apply", "-f", manifest_url])

         with get_decrypted_files(
-            self.cluster.config_path.joinpath(p)
-            for p in self.spec["helm_chart_values_files"]
-        ) as values_files:
+                self.cluster.config_path.joinpath(p)
+                for p in self.spec["helm_chart_values_files"]
+            ) as values_files:
Comment on lines -60 to +62
Function Hub.deploy refactored with the following changes:

  • Replace a for append loop with list extend (for-append-to-extend)

cmd = [
"helm",
"upgrade",
@@ -78,9 +78,7 @@ def deploy(self, dask_gateway_version, debug, dry_run):
cmd.append("--debug")

# Add on the values files
-        for values_file in values_files:
-            cmd.append(f"--values={values_file}")
-
+        cmd.extend(f"--values={values_file}" for values_file in values_files)
# join method will fail on the PosixPath element if not transformed
# into a string first
print_colour(f"Running {' '.join([str(c) for c in cmd])}")