Enable ruff check that can auto-fix (#3644)
cbeauchesne authored Dec 10, 2024
1 parent 6dea1a7 commit c0a6d73
Showing 59 changed files with 554 additions and 819 deletions.
10 changes: 3 additions & 7 deletions conftest.py
@@ -134,7 +134,7 @@ def pytest_configure(config):
def pytest_sessionstart(session):

# get the terminal to allow logging directly in stdout
setattr(logger, "terminal", session.config.pluginmanager.get_plugin("terminalreporter"))
logger.terminal = session.config.pluginmanager.get_plugin("terminalreporter")

# if only collect tests, do not start the scenario
if not session.config.option.collectonly:
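
Note: the setattr(...) to plain attribute assignment change above is the auto-fix for ruff rule B010 (setattr with a constant attribute name), which the pyproject.toml hunk below appears to drop from the ignore list. A minimal sketch of the rewrite, using a hypothetical Logger class rather than the repo's real logger object:

```python
# Illustrative only: the kind of rewrite B010's auto-fix performs.
# `Logger` is a stand-in class, not code from the repository.
class Logger:
    terminal = None

logger = Logger()

# Flagged by B010: setattr() with a constant attribute name...
setattr(logger, "terminal", "terminal-reporter")

# ...is rewritten into a plain attribute assignment:
logger.terminal = "terminal-reporter"
```
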
@@ -162,11 +162,7 @@ def _collect_item_metadata(item):

if skip_reason is not None:
# if any irrelevant declaration exists, it is the one we need to expose
if skip_reason.startswith("irrelevant"):
result["details"] = skip_reason

# otherwise, we keep the first one we found
elif result["details"] is None:
if skip_reason.startswith("irrelevant") or result["details"] is None:
result["details"] = skip_reason

if result["details"]:
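
Note: collapsing the if/elif branches with identical bodies into one condition joined with `or` matches ruff rule SIM114 (and the related PLR5501), which this commit also appears to stop ignoring. A minimal sketch of the same merge with hypothetical stand-in values:

```python
# Hypothetical stand-ins for the skip_reason/result objects used above.
result = {"details": None}
skip_reason = "irrelevant: feature not supported"

# Before: two branches with identical bodies.
if skip_reason.startswith("irrelevant"):
    result["details"] = skip_reason
elif result["details"] is None:
    result["details"] = skip_reason

# After (SIM114-style): a single condition joined with `or`.
if skip_reason.startswith("irrelevant") or result["details"] is None:
    result["details"] = skip_reason
```
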
@@ -257,7 +253,7 @@ def pytest_pycollect_makeitem(collector, name, obj):


def pytest_collection_modifyitems(session, config, items: list[pytest.Item]):
"""unselect items that are not included in the current scenario"""
"""Unselect items that are not included in the current scenario"""

logger.debug("pytest_collection_modifyitems")

18 changes: 3 additions & 15 deletions pyproject.toml
@@ -77,24 +77,15 @@ ignore = [
"ARG001", # unused function argument
"ARG002", # unused method argument
"ARG005", # unused lambda argument
"B010", # wat is zat?
"BLE001", # TBD
"C901", # code complexity, TBD
"COM819",
"COM819", # no extra comma, TBD
"D200",
"D210", # docstring format
"D202", # docstring format
"D208", # docstring format
"D205", # docstring format
"D209", # docstring format
"D401", # docstring format
"D403", # docstring format
"D404", # docstring format
"D410", # docstring format
"D411", # docstring format
"D413", # docstring format
"D212", # docstring format
"D417", # docstring format
"D417",
"E501", # line too long
"E722", # TBD
"E741",
@@ -110,7 +101,6 @@ ignore = [
"PLR0915", # too many statements, may be replaced by a higher default value
"PLR1714",
"PLR2004",
"PLR5501",
"PTH100",
"PTH102",
"PTH110",
@@ -131,16 +121,13 @@ ignore = [
"SIM102",
"SIM108",
"SIM110", # TBD
"SIM114",
"SIM401", # code quality, TBD
"SLF001",
"TRY002",
"TRY003", # this is a full project to enable this
"TRY201",
"TRY300",
"UP007",
"UP015",
"UP024",
"UP038", # we really want this? TBD


@@ -154,6 +141,7 @@ ignore = [
"D104", # Missing docstring in public package
"D105", # Missing docstring in magic method
"D107", # Missing docstring in `__init__`
"D202", # blank line after docstring
"D203", # cause a warning
"D211", # no-blank-line-before-class
"D213", # multi-line-summary-second-line
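
The net effect of this pyproject.toml hunk is a smaller ruff ignore list: B010, SIM114, UP015 and a batch of docstring-format rules (D2xx) appear to stop being ignored, while D202 ("blank line after docstring") is added to a later ignore block, presumably so the violations could be cleaned up with `ruff check --fix`. The truncated hunks do not show the exact set of removed rules; the list here is inferred from the file changes below. A hypothetical before/after showing the docstring style these rules push toward:

```python
# Made-up functions illustrating the one-line docstring fixes seen below:
# first word capitalized, no spaces just inside the quotes.
def before():
    """ handles the thing """

def after():
    """Handles the thing"""
```
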
2 changes: 1 addition & 1 deletion utils/_context/_scenarios/__init__.py
@@ -25,7 +25,7 @@
class scenarios:
@staticmethod
def all_endtoend_scenarios(test_object):
"""particular use case where a klass applies on all scenarios"""
"""Particular use case where a klass applies on all scenarios"""

# Check that no scenario has been already declared
for marker in getattr(test_object, "pytestmark", []):
5 changes: 3 additions & 2 deletions utils/_context/_scenarios/auto_injection.py
@@ -443,8 +443,9 @@ def __init__(


class InstallerAutoInjectionScenarioProfiling(_VirtualMachineScenario):
""" As Profiling is not included in GA (2024/11) we reduce the number of VMS to speed up the execution
Until we fix the performance problems on the AWS architecture and speed up the tests"""
"""As Profiling is not included in GA (2024/11) we reduce the number of VMS to speed up the execution
Until we fix the performance problems on the AWS architecture and speed up the tests
"""

def __init__(
self,
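
The docstring reflow in this hunk (leading space removed, closing quotes moved to their own line) is the pattern repeated in most of the files below; it corresponds to pydocstyle rules such as D210 (no whitespace surrounding docstring text) and D209 (multi-line closing quotes on a separate line). A small, made-up illustration:

```python
# Hypothetical functions showing the multi-line docstring reflow.
def before():
    """ summary line
    second line glued to the closing quotes"""

def after():
    """Summary line
    second line, with the closing quotes on their own line
    """
```
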
8 changes: 4 additions & 4 deletions utils/_context/_scenarios/core.py
@@ -77,7 +77,7 @@ def _create_log_subfolder(self, subfolder, remove_if_exists=False):
Path(path).mkdir(parents=True, exist_ok=True)

def __call__(self, test_object):
"""handles @scenarios.scenario_name"""
"""Handles @scenarios.scenario_name"""

# Check that no scenario has been already declared
for marker in getattr(test_object, "pytestmark", []):
@@ -117,7 +117,7 @@ def pytest_configure(self, config):
self.configure(config)

def pytest_sessionstart(self, session):
"""called at the very begining of the process"""
"""Called at the very begining of the process"""

logger.terminal.write_sep("=", "test context", bold=True)

@@ -139,10 +139,10 @@ def get_warmups(self):
]

def post_setup(self):
"""called after test setup"""
"""Called after test setup"""

def close_targets(self):
"""called at the end of the process"""
"""Called at the end of the process"""

@property
def host_log_folder(self):
35 changes: 19 additions & 16 deletions utils/_context/_scenarios/docker_ssi.py
@@ -24,7 +24,7 @@


class DockerSSIScenario(Scenario):
"""Scenario test the ssi installer on a docker environment and runs APM test agent """
"""Scenario test the ssi installer on a docker environment and runs APM test agent"""

def __init__(self, name, doc, scenario_groups=None) -> None:
super().__init__(name, doc=doc, github_workflow="dockerssi", scenario_groups=scenario_groups)
@@ -140,7 +140,7 @@ def close_targets(self):
self.ssi_image_builder.push_base_image()

def fill_context(self, json_tested_components):
""" After extract the components from the weblog, fill the context with the data """
"""After extract the components from the weblog, fill the context with the data"""

image_internal_name = SupportedImages().get_internal_name_from_base_image(self._base_image, self._arch)
self.configuration["os"] = image_internal_name
@@ -211,7 +211,7 @@ def configuration(self):


class DockerSSIImageBuilder:
""" Manages the docker image building for the SSI scenario """
"""Manages the docker image building for the SSI scenario"""

def __init__(
self, base_weblog, base_image, library, arch, installable_runtime, push_base_images, force_build
@@ -240,7 +240,7 @@ def configure(self):
self.ssi_all_docker_tag = f"ssi_all_{self.docker_tag}"

def build_weblog(self):
""" Manages the build process of the weblog image """
"""Manages the build process of the weblog image"""
if not self.exist_base_image() or self._push_base_images or self._force_build:
# Build the base image
self.build_lang_deps_image()
@@ -253,7 +253,7 @@ def build_weblog(self):
)

def exist_base_image(self):
""" Check if the base image is available in the docker registry """
"""Check if the base image is available in the docker registry"""
try:
get_docker_client().images.pull(self._docker_registry_tag)
logger.info("Base image found on the registry")
@@ -263,7 +263,7 @@ def exist_base_image(self):
return False

def push_base_image(self):
""" Push the base image to the docker registry. Base image contains: lang (if it's needed) and ssi installer (only with the installer, without ssi autoinject )"""
"""Push the base image to the docker registry. Base image contains: lang (if it's needed) and ssi installer (only with the installer, without ssi autoinject )"""
if self.should_push_base_images:
logger.stdout(f"Pushing base image to the registry: {self._docker_registry_tag}")
try:
@@ -275,7 +275,7 @@ def push_base_image(self):
logger.exception(f"Failed to push docker image: {e}")

def get_base_docker_tag(self):
""" Resolves and format the docker tag for the base image """
"""Resolves and format the docker tag for the base image"""
runtime = (
resolve_runtime_version(self._library, self._installable_runtime) + "_" if self._installable_runtime else ""
)
@@ -288,10 +288,11 @@ def get_base_docker_tag(self):
)

def build_lang_deps_image(self):
""" Build the lang image. Install the language runtime on the base image.
"""Build the lang image. Install the language runtime on the base image.
We also install some linux deps for the ssi installer
If there is not runtime installation requirement, we install only the linux deps
Base lang contains the scrit to install the runtime and the script to install dependencies """
Base lang contains the scrit to install the runtime and the script to install dependencies
"""
dockerfile_template = None
try:
if self._installable_runtime:
@@ -327,7 +328,7 @@ def build_ssi_installer_image(self):
raise e

def build_ssi_installer_image(self):
""" Build the ssi installer image. Install only the ssi installer on the image """
"""Build the ssi installer image. Install only the ssi installer on the image"""
try:
logger.stdout(
f"[tag:{self.ssi_installer_docker_tag}]Installing DD installer on base image [{self.docker_tag}]."
@@ -350,8 +351,9 @@ def build_ssi_installer_image(self):
raise e

def build_weblog_image(self, ssi_installer_docker_tag):
""" Build the final weblog image. Uses base ssi installer image, install
the full ssi (to perform the auto inject) and build the weblog image """
"""Build the final weblog image. Uses base ssi installer image, install
the full ssi (to perform the auto inject) and build the weblog image
"""

weblog_docker_tag = "weblog-injection:latest"
logger.stdout(f"Building docker final weblog image with tag: {weblog_docker_tag}")
@@ -389,9 +391,10 @@ def build_weblog_image(self, ssi_installer_docker_tag):
raise e

def tested_components(self):
""" Extract weblog versions of lang runtime, agent, installer, tracer.
"""Extract weblog versions of lang runtime, agent, installer, tracer.
Also extracts the weblog url env variable
Return json with the data"""
Return json with the data
"""
logger.info("Weblog extract tested components")
result = get_docker_client().containers.run(
image=self._weblog_docker_image, command=f"/tested_components.sh {self.dd_lang}", remove=True
@@ -400,7 +403,7 @@ def tested_components(self):
return json.loads(result.decode("utf-8").replace("'", '"'))

def print_docker_build_logs(self, image_tag, build_logs):
""" Print the docker build logs to docker_build.log file """
"""Print the docker build logs to docker_build.log file"""
scenario_name = context.scenario.name
vm_logger(scenario_name, "docker_build").info("***************************************************************")
vm_logger(scenario_name, "docker_build").info(f" Building docker image with tag: {image_tag} ")
@@ -412,7 +415,7 @@ def print_docker_build_logs(self, image_tag, build_logs):
vm_logger(scenario_name, "docker_build").info(line)

def print_docker_push_logs(self, image_tag, push_logs):
""" Print the docker push logs to docker_push.log file """
"""Print the docker push logs to docker_push.log file"""
scenario_name = context.scenario.name
vm_logger(scenario_name, "docker_push").info("***************************************************************")
vm_logger(scenario_name, "docker_push").info(f" Push docker image with tag: {image_tag} ")
2 changes: 1 addition & 1 deletion utils/_context/_scenarios/integrations.py
@@ -15,7 +15,7 @@ def _get_unique_id(replay: bool, host_log_folder: str) -> str:
replay_file = f"{host_log_folder}/unique_id.txt"

if replay:
with open(replay_file, "r", encoding="utf-8") as f:
with open(replay_file, encoding="utf-8") as f:
unique_id = f.read()
else:
# pick a statistically unique id for the scenario
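
Dropping the explicit "r" argument from open() is the fix for ruff rule UP015 (redundant open mode): reading in text mode is already the default. A minimal runnable sketch using a placeholder file rather than the scenario's real replay file:

```python
from pathlib import Path

# Placeholder file so the snippet runs; not a path from the repository.
path = Path("unique_id.txt")
path.write_text("some-id", encoding="utf-8")

# Before: the explicit "r" mode is redundant...
with open(path, "r", encoding="utf-8") as f:
    unique_id = f.read()

# ...because reading in text mode is already open()'s default:
with open(path, encoding="utf-8") as f:
    unique_id = f.read()
```
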
6 changes: 3 additions & 3 deletions utils/_context/_scenarios/k8s_lib_injection.py
@@ -18,7 +18,7 @@


class KubernetesScenario(Scenario):
""" Scenario that tests kubernetes lib injection """
"""Scenario that tests kubernetes lib injection"""

def __init__(self, name, doc, github_workflow=None, scenario_groups=None, api_key=None, app_key=None) -> None:
super().__init__(name, doc=doc, github_workflow=github_workflow, scenario_groups=scenario_groups)
@@ -55,7 +55,7 @@ def configure(self, config):
logger.info("K8s Lib Injection environment configured")

def get_library_version(self):
""" Extract library version from the init image. """
"""Extract library version from the init image."""

logger.info("Get lib init tracer version")
lib_init_docker_image = get_docker_client().images.pull(self._library_init_image)
@@ -89,7 +89,7 @@ def components(self):


class WeblogInjectionScenario(Scenario):
"""Scenario that runs APM test agent """
"""Scenario that runs APM test agent"""

def __init__(self, name, doc, github_workflow=None, scenario_groups=None) -> None:
super().__init__(name, doc=doc, github_workflow=github_workflow, scenario_groups=scenario_groups)
10 changes: 5 additions & 5 deletions utils/_context/_scenarios/parametric.py
@@ -25,7 +25,7 @@


def _fail(message):
""" Used to mak a test as failed """
"""Used to mak a test as failed"""
logger.error(message)
raise Failed(message, pytrace=False) from None

@@ -104,7 +104,7 @@ def __setitem__(self, item, value):
def deserialize(self):
result = {}
for ctx_filename in glob.glob(f"{self.outer_inst.host_log_folder}/*_context.json"):
with open(ctx_filename, "r") as f:
with open(ctx_filename) as f:
fileContent = f.read()
# Remove last carriage return and the last comma. Wrap into json array.
all_params = json.loads(f"[{fileContent[:-2]}]")
@@ -185,7 +185,7 @@ def _pull_test_agent_image(self):
_get_client().images.pull(self.TEST_AGENT_IMAGE)

def _clean_containers(self):
""" some containers may still exists from previous unfinished sessions """
"""Some containers may still exists from previous unfinished sessions"""

for container in _get_client().containers.list(all=True):
if "test-client" in container.name or "test-agent" in container.name or "test-library" in container.name:
@@ -194,7 +194,7 @@ def _clean_containers(self):
container.remove(force=True)

def _clean_networks(self):
""" some network may still exists from previous unfinished sessions """
"""Some network may still exists from previous unfinished sessions"""
logger.info("Removing unused network")
_get_client().networks.prune()
logger.info("Removing unused network done")
@@ -274,7 +274,7 @@ def create_docker_network(self, test_id: str) -> Network:

@staticmethod
def get_host_port(worker_id: str, base_port: int) -> int:
""" deterministic port allocation for each worker """
"""Deterministic port allocation for each worker"""

if worker_id == "master": # xdist disabled
return base_port