diff --git a/bugbug/code_search/parser.py b/bugbug/code_search/parser.py index 0fd068ebfa..f0eb1539f8 100644 --- a/bugbug/code_search/parser.py +++ b/bugbug/code_search/parser.py @@ -42,7 +42,9 @@ def search(repo_dir, commit_hash, symbol_name): return [] logger.error( - f"Error running 'hg grep' command.\nstdout:\n{e.stdout.decode()}\n\nstderr:\n{e.stderr.decode()}" + "Error running 'hg grep' command.\nstdout:\n %s \n\nstderr:\n %s", + e.stdout.decode(), + e.stderr.decode(), ) raise @@ -60,7 +62,9 @@ def search(repo_dir, commit_hash, symbol_name): ) except subprocess.CalledProcessError as e: logger.error( - f"Error running 'hg cat' command.\nstdout:\n{e.stdout.decode()}\n\nstderr:\n{e.stderr.decode()}" + "Error running 'hg cat' command.\nstdout:\n%s\n\nstderr:\n%s", + e.stdout.decode(), + e.stderr.decode(), ) raise diff --git a/bugbug/model.py b/bugbug/model.py index 9ef60c51d3..58255f81ed 100644 --- a/bugbug/model.py +++ b/bugbug/model.py @@ -317,7 +317,7 @@ def print_feature_importances(self, important_features, class_probabilities=None # allow maximum of 3 columns in a row to fit the page better COLUMNS = 3 - logger.info("Top {} features:".format(len(top_feature_names))) + logger.info("Top %s features:", len(top_feature_names)) for i in range(0, len(top_feature_names), COLUMNS): table = [] for item in shap_val: @@ -380,7 +380,7 @@ def train(self, importance_cutoff=0.15, limit=None): X = X[:limit] y = y[:limit] - logger.info(f"X: {X.shape}, y: {y.shape}") + logger.info("X: %s , y: %s", X.shape, y.shape) is_multilabel = isinstance(y[0], np.ndarray) is_binary = len(self.class_names) == 2 @@ -408,11 +408,14 @@ def train(self, importance_cutoff=0.15, limit=None): "std": score.std() * 2, } logger.info( - f"{scoring.capitalize()}: f{score.mean()} (+/- {score.std() * 2})" + "%s: f%.4f (+/- %.4f)", + scoring.capitalize(), + score.mean(), + (score.std() * 2), ) - logger.info(f"X_train: {X_train.shape}, y_train: {y_train.shape}") - logger.info(f"X_test: {X_test.shape}, 
y_test: {y_test.shape}") + logger.info("X_train: %s, y_train: %s", X_train.shape, y_train.shape) + logger.info("X_test: %s, y_test: %s", X_test.shape, y_test.shape) self.clf.fit(X_train, self.le.transform(y_train)) logger.info("Number of features: %d", self.clf.steps[-1][1].n_features_in_) @@ -480,7 +483,7 @@ def train(self, importance_cutoff=0.15, limit=None): "The predictions should be multilabel" ) - logger.info(f"No confidence threshold - {len(y_test)} classified") + logger.info("No confidence threshold - %d classified", len(y_test)) if is_multilabel: confusion_matrix = metrics.multilabel_confusion_matrix(y_test, y_pred) else: @@ -548,7 +551,9 @@ def train(self, importance_cutoff=0.15, limit=None): ) logger.info( - f"\nConfidence threshold > {confidence_threshold} - {classified_num} classified" + "\nConfidence threshold > %s - %d classified", + confidence_threshold, + classified_num, ) if is_multilabel: confusion_matrix = metrics.multilabel_confusion_matrix( @@ -579,7 +584,7 @@ def train(self, importance_cutoff=0.15, limit=None): X_train = X y_train = y - logger.info(f"X_train: {X_train.shape}, y_train: {y_train.shape}") + logger.info("X_train: %s, y_train: %s", X_train.shape, y_train.shape) self.clf.fit(X_train, self.le.transform(y_train)) diff --git a/bugbug/models/rcatype.py b/bugbug/models/rcatype.py index 6a1a6324c1..37ad88ca22 100644 --- a/bugbug/models/rcatype.py +++ b/bugbug/models/rcatype.py @@ -157,7 +157,7 @@ def get_rca_from_whiteboard(self, whiteboard_data): ) if rca_whiteboard_split[1] not in self.RCA_LIST: - logger.warning(rca_whiteboard_split[1] + " not in RCA_LIST") + logger.warning("%s not in RCA_LIST", rca_whiteboard_split[1]) else: rca.append(rca_whiteboard_split[1]) return rca diff --git a/bugbug/models/testselect.py b/bugbug/models/testselect.py index 8d9236f47e..150f16be8c 100644 --- a/bugbug/models/testselect.py +++ b/bugbug/models/testselect.py @@ -85,7 +85,7 @@ def _get_cost(config: str) -> int: if all(s in config for s in substrings): 
return cost - logger.warning(f"Couldn't find cost for {config}") + logger.warning("Couldn't find cost for %s", config) return max(cost for _, cost in costs) @@ -309,7 +309,7 @@ def select_configs( for group in groups: if group not in equivalence_sets: - logger.warning(f"No equivalence sets for group {group}") + logger.warning("No equivalence sets for group %s", group) continue # Create constraints to ensure at least one task from each set of equivalent # groups is selected. diff --git a/bugbug/phabricator.py b/bugbug/phabricator.py index 38858cda79..c3c05b53b0 100644 --- a/bugbug/phabricator.py +++ b/bugbug/phabricator.py @@ -187,7 +187,7 @@ def get_testing_project(rev: RevisionDict) -> str | None: ] if len(testing_projects) > 1: - logger.warning("Revision D{} has more than one testing tag.".format(rev["id"])) + logger.warning("Revision D%s has more than one testing tag.", rev["id"]) if len(testing_projects) == 0: return None @@ -237,7 +237,7 @@ def get_first_review_time(rev: RevisionDict) -> timedelta | None: ) = get_review_dates(rev) if creation_date is None: - logger.warning("Revision D{} has no creation date.".format(rev["id"])) + logger.warning("Revision D%s has no creation date.", rev["id"]) return None if len(review_dates) == 0: @@ -253,7 +253,7 @@ def get_first_review_time(rev: RevisionDict) -> timedelta | None: and first_exclusion_end_date is not None and first_exclusion_start_date > first_exclusion_end_date ): - logger.warning("Revision D{} was in an inconsistent state.".format(rev["id"])) + logger.warning("Revision D%s was in an inconsistent state.", rev["id"]) if ( first_exclusion_start_date is None @@ -264,9 +264,8 @@ def get_first_review_time(rev: RevisionDict) -> timedelta | None: first_exclusion_end_date is None or first_exclusion_end_date > first_review_date ): logger.warning( - "Revision D{} was accepted while in 'planned changes' or 'closed' state.".format( - rev["id"] - ) + "Revision D%s was accepted while in 'planned changes' or 'closed' 
state.", + rev["id"], ) return first_review_date - creation_date else: @@ -284,7 +283,7 @@ def get_pending_review_time(rev: RevisionDict) -> timedelta | None: creation_date, _, exclusion_start_dates, exclusion_end_dates = get_review_dates(rev) if creation_date is None: - logger.warning("Revision D{} has no creation date.".format(rev["id"])) + logger.warning("Revision D%s has no creation date.", rev["id"]) return None last_exclusion_start_date = max(exclusion_start_dates, default=None) @@ -295,9 +294,8 @@ def get_pending_review_time(rev: RevisionDict) -> timedelta | None: or last_exclusion_start_date > last_exclusion_end_date ): logger.warning( - "Revision D{} was in an inconsistent state (needs review, but is in an exception timespan).".format( - rev["id"] - ) + "Revision D%s was in an inconsistent state (needs review, but is in an exception timespan).", + rev["id"], ) if last_exclusion_end_date is not None: diff --git a/bugbug/repository.py b/bugbug/repository.py index e487dcaa99..ca4b872aa7 100644 --- a/bugbug/repository.py +++ b/bugbug/repository.py @@ -670,7 +670,9 @@ def set_commit_metrics( try: get_space_metrics(commit.metrics, after_metrics["spaces"]) except AnalysisException: - logger.debug(f"rust-code-analysis error on commit {commit.node}, path {path}") + logger.debug( + "rust-code-analysis error on commit %s, path %s", commit.node, path + ) before_metrics_dict = get_total_metrics_dict() try: @@ -679,7 +681,9 @@ def set_commit_metrics( before_metrics_dict, before_metrics["spaces"], calc_summaries=False ) except AnalysisException: - logger.debug(f"rust-code-analysis error on commit {commit.node}, path {path}") + logger.debug( + "rust-code-analysis error on commit %s, path %s", commit.node, path + ) commit.metrics_diff = { f"{metric}_total": commit.metrics[f"{metric}_total"] @@ -704,7 +708,10 @@ def set_commit_metrics( get_space_metrics(metrics_dict, func, calc_summaries=False) except AnalysisException: logger.debug( - f"rust-code-analysis error on commit 
{commit.node}, path {path}, function {func['name']}"
+                    "rust-code-analysis error on commit %s, path %s, function %s",
+                    commit.node,
+                    path,
+                    func["name"],
                 )
 
             commit.functions[path].append(
@@ -734,7 +741,7 @@ def transform(hg: hglib.client, repo_dir: str, commit: Commit) -> Commit:
     try:
         patch_data = rs_parsepatch.get_lines(patch)
     except Exception:
-        logger.error(f"Exception while analyzing {commit.node}")
+        logger.error("Exception while analyzing %s", commit.node)
         raise
 
     for stats in patch_data:
@@ -1185,7 +1192,10 @@ def update_complex_experiences(
                     )
                 else:
                     logger.warning(
-                        f"Experience missing for file {orig}, type '{commit_type}', on commit {commit.node}"
+                        "Experience missing for file %s, type '%s', on commit %s",
+                        orig,
+                        commit_type,
+                        commit.node,
                     )
 
                 if (
diff --git a/bugbug/tools/build_repair/agent.py b/bugbug/tools/build_repair/agent.py
index bdb17e3d3b..7175bbd7c7 100644
--- a/bugbug/tools/build_repair/agent.py
+++ b/bugbug/tools/build_repair/agent.py
@@ -184,8 +184,11 @@ async def _run_stage(
             stop=stop_after_attempt(5),
             wait=wait_exponential_jitter(initial=2, max=60, jitter=5),
             before_sleep=lambda rs: logger.warning(
-                f"Bug {bug_id}: {stage_name} transient error "
-                f"(attempt {rs.attempt_number}/5), retrying: {rs.outcome.exception()}"
+                "Bug %s: %s transient error (attempt %d/5), retrying: %s",
+                bug_id,
+                stage_name,
+                rs.attempt_number,
+                rs.outcome.exception(),
             ),
             reraise=True,
         )
@@ -194,7 +197,7 @@ async def _query():
             async for message in query(prompt=prompt, options=options):
                 serialized = self._serialize_message(message)
                 transcript.append(serialized)
-                logger.debug(f"Bug {bug_id}: {stage_name} [{serialized['type']}]")
+                logger.debug("Bug %s: %s [%s]", bug_id, stage_name, serialized["type"])
                 if on_message:
                     on_message(stage_name, serialized)
                 if isinstance(message, ResultMessage):
@@ -247,8 +250,10 @@ def _prepare_input_files(self, failure: BuildFailure, worktree_path: Path) -> No
         out_dir.mkdir(parents=True, exist_ok=True)
 
         logger.info(
-            f"Prepared 
input files for bug {failure.bug_id} at {in_dir} "
-            f"({len(failure.failure_tasks)} failure tasks)"
+            "Prepared input files for bug %s at %s (%d failure tasks)",
+            failure.bug_id,
+            in_dir,
+            len(failure.failure_tasks),
         )
 
     def _read_output(self, failure: BuildFailure, worktree_path: Path, key: str) -> str:
@@ -267,9 +272,13 @@ async def run(
         on_message: Callable[[str, dict], None] | None = None,
     ) -> AgentResponse:
         logger.info(
-            f"Starting build repair for bug {failure.bug_id} "
-            f"(commit={failure.git_commit}, worktree={worktree_path}, "
-            f"analysis_only={self.analysis_only}, skip_try_push={skip_try_push})"
+            "Starting build repair for bug %s "
+            "(commit=%s, worktree=%s, analysis_only=%s, skip_try_push=%s)",
+            failure.bug_id,
+            failure.git_commit,
+            worktree_path,
+            self.analysis_only,
+            skip_try_push,
         )
 
         self._prepare_input_files(failure, worktree_path)
@@ -280,8 +289,9 @@ async def run(
         total_usage: dict = {}
 
         logger.info(
-            f"Bug {failure.bug_id}: starting Stage 1 (analysis) "
-            f"with model={self.analysis_model}"
+            "Bug %s: starting Stage 1 (analysis) with model=%s",
+            failure.bug_id,
+            self.analysis_model,
         )
         stage1_options = ClaudeAgentOptions(
             model=self.analysis_model,
@@ -321,7 +331,9 @@ async def run(
                     total_usage[k] = total_usage.get(k, 0) + v
         except Exception as e:
-            logger.error(
-                f"Bug {failure.bug_id}: Stage 1 (analysis) failed: {e}", exc_info=True
+            logger.exception(
+                "Bug %s: Stage 1 (analysis) failed: %s",
+                failure.bug_id,
+                e,
             )
             return AgentResponse(
                 error=str(e),
                 summary="",
                 analysis="",
                 diff="",
                 transcript=transcript,
             )
 
         logger.info(
-            f"Bug {failure.bug_id}: Stage 1 complete "
-            f"(cost=${total_cost:.4f}, turns={total_turns})"
+            "Bug %s: Stage 1 complete (cost=$%.4f, turns=%d)",
+            failure.bug_id,
+            total_cost,
+            total_turns,
         )
         summary = self._read_output(failure, worktree_path, "summary")
         analysis = self._read_output(failure, worktree_path, "analysis")
         logger.info(
-            f"Bug {failure.bug_id}: read output files "
-            f"(summary={len(summary)} chars, 
analysis={len(analysis)} chars)" + "Bug %s: read output files (summary=%d chars, analysis=%d chars)", + failure.bug_id, + len(summary), + len(analysis), ) if self.analysis_only: - logger.info(f"Bug {failure.bug_id}: analysis-only mode, skipping Stage 2") + logger.info("Bug %s: analysis-only mode, skipping Stage 2", failure.bug_id) return AgentResponse( summary=summary, analysis=analysis, @@ -356,7 +371,9 @@ async def run( ) logger.info( - f"Bug {failure.bug_id}: starting Stage 2 (fix) with model={self.fix_model}" + "Bug %s: starting Stage 2 (fix) with model=%s", + failure.bug_id, + self.fix_model, ) stage2_options = ClaudeAgentOptions( model=self.fix_model, @@ -395,8 +412,10 @@ async def run( if isinstance(v, (int, float)): total_usage[k] = total_usage.get(k, 0) + v except Exception as e: - logger.error( - f"Bug {failure.bug_id}: Stage 2 (fix) failed: {e}", exc_info=True + logger.exception( + "Bug %s: Stage 2 (fix) failed: %s", + failure.bug_id, + e, ) return AgentResponse( summary=summary, @@ -410,8 +429,10 @@ async def run( ) logger.info( - f"Bug {failure.bug_id}: Stage 2 complete " - f"(cost=${total_cost:.4f}, turns={total_turns})" + "Bug %s: Stage 2 complete (cost=$%.4f, turns=%d)", + failure.bug_id, + total_cost, + total_turns, ) subprocess.run( @@ -426,10 +447,10 @@ async def run( text=True, ) diff = diff_result.stdout - logger.info(f"Bug {failure.bug_id}: git diff produced {len(diff)} chars") + logger.info("Bug %s: git diff produced %d chars", failure.bug_id, len(diff)) if not diff.strip(): - logger.warning(f"Bug {failure.bug_id}: no diff produced, returning early") + logger.warning("Bug %s: no diff produced, returning early", failure.bug_id) return AgentResponse( summary=summary, analysis=analysis, @@ -447,8 +468,10 @@ async def run( failure.failure_tasks[0]["task_name"] if failure.failure_tasks else "" ) logger.info( - f"Bug {failure.bug_id}: starting try verification " - f"(task={task_name}, skip_try_push={skip_try_push})" + "Bug %s: starting try 
verification (task=%s, skip_try_push=%s)", + failure.bug_id, + task_name, + skip_try_push, ) try_result = run_try_verification( worktree_path=worktree_path, @@ -458,11 +481,15 @@ async def run( ) logger.info( - f"Bug {failure.bug_id}: try verification done " - f"(local_build={try_result.local_build_passed}, " - f"try_build={try_result.try_build_passed}, " - f"lando_job={try_result.lando_job_id}, " - f"total_cost=${total_cost:.4f}, total_turns={total_turns})" + "Bug %s: try verification done " + "(local_build=%s, try_build=%s, lando_job=%s, " + "total_cost=$%.4f, total_turns=%d)", + failure.bug_id, + try_result.local_build_passed, + try_result.try_build_passed, + try_result.lando_job_id, + total_cost, + total_turns, ) return AgentResponse( summary=summary, @@ -483,8 +510,9 @@ async def run( stop=stop_after_attempt(3), wait=wait_exponential_jitter(initial=2, max=30, jitter=5), before_sleep=lambda rs: logger.warning( - f"Verification failed (attempt {rs.attempt_number}/3), " - f"retrying: {rs.outcome.exception()}" + "Verification failed (attempt %d/3), retrying: %s", + rs.attempt_number, + rs.outcome.exception(), ), reraise=True, ) @@ -524,8 +552,10 @@ async def verify( ) logger.info( - f"Bug {failure.bug_id}: starting verification stage " - f"(model={self.verify_model}, ground_truth={gt_commits})" + "Bug %s: starting verification stage (model=%s, ground_truth=%s)", + failure.bug_id, + self.verify_model, + gt_commits, ) transcript, cost, turns, usage = await self._run_stage( diff --git a/bugbug/tools/build_repair/scorer.py b/bugbug/tools/build_repair/scorer.py index 2fc702258c..da513ab13d 100644 --- a/bugbug/tools/build_repair/scorer.py +++ b/bugbug/tools/build_repair/scorer.py @@ -98,7 +98,7 @@ def summarize(self, score_rows: list[dict]) -> dict: } if self.num_trials > 1: summary.update(_pass_at_k(score_rows, self.num_trials, "successful")) - logger.info(f"BasicMetrics summary: {summary}") + logger.info("BasicMetrics summary: %s", summary) return summary @@ -136,7 
+136,7 @@ def summarize(self, score_rows: list[dict]) -> dict: summary.update( _pass_at_k(score_rows, self.num_trials, "local_build_passed") ) - logger.info(f"BuildPassRate summary: {summary}") + logger.info("BuildPassRate summary: %s", summary) return summary @@ -203,5 +203,5 @@ def summarize(self, score_rows: list[dict]) -> dict: summary.update( _pass_at_k(score_rows, self.num_trials, "fix_matches_ground_truth") ) - logger.info(f"LLMFixMatching summary: {summary}") + logger.info("LLMFixMatching summary: %s", summary) return summary diff --git a/bugbug/tools/build_repair/try_server.py b/bugbug/tools/build_repair/try_server.py index 6ef558556e..c71ad0c854 100644 --- a/bugbug/tools/build_repair/try_server.py +++ b/bugbug/tools/build_repair/try_server.py @@ -43,7 +43,7 @@ class TryPushResult: def _commit_fix(worktree_path: Path, bug_id: int) -> None: - logger.info(f"Committing fix for bug {bug_id} in {worktree_path}") + logger.info("Committing fix for bug %s in %s", bug_id, worktree_path) subprocess.run( ["git", "add", "-A"], cwd=worktree_path, @@ -63,7 +63,7 @@ def _commit_fix(worktree_path: Path, bug_id: int) -> None: cwd=worktree_path, check=True, ) - logger.info(f"Bug {bug_id}: fix committed") + logger.info("Bug %s: fix committed", bug_id) def _run_subprocess( @@ -88,29 +88,29 @@ def _run_subprocess( def _run_local_build(worktree_path: Path) -> bool: capture = not logger.isEnabledFor(logging.DEBUG) - logger.info(f"Running bootstrap in {worktree_path}") + logger.info("Running bootstrap in %s", worktree_path) result = _run_subprocess( ["./mach", "--no-interactive", "bootstrap"], worktree_path, capture ) if result.returncode != 0: if capture and result.stderr: - logger.warning(f"Bootstrap stderr:\n{result.stderr[-2000:]}") + logger.warning("Bootstrap stderr:\n%s", result.stderr[-2000:]) raise RuntimeError( f"Local bootstrap failed with return code {result.returncode}" ) - logger.info(f"Running local build in {worktree_path}") + logger.info("Running local build in 
%s", worktree_path) result = _run_subprocess(["./mach", "build"], worktree_path, capture) passed = result.returncode == 0 status = "passed" if passed else "failed" - logger.info(f"Local build {status} (returncode={result.returncode})") + logger.info("Local build %s (returncode=%s)", status, result.returncode) if not passed and capture and result.stderr: - logger.warning(f"Build stderr:\n{result.stderr[-2000:]}") + logger.warning("Build stderr:\n%s", result.stderr[-2000:]) return passed def _submit_try(worktree_path: Path, task_name: str) -> tuple[str | None, str | None]: - logger.info(f"Submitting try push for task={task_name} in {worktree_path}") + logger.info("Submitting try push for task=%s in %s", task_name, worktree_path) result = subprocess.run( ["./mach", "try", "fuzzy", "--query", task_name], cwd=worktree_path, @@ -119,16 +119,18 @@ def _submit_try(worktree_path: Path, task_name: str) -> tuple[str | None, str | env=_mach_env(worktree_path), ) stdout = result.stdout + result.stderr - logger.debug(f"Try push output: {stdout}") + logger.debug("Try push output: %s", stdout) match = _LANDO_JOB_ID_RE.search(stdout) if not match: - logger.warning(f"Could not parse Lando job ID from try output: {stdout}") + logger.warning("Could not parse Lando job ID from try output: %s", stdout) return None, None lando_job_id = match.group(1) treeherder_url = f"{TREEHERDER_BASE_URL}/jobs?repo=try&landoCommitID={lando_job_id}" logger.info( - f"Try push submitted: lando_job_id={lando_job_id}, treeherder={treeherder_url}" + "Try push submitted: lando_job_id=%s, treeherder=%s", + lando_job_id, + treeherder_url, ) return lando_job_id, treeherder_url @@ -146,7 +148,7 @@ def _get_push_revision(lando_job_id: str) -> str | None: if results: return results[0].get("revision") except Exception: - logger.exception(f"Error fetching push revision for lando job {lando_job_id}") + logger.exception("Error fetching push revision for lando job %s", lando_job_id) return None @@ -162,7 +164,7 @@ def 
_get_push_by_revision(revision: str) -> dict | None:
         results = resp.json().get("results", [])
         return results[0] if results else None
     except Exception:
-        logger.exception(f"Error fetching push by revision {revision}")
+        logger.exception("Error fetching push by revision %s", revision)
     return None
@@ -181,15 +183,17 @@ def _get_build_job_result(push_id: int, task_name: str) -> str | None:
                 return job["state"]
             return job["result"]
     except Exception:
-        logger.exception(f"Error fetching build job result for push {push_id}")
+        logger.exception("Error fetching build job result for push %s", push_id)
     return None
 
 
 def _poll_treeherder(lando_job_id: str, task_name: str) -> bool | None:
     logger.info(
-        f"Polling Treeherder for lando_job_id={lando_job_id}, task={task_name} "
-        f"(timeout={TRY_PUSH_TIMEOUT_SECONDS}s, "
-        f"interval={TRY_PUSH_POLL_INTERVAL_SECONDS}s)"
+        "Polling Treeherder for lando_job_id=%s, task=%s (timeout=%ss, interval=%ss)",
+        lando_job_id,
+        task_name,
+        TRY_PUSH_TIMEOUT_SECONDS,
+        TRY_PUSH_POLL_INTERVAL_SECONDS,
     )
     deadline = time.monotonic() + TRY_PUSH_TIMEOUT_SECONDS
     push_id: int | None = None
@@ -201,37 +205,41 @@
             revision = _get_push_revision(lando_job_id)
             if revision:
                 logger.info(
-                    f"Resolved revision={revision} for lando_job_id={lando_job_id}"
+                    "Resolved revision=%s for lando_job_id=%s", revision, lando_job_id
                 )
                 push = _get_push_by_revision(revision)
                 if push:
                     push_id = push["id"]
-                    logger.info(f"Resolved push_id={push_id} for revision={revision}")
+                    logger.info(
+                        "Resolved push_id=%s for revision=%s", push_id, revision
+                    )
 
         if push_id is not None:
             result = _get_build_job_result(push_id, task_name)
             logger.debug(
-                f"Poll #{poll_count}: job result={result} for push_id={push_id}"
+                "Poll #%s: job result=%s for push_id=%s", poll_count, result, push_id
            )
             if result == "success":
-                logger.info(f"Try build succeeded for lando_job_id={lando_job_id}")
+                logger.info("Try build succeeded for lando_job_id=%s", lando_job_id)
                 return True
             if result in ("busted", "testfailed", "exception"):
                 logger.info(
-                    f"Try build failed ({result}) for lando_job_id={lando_job_id}"
+                    "Try build failed (%s) for lando_job_id=%s", result, lando_job_id
                 )
                 return False
         else:
             logger.debug(
-                f"Poll #{poll_count}: push not yet available for "
-                f"lando_job_id={lando_job_id}"
+                "Poll #%s: push not yet available for lando_job_id=%s",
+                poll_count,
+                lando_job_id,
             )
         time.sleep(TRY_PUSH_POLL_INTERVAL_SECONDS)
 
     logger.warning(
-        f"Try push polling timed out after {poll_count} polls "
-        f"for lando job {lando_job_id}"
+        "Try push polling timed out after %s polls for lando job %s",
+        poll_count,
+        lando_job_id,
    )
     return None
@@ -243,14 +250,16 @@ def run_try_verification(
     skip_try_push: bool = False,
 ) -> TryPushResult:
     logger.info(
-        f"Starting try verification for bug {bug_id} "
-        f"(task={task_name}, skip_try_push={skip_try_push})"
+        "Starting try verification for bug %s (task=%s, skip_try_push=%s)",
+        bug_id,
+        task_name,
+        skip_try_push,
     )
     _commit_fix(worktree_path, bug_id)
 
     local_passed = _run_local_build(worktree_path)
     if not local_passed:
-        logger.warning(f"Bug {bug_id}: local build failed, skipping try push")
+        logger.warning("Bug %s: local build failed, skipping try push", bug_id)
         return TryPushResult(
             local_build_passed=False,
             try_build_passed=None,
         )
 
     if skip_try_push:
-        logger.info(f"Bug {bug_id}: local build passed, skipping try push as requested")
+        logger.info(
+            "Bug %s: local build passed, skipping try push as requested", bug_id
+        )
         return TryPushResult(
             local_build_passed=True,
             try_build_passed=None,
         )
 
     lando_job_id, treeherder_url = _submit_try(worktree_path, task_name)
     if not lando_job_id:
-        logger.warning(f"Bug {bug_id}: try push submission failed, no lando job ID")
+        logger.warning("Bug %s: try push submission failed, no lando job ID", bug_id)
         return TryPushResult(
local_build_passed=True, try_build_passed=None, diff --git a/bugbug/tools/build_repair/worktree.py b/bugbug/tools/build_repair/worktree.py index b3c1c6dfee..1fe2980738 100644 --- a/bugbug/tools/build_repair/worktree.py +++ b/bugbug/tools/build_repair/worktree.py @@ -27,7 +27,7 @@ def __init__( def create(self, commit_hash: str, name: str) -> Path: worktree_path = self.base_dir / name logger.info( - f"Creating worktree {name} at {worktree_path} (commit={commit_hash})" + "Creating worktree %s at %s (commit=%s)", name, worktree_path, commit_hash ) if worktree_path.exists(): self.cleanup(name) @@ -45,11 +45,11 @@ def create(self, commit_hash: str, name: str) -> Path: cwd=self.repo, check=True, ) - logger.info(f"Worktree {name} created") + logger.info("Worktree %s created", name) return worktree_path def cleanup(self, name: str) -> None: - logger.info(f"Cleaning up worktree {name}") + logger.info("Cleaning up worktree %s", name) # --force twice to operate on locked worktrees (see https://git-scm.com/docs/git-worktree#_options) result = subprocess.run( [ @@ -65,15 +65,17 @@ def cleanup(self, name: str) -> None: text=True, ) if result.returncode != 0: - logger.error(f"Failed to remove worktree {name}: {result.stderr.strip()}") + logger.error( + "Failed to remove worktree %s: %s", name, result.stderr.strip() + ) else: - logger.info(f"Worktree {name} removed") + logger.info("Worktree %s removed", name) def cleanup_all(self) -> None: - logger.info(f"Cleaning up all worktrees in {self.base_dir}") + logger.info("Cleaning up all worktrees in %s", self.base_dir) for entry in self.base_dir.iterdir(): if entry.is_dir(): - logger.info(f"Removing worktree {entry}") + logger.info("Removing worktree %s", entry) subprocess.run( ["git", "worktree", "remove", "--force", "--force", str(entry)], cwd=self.repo, diff --git a/bugbug/tools/code_review/scorer.py b/bugbug/tools/code_review/scorer.py index 5a6e78267d..f1ab394f11 100644 --- a/bugbug/tools/code_review/scorer.py +++ 
b/bugbug/tools/code_review/scorer.py @@ -127,8 +127,10 @@ def score( if gt_comment["file_path"] != gen_comment.file: logger.debug( - f"File mismatch for diff {diff_id}: " - f"{gt_comment['file_path']} != {gen_comment.file}" + "File mismatch for diff %s: %s != %s", + diff_id, + gt_comment["file_path"], + gen_comment.file, ) continue diff --git a/bugbug/tools/comment_resolution/agent.py b/bugbug/tools/comment_resolution/agent.py index daa6980d1f..c76e97e03b 100644 --- a/bugbug/tools/comment_resolution/agent.py +++ b/bugbug/tools/comment_resolution/agent.py @@ -377,7 +377,7 @@ def generate_fixes_for_all_comments(self, revision_id): } except Exception as e: logger.warning( - f"Error generating fix for comment {comment_id}: {e}" + "Error generating fix for comment %s: %s", comment_id, e ) comment_map[comment_id] = { "fix": f"Error: {e}", diff --git a/bugbug/tools/core/platforms/phabricator.py b/bugbug/tools/core/platforms/phabricator.py index 599c3a2a72..748a01ac15 100644 --- a/bugbug/tools/core/platforms/phabricator.py +++ b/bugbug/tools/core/platforms/phabricator.py @@ -77,7 +77,7 @@ def _get_users_info_batch_impl(user_phids: set[str]) -> dict[str, dict]: if not user_phids: return {} - logger.info(f"Fetching user info for {len(user_phids)} PHIDs") + logger.info("Fetching user info for %s PHIDs", len(user_phids)) # Get user names and nick users_response = phabricator.request( diff --git a/http_service/bugbug_http/app.py b/http_service/bugbug_http/app.py index cae3141378..8099f7effa 100644 --- a/http_service/bugbug_http/app.py +++ b/http_service/bugbug_http/app.py @@ -291,19 +291,19 @@ def is_pending(job): job_id = redis_conn.get(job.mapping_key) if not job_id: - LOGGER.debug(f"No job ID mapping {job_id}, False") + LOGGER.debug("No job ID mapping %s, False", job_id) return False try: job = Job.fetch(job_id.decode("ascii"), connection=redis_conn) except NoSuchJobError: - LOGGER.debug(f"No job in DB for {job_id}, False") + LOGGER.debug("No job in DB for %s, False", 
job_id) # The job might have expired from redis return False job_status = job.get_status() if job_status == "started": - LOGGER.debug(f"Job {job_id} is running, True") + LOGGER.debug("Job %s is running, True", job_id) return True # Enforce job timeout as RQ doesn't seems to do it https://github.com/rq/rq/issues/758 @@ -314,15 +314,15 @@ def is_pending(job): job.cancel() job.cleanup() - LOGGER.debug(f"Job timeout {job_id}, False") + LOGGER.debug("Job timeout %s, False", job_id) return False if job_status == "queued": - LOGGER.debug(f"Job {job_id} is queued, True") + LOGGER.debug("Job %s is queued, True", job_id) return True - LOGGER.debug(f"Job {job_id} has status {job_status}, False") + LOGGER.debug("Job %s has status %s, False", job_id, job_status) return False @@ -384,18 +384,18 @@ def is_prediction_invalidated(job, change_time): def clean_prediction_cache(job): # If the bug was modified since last time we classified it, clear the cache to avoid stale answer - LOGGER.debug(f"Cleaning results for {job}") + LOGGER.debug("Cleaning results for %s", job) redis_conn.delete(job.result_key) redis_conn.delete(job.change_time_key) def get_result(job: JobInfo) -> Any | None: - LOGGER.debug(f"Checking for existing results at {job.result_key}") + LOGGER.debug("Checking for existing results at %s", job.result_key) result = redis_conn.get(job.result_key) if result: - LOGGER.debug(f"Found {result!r}") + LOGGER.debug("Found %r", result) try: result = dctx.decompress(result) except zstandard.ZstdError: @@ -1129,7 +1129,7 @@ def patch_schedules(base_rev, patch_hash): redis_conn.set(patch_key, patch) redis_conn.expire(patch_key, 7 * 24 * 3600) # 7 days expiration - LOGGER.info(f"Stored patch with hash {patch_hash}") + LOGGER.info("Stored patch with hash %s", patch_hash) schedule_job(job) diff --git a/http_service/bugbug_http/download_models.py b/http_service/bugbug_http/download_models.py index 4cac343b04..5e2d421807 100644 --- a/http_service/bugbug_http/download_models.py +++ 
b/http_service/bugbug_http/download_models.py @@ -22,8 +22,8 @@ def download_models(): except FileNotFoundError: if ALLOW_MISSING_MODELS: LOGGER.info( - "Missing %r model, skipping because ALLOW_MISSING_MODELS is set" - % model_name + "Missing %r model, skipping because ALLOW_MISSING_MODELS is set", + model_name, ) return None else: diff --git a/http_service/bugbug_http/listener.py b/http_service/bugbug_http/listener.py index 45f9c9974f..95b98df288 100755 --- a/http_service/bugbug_http/listener.py +++ b/http_service/bugbug_http/listener.py @@ -86,11 +86,9 @@ def _on_message(body, message): url = "{}/push/{}/{}/schedules".format(BUGBUG_HTTP_SERVER, branch, rev) response = requests.get(url, headers={"X-Api-Key": "pulse_listener"}) if response.status_code == 202: - logger.info("Successfully requested {}/{}".format(branch, rev)) + logger.info("Successfully requested %s/%s", branch, rev) else: - logger.warning( - "We got status: {} for: {}".format(response.status_code, url) - ) + logger.warning("We got status: %s for: %s", response.status_code, url) except Exception: logger.warning(body) traceback.print_exc() diff --git a/http_service/bugbug_http/models.py b/http_service/bugbug_http/models.py index 23ad1fd6dd..a9102ce5c3 100644 --- a/http_service/bugbug_http/models.py +++ b/http_service/bugbug_http/models.py @@ -62,7 +62,7 @@ def setkey(key: str, value: bytes, compress: bool = False) -> None: - LOGGER.debug(f"Storing data at {key}: {value!r}") + LOGGER.debug("Storing data at %s: %r", key, value) if compress: value = cctx.compress(value) redis.set(key, value) @@ -93,7 +93,7 @@ def classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) - model = MODEL_CACHE.get(model_name) if not model: - LOGGER.info("Missing model %r, aborting" % model_name) + LOGGER.info("Missing model %r, aborting", model_name) return "NOK" model_extra_data = model.get_extra_data() @@ -153,7 +153,7 @@ def classify_issue( model = MODEL_CACHE.get(model_name) if not model: - 
LOGGER.info("Missing model %r, aborting" % model_name) + LOGGER.info("Missing model %r, aborting", model_name) return "NOK" model_extra_data = model.get_extra_data() @@ -199,7 +199,7 @@ def classify_broken_site_report(model_name: str, reports_data: list[dict]) -> st model = MODEL_CACHE.get(model_name) if not model: - LOGGER.info("Missing model %r, aborting" % model_name) + LOGGER.info("Missing model %r, aborting", model_name) return "NOK" model_extra_data = model.get_extra_data() @@ -247,7 +247,7 @@ def schedule_tests(branch: str, rev: str) -> str: try: revs = get_hgmo_stack(branch, rev) except requests.exceptions.RequestException: - LOGGER.warning(f"Push not found for {branch} @ {rev}!") + LOGGER.warning("Push not found for %s @ %s!", branch, rev) return "NOK" # On "try", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493). @@ -304,11 +304,11 @@ def schedule_tests_from_patch(base_rev: str, patch_hash: str) -> str: patch_data_raw = redis.get(patch_key) if not patch_data_raw: - LOGGER.error(f"Patch not found in Redis for hash {patch_hash}") + LOGGER.error("Patch not found in Redis for hash %s", patch_hash) return "NOK" hg_base_rev = utils.git2hg(base_rev) - LOGGER.info(f"Mapped git base rev {base_rev} to hg rev {hg_base_rev}") + LOGGER.info("Mapped git base rev %s to hg rev %s", base_rev, hg_base_rev) # Pull the base revision to the local repository LOGGER.info("Pulling base revision from the remote repository...") diff --git a/http_service/bugbug_http/readthrough_cache.py b/http_service/bugbug_http/readthrough_cache.py index 967538e160..a11b0162cc 100644 --- a/http_service/bugbug_http/readthrough_cache.py +++ b/http_service/bugbug_http/readthrough_cache.py @@ -47,7 +47,7 @@ def get(self, key, force_store=False): self.items_last_accessed[key] = datetime.datetime.now() if store_item: LOGGER.info( - f"Storing item with the following key in readthroughcache: {key}" + "Storing item with the following key in 
readthroughcache: %s", key ) self.items_storage[key] = item @@ -58,7 +58,8 @@ def purge_expired_entries(self): for key, time_last_touched in list(self.items_last_accessed.items()): if time_last_touched < purge_entries_before: LOGGER.info( - f"Evicting item with the following key from readthroughcache: {key}" + "Evicting item with the following key from readthroughcache: %s", + key, ) del self.items_last_accessed[key] del self.items_storage[key] diff --git a/pyproject.toml b/pyproject.toml index e678ab4224..0687730eb0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -137,7 +137,7 @@ members = ["http_service"] extend-exclude = ["data"] [tool.ruff.lint] -select = ["E4", "E7", "E9", "F", "I", "T10", "CPY"] +select = ["E4", "E7", "E9", "F", "I", "T10", "CPY", "G"] [tool.ruff.lint.isort] known-first-party = ["bugbug_http"] diff --git a/scripts/build_repair_eval.py b/scripts/build_repair_eval.py index b8106a5259..0dff16178e 100644 --- a/scripts/build_repair_eval.py +++ b/scripts/build_repair_eval.py @@ -171,7 +171,7 @@ def _register_model_costs(client) -> None: completion_token_cost=completion_cost, ) except Exception as e: - logger.debug(f"Could not register cost for {model_id}: {e}") + logger.debug("Could not register cost for %s: %s", model_id, e) def _make_weave_callback(): @@ -243,8 +243,10 @@ async def invoke( ) -> dict: wt_name = f"bug-{bug_id}-{uuid.uuid4().hex[:8]}" logger.info( - f"Invoking bug {bug_id} " - f"(commit={gh_failure_commits[0][:12]}, {len(failures)} failures)" + "Invoking bug %s (commit=%s, %s failures)", + bug_id, + gh_failure_commits[0][:12], + len(failures), ) worktree_created = False @@ -255,8 +257,10 @@ async def invoke( ) if datetime.fromisoformat(fix_commit_date).date() < cutoff: logger.warning( - f"Skipping bug {bug_id}: fix date {fix_commit_date} " - f"is before model cutoff {cutoff}" + "Skipping bug %s: fix date %s is before model cutoff %s", + bug_id, + fix_commit_date, + cutoff, ) raise ValueError("skipped_data_contamination") @@ 
-278,11 +282,15 @@ async def invoke( on_message=on_message, ) logger.info( - f"Bug {bug_id} completed: error={result.error}, " - f"diff_len={len(result.diff)}, cost=${result.cost_usd:.4f}, " - f"turns={result.num_turns}, " - f"local_build={result.local_build_passed}, " - f"try_build={result.try_build_passed}" + "Bug %s completed: error=%s, diff_len=%s, cost=$%.4f, turns=%s, " + "local_build=%s, try_build=%s", + bug_id, + result.error, + len(result.diff), + result.cost_usd, + result.num_turns, + result.local_build_passed, + result.try_build_passed, ) output = result.model_dump() @@ -303,7 +311,7 @@ async def invoke( return output finally: if worktree_created: - logger.info(f"Bug {bug_id}: cleaning up worktree {wt_name}") + logger.info("Bug %s: cleaning up worktree %s", bug_id, wt_name) self.worktree_mgr.cleanup(wt_name) @@ -334,10 +342,15 @@ def main() -> None: logging.getLogger("urllib3").setLevel(logging.WARNING) logger.info( - f"Starting evaluation: dataset={args.dataset}, limit={args.limit}, " - f"trials={args.trials}, parallelism={args.parallelism}, " - f"analysis_only={args.analysis_only}, no_try_push={args.no_try_push}, " - f"firefox_repo={args.firefox_repo}" + "Starting evaluation: dataset=%s, limit=%s, trials=%s, parallelism=%s, " + "analysis_only=%s, no_try_push=%s, firefox_repo=%s", + args.dataset, + args.limit, + args.trials, + args.parallelism, + args.analysis_only, + args.no_try_push, + args.firefox_repo, ) os.environ["WEAVE_PARALLELISM"] = str(args.parallelism) @@ -346,10 +359,10 @@ def main() -> None: _register_model_costs(client) dataset = weave.ref(args.dataset).get() - logger.info(f"Loaded dataset {args.dataset} with {len(dataset.rows)} rows") + logger.info("Loaded dataset %s with %s rows", args.dataset, len(dataset.rows)) if args.limit: dataset.rows = dataset.rows[: args.limit] - logger.info(f"Limited to {len(dataset.rows)} rows") + logger.info("Limited to %s rows", len(dataset.rows)) scorers = [ BasicMetricsScorer(num_trials=args.trials), @@ 
-357,7 +370,7 @@ def main() -> None: ] if not args.analysis_only: scorers.insert(1, BuildPassRateScorer(num_trials=args.trials)) - logger.info(f"Scorers: {[type(s).__name__ for s in scorers]}") + logger.info("Scorers: %s", [type(s).__name__ for s in scorers]) model = BuildRepairModel( firefox_repo=args.firefox_repo, @@ -371,7 +384,7 @@ def main() -> None: trials=args.trials, ) results = asyncio.run(evaluation.evaluate(model)) - logger.info(f"Evaluation results: {results}") + logger.info("Evaluation results: %s", results) if __name__ == "__main__": diff --git a/scripts/check_all_metrics.py b/scripts/check_all_metrics.py index 70ec8d5090..9cd664ff84 100644 --- a/scripts/check_all_metrics.py +++ b/scripts/check_all_metrics.py @@ -61,7 +61,7 @@ def get_model_name(queue, task_id: str): # Show a warning if no matching route was found, this can happen when the # current task has a dependency to a non-training task or if the route # pattern changes. - LOGGER.warning(f"No matching route found for task id {task_id}") + LOGGER.warning("No matching route found for task id %s", task_id) def get_model_names(task_id: str) -> list[str]: @@ -73,9 +73,10 @@ def get_model_names(task_id: str) -> list[str]: for i, task_id in enumerate(task["dependencies"]): LOGGER.info( - "Loading task dependencies {}/{} {}".format( - i + 1, len(task["dependencies"]), task_id - ) + "Loading task dependencies %s/%s %s", + i + 1, + len(task["dependencies"]), + task_id, ) model_name = get_model_name(queue, task_id) diff --git a/scripts/commit_classifier.py b/scripts/commit_classifier.py index 6ddcfe0610..769ab2aaf8 100644 --- a/scripts/commit_classifier.py +++ b/scripts/commit_classifier.py @@ -315,7 +315,7 @@ def apply_phab(self, hg, phabricator_deployment, diff_id): # Update repo to base revision hg_base = needed_stack[0].base_revision if not self.has_revision(hg, hg_base): - logger.warning("Missing base revision {} from Phabricator".format(hg_base)) + logger.warning("Missing base revision %s from 
Phabricator", hg_base) hg_base = "default" if hg_base: @@ -455,8 +455,8 @@ def generate_feature_importance_data(self, probs, importance): clean_X <= median ).sum() / clean_X.shape[0] - logger.info("Feature: {}".format(name)) - logger.info("Shap value: {}{}".format("+" if (is_positive) else "-", val)) + logger.info("Feature: %s", name) + logger.info("Shap value: %s%s", "+" if is_positive else "-", val) logger.info("spearman: %f", spearman) logger.info("value: %f", value) logger.info("overall mean: %f", np.mean(X)) diff --git a/scripts/inline_comments_data_collection.py b/scripts/inline_comments_data_collection.py index 7c0373dbde..505d5bb51a 100644 --- a/scripts/inline_comments_data_collection.py +++ b/scripts/inline_comments_data_collection.py @@ -120,9 +120,9 @@ def process_comments(limit, diff_length_limit): diffs = api.search_diffs(diff_phid=most_recent_update["fields"]["new"]) if not diffs: logger.error( - "No diff found for fix patch, PHID {}, revision {}".format( - most_recent_update["fields"]["new"], revision_info["id"] - ) + "No diff found for fix patch, PHID %s, revision %s", + most_recent_update["fields"]["new"], + revision_info["id"], ) continue @@ -142,9 +142,9 @@ def process_comments(limit, diff_length_limit): diffs = api.search_diffs(diff_phid=most_recent_update["fields"]["old"]) if not diffs: logger.error( - "No diff found for previous patch, PHID {}, revision {}".format( - most_recent_update["fields"]["old"], revision_info["id"] - ) + "No diff found for previous patch, PHID %s, revision %s", + most_recent_update["fields"]["old"], + revision_info["id"], ) continue @@ -155,7 +155,7 @@ def process_comments(limit, diff_length_limit): revision_id, previous_patch_id, fix_patch_id ) except Exception as e: - logger.error(f"Failed to fetch diff: {e}") + logger.error("Failed to fetch diff: %s", e) continue if len(patch_diff) > diff_length_limit: diff --git a/scripts/regressor_finder.py b/scripts/regressor_finder.py index 72b1a4fae3..b09437c94b 100644 --- 
a/scripts/regressor_finder.py +++ b/scripts/regressor_finder.py @@ -212,7 +212,9 @@ def find_bug_fixing_commits(self) -> None: start_date = datetime.now() - RELATIVE_START_DATE end_date = datetime.now() - RELATIVE_END_DATE logger.info( - f"Gathering bug IDs associated to commits (since {start_date} and up to {end_date})..." + "Gathering bug IDs associated to commits (since %s and up to %s)...", + start_date, + end_date, ) commit_map = defaultdict(list) for commit in repository.get_commits(): diff --git a/scripts/retrieve_ci_failures.py b/scripts/retrieve_ci_failures.py index 82590dbd24..a927d0d44d 100644 --- a/scripts/retrieve_ci_failures.py +++ b/scripts/retrieve_ci_failures.py @@ -103,7 +103,9 @@ def get_fixed_by_commit_pushes(): while start < today: end = min(start + timedelta(days=7), today) - logger.info(f"Retrieving 'fixed by commit' data between {start} and {end}...") + logger.info( + "Retrieving 'fixed by commit' data between %s and %s...", start, end + ) fixed_by_commit_elements += query_redash(start, end) start = end @@ -134,7 +136,7 @@ def get_fixed_by_commit_pushes(): } ) - logger.info(f"Analyzing {len(fixed_by_commit_pushes)} 'fixed by commit' pushes.") + logger.info("Analyzing %s 'fixed by commit' pushes.", len(fixed_by_commit_pushes)) backouts_by_bug_id = defaultdict(int) for commit in repository.get_commits(include_backouts=True): @@ -153,14 +155,14 @@ def get_fixed_by_commit_pushes(): no_relanding_bugs.add(bug_id) logger.info( - f"{len(no_relanding_bugs)} cases removed because there was no relanding." + "%s cases removed because there was no relanding.", len(no_relanding_bugs) ) for bug_id in no_relanding_bugs: del fixed_by_commit_pushes[bug_id] logger.info( - f"{len(fixed_by_commit_pushes)} 'fixed by commit' pushes left to analyze." + "%s 'fixed by commit' pushes left to analyze.", len(fixed_by_commit_pushes) ) # Skip cases where there are multiple backouts associated to the same bug ID. 
@@ -171,14 +173,15 @@ def get_fixed_by_commit_pushes(): multiple_backouts.add(bug_id) logger.info( - f"{len(multiple_backouts)} cases to be removed because there were multiple backouts in the same bug." + "%s cases to be removed because there were multiple backouts in the same bug.", + len(multiple_backouts), ) for multiple_backout in multiple_backouts: del fixed_by_commit_pushes[multiple_backout] logger.info( - f"{len(fixed_by_commit_pushes)} 'fixed by commit' pushes left to analyze." + "%s 'fixed by commit' pushes left to analyze.", len(fixed_by_commit_pushes) ) # Skip cases where there is no backout (and so the fix was a bustage fix). @@ -192,14 +195,15 @@ def get_fixed_by_commit_pushes(): no_backouts.add(bug_id) logger.info( - f"{len(no_backouts)} cases to be removed because there were no backouts in the bug." + "%s cases to be removed because there were no backouts in the bug.", + len(no_backouts), ) for no_backout in no_backouts: del fixed_by_commit_pushes[no_backout] logger.info( - f"{len(fixed_by_commit_pushes)} 'fixed by commit' pushes left to analyze." + "%s 'fixed by commit' pushes left to analyze.", len(fixed_by_commit_pushes) ) # TODO: skip cases where a single push contains multiple backouts? 
@@ -356,8 +360,8 @@ def generate_diffs(repo_url, repo_path, fixed_by_commit_pushes, upload): else: diff_errors += 1 - logger.info(f"Failed mapping {mapping_errors} hashes") - logger.info(f"Failed generating {diff_errors} diffs") + logger.info("Failed mapping %s hashes", mapping_errors) + logger.info("Failed generating %s diffs", diff_errors) def write_results(fixed_by_commit_pushes): diff --git a/scripts/retrieve_training_metrics.py b/scripts/retrieve_training_metrics.py index ef38a95910..f03a75e500 100644 --- a/scripts/retrieve_training_metrics.py +++ b/scripts/retrieve_training_metrics.py @@ -30,7 +30,7 @@ def get_task_metrics_from_uri(index_uri): r = requests.get(index_url) if r.status_code == 404: - LOGGER.error(f"File not found for URL {index_url}, check your arguments") + LOGGER.error("File not found for URL %s, check your arguments", index_url) sys.exit(1) r.raise_for_status() diff --git a/scripts/test_scheduling_history_retriever.py b/scripts/test_scheduling_history_retriever.py index c9e2fb8e48..e4f2f3ea6b 100644 --- a/scripts/test_scheduling_history_retriever.py +++ b/scripts/test_scheduling_history_retriever.py @@ -118,7 +118,7 @@ def generate( yield value except mozci.errors.MissingDataError: logger.warning( - f"Tasks for push {push.rev} can't be found on ActiveData" + "Tasks for push %s can't be found on ActiveData", push.rev ) except Exception: num_errors += 1