Coverage for britney2/policies/autopkgtest.py: 89%
795 statements
« prev ^ index » next — coverage.py v7.6.0, created at 2025-10-30 09:44 +0000
1# Copyright (C) 2013 - 2016 Canonical Ltd.
2# Authors:
3# Colin Watson <cjwatson@ubuntu.com>
4# Jean-Baptiste Lallement <jean-baptiste.lallement@canonical.com>
5# Martin Pitt <martin.pitt@ubuntu.com>
7# This program is free software; you can redistribute it and/or modify
8# it under the terms of the GNU General Public License as published by
9# the Free Software Foundation; either version 2 of the License, or
10# (at your option) any later version.
12# This program is distributed in the hope that it will be useful,
13# but WITHOUT ANY WARRANTY; without even the implied warranty of
14# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15# GNU General Public License for more details.
17import calendar
18import collections
19import http.client
20import io
21import itertools
22import json
23import optparse
24import os
25import socket
26import sys
27import tarfile
28import time
29import urllib.parse
30from collections.abc import Iterator
31from copy import deepcopy
32from enum import Enum
33from functools import lru_cache, total_ordering
34from typing import TYPE_CHECKING, Any, Optional, cast
35from urllib.error import HTTPError
36from urllib.request import urlopen
37from urllib.response import addinfourl
39import apt_pkg
41from britney2 import (
42 BinaryPackageId,
43 PackageId,
44 SourcePackage,
45 SuiteClass,
46 Suites,
47 TargetSuite,
48)
49from britney2.hints import HintAnnotate, HintType
50from britney2.migrationitem import MigrationItem
51from britney2.policies import PolicyVerdict
52from britney2.policies.policy import AbstractBasePolicy
53from britney2.utils import iter_except, parse_option
55if TYPE_CHECKING: 55 ↛ 56line 55 didn't jump to line 56 because the condition on line 55 was never true
56 import amqplib.client_0_8 as amqp
58 from ..britney import Britney
59 from ..excuse import Excuse
60 from ..hints import HintParser
@total_ordering
class Result(Enum):
    """Outcome of an autopkgtest run, ordered from best (PASS) to worst (NONE).

    The OLD_* variants mark results considered stale (e.g. reference runs
    past ADT_REFERENCE_MAX_AGE or results for versions no longer in any
    suite); NONE means no result is known.
    """

    PASS = 1
    NEUTRAL = 2
    FAIL = 3
    OLD_PASS = 4
    OLD_NEUTRAL = 5
    OLD_FAIL = 6
    NONE = 7

    def __lt__(self, other: "Result") -> bool:
        # total_ordering derives <=, >, >= from this together with Enum's
        # identity-based __eq__.  (The original "True if ... else False"
        # wrapper was redundant; the comparison already yields a bool.)
        return self.value < other.value
# Human-readable HTML snippets shown in excuses for each test status.
# Keys match the status strings stored per (testsrc, arch), including the
# RUNNING-* pseudo-states for tests that have been triggered but have no
# result yet.
EXCUSES_LABELS = {
    "PASS": '<span style="background:#87d96c">Pass</span>',
    "OLD_PASS": '<span style="background:#87d96c">Pass</span>',
    "NEUTRAL": '<span style="background:#e5c545">No tests, superficial or marked flaky</span>',
    "OLD_NEUTRAL": '<span style="background:#e5c545">No tests, superficial or marked flaky</span>',
    "FAIL": '<span style="background:#ff6666">Failed</span>',
    "OLD_FAIL": '<span style="background:#ff6666">Failed</span>',
    "ALWAYSFAIL": '<span style="background:#e5c545">Failed (not a regression)</span>',
    "REGRESSION": '<span style="background:#ff6666">Regression</span>',
    "IGNORE-FAIL": '<span style="background:#e5c545">Ignored failure</span>',
    "RUNNING": '<span style="background:#99ddff">Test triggered</span>',
    "RUNNING-ALWAYSFAIL": '<span style="background:#99ddff">Test triggered (will not be considered a regression)</span>',
    "RUNNING-IGNORE": '<span style="background:#99ddff">Test triggered (failure will be ignored)</span>',
    "RUNNING-REFERENCE": '<span style="background:#ff6666">Reference test triggered, but real test failed already</span>',
}

# Pseudo-trigger name used for baseline ("reference") test runs.
REF_TRIG = "migration-reference/0"

# JSON key carrying the format version of the pending-tests state file.
VERSION_KEY = "britney-autopkgtest-pending-file-version"
def srchash(src: str) -> str:
    """archive hash prefix for source package"""
    # Debian archive convention: "lib*" packages are sharded by their
    # first four characters, everything else by the first character.
    return src[:4] if src.startswith("lib") else src[0]
def added_pkgs_compared_to_target_suite(
    package_ids: frozenset[BinaryPackageId],
    target_suite: TargetSuite,
    *,
    invert: bool = False,
) -> Iterator[BinaryPackageId]:
    """Yield package ids whose *name* is absent from the target suite.

    With invert=True, yield the ones whose name IS present instead.
    Filtering is by package name, not by exact (name, version, arch) id.
    """
    in_target = set(target_suite.which_of_these_are_in_the_suite(package_ids))
    if invert:
        skip = {pkg.package_name for pkg in package_ids - in_target}
    else:
        skip = {pkg.package_name for pkg in in_target}
    yield from (pkg for pkg in package_ids if pkg.package_name not in skip)
def all_leaf_results(
    test_results: dict[str, dict[str, dict[str, list[Any]]]],
) -> Iterator[list[Any]]:
    """Iterate over every per-arch result list in a trigger -> src -> arch map."""
    return itertools.chain.from_iterable(
        per_arch.values()
        for per_src in test_results.values()
        for per_arch in per_src.values()
    )
def mark_result_as_old(result: Result) -> Result:
    """Convert current result into corresponding old result"""
    # Results without an OLD_* counterpart (and the OLD_* values
    # themselves) are returned unchanged.
    aged = {
        Result.FAIL: Result.OLD_FAIL,
        Result.PASS: Result.OLD_PASS,
        Result.NEUTRAL: Result.OLD_NEUTRAL,
    }
    return aged.get(result, result)
146class AutopkgtestPolicy(AbstractBasePolicy):
147 """autopkgtest regression policy for source migrations
149 Run autopkgtests for the excuse and all of its reverse dependencies, and
150 reject the upload if any of those regress.
151 """
    def __init__(self, options: optparse.Values, suite_info: Suites) -> None:
        """Initialise caches, ADT_* option defaults, URLs and the swift container.

        :param options: britney's parsed configuration options
        :param suite_info: suite metadata; this policy only applies to the
            primary source suite
        """
        super().__init__(
            "autopkgtest", options, suite_info, {SuiteClass.PRIMARY_SOURCE_SUITE}
        )
        # tests requested in this and previous runs
        # trigger -> src -> [arch]
        self.pending_tests: dict[str, dict[str, dict[str, int]]] | None = None
        self.pending_tests_file = os.path.join(
            self.state_dir, "autopkgtest-pending.json"
        )
        # inverse Testsuite-Triggers map: trigger source -> {sources testing it}
        self.testsuite_triggers: dict[str, set[str]] = {}
        # memoised baseline results: src -> arch -> result list
        self.result_in_baseline_cache: dict[str, dict[str, list[Any]]] = (
            collections.defaultdict(dict)
        )

        # file handle used when ADT_AMQP is a file:// URL (testing mode)
        self.amqp_file_handle: io.TextIOWrapper | None = None

        # Default values for this policy's options
        parse_option(options, "adt_baseline")
        parse_option(options, "adt_huge", to_int=True)
        parse_option(options, "adt_ppas")
        parse_option(options, "adt_reference_max_age", day_to_sec=True)
        parse_option(options, "adt_pending_max_age", default=5, day_to_sec=True)
        parse_option(options, "adt_regression_penalty", default=0, to_int=True)
        parse_option(options, "adt_log_url")  # see below for defaults
        parse_option(options, "adt_retry_url")  # see below for defaults
        parse_option(options, "adt_retry_older_than", day_to_sec=True)
        parse_option(options, "adt_results_cache_age", day_to_sec=True)
        parse_option(options, "adt_shared_results_cache")
        parse_option(options, "adt_success_bounty", default=0, to_int=True)
        parse_option(options, "adt_ignore_failure_for_new_tests", to_bool=True)

        # When ADT_RESULTS_CACHE_AGE is smaller than or equal to
        # ADT_REFERENCE_MAX_AGE old reference result will be removed from cache
        # before the newly scheduled results are in, potentially causing
        # additional waiting. For packages like glibc this might cause an
        # infinite delay as there will always be a package that's
        # waiting. Similarly for ADT_RETRY_OLDER_THAN.
        if self.options.adt_results_cache_age <= self.options.adt_reference_max_age:
            self.logger.warning(
                "Unexpected: ADT_REFERENCE_MAX_AGE bigger than ADT_RESULTS_CACHE_AGE"
            )
        if self.options.adt_results_cache_age <= self.options.adt_retry_older_than:
            self.logger.warning(
                "Unexpected: ADT_RETRY_OLDER_THAN bigger than ADT_RESULTS_CACHE_AGE"
            )

        if not self.options.adt_log_url:
            # Historical defaults
            if self.options.adt_swift_url.startswith("file://"):
                self.options.adt_log_url = os.path.join(
                    self.options.adt_ci_url,
                    "data",
                    "autopkgtest",
                    self.options.series,
                    "{arch}",
                    "{hash}",
                    "{package}",
                    "{run_id}",
                    "log.gz",
                )
            else:
                self.options.adt_log_url = os.path.join(
                    self.options.adt_swift_url,
                    "{swift_container}",
                    self.options.series,
                    "{arch}",
                    "{hash}",
                    "{package}",
                    "{run_id}",
                    "log.gz",
                )

        if hasattr(self.options, "adt_retry_url_mech"):
            self.logger.warning(
                "The ADT_RETRY_URL_MECH configuration has been deprecated."
            )
            self.logger.warning(
                "Instead britney now supports ADT_RETRY_URL for more flexibility."
            )
            if self.options.adt_retry_url:
                self.logger.error(
                    "Please remove the ADT_RETRY_URL_MECH as ADT_RETRY_URL will be used."
                )
            elif self.options.adt_retry_url_mech == "run_id":
                self.options.adt_retry_url = (
                    self.options.adt_ci_url + "api/v1/retry/{run_id}"
                )
        if not self.options.adt_retry_url:
            # Historical default
            self.options.adt_retry_url = (
                self.options.adt_ci_url
                + "request.cgi?"
                + "release={release}&arch={arch}&package={package}&trigger={trigger}{ppas}"
            )

        # results map: trigger -> src -> arch -> [passed, version, run_id, seen]
        # - trigger is "source/version" of an unstable package that triggered
        #   this test run.
        # - "passed" is a Result
        # - "version" is the package version of "src" of that test
        # - "run_id" is an opaque ID that identifies a particular test run for
        #   a given src/arch.
        # - "seen" is an approximate time stamp of the test run. How this is
        #   deduced depends on the interface used.
        self.test_results: dict[str, dict[str, dict[str, list[Any]]]] = {}
        if self.options.adt_shared_results_cache:
            self.results_cache_file = self.options.adt_shared_results_cache
        else:
            self.results_cache_file = os.path.join(
                self.state_dir, "autopkgtest-results.cache"
            )

        try:
            self.options.adt_ppas = self.options.adt_ppas.strip().split()
        except AttributeError:
            # adt_ppas was unset (None) -> no PPAs configured
            self.options.adt_ppas = []

        # PPA runs publish results to a dedicated container named after
        # the last PPA in the list.
        self.swift_container = "autopkgtest-" + options.series
        if self.options.adt_ppas:
            self.swift_container += "-" + options.adt_ppas[-1].replace("/", "-")

        # restrict adt_arches to architectures we actually run for
        self.adt_arches = []
        for arch in self.options.adt_arches.split():
            if arch in self.options.architectures:
                self.adt_arches.append(arch)
            else:
                self.logger.info(
                    "Ignoring ADT_ARCHES %s as it is not in architectures list", arch
                )
285 def __del__(self) -> None:
286 if self.amqp_file_handle: 286 ↛ exitline 286 didn't return from function '__del__' because the condition on line 286 was always true
287 try:
288 self.amqp_file_handle.close()
289 except AttributeError:
290 pass
292 def register_hints(self, hint_parser: "HintParser") -> None:
293 hint_parser.register_hint_type(
294 HintType(
295 "force-badtest",
296 versioned=HintAnnotate.OPTIONAL,
297 architectured=HintAnnotate.OPTIONAL,
298 )
299 )
300 hint_parser.register_hint_type(HintType("force-skiptest"))
    def initialise(self, britney: "Britney") -> None:
        """Load cached and pending results, ingest new debci results, connect AMQP.

        Called once per britney run before excuses are computed.  Raises
        RuntimeError for an unrecognised ADT_AMQP URL scheme.
        """
        super().initialise(britney)
        # We want to use the "current" time stamp in multiple locations
        time_now = round(time.time())
        if hasattr(self.options, "fake_runtime"):
            # test-suite hook: freeze "now" for reproducible runs
            time_now = int(self.options.fake_runtime)
        self._now = time_now
        # compute inverse Testsuite-Triggers: map, unifying all series
        self.logger.info("Building inverse testsuite_triggers map")
        for suite in self.suite_info:
            for src, data in suite.sources.items():
                for trigger in data.testsuite_triggers:
                    self.testsuite_triggers.setdefault(trigger, set()).add(src)
        target_suite_name = self.suite_info.target_suite.name

        os.makedirs(self.state_dir, exist_ok=True)
        self.read_pending_tests()

        # read the cached results that we collected so far
        if os.path.exists(self.results_cache_file):
            with open(self.results_cache_file) as f:
                test_results = json.load(f)
            self.test_results = self.check_and_upgrade_cache(test_results)
            self.logger.info("Read previous results from %s", self.results_cache_file)
        else:
            self.logger.info(
                "%s does not exist, re-downloading all results from swift",
                self.results_cache_file,
            )

        # read in the new results
        if self.options.adt_swift_url.startswith("file://"):
            debci_file = self.options.adt_swift_url[7:]
            if os.path.exists(debci_file):
                with open(debci_file) as f:
                    test_results = json.load(f)
                self.logger.info("Read new results from %s", debci_file)
                for res in test_results["results"]:
                    # if there's no date, the test didn't finish yet
                    if res["date"] is None:
                        continue
                    (test_suite, triggers, src, arch, ver, status, run_id, seen) = [
                        res["suite"],
                        res["trigger"],
                        res["package"],
                        res["arch"],
                        res["version"],
                        res["status"],
                        str(res["run_id"]),
                        # debci timestamps look like "2023-01-01T12:00:00.000";
                        # strip the fractional part and convert to epoch (UTC)
                        round(
                            calendar.timegm(
                                time.strptime(res["date"][0:-5], "%Y-%m-%dT%H:%M:%S")
                            )
                        ),
                    ]
                    if test_suite != target_suite_name:
                        # not requested for this target suite, so ignore
                        continue
                    if triggers is None:
                        # not requested for this policy, so ignore
                        continue
                    if status is None:
                        # still running => pending
                        continue
                    for trigger in triggers.split():
                        # remove matching test requests
                        self.remove_from_pending(trigger, src, arch, seen)
                        if status == "tmpfail":
                            # let's see if we still need it
                            continue
                        self.logger.debug(
                            "Results %s %s %s added", src, trigger, status
                        )
                        self.add_trigger_to_results(
                            trigger,
                            src,
                            ver,
                            arch,
                            run_id,
                            seen,
                            Result[status.upper()],
                        )
            else:
                self.logger.info(
                    "%s does not exist, no new data will be processed", debci_file
                )

        # The cache can contain results against versions of packages that
        # are not in any suite anymore. Strip those out, as we don't want
        # to use those results. Additionally, old references may be
        # filtered out.
        if self.options.adt_baseline == "reference":
            self.filter_old_results()

        # we need sources, binaries, and installability tester, so for now
        # remember the whole britney object
        self.britney = britney

        # Initialize AMQP connection
        self.amqp_channel: Optional["amqp.channel.Channel"] = None
        self.amqp_file_handle = None
        if self.options.dry_run:
            # dry-run: never send test requests anywhere
            return

        amqp_url = self.options.adt_amqp

        if amqp_url.startswith("amqp://"):
            # imported lazily so the dependency is only needed in AMQP setups
            import amqplib.client_0_8 as amqp

            # depending on the setup we connect to a AMQP server
            creds = urllib.parse.urlsplit(amqp_url, allow_fragments=False)
            self.amqp_con = amqp.Connection(
                creds.hostname, userid=creds.username, password=creds.password
            )
            self.amqp_channel = self.amqp_con.channel()
            self.logger.info("Connected to AMQP server")
        elif amqp_url.startswith("file://"):
            # or in Debian and in testing mode, adt_amqp will be a file:// URL
            amqp_file = amqp_url[7:]
            # line-buffered so requests are visible immediately
            self.amqp_file_handle = open(amqp_file, "w", 1)
        else:
            raise RuntimeError("Unknown ADT_AMQP schema %s" % amqp_url.split(":", 1)[0])
425 def check_and_upgrade_cache(
426 self, test_results: dict[str, dict[str, dict[str, list[Any]]]]
427 ) -> dict[str, dict[str, dict[str, list[Any]]]]:
428 for leaf_result in all_leaf_results(test_results):
429 leaf_result[0] = Result[leaf_result[0]]
431 # Drop results older than ADT_RESULTS_CACHE_AGE
432 for trigger in list(test_results.keys()):
433 for pkg in list(test_results[trigger].keys()):
434 for arch in list(test_results[trigger][pkg].keys()):
435 arch_result = test_results[trigger][pkg][arch]
436 if self._now - arch_result[3] > self.options.adt_results_cache_age: 436 ↛ 437line 436 didn't jump to line 437 because the condition on line 436 was never true
437 del test_results[trigger][pkg][arch]
438 if not test_results[trigger][pkg]: 438 ↛ 439line 438 didn't jump to line 439 because the condition on line 438 was never true
439 del test_results[trigger][pkg]
440 if not test_results[trigger]: 440 ↛ 441line 440 didn't jump to line 441 because the condition on line 440 was never true
441 del test_results[trigger]
443 return test_results
445 def filter_old_results(self) -> None:
446 """Remove results for old versions and reference runs from the cache.
448 For now, only delete reference runs. If we delete regular
449 results after a while, packages with lots of triggered tests may
450 never have all the results at the same time."""
452 test_results = self.test_results
454 for trigger, trigger_data in test_results.items():
455 for src, results in trigger_data.items():
456 for arch, result in results.items():
457 if (
458 trigger == REF_TRIG
459 and self._now - result[3] > self.options.adt_reference_max_age
460 ):
461 result[0] = mark_result_as_old(result[0])
462 elif not self.test_version_in_any_suite(src, result[1]):
463 result[0] = mark_result_as_old(result[0])
465 def test_version_in_any_suite(self, src: str, version: str) -> bool:
466 """Check if the mentioned version of src is found in a suite
468 To prevent regressions in the target suite, the result should be
469 from a test with the version of the package in either the source
470 suite or the target suite. The source suite is also valid,
471 because due to versioned test dependencies and Breaks/Conflicts
472 relations, regularly the version in the source suite is used
473 during testing.
474 """
476 versions = set()
477 for suite in self.suite_info:
478 try:
479 srcinfo = suite.sources[src]
480 except KeyError:
481 continue
482 versions.add(srcinfo.version)
484 valid_version = False
485 for ver in versions:
486 if apt_pkg.version_compare(ver, version) == 0:
487 valid_version = True
488 break
490 return valid_version
492 def save_pending_json(self) -> None:
493 # update the pending tests on-disk cache
494 self.logger.info(
495 "Updating pending requested tests in %s" % self.pending_tests_file
496 )
497 # Shallow clone pending_tests as we only modify the toplevel and change its type.
498 pending_tests: dict[str, Any] = {}
499 if self.pending_tests:
500 pending_tests = dict(self.pending_tests)
501 # Avoid adding if there are no pending results at all (eases testing)
502 pending_tests[VERSION_KEY] = 1
503 with open(self.pending_tests_file + ".new", "w") as f:
504 json.dump(pending_tests, f, indent=2)
505 os.rename(self.pending_tests_file + ".new", self.pending_tests_file)
507 def save_state(self, britney: "Britney") -> None:
508 super().save_state(britney)
510 # update the results on-disk cache, unless we are using a r/o shared one
511 if not self.options.adt_shared_results_cache:
512 self.logger.info("Updating results cache")
513 test_results = deepcopy(self.test_results)
514 for result in all_leaf_results(test_results):
515 result[0] = result[0].name
516 with open(self.results_cache_file + ".new", "w") as f:
517 json.dump(test_results, f, indent=2)
518 os.rename(self.results_cache_file + ".new", self.results_cache_file)
520 self.save_pending_json()
522 def format_retry_url(
523 self, run_id: str | None, arch: str, testsrc: str, trigger: str
524 ) -> str:
525 if self.options.adt_ppas:
526 ppas = "&" + urllib.parse.urlencode(
527 [("ppa", p) for p in self.options.adt_ppas]
528 )
529 else:
530 ppas = ""
531 return cast(str, self.options.adt_retry_url).format(
532 run_id=run_id,
533 release=self.options.series,
534 arch=arch,
535 package=testsrc,
536 trigger=urllib.parse.quote_plus(trigger),
537 ppas=ppas,
538 )
540 def format_log_url(self, testsrc: str, arch: str, run_id: str) -> str:
541 return cast(str, self.options.adt_log_url).format(
542 release=self.options.series,
543 swift_container=self.swift_container,
544 hash=srchash(testsrc),
545 package=testsrc,
546 arch=arch,
547 run_id=run_id,
548 )
    def apply_src_policy_impl(
        self,
        tests_info: dict[str, Any],
        item: MigrationItem,
        source_data_tdist: SourcePackage | None,
        source_data_srcdist: SourcePackage,
        excuse: "Excuse",
    ) -> PolicyVerdict:
        """Apply the autopkgtest policy to one source migration item.

        Requests/collects test results per architecture, renders them into
        the excuse (tests_info and HTML snippets), and returns the verdict.
        Also applies force-skiptest hints, the success bounty and the
        regression penalty.
        """
        assert self.hints is not None  # for type checking
        # initialize
        verdict = PolicyVerdict.PASS
        all_self_tests_pass = False
        source_name = item.package
        results_info = []

        # skip/delay autopkgtests until new package is built somewhere
        if not source_data_srcdist.binaries:
            self.logger.debug(
                "%s hasnot been built anywhere, skipping autopkgtest policy",
                excuse.name,
            )
            verdict = PolicyVerdict.REJECTED_TEMPORARILY
            excuse.add_verdict_info(verdict, "Autopkgtest deferred: missing all builds")

        if "all" in excuse.missing_builds:
            self.logger.debug(
                "%s hasnot been built for arch:all, skipping autopkgtest policy",
                source_name,
            )
            verdict = PolicyVerdict.REJECTED_TEMPORARILY
            excuse.add_verdict_info(
                verdict, "Autopkgtest deferred: missing arch:all build"
            )

        if not verdict.is_rejected:
            self.logger.debug("Checking autopkgtests for %s", source_name)
            trigger = source_name + "/" + source_data_srcdist.version

            # build a (testsrc, testver) → arch → (status, run_id, log_url) map; we trigger/check test
            # results per architecture for technical/efficiency reasons, but we
            # want to evaluate and present the results by tested source package
            # first
            pkg_arch_result: dict[
                tuple[str, str], dict[str, tuple[str, str | None, str]]
            ] = collections.defaultdict(dict)
            for arch in self.adt_arches:
                if arch in excuse.missing_builds:
                    verdict = PolicyVerdict.REJECTED_TEMPORARILY
                    self.logger.debug(
                        "%s hasnot been built on arch %s, delay autopkgtest there",
                        source_name,
                        arch,
                    )
                    excuse.add_verdict_info(
                        verdict,
                        f"Autopkgtest deferred on {arch}: missing arch:{arch} build",
                    )
                elif arch in excuse.policy_info["depends"].get(
                    "arch_all_not_installable", []
                ):
                    self.logger.debug(
                        "%s is uninstallable on arch %s (which is allowed), not running autopkgtest there",
                        source_name,
                        arch,
                    )
                    excuse.addinfo(
                        f"Autopkgtest skipped on {arch}: not installable (which is allowed)"
                    )
                elif (
                    arch in excuse.unsatisfiable_on_archs
                    and arch
                    not in excuse.policy_info["depends"].get(
                        "autopkgtest_run_anyways", []
                    )
                ):
                    verdict = PolicyVerdict.REJECTED_TEMPORARILY
                    self.logger.debug(
                        "%s is uninstallable on arch %s, not running autopkgtest there",
                        source_name,
                        arch,
                    )
                    excuse.addinfo(f"Autopkgtest skipped on {arch}: not installable")
                else:
                    # architecture is testable: request/collect results
                    self.request_tests_for_source(
                        item, arch, source_data_srcdist, pkg_arch_result, excuse
                    )

            # add test result details to Excuse
            cloud_url = self.options.adt_ci_url + "packages/%(h)s/%(s)s/%(r)s/%(a)s"
            testver: str | None
            for testsrc, testver in sorted(pkg_arch_result):
                assert testver is not None
                arch_results = pkg_arch_result[(testsrc, testver)]
                # set of distinct statuses across architectures
                r = {v[0] for v in arch_results.values()}
                if r & {"FAIL", "OLD_FAIL", "REGRESSION"}:
                    verdict = PolicyVerdict.REJECTED_PERMANENTLY
                elif r & {"RUNNING", "RUNNING-REFERENCE"} and not verdict.is_rejected:
                    verdict = PolicyVerdict.REJECTED_TEMPORARILY
                # skip version if still running on all arches
                if not r - {"RUNNING", "RUNNING-ALWAYSFAIL", "RUNNING-IGNORE"}:
                    testver = None

                # A source package is eligible for the bounty if it has tests
                # of its own that pass on all tested architectures.
                if testsrc == source_name:
                    excuse.autopkgtest_results = r
                    if r == {"PASS"}:
                        all_self_tests_pass = True

                if testver:
                    testname = f"{testsrc}/{testver}"
                else:
                    testname = testsrc

                html_archmsg = []
                for arch in sorted(arch_results):
                    (status, run_id, log_url) = arch_results[arch]
                    artifact_url = None
                    retry_url = None
                    reference_url = None
                    reference_retry_url = None
                    history_url = None
                    if self.options.adt_ppas:
                        # PPA runs have no public history page; link the
                        # artifacts tarball instead when a log exists
                        if log_url.endswith("log.gz"):
                            artifact_url = log_url.replace("log.gz", "artifacts.tar.gz")
                    else:
                        history_url = cloud_url % {
                            "h": srchash(testsrc),
                            "s": testsrc,
                            "r": self.options.series,
                            "a": arch,
                        }
                    if status not in ("PASS", "RUNNING", "RUNNING-IGNORE"):
                        retry_url = self.format_retry_url(
                            run_id, arch, testsrc, trigger
                        )

                    baseline_result = self.result_in_baseline(testsrc, arch)
                    if baseline_result and baseline_result[0] != Result.NONE:
                        baseline_run_id = str(baseline_result[2])
                        reference_url = self.format_log_url(
                            testsrc, arch, baseline_run_id
                        )
                        if self.options.adt_baseline == "reference":
                            reference_retry_url = self.format_retry_url(
                                baseline_run_id, arch, testsrc, REF_TRIG
                            )
                    tests_info.setdefault(testname, {})[arch] = [
                        status,
                        log_url,
                        history_url,
                        artifact_url,
                        retry_url,
                    ]

                    # render HTML snippet for testsrc entry for current arch
                    if history_url:
                        message = f'<a href="{history_url}">{arch}</a>'
                    else:
                        message = arch
                    message += ': <a href="{}">{}</a>'.format(
                        log_url,
                        EXCUSES_LABELS[status],
                    )
                    if retry_url:
                        message += (
                            '<a href="%s" style="text-decoration: none;"> ♻</a>'
                            % retry_url
                        )
                    if reference_url:
                        message += ' (<a href="%s">reference</a>' % reference_url
                        if reference_retry_url:
                            message += (
                                '<a href="%s" style="text-decoration: none;"> ♻</a>'
                                % reference_retry_url
                            )
                        message += ")"
                    if artifact_url:
                        message += ' <a href="%s">[artifacts]</a>' % artifact_url
                    html_archmsg.append(message)

                # render HTML line for testsrc entry
                # - if action is or may be required
                # - for ones own package
                if (
                    r
                    - {
                        "PASS",
                        "NEUTRAL",
                        "RUNNING-ALWAYSFAIL",
                        "ALWAYSFAIL",
                        "IGNORE-FAIL",
                    }
                    or testsrc == source_name
                ):
                    if testver:
                        pkg = '<a href="#{0}">{0}</a>/{1}'.format(testsrc, testver)
                    else:
                        pkg = '<a href="#{0}">{0}</a>'.format(testsrc)
                    results_info.append(
                        "Autopkgtest for {}: {}".format(pkg, ", ".join(html_archmsg))
                    )

        if verdict.is_rejected:
            # check for force-skiptest hint
            hints = self.hints.search(
                "force-skiptest",
                package=source_name,
                version=source_data_srcdist.version,
            )
            if hints:
                excuse.addreason("skiptest")
                excuse.addinfo(
                    "Autopkgtest check should wait for tests relating to %s %s, but forced by %s"
                    % (source_name, source_data_srcdist.version, hints[0].user)
                )
                verdict = PolicyVerdict.PASS_HINTED
            else:
                excuse.addreason("autopkgtest")

        if (
            self.options.adt_success_bounty
            and verdict == PolicyVerdict.PASS
            and all_self_tests_pass
        ):
            excuse.add_bounty("autopkgtest", self.options.adt_success_bounty)
        if self.options.adt_regression_penalty and verdict in {
            PolicyVerdict.REJECTED_PERMANENTLY,
            PolicyVerdict.REJECTED_TEMPORARILY,
        }:
            if self.options.adt_regression_penalty > 0:
                excuse.add_penalty("autopkgtest", self.options.adt_regression_penalty)
            # In case we give penalties instead of blocking, we must always pass
            verdict = PolicyVerdict.PASS
        for i in results_info:
            if verdict.is_rejected:
                excuse.add_verdict_info(verdict, i)
            else:
                excuse.addinfo(i)

        return verdict
792 #
793 # helper functions
794 #
796 @staticmethod
797 def has_autodep8(srcinfo: SourcePackage) -> bool:
798 """Check if package is covered by autodep8
800 srcinfo is an item from self.britney.sources
801 """
802 # autodep8?
803 for t in srcinfo.testsuite:
804 if t.startswith("autopkgtest-pkg"):
805 return True
807 return False
809 def request_tests_for_source(
810 self,
811 item: MigrationItem,
812 arch: str,
813 source_data_srcdist: SourcePackage,
814 pkg_arch_result: dict[tuple[str, str], dict[str, tuple[str, str | None, str]]],
815 excuse: "Excuse",
816 ) -> None:
817 pkg_universe = self.britney.pkg_universe
818 target_suite = self.suite_info.target_suite
819 source_suite = item.suite
820 sources_t = target_suite.sources
821 sources_s = item.suite.sources
822 packages_s_a = item.suite.binaries[arch]
823 source_name = item.package
824 source_version = source_data_srcdist.version
825 # request tests (unless they were already requested earlier or have a result)
826 tests = self.tests_for_source(source_name, source_version, arch, excuse)
827 is_huge = len(tests) > self.options.adt_huge
829 # Here we figure out what is required from the source suite
830 # for the test to install successfully.
831 #
832 # The ImplicitDependencyPolicy does a similar calculation, but
833 # if I (elbrus) understand correctly, only in the reverse
834 # dependency direction. We are doing something similar here
835 # but in the dependency direction (note: this code is older).
836 # We use the ImplicitDependencyPolicy result for the reverse
837 # dependencies and we keep the code below for the
838 # dependencies. Using the ImplicitDependencyPolicy results
839 # also in the reverse direction seems to require quite some
840 # reorganisation to get that information available here, as in
841 # the current state only the current excuse is available here
842 # and the required other excuses may not be calculated yet.
843 #
844 # Loop over all binary packages from trigger and
845 # recursively look up which *versioned* dependencies are
846 # only satisfied in the source suite.
847 #
848 # For all binaries found, look up which packages they
849 # break/conflict with in the target suite, but not in the
850 # source suite. The main reason to do this is to cover test
851 # dependencies, so we will check Testsuite-Triggers as
852 # well.
853 #
854 # OI: do we need to do the first check in a smart way
855 # (i.e. only for the packages that are actually going to be
856 # installed) for the breaks/conflicts set as well, i.e. do
857 # we need to check if any of the packages that we now
858 # enforce being from the source suite, actually have new
859 # versioned depends and new breaks/conflicts.
860 #
861 # For all binaries found, add the set of unique source
862 # packages to the list of triggers.
864 bin_triggers: set[PackageId] = set()
865 bin_new = set(source_data_srcdist.binaries)
866 for n_binary in iter_except(bin_new.pop, KeyError):
867 if n_binary in bin_triggers:
868 continue
869 bin_triggers.add(n_binary)
871 # Check if there is a dependency that is not
872 # available in the target suite.
873 # We add slightly too much here, because new binaries
874 # will also show up, but they are already properly
875 # installed. Nevermind.
876 depends = pkg_universe.dependencies_of(n_binary)
877 # depends is a frozenset{frozenset{BinaryPackageId, ..}}
878 for deps_of_bin in depends:
879 if target_suite.any_of_these_are_in_the_suite(deps_of_bin):
880 # if any of the alternative dependencies is already
881 # satisfied in the target suite, we can just ignore it
882 continue
883 # We'll figure out which version later
884 bin_new.update(
885 added_pkgs_compared_to_target_suite(deps_of_bin, target_suite)
886 )
888 # Check if the package breaks/conflicts anything. We might
889 # be adding slightly too many source packages due to the
890 # check here as a binary package that is broken may be
891 # coming from a different source package in the source
892 # suite. Nevermind.
893 bin_broken = set()
894 for t_binary in bin_triggers:
895 # broken is a frozenset{BinaryPackageId, ..}
896 broken = pkg_universe.negative_dependencies_of(
897 cast(BinaryPackageId, t_binary)
898 )
899 broken_in_target = {
900 p.package_name
901 for p in target_suite.which_of_these_are_in_the_suite(broken)
902 }
903 broken_in_source = {
904 p.package_name
905 for p in source_suite.which_of_these_are_in_the_suite(broken)
906 }
907 # We want packages with a newer version in the source suite that
908 # no longer has the conflict. This is an approximation
909 broken_filtered = {
910 p
911 for p in broken
912 if p.package_name in broken_in_target
913 and p.package_name not in broken_in_source
914 }
915 # We add the version in the target suite, but the code below will
916 # change it to the version in the source suite
917 bin_broken.update(broken_filtered)
918 bin_triggers.update(bin_broken)
920 # The ImplicitDependencyPolicy also found packages that need
921 # to migrate together, so add them to the triggers too.
922 for bin_implicit in excuse.depends_packages_flattened:
923 if bin_implicit.architecture == arch:
924 bin_triggers.add(bin_implicit)
926 triggers = set()
927 for t_binary2 in bin_triggers:
928 if t_binary2.architecture == arch:
929 try:
930 source_of_bin = packages_s_a[t_binary2.package_name].source
931 # If the version in the target suite is the same, don't add a trigger.
932 # Note that we looked up the source package in the source suite.
933 # If it were a different source package in the target suite, however, then
934 # we would not have this source package in the same version anyway.
935 if (
936 sources_t.get(source_of_bin, None) is None
937 or sources_s[source_of_bin].version
938 != sources_t[source_of_bin].version
939 ):
940 triggers.add(
941 source_of_bin + "/" + sources_s[source_of_bin].version
942 )
943 except KeyError:
944 # Apparently the package was removed from
945 # unstable e.g. if packages are replaced
946 # (e.g. -dbg to -dbgsym)
947 pass
948 if t_binary2 not in source_data_srcdist.binaries:
949 for tdep_src in self.testsuite_triggers.get( 949 ↛ 952line 949 didn't jump to line 952 because the loop on line 949 never started
950 t_binary2.package_name, set()
951 ):
952 try:
953 # Only add trigger if versions in the target and source suites are different
954 if (
955 sources_t.get(tdep_src, None) is None
956 or sources_s[tdep_src].version
957 != sources_t[tdep_src].version
958 ):
959 triggers.add(
960 tdep_src + "/" + sources_s[tdep_src].version
961 )
962 except KeyError:
963 # Apparently the source was removed from
964 # unstable (testsuite_triggers are unified
965 # over all suites)
966 pass
967 trigger = source_name + "/" + source_version
968 triggers.discard(trigger)
969 triggers_list = sorted(list(triggers))
970 triggers_list.insert(0, trigger)
972 for testsrc, testver in tests:
973 self.pkg_test_request(testsrc, arch, triggers_list, huge=is_huge)
974 (result, real_ver, run_id, url) = self.pkg_test_result(
975 testsrc, testver, arch, trigger
976 )
977 pkg_arch_result[(testsrc, real_ver)][arch] = (result, run_id, url)
    def tests_for_source(
        self, src: str, ver: str, arch: str, excuse: "Excuse"
    ) -> list[tuple[str, str]]:
        """Iterate over all tests that should be run for given source and arch

        Returns a sorted list of (test-source, test-version) tuples covering:
        the package itself (when it has an autopkgtest/autodep8 and built
        packages on this arch), reverse dependencies of its binaries that have
        tests, and sources declaring one of its binaries in
        Testsuite-Triggers.
        """

        source_suite = self.suite_info.primary_source_suite
        target_suite = self.suite_info.target_suite
        sources_info = target_suite.sources
        # binaries of the *target* suite on this architecture; used to map a
        # reverse-dependency binary back to its source package
        binaries_info = target_suite.binaries[arch]

        # sources already added to 'tests' (avoid duplicate triggers)
        reported_pkgs = set()

        tests = []

        # Debian doesn't have linux-meta, but Ubuntu does
        # for linux themselves we don't want to trigger tests -- these should
        # all come from linux-meta*. A new kernel ABI without a corresponding
        # -meta won't be installed and thus we can't sensibly run tests against
        # it.
        if (
            src.startswith("linux")
            and src.replace("linux", "linux-meta") in sources_info
        ):
            return []

        # we want to test the package itself, if it still has a test in unstable
        # but only if the package actually exists on this arch
        srcinfo = source_suite.sources[src]
        if ("autopkgtest" in srcinfo.testsuite or self.has_autodep8(srcinfo)) and len(
            excuse.packages[arch]
        ) > 0:
            reported_pkgs.add(src)
            tests.append((src, ver))

        # extra binaries to treat as if built by this source (kernel hack below)
        extra_bins = []
        # Debian doesn't have linux-meta, but Ubuntu does
        # Hack: For new kernels trigger all DKMS packages by pretending that
        # linux-meta* builds a "dkms" binary as well. With that we ensure that we
        # don't regress DKMS drivers with new kernel versions.
        if src.startswith("linux-meta"):
            # does this have any image on this arch?
            for pkg_id in srcinfo.binaries:
                if pkg_id.architecture == arch and "-image" in pkg_id.package_name:
                    try:
                        # NOTE(review): appended once per matching image
                        # binary, so "dkms" may appear multiple times in
                        # extra_bins; harmless but worth confirming.
                        extra_bins.append(binaries_info["dkms"].pkg_id)
                    except KeyError:
                        pass

        # no arch-specific binaries here (or anywhere): nothing to test on this arch
        if not self.has_built_on_this_arch_or_is_arch_all(srcinfo, arch):
            return []

        pkg_universe = self.britney.pkg_universe
        # plus all direct reverse dependencies and test triggers of its
        # binaries which have an autopkgtest
        for binary in itertools.chain(srcinfo.binaries, extra_bins):
            rdeps = pkg_universe.reverse_dependencies_of(binary)
            for rdep in rdeps:
                try:
                    rdep_src = binaries_info[rdep.package_name].source
                    # Don't re-trigger the package itself here; this should
                    # have been done above if the package still continues to
                    # have an autopkgtest in unstable.
                    if rdep_src == src:
                        continue
                except KeyError:
                    # reverse dependency not in the target suite on this arch
                    continue

                rdep_src_info = sources_info[rdep_src]
                if "autopkgtest" in rdep_src_info.testsuite or self.has_autodep8(
                    rdep_src_info
                ):
                    if rdep_src not in reported_pkgs:
                        tests.append((rdep_src, rdep_src_info.version))
                        reported_pkgs.add(rdep_src)

            # sources that declared this binary in their Testsuite-Triggers
            for tdep_src in self.testsuite_triggers.get(binary.package_name, set()):
                if tdep_src not in reported_pkgs:
                    try:
                        tdep_src_info = sources_info[tdep_src]
                    except KeyError:
                        continue
                    if "autopkgtest" in tdep_src_info.testsuite or self.has_autodep8(
                        tdep_src_info
                    ):
                        # only trigger if that source built something on this arch
                        for pkg_id in tdep_src_info.binaries:
                            if pkg_id.architecture == arch:
                                tests.append((tdep_src, tdep_src_info.version))
                                reported_pkgs.add(tdep_src)
                                break

        tests.sort(key=lambda s_v: s_v[0])
        return tests
    def read_pending_tests(self) -> None:
        """Read pending test requests from previous britney runs

        Initialize self.pending_tests with that data.

        The file layout is {trigger: {src: {arch: request-timestamp}}}.  A
        top-level VERSION_KEY entry marks the current format; files without
        it are in the old {trigger: {src: [arch, ...]}} format and get
        migrated in place, stamping each arch with the current time.
        """
        assert self.pending_tests is None, "already initialized"
        if not os.path.exists(self.pending_tests_file):
            self.logger.info(
                "No %s, starting with no pending tests", self.pending_tests_file
            )
            self.pending_tests = {}
            return
        with open(self.pending_tests_file) as f:
            self.pending_tests = json.load(f)
            if VERSION_KEY in self.pending_tests:
                # current format: drop the version marker, then expire
                # requests older than adt_pending_max_age
                del self.pending_tests[VERSION_KEY]
                # iterate over list() copies because we delete while walking
                for trigger in list(self.pending_tests.keys()):
                    for pkg in list(self.pending_tests[trigger].keys()):
                        arch_dict = self.pending_tests[trigger][pkg]
                        for arch in list(arch_dict.keys()):
                            if (
                                self._now - arch_dict[arch]
                                > self.options.adt_pending_max_age
                            ):
                                del arch_dict[arch]
                        # prune empty levels so stale triggers disappear
                        if not arch_dict:
                            del self.pending_tests[trigger][pkg]
                    if not self.pending_tests[trigger]:
                        del self.pending_tests[trigger]
            else:
                # Migration code:
                # old format stored a plain list of arches per package;
                # convert to {arch: timestamp} using the current time
                for trigger_data in self.pending_tests.values():
                    for pkg, arch_list in trigger_data.items():
                        trigger_data[pkg] = {}
                        for arch in arch_list:
                            trigger_data[pkg][arch] = self._now

        self.logger.info(
            "Read pending requested tests from %s", self.pending_tests_file
        )
        self.logger.debug("%s", self.pending_tests)
1114 # this requires iterating over all triggers and thus is expensive;
1115 # cache the results
1116 @lru_cache(None)
1117 def latest_run_for_package(self, src: str, arch: str) -> str:
1118 """Return latest run ID for src on arch"""
1120 latest_run_id = ""
1121 for srcmap in self.test_results.values():
1122 try:
1123 run_id = srcmap[src][arch][2]
1124 except KeyError:
1125 continue
1126 if run_id > latest_run_id:
1127 latest_run_id = run_id
1128 return latest_run_id
1130 def urlopen_retry(self, url: str) -> http.client.HTTPResponse | addinfourl:
1131 """A urlopen() that retries on time outs or errors"""
1133 exc: Exception
1134 for retry in range(5): 1134 ↛ 1155line 1134 didn't jump to line 1155 because the loop on line 1134 didn't complete
1135 try:
1136 req = urlopen(url, timeout=30)
1137 code = req.getcode()
1138 if not code or 200 <= code < 300: 1138 ↛ 1134line 1138 didn't jump to line 1134 because the condition on line 1138 was always true
1139 return req # type: ignore[no-any-return]
1140 except TimeoutError as e: 1140 ↛ 1141line 1140 didn't jump to line 1141 because the exception caught by line 1140 didn't happen
1141 self.logger.info(
1142 "Timeout downloading '%s', will retry %d more times."
1143 % (url, 5 - retry - 1)
1144 )
1145 exc = e
1146 except HTTPError as e:
1147 if e.code not in (503, 502): 1147 ↛ 1149line 1147 didn't jump to line 1149 because the condition on line 1147 was always true
1148 raise
1149 self.logger.info(
1150 "Caught error %d downloading '%s', will retry %d more times."
1151 % (e.code, url, 5 - retry - 1)
1152 )
1153 exc = e
1154 else:
1155 raise exc
    # lru_cache: deduplicates fetches of the same (swift_url, src, arch)
    # within one run; the method is called for side effects only
    @lru_cache(None)
    def fetch_swift_results(self, swift_url: str, src: str, arch: str) -> None:
        """Download new results for source package/arch from swift

        Lists result directories newer than the latest known run id and
        feeds each one to fetch_one_result().  Exits the process on
        infrastructure failures (other than a 401, which swift uses for a
        nonexistent container).
        """

        # prepare query: get all runs with a timestamp later than the latest
        # run_id for this package/arch; '@' is at the end of each run id, to
        # mark the end of a test run directory path
        # example: <autopkgtest-wily>wily/amd64/libp/libpng/20150630_054517@/result.tar
        query = {
            "delimiter": "@",
            "prefix": f"{self.options.series}/{arch}/{srchash(src)}/{src}/",
        }

        # determine latest run_id from results
        if not self.options.adt_shared_results_cache:
            latest_run_id = self.latest_run_for_package(src, arch)
            if latest_run_id:
                # swift 'marker' makes the listing start after this entry
                query["marker"] = query["prefix"] + latest_run_id

        # request new results from swift
        url = os.path.join(swift_url, self.swift_container)
        url += "?" + urllib.parse.urlencode(query)
        f = None
        try:
            f = self.urlopen_retry(url)
            if f.getcode() == 200:
                result_paths = f.read().decode().strip().splitlines()
            elif f.getcode() == 204:  # No content
                result_paths = []
            else:
                # we should not ever end up here as we expect a HTTPError in
                # other cases; e. g. 3XX is something that tells us to adjust
                # our URLS, so fail hard on those
                raise NotImplementedError(
                    "fetch_swift_results(%s): cannot handle HTTP code %r"
                    % (url, f.getcode())
                )
        except OSError as e:
            # 401 "Unauthorized" is swift's way of saying "container does not exist"
            if getattr(e, "code", -1) == 401:
                self.logger.info(
                    "fetch_swift_results: %s does not exist yet or is inaccessible", url
                )
                return
            # Other status codes are usually a transient
            # network/infrastructure failure. Ignoring this can lead to
            # re-requesting tests which we already have results for, so
            # fail hard on this and let the next run retry.
            self.logger.error("Failure to fetch swift results from %s: %s", url, str(e))
            sys.exit(1)
        finally:
            if f is not None:
                f.close()

        for p in result_paths:
            self.fetch_one_result(
                os.path.join(swift_url, self.swift_container, p, "result.tar"),
                src,
                arch,
            )
1218 def fetch_one_result(self, url: str, src: str, arch: str) -> None:
1219 """Download one result URL for source/arch
1221 Remove matching pending_tests entries.
1222 """
1223 f = None
1224 try:
1225 f = self.urlopen_retry(url)
1226 if f.getcode() == 200: 1226 ↛ 1229line 1226 didn't jump to line 1229 because the condition on line 1226 was always true
1227 tar_bytes = io.BytesIO(f.read())
1228 else:
1229 raise NotImplementedError(
1230 "fetch_one_result(%s): cannot handle HTTP code %r"
1231 % (url, f.getcode())
1232 )
1233 except OSError as err:
1234 self.logger.error("Failure to fetch %s: %s", url, str(err))
1235 # we tolerate "not found" (something went wrong on uploading the
1236 # result), but other things indicate infrastructure problems
1237 if getattr(err, "code", -1) == 404:
1238 return
1239 sys.exit(1)
1240 finally:
1241 if f is not None: 1241 ↛ exit, 1241 ↛ 12432 missed branches: 1) line 1241 didn't return from function 'fetch_one_result' because the return on line 1238 wasn't executed, 2) line 1241 didn't jump to line 1243 because the condition on line 1241 was always true
1242 f.close() 1242 ↛ exitline 1242 didn't return from function 'fetch_one_result' because the return on line 1238 wasn't executed
1243 try:
1244 with tarfile.open(None, "r", tar_bytes) as tar:
1245 exitcode = int(tar.extractfile("exitcode").read().strip()) # type: ignore[union-attr]
1246 srcver = tar.extractfile("testpkg-version").read().decode().strip() # type: ignore[union-attr]
1247 (ressrc, ver) = srcver.split()
1248 testinfo = json.loads(tar.extractfile("testinfo.json").read().decode()) # type: ignore[union-attr]
1249 except (KeyError, ValueError, tarfile.TarError) as err:
1250 self.logger.error("%s is damaged, ignoring: %s", url, str(err))
1251 # ignore this; this will leave an orphaned request in autopkgtest-pending.json
1252 # and thus require manual retries after fixing the tmpfail, but we
1253 # can't just blindly attribute it to some pending test.
1254 return
1256 if src != ressrc: 1256 ↛ 1257line 1256 didn't jump to line 1257 because the condition on line 1256 was never true
1257 self.logger.error(
1258 "%s is a result for package %s, but expected package %s",
1259 url,
1260 ressrc,
1261 src,
1262 )
1263 return
1265 # parse recorded triggers in test result
1266 for e in testinfo.get("custom_environment", []): 1266 ↛ 1271line 1266 didn't jump to line 1271 because the loop on line 1266 didn't complete
1267 if e.startswith("ADT_TEST_TRIGGERS="): 1267 ↛ 1266line 1267 didn't jump to line 1266 because the condition on line 1267 was always true
1268 result_triggers = [i for i in e.split("=", 1)[1].split() if "/" in i]
1269 break
1270 else:
1271 self.logger.error("%s result has no ADT_TEST_TRIGGERS, ignoring")
1272 return
1274 run_id = os.path.basename(os.path.dirname(url))
1275 seen = round(calendar.timegm(time.strptime(run_id, "%Y%m%d_%H%M%S@")))
1276 # allow some skipped tests, but nothing else
1277 if exitcode in [0, 2]:
1278 result = Result.PASS
1279 elif exitcode == 8: 1279 ↛ 1280line 1279 didn't jump to line 1280 because the condition on line 1279 was never true
1280 result = Result.NEUTRAL
1281 else:
1282 result = Result.FAIL
1284 self.logger.info(
1285 "Fetched test result for %s/%s/%s %s (triggers: %s): %s",
1286 src,
1287 ver,
1288 arch,
1289 run_id,
1290 result_triggers,
1291 result.name.lower(),
1292 )
1294 # remove matching test requests
1295 for trigger in result_triggers:
1296 self.remove_from_pending(trigger, src, arch)
1298 # add this result
1299 for trigger in result_triggers:
1300 self.add_trigger_to_results(trigger, src, ver, arch, run_id, seen, result)
1302 def remove_from_pending(
1303 self, trigger: str, src: str, arch: str, timestamp: int = sys.maxsize
1304 ) -> None:
1305 assert self.pending_tests is not None # for type checking
1306 try:
1307 arch_dict = self.pending_tests[trigger][src]
1308 if timestamp < arch_dict[arch]:
1309 # The result is from before the moment of scheduling, so it's
1310 # not the one we're waiting for
1311 return
1312 del arch_dict[arch]
1313 if not arch_dict:
1314 del self.pending_tests[trigger][src]
1315 if not self.pending_tests[trigger]:
1316 del self.pending_tests[trigger]
1317 self.logger.debug(
1318 "-> matches pending request %s/%s for trigger %s", src, arch, trigger
1319 )
1320 except KeyError:
1321 self.logger.debug(
1322 "-> does not match any pending request for %s/%s", src, arch
1323 )
1325 def add_trigger_to_results(
1326 self,
1327 trigger: str,
1328 src: str,
1329 ver: str,
1330 arch: str,
1331 run_id: str,
1332 timestamp: int,
1333 status_to_add: Result,
1334 ) -> None:
1335 # Ensure that we got a new enough version
1336 try:
1337 (trigsrc, trigver) = trigger.split("/", 1)
1338 except ValueError:
1339 self.logger.info("Ignoring invalid test trigger %s", trigger)
1340 return
1341 if trigsrc == src and apt_pkg.version_compare(ver, trigver) < 0: 1341 ↛ 1342line 1341 didn't jump to line 1342 because the condition on line 1341 was never true
1342 self.logger.debug(
1343 "test trigger %s, but run for older version %s, ignoring", trigger, ver
1344 )
1345 return
1347 stored_result = (
1348 self.test_results.setdefault(trigger, {})
1349 .setdefault(src, {})
1350 .setdefault(arch, [Result.FAIL, None, "", 0])
1351 )
1353 # reruns shouldn't flip the result from PASS or NEUTRAL to
1354 # FAIL, so remember the most recent version of the best result
1355 # we've seen. Except for reference updates, which we always
1356 # want to update with the most recent result. The result data
1357 # may not be ordered by timestamp, so we need to check time.
1358 update = False
1359 if self.options.adt_baseline == "reference" and trigger == REF_TRIG:
1360 if stored_result[3] < timestamp:
1361 update = True
1362 elif status_to_add < stored_result[0]:
1363 update = True
1364 elif status_to_add == stored_result[0] and stored_result[3] < timestamp:
1365 update = True
1367 if update:
1368 stored_result[0] = status_to_add
1369 stored_result[1] = ver
1370 stored_result[2] = run_id
1371 stored_result[3] = timestamp
    def send_test_request(
        self, src: str, arch: str, triggers: list[str], huge: bool = False
    ) -> None:
        """Send out AMQP request for testing src/arch for triggers

        If huge is true, then the request will be put into the -huge instead of
        normal queue.

        With adt_ppas set, requests always go to the dedicated PPA queue
        (taking precedence over 'huge').  Without an AMQP channel the
        request is appended to self.amqp_file_handle instead.
        """
        if self.options.dry_run:
            return

        params: dict[str, Any] = {"triggers": triggers}
        if self.options.adt_ppas:
            params["ppas"] = self.options.adt_ppas
            qname = f"debci-ppa-{self.options.series}-{arch}"
        elif huge:
            qname = f"debci-huge-{self.options.series}-{arch}"
        else:
            qname = f"debci-{self.options.series}-{arch}"
        params["submit-time"] = time.strftime("%Y-%m-%d %H:%M:%S%z", time.gmtime())

        if self.amqp_channel:
            # NOTE(review): 'amqp' is only imported under TYPE_CHECKING at
            # the top of this file; presumably a runtime import happens
            # wherever amqp_channel is set up -- verify.
            self.amqp_channel.basic_publish(
                amqp.Message(
                    src + "\n" + json.dumps(params), delivery_mode=2
                ),  # persistent
                routing_key=qname,
            )
            # we save pending.json with every request, so that if britney
            # crashes we don't re-request tests. This is only needed when using
            # real amqp, as with file-based submission the pending tests are
            # returned by debci along with the results each run.
            self.save_pending_json()
        else:
            # for file-based submission, triggers are space separated
            params["triggers"] = [" ".join(params["triggers"])]
            assert self.amqp_file_handle
            self.amqp_file_handle.write(f"{qname}:{src} {json.dumps(params)}\n")
    def pkg_test_request(
        self, src: str, arch: str, all_triggers: list[str], huge: bool = False
    ) -> None:
        """Request one package test for a set of triggers

        all_triggers is a list of "pkgname/version". These are the packages
        that will be taken from the source suite. The first package in this
        list is the package that triggers the testing of src, the rest are
        additional packages required for installability of the test deps. If
        huge is true, then the request will be put into the -huge instead of
        normal queue.

        This will only be done if that test wasn't already requested in
        a previous run (i. e. if it's not already in self.pending_tests)
        or if there is already a fresh or a positive result for it. This
        ensures to download current results for this package before
        requesting any test."""
        trigger = all_triggers[0]
        # file:// URLs mean file-based (debci) submission: no swift to poll
        uses_swift = not self.options.adt_swift_url.startswith("file://")
        try:
            result = self.test_results[trigger][src][arch]
            has_result = True
        except KeyError:
            has_result = False

        if has_result:
            result_state = result[0]
            if result_state in {Result.OLD_PASS, Result.OLD_FAIL, Result.OLD_NEUTRAL}:
                # results from an older version: always re-check below
                pass
            elif (
                result_state == Result.FAIL
                and self.result_in_baseline(src, arch)[0]
                in {Result.PASS, Result.NEUTRAL, Result.OLD_PASS, Result.OLD_NEUTRAL}
                and self._now - result[3] > self.options.adt_retry_older_than
            ):
                # We might want to retry this failure, so continue
                pass
            elif not uses_swift:
                # We're done if we don't retrigger and we're not using swift
                return
            elif result_state in {Result.PASS, Result.NEUTRAL}:
                self.logger.debug(
                    "%s/%s triggered by %s already known", src, arch, trigger
                )
                return

        # Without swift we don't expect new results
        if uses_swift:
            self.logger.info(
                "Checking for new results for failed %s/%s for trigger %s",
                src,
                arch,
                trigger,
            )
            self.fetch_swift_results(self.options.adt_swift_url, src, arch)
            # do we have one now?
            try:
                self.test_results[trigger][src][arch]
                return
            except KeyError:
                pass

        self.request_test_if_not_queued(src, arch, trigger, all_triggers, huge=huge)
1476 def request_test_if_not_queued(
1477 self,
1478 src: str,
1479 arch: str,
1480 trigger: str,
1481 all_triggers: list[str] = [],
1482 huge: bool = False,
1483 ) -> None:
1484 assert self.pending_tests is not None # for type checking
1485 if not all_triggers:
1486 all_triggers = [trigger]
1488 # Don't re-request if it's already pending
1489 arch_dict = self.pending_tests.setdefault(trigger, {}).setdefault(src, {})
1490 if arch in arch_dict.keys():
1491 self.logger.debug(
1492 "Test %s/%s for %s is already pending, not queueing", src, arch, trigger
1493 )
1494 else:
1495 self.logger.debug(
1496 "Requesting %s autopkgtest on %s to verify %s", src, arch, trigger
1497 )
1498 arch_dict[arch] = self._now
1499 self.send_test_request(src, arch, all_triggers, huge=huge)
    def result_in_baseline(self, src: str, arch: str) -> list[Any]:
        """Get the result for src on arch in the baseline

        The baseline is optionally all data or a reference set)

        Returns a [status, version, run_id, timestamp] list.  With
        adt_baseline == "reference" the REF_TRIG results are used; otherwise
        the best status ever seen across all triggers counts.
        """

        # this requires iterating over all cached results and thus is expensive;
        # cache the results
        try:
            return self.result_in_baseline_cache[src][arch]
        except KeyError:
            pass

        result_reference: list[Any] = [Result.NONE, None, "", 0]
        if self.options.adt_baseline == "reference":
            # package not in the target suite at all: nothing to compare
            # against (note: this early return is deliberately not cached)
            if src not in self.suite_info.target_suite.sources:
                return result_reference

            try:
                result_reference = self.test_results[REF_TRIG][src][arch]
                self.logger.debug(
                    "Found result for src %s in reference: %s",
                    src,
                    result_reference[0].name,
                )
            except KeyError:
                self.logger.debug(
                    "Found NO result for src %s in reference: %s",
                    src,
                    result_reference[0].name,
                )
            self.result_in_baseline_cache[src][arch] = deepcopy(result_reference)
            return result_reference

        # non-reference baseline: best status ever seen under any trigger
        result_ever: list[Any] = [Result.FAIL, None, "", 0]
        for srcmap in self.test_results.values():
            try:
                if srcmap[src][arch][0] != Result.FAIL:
                    result_ever = srcmap[src][arch]
                # If we are not looking at a reference run, We don't really
                # care about anything except the status, so we're done
                # once we find a PASS.
                if result_ever[0] == Result.PASS:
                    break
            except KeyError:
                pass

        self.result_in_baseline_cache[src][arch] = deepcopy(result_ever)
        self.logger.debug("Result for src %s ever: %s", src, result_ever[0].name)
        return result_ever
1552 def has_test_in_target(self, src: str) -> bool:
1553 test_in_target = False
1554 try:
1555 srcinfo = self.suite_info.target_suite.sources[src]
1556 if "autopkgtest" in srcinfo.testsuite or self.has_autodep8(srcinfo):
1557 test_in_target = True
1558 # AttributeError is only needed for the test suite as
1559 # srcinfo can be a NoneType
1560 except (KeyError, AttributeError):
1561 pass
1563 return test_in_target
    def pkg_test_result(
        self, src: str, ver: str, arch: str, trigger: str
    ) -> tuple[str, str, str | None, str]:
        """Get current test status of a particular package

        Return (status, real_version, run_id, log_url) tuple; status is a key in
        EXCUSES_LABELS. run_id is None if the test is still running.
        """
        assert self.pending_tests is not None  # for type checking
        # determine current test result status
        run_id = None
        try:
            r = self.test_results[trigger][src][arch]
            # r layout: [status, version, run_id, timestamp]
            ver = r[1]
            run_id = r[2]

            if r[0] in {Result.FAIL, Result.OLD_FAIL}:
                # determine current test result status
                baseline_result = self.result_in_baseline(src, arch)[0]

                # Special-case triggers from linux-meta*: we cannot compare
                # results against different kernels, as e. g. a DKMS module
                # might work against the default kernel but fail against a
                # different flavor; so for those, ignore the "ever
                # passed" check; FIXME: check against trigsrc only
                if self.options.adt_baseline != "reference" and (
                    trigger.startswith("linux-meta") or trigger.startswith("linux/")
                ):
                    baseline_result = Result.FAIL

                # Check if the autopkgtest (still) exists in the target suite
                test_in_target = self.has_test_in_target(src)

                # stale/missing baseline: request a fresh reference run
                if test_in_target and baseline_result in {
                    Result.NONE,
                    Result.OLD_FAIL,
                    Result.OLD_NEUTRAL,
                    Result.OLD_PASS,
                }:
                    self.request_test_if_not_queued(src, arch, REF_TRIG)

                # classify the failure (hint overrides > new test > baseline)
                if self.has_force_badtest(src, ver, arch):
                    result = "IGNORE-FAIL"
                elif not test_in_target:
                    if self.options.adt_ignore_failure_for_new_tests:
                        result = "IGNORE-FAIL"
                    else:
                        result = r[0].name
                elif baseline_result in {Result.FAIL, Result.OLD_FAIL}:
                    result = "ALWAYSFAIL"
                elif baseline_result == Result.NONE:
                    result = "RUNNING-REFERENCE"
                else:
                    result = "REGRESSION"

            else:
                # PASS/NEUTRAL (or their OLD_ variants): report as-is
                result = r[0].name

            url = self.format_log_url(src, arch, run_id)
        except KeyError:
            # no result for src/arch; still running?
            assert arch in self.pending_tests.get(trigger, {}).get(src, {}).keys(), (
                "Result for %s/%s/%s (triggered by %s) is neither known nor pending!"
                % (src, ver, arch, trigger)
            )
            if self.has_force_badtest(src, ver, arch):
                result = "RUNNING-IGNORE"
            else:
                if self.has_test_in_target(src):
                    baseline_result = self.result_in_baseline(src, arch)[0]
                    if baseline_result == Result.FAIL:
                        result = "RUNNING-ALWAYSFAIL"
                    else:
                        result = "RUNNING"
                else:
                    if self.options.adt_ignore_failure_for_new_tests:
                        result = "RUNNING-IGNORE"
                    else:
                        result = "RUNNING"
            url = self.options.adt_ci_url + "status/pending"

        return (result, ver, run_id, url)
1649 def has_force_badtest(self, src: str, ver: str, arch: str) -> bool:
1650 """Check if src/ver/arch has a force-badtest hint"""
1652 assert self.hints is not None
1653 hints = self.hints.search("force-badtest", package=src)
1654 if hints:
1655 self.logger.info(
1656 "Checking hints for %s/%s/%s: %s",
1657 src,
1658 arch,
1659 ver,
1660 [str(h) for h in hints],
1661 )
1662 for hint in hints:
1663 if [
1664 mi
1665 for mi in hint.packages
1666 if mi.architecture in ["source", arch]
1667 and (
1668 mi.version is None
1669 or mi.version == "all" # Historical unversioned hint
1670 or apt_pkg.version_compare(ver, mi.version) <= 0
1671 )
1672 ]:
1673 return True
1675 return False
1677 def has_built_on_this_arch_or_is_arch_all(
1678 self, src_data: SourcePackage, arch: str
1679 ) -> bool:
1680 """When a source builds arch:all binaries, those binaries are
1681 added to all architectures and thus the source 'exists'
1682 everywhere. This function checks if the source has any arch
1683 specific binaries on this architecture and if not, if it
1684 has them on any architecture.
1685 """
1686 packages_s_a = self.suite_info.primary_source_suite.binaries[arch]
1687 has_unknown_binary = False
1688 for binary_s in src_data.binaries:
1689 try:
1690 binary_u = packages_s_a[binary_s.package_name]
1691 except KeyError:
1692 # src_data.binaries has all the built binaries, so if
1693 # we get here, we know that at least one architecture
1694 # has architecture specific binaries
1695 has_unknown_binary = True
1696 continue
1697 if binary_u.architecture == arch:
1698 return True
1699 # If we get here, we have only seen arch:all packages for this
1700 # arch.
1701 return not has_unknown_binary