# Coverage-report residue (coverage.py v6.5.0, created at 2024-04-18 20:48 +0000):
# britney2/policies/autopkgtest.py: 90% of 760 statements covered.
# -*- coding: utf-8 -*-

# Copyright (C) 2013 - 2016 Canonical Ltd.
# Authors:
#   Colin Watson <cjwatson@ubuntu.com>
#   Jean-Baptiste Lallement <jean-baptiste.lallement@canonical.com>
#   Martin Pitt <martin.pitt@ubuntu.com>

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
import calendar
import collections
from copy import deepcopy
from enum import Enum
import os
import json
import tarfile
import io
import itertools
import re
import sys
import time
from typing import Any, Optional
import urllib.parse
from urllib.request import urlopen
from functools import lru_cache, total_ordering

import apt_pkg

import britney2.hints
from britney2 import SourcePackage, SuiteClass, Suites
from britney2.migrationitem import MigrationItem
from britney2.policies import PolicyVerdict
from britney2.policies.policy import BasePolicy
from britney2.utils import iter_except, parse_option
@total_ordering
class Result(Enum):
    """Outcome of an autopkgtest run, ordered from best (PASS) to worst.

    The OLD_* variants mark results that have aged out (reference runs older
    than ADT_REFERENCE_MAX_AGE, or results for versions no longer in any
    suite); NONE means no result is available at all.
    """
    PASS = 1
    NEUTRAL = 2
    FAIL = 3
    OLD_PASS = 4
    OLD_NEUTRAL = 5
    OLD_FAIL = 6
    NONE = 7

    def __lt__(self, other: "Result") -> bool:
        # total_ordering derives the remaining comparisons from this and
        # Enum's identity-based __eq__.
        return self.value < other.value
# Mapping from a test status keyword to the HTML label rendered in the excuses
# output. The OLD_* statuses render identically to their current counterparts.
EXCUSES_LABELS = {
    "PASS": '<span style="background:#87d96c">Pass</span>',
    "OLD_PASS": '<span style="background:#87d96c">Pass</span>',
    "NEUTRAL": '<span style="background:#e5c545">No tests, superficial or marked flaky</span>',
    "OLD_NEUTRAL": '<span style="background:#e5c545">No tests, superficial or marked flaky</span>',
    "FAIL": '<span style="background:#ff6666">Failed</span>',
    "OLD_FAIL": '<span style="background:#ff6666">Failed</span>',
    "ALWAYSFAIL": '<span style="background:#e5c545">Failed (not a regression)</span>',
    "REGRESSION": '<span style="background:#ff6666">Regression or new test</span>',
    "IGNORE-FAIL": '<span style="background:#e5c545">Ignored failure</span>',
    "RUNNING": '<span style="background:#99ddff">Test in progress</span>',
    "RUNNING-REFERENCE": '<span style="background:#ff6666">Reference test in progress, but real test failed already</span>',
    "RUNNING-ALWAYSFAIL": '<span style="background:#99ddff">Test in progress (will not be considered a regression)</span>',
}

# Pseudo-trigger used for baseline ("migration-reference") test runs.
REF_TRIG = 'migration-reference/0'

# Schema-version marker stored in the pending-tests JSON file.
VERSION_KEY = 'britney-autopkgtest-pending-file-version'
def srchash(src: str) -> str:
    '''archive hash prefix for source package

    Mirrors the Debian archive pool layout: 'lib*' packages are filed under
    a four-character prefix ('libf' for 'libfoo'), everything else under the
    first character.
    '''
    if src.startswith('lib'):
        return src[:4]
    else:
        return src[0]
def added_pkgs_compared_to_target_suite(package_ids, target_suite, *, invert: bool = False):
    """Yield package ids whose package name is not present in the target suite.

    With invert=True the selection is flipped: yield the package ids whose
    name *is* present in the target suite.
    """
    if invert:
        pkgs_ids_to_ignore = package_ids - set(target_suite.which_of_these_are_in_the_suite(package_ids))
        names_ignored = {p.package_name for p in pkgs_ids_to_ignore}
    else:
        names_ignored = {p.package_name for p in target_suite.which_of_these_are_in_the_suite(package_ids)}
    yield from (p for p in package_ids if p.package_name not in names_ignored)
def all_leaf_results(test_results):
    """Iterate over every leaf result entry in a trigger -> src -> arch map."""
    for trigger in test_results.values():
        for arch in trigger.values():
            yield from arch.values()
def mark_result_as_old(result: Result) -> Result:
    '''Convert current result into corresponding old result

    OLD_* and NONE inputs are returned unchanged.
    '''
    if result == Result.FAIL:
        result = Result.OLD_FAIL
    elif result == Result.PASS:
        result = Result.OLD_PASS
    elif result == Result.NEUTRAL:
        result = Result.OLD_NEUTRAL
    return result
117class AutopkgtestPolicy(BasePolicy):
118 """autopkgtest regression policy for source migrations
120 Run autopkgtests for the excuse and all of its reverse dependencies, and
121 reject the upload if any of those regress.
122 """
124 def __init__(self, options, suite_info: Suites):
125 super().__init__('autopkgtest', options, suite_info, {SuiteClass.PRIMARY_SOURCE_SUITE})
126 # tests requested in this and previous runs
127 # trigger -> src -> [arch]
128 self.pending_tests: Optional[dict[str, dict[str, dict[str, int]]]] = None
129 self.pending_tests_file = os.path.join(self.state_dir, 'autopkgtest-pending.json')
130 self.testsuite_triggers: dict[str, set[str]] = {}
131 self.result_in_baseline_cache = collections.defaultdict(dict)
133 # Default values for this policy's options
134 parse_option(options, 'adt_baseline')
135 parse_option(options, 'adt_huge', to_int=True)
136 parse_option(options, 'adt_ppas')
137 parse_option(options, 'adt_reference_max_age', day_to_sec=True)
138 parse_option(options, 'adt_pending_max_age', default=5, day_to_sec=True)
139 parse_option(options, 'adt_regression_penalty', default=0, to_int=True)
140 parse_option(options, 'adt_log_url') # see below for defaults
141 parse_option(options, 'adt_retry_url') # see below for defaults
142 parse_option(options, 'adt_retry_older_than', day_to_sec=True)
143 parse_option(options, 'adt_results_cache_age', day_to_sec=True)
144 parse_option(options, 'adt_shared_results_cache')
145 parse_option(options, 'adt_success_bounty', default=0, to_int=True)
146 parse_option(options, 'adt_ignore_failure_for_new_tests', to_bool=True)
148 # When ADT_RESULTS_CACHE_AGE is smaller than or equal to
149 # ADT_REFERENCE_MAX_AGE old reference result will be removed from cache
150 # before the newly scheduled results are in, potentially causing
151 # additional waiting. For packages like glibc this might cause an
152 # infinite delay as there will always be a package that's
153 # waiting. Similarly for ADT_RETRY_OLDER_THAN.
154 if self.options.adt_results_cache_age <= self.options.adt_reference_max_age:
155 self.logger.warning("Unexpected: ADT_REFERENCE_MAX_AGE bigger than ADT_RESULTS_CACHE_AGE")
156 if self.options.adt_results_cache_age <= self.options.adt_retry_older_than:
157 self.logger.warning("Unexpected: ADT_RETRY_OLDER_THAN bigger than ADT_RESULTS_CACHE_AGE")
159 if not self.options.adt_log_url: 159 ↛ 183line 159 didn't jump to line 183, because the condition on line 159 was never false
160 # Historical defaults
161 if self.options.adt_swift_url.startswith('file://'):
162 self.options.adt_log_url = os.path.join(
163 self.options.adt_ci_url,
164 'data',
165 'autopkgtest',
166 self.options.series,
167 '{arch}',
168 '{hash}',
169 '{package}',
170 '{run_id}',
171 'log.gz')
172 else:
173 self.options.adt_log_url = os.path.join(
174 self.options.adt_swift_url,
175 '{swift_container}',
176 self.options.series,
177 '{arch}',
178 '{hash}',
179 '{package}',
180 '{run_id}',
181 'log.gz')
183 if hasattr(self.options, 'adt_retry_url_mech'): 183 ↛ 184line 183 didn't jump to line 184, because the condition on line 183 was never true
184 self.logger.warning("The ADT_RETRY_URL_MECH configuration has been deprecated.")
185 self.logger.warning("Instead britney now supports ADT_RETRY_URL for more flexibility.")
186 if self.options.adt_retry_url:
187 self.logger.error("Please remove the ADT_RETRY_URL_MECH as ADT_RETRY_URL will be used.")
188 elif self.options.adt_retry_url_mech == 'run_id':
189 self.options.adt_retry_url = self.options.adt_ci_url + 'api/v1/retry/{run_id}'
190 if not self.options.adt_retry_url: 190 ↛ 204line 190 didn't jump to line 204, because the condition on line 190 was never false
191 # Historical default
192 self.options.adt_retry_url = self.options.adt_ci_url + 'request.cgi?' + \
193 'release={release}&arch={arch}&package={package}&trigger={trigger}{ppas}'
195 # results map: trigger -> src -> arch -> [passed, version, run_id, seen]
196 # - trigger is "source/version" of an unstable package that triggered
197 # this test run.
198 # - "passed" is a Result
199 # - "version" is the package version of "src" of that test
200 # - "run_id" is an opaque ID that identifies a particular test run for
201 # a given src/arch.
202 # - "seen" is an approximate time stamp of the test run. How this is
203 # deduced depends on the interface used.
204 self.test_results = {}
205 if self.options.adt_shared_results_cache:
206 self.results_cache_file = self.options.adt_shared_results_cache
207 else:
208 self.results_cache_file = os.path.join(self.state_dir, 'autopkgtest-results.cache')
210 try:
211 self.options.adt_ppas = self.options.adt_ppas.strip().split()
212 except AttributeError:
213 self.options.adt_ppas = []
215 self.swift_container = 'autopkgtest-' + options.series
216 if self.options.adt_ppas:
217 self.swift_container += '-' + options.adt_ppas[-1].replace('/', '-')
219 # restrict adt_arches to architectures we actually run for
220 self.adt_arches = []
221 for arch in self.options.adt_arches.split():
222 if arch in self.options.architectures:
223 self.adt_arches.append(arch)
224 else:
225 self.logger.info("Ignoring ADT_ARCHES %s as it is not in architectures list", arch)
227 def __del__(self) -> None:
228 try:
229 self.amqp_file_handle.close()
230 except AttributeError:
231 pass
233 def register_hints(self, hint_parser) -> None:
234 hint_parser.register_hint_type('force-badtest', britney2.hints.split_into_one_hint_per_package)
235 hint_parser.register_hint_type('force-skiptest', britney2.hints.split_into_one_hint_per_package)
237 def initialise(self, britney) -> None:
238 super().initialise(britney)
239 # We want to use the "current" time stamp in multiple locations
240 time_now = round(time.time())
241 if hasattr(self.options, 'fake_runtime'):
242 time_now = int(self.options.fake_runtime)
243 self._now = time_now
244 # compute inverse Testsuite-Triggers: map, unifying all series
245 self.logger.info('Building inverse testsuite_triggers map')
246 for suite in self.suite_info:
247 for src, data in suite.sources.items():
248 for trigger in data.testsuite_triggers:
249 self.testsuite_triggers.setdefault(trigger, set()).add(src)
250 target_suite_name = self.suite_info.target_suite.name
252 os.makedirs(self.state_dir, exist_ok=True)
253 self.read_pending_tests()
255 # read the cached results that we collected so far
256 if os.path.exists(self.results_cache_file):
257 with open(self.results_cache_file) as f:
258 test_results = json.load(f)
259 self.test_results = self.check_and_upgrade_cache(test_results)
260 self.logger.info('Read previous results from %s', self.results_cache_file)
261 else:
262 self.logger.info('%s does not exist, re-downloading all results from swift', self.results_cache_file)
264 # read in the new results
265 if self.options.adt_swift_url.startswith('file://'):
266 debci_file = self.options.adt_swift_url[7:]
267 if os.path.exists(debci_file):
268 with open(debci_file) as f:
269 test_results = json.load(f)
270 self.logger.info('Read new results from %s', debci_file)
271 for res in test_results['results']:
272 # tests denied on infrastructure don't get a version
273 if res['version'] is None: 273 ↛ 274line 273 didn't jump to line 274, because the condition on line 273 was never true
274 res['version'] = 'blocked-on-ci-infra'
275 (test_suite, triggers, src, arch, ver, status, run_id, seen) = ([
276 res['suite'],
277 res['trigger'],
278 res['package'],
279 res['arch'],
280 res['version'],
281 res['status'],
282 str(res['run_id']),
283 round(calendar.timegm(time.strptime(res['updated_at'][0:-5], '%Y-%m-%dT%H:%M:%S')))])
284 if test_suite != target_suite_name: 284 ↛ 286line 284 didn't jump to line 286, because the condition on line 284 was never true
285 # not requested for this target suite, so ignore
286 continue
287 if triggers is None: 287 ↛ 289line 287 didn't jump to line 289, because the condition on line 287 was never true
288 # not requested for this policy, so ignore
289 continue
290 if status is None:
291 # still running => pending
292 continue
293 for trigger in triggers.split():
294 # remove matching test requests
295 self.remove_from_pending(trigger, src, arch, seen)
296 if status == 'tmpfail': 296 ↛ 298line 296 didn't jump to line 298, because the condition on line 296 was never true
297 # let's see if we still need it
298 continue
299 self.logger.debug('Results %s %s %s added', src, trigger, status)
300 self.add_trigger_to_results(trigger, src, ver, arch, run_id, seen, Result[status.upper()])
301 else:
302 self.logger.info('%s does not exist, no new data will be processed', debci_file)
304 # The cache can contain results against versions of packages that
305 # are not in any suite anymore. Strip those out, as we don't want
306 # to use those results. Additionally, old references may be
307 # filtered out.
308 if self.options.adt_baseline == 'reference':
309 self.filter_old_results()
311 # we need sources, binaries, and installability tester, so for now
312 # remember the whole britney object
313 self.britney = britney
315 # Initialize AMQP connection
316 self.amqp_channel = None
317 self.amqp_file_handle = None
318 if self.options.dry_run: 318 ↛ 319line 318 didn't jump to line 319, because the condition on line 318 was never true
319 return
321 amqp_url = self.options.adt_amqp
323 if amqp_url.startswith('amqp://'): 323 ↛ 324line 323 didn't jump to line 324, because the condition on line 323 was never true
324 import amqplib.client_0_8 as amqp
325 # depending on the setup we connect to a AMQP server
326 creds = urllib.parse.urlsplit(amqp_url, allow_fragments=False)
327 self.amqp_con = amqp.Connection(creds.hostname, userid=creds.username,
328 password=creds.password)
329 self.amqp_channel = self.amqp_con.channel()
330 self.logger.info('Connected to AMQP server')
331 elif amqp_url.startswith('file://'): 331 ↛ 336line 331 didn't jump to line 336, because the condition on line 331 was never false
332 # or in Debian and in testing mode, adt_amqp will be a file:// URL
333 amqp_file = amqp_url[7:]
334 self.amqp_file_handle = open(amqp_file, 'w', 1)
335 else:
336 raise RuntimeError('Unknown ADT_AMQP schema %s' % amqp_url.split(':', 1)[0])
338 def check_and_upgrade_cache(self, test_results):
339 for result in all_leaf_results(test_results):
340 result[0] = Result[result[0]]
342 # Drop results older than ADT_RESULTS_CACHE_AGE
343 for trigger in list(test_results.keys()):
344 for pkg in list(test_results[trigger].keys()):
345 for arch in list(test_results[trigger][pkg].keys()):
346 result = test_results[trigger][pkg][arch]
347 if self._now - result[3] > self.options.adt_results_cache_age: 347 ↛ 348line 347 didn't jump to line 348, because the condition on line 347 was never true
348 del test_results[trigger][pkg][arch]
349 if not test_results[trigger][pkg]: 349 ↛ 350line 349 didn't jump to line 350, because the condition on line 349 was never true
350 del test_results[trigger][pkg]
351 if not test_results[trigger]: 351 ↛ 352line 351 didn't jump to line 352, because the condition on line 351 was never true
352 del test_results[trigger]
354 return test_results
356 def filter_old_results(self) -> None:
357 '''Remove results for old versions and reference runs from the cache.
359 For now, only delete reference runs. If we delete regular
360 results after a while, packages with lots of triggered tests may
361 never have all the results at the same time.
362'''
364 test_results = self.test_results
366 for (trigger, trigger_data) in test_results.items():
367 for (src, results) in trigger_data.items():
368 for (arch, result) in results.items():
369 if trigger == REF_TRIG and \
370 self._now - result[3] > self.options.adt_reference_max_age:
371 result[0] = mark_result_as_old(result[0])
372 elif not self.test_version_in_any_suite(src, result[1]):
373 result[0] = mark_result_as_old(result[0])
375 def test_version_in_any_suite(self, src: str, version: str) -> bool:
376 '''Check if the mentioned version of src is found in a suite
378 To prevent regressions in the target suite, the result should be
379 from a test with the version of the package in either the source
380 suite or the target suite. The source suite is also valid,
381 because due to versioned test dependencies and Breaks/Conflicts
382 relations, regularly the version in the source suite is used
383 during testing.
384 '''
386 versions = set()
387 for suite in self.suite_info:
388 try:
389 srcinfo = suite.sources[src]
390 except KeyError:
391 continue
392 versions.add(srcinfo.version)
394 valid_version = False
395 for ver in versions:
396 if apt_pkg.version_compare(ver, version) == 0:
397 valid_version = True
398 break
400 return valid_version
402 def save_pending_json(self) -> None:
403 # update the pending tests on-disk cache
404 self.logger.info('Updating pending requested tests in %s' % self.pending_tests_file)
405 # Shallow clone pending_tests as we only modify the toplevel and change its type.
406 pending_tests: dict[str, Any] = {}
407 if self.pending_tests:
408 pending_tests = dict(self.pending_tests)
409 # Avoid adding if there are no pending results at all (eases testing)
410 pending_tests[VERSION_KEY] = 1
411 with open(self.pending_tests_file + '.new', 'w') as f:
412 json.dump(pending_tests, f, indent=2)
413 os.rename(self.pending_tests_file + '.new', self.pending_tests_file)
415 def save_state(self, britney) -> None:
416 super().save_state(britney)
418 # update the results on-disk cache, unless we are using a r/o shared one
419 if not self.options.adt_shared_results_cache:
420 self.logger.info('Updating results cache')
421 test_results = deepcopy(self.test_results)
422 for result in all_leaf_results(test_results):
423 result[0] = result[0].name
424 with open(self.results_cache_file + '.new', 'w') as f:
425 json.dump(test_results, f, indent=2)
426 os.rename(self.results_cache_file + '.new', self.results_cache_file)
428 self.save_pending_json()
430 def format_retry_url(self, run_id, arch, testsrc, trigger: str) -> str:
431 if self.options.adt_ppas:
432 ppas = "&" + urllib.parse.urlencode([('ppa', p) for p in self.options.adt_ppas])
433 else:
434 ppas = ""
435 return self.options.adt_retry_url.format(
436 run_id=run_id,
437 release=self.options.series,
438 arch=arch,
439 package=testsrc,
440 trigger=urllib.parse.quote_plus(trigger),
441 ppas=ppas,
442 )
444 def format_log_url(self, testsrc: str, arch: str, run_id) -> str:
445 return self.options.adt_log_url.format(
446 release=self.options.series,
447 swift_container=self.swift_container,
448 hash=srchash(testsrc),
449 package=testsrc,
450 arch=arch,
451 run_id=run_id,
452 )
454 def apply_src_policy_impl(self, tests_info, item: MigrationItem, source_data_tdist,
455 source_data_srcdist: SourcePackage, excuse) -> PolicyVerdict:
456 assert self.hints is not None # for type checking
457 # initialize
458 verdict = PolicyVerdict.PASS
459 all_self_tests_pass = False
460 source_name = item.package
461 results_info = []
463 # skip/delay autopkgtests until new package is built somewhere
464 if not source_data_srcdist.binaries:
465 self.logger.debug('%s hasnot been built anywhere, skipping autopkgtest policy', excuse.name)
466 verdict = PolicyVerdict.REJECTED_TEMPORARILY
467 excuse.add_verdict_info(verdict, "nothing built yet, autopkgtest delayed")
469 if 'all' in excuse.missing_builds:
470 self.logger.debug('%s hasnot been built for arch:all, skipping autopkgtest policy', source_name)
471 verdict = PolicyVerdict.REJECTED_TEMPORARILY
472 excuse.add_verdict_info(verdict, "arch:all not built yet, autopkgtest delayed")
474 if verdict == PolicyVerdict.PASS:
475 self.logger.debug('Checking autopkgtests for %s', source_name)
476 trigger = source_name + '/' + source_data_srcdist.version
478 # build a (testsrc, testver) → arch → (status, log_url) map; we trigger/check test
479 # results per architecture for technical/efficiency reasons, but we
480 # want to evaluate and present the results by tested source package
481 # first
482 pkg_arch_result = collections.defaultdict(dict)
483 for arch in self.adt_arches:
484 if arch in excuse.missing_builds:
485 verdict = PolicyVerdict.REJECTED_TEMPORARILY
486 self.logger.debug('%s hasnot been built on arch %s, delay autopkgtest there', source_name, arch)
487 excuse.add_verdict_info(verdict, "arch:%s not built yet, autopkgtest delayed there" % arch)
488 elif arch in excuse.policy_info['depends'].get('arch_all_not_installable', []):
489 self.logger.debug(
490 '%s is uninstallable on arch %s (which is allowed), not running autopkgtest there',
491 source_name, arch)
492 excuse.addinfo(
493 "uninstallable on arch %s (which is allowed), not running autopkgtest there" % arch)
494 elif (arch in excuse.unsatisfiable_on_archs and
495 arch not in excuse.policy_info['depends'].get('autopkgtest_run_anyways', [])):
496 verdict = PolicyVerdict.REJECTED_TEMPORARILY
497 self.logger.debug('%s is uninstallable on arch %s, not running autopkgtest there', source_name, arch)
498 excuse.addinfo("uninstallable on arch %s, not running autopkgtest there" % arch)
499 else:
500 self.request_tests_for_source(item, arch, source_data_srcdist, pkg_arch_result, excuse)
502 # add test result details to Excuse
503 cloud_url = self.options.adt_ci_url + "packages/%(h)s/%(s)s/%(r)s/%(a)s"
504 for (testsrc, testver) in sorted(pkg_arch_result):
505 arch_results = pkg_arch_result[(testsrc, testver)]
506 r = {v[0] for v in arch_results.values()}
507 if 'REGRESSION' in r:
508 verdict = PolicyVerdict.REJECTED_PERMANENTLY
509 elif ('RUNNING' in r or 'RUNNING-REFERENCE' in r) and verdict == PolicyVerdict.PASS:
510 verdict = PolicyVerdict.REJECTED_TEMPORARILY
511 # skip version if still running on all arches
512 if not r - {'RUNNING', 'RUNNING-ALWAYSFAIL'}:
513 testver = None
515 # A source package is eligible for the bounty if it has tests
516 # of its own that pass on all tested architectures.
517 if testsrc == source_name:
518 excuse.autopkgtest_results = r
519 if r == {'PASS'}:
520 all_self_tests_pass = True
522 if testver:
523 testname = '%s/%s' % (testsrc, testver)
524 else:
525 testname = testsrc
527 html_archmsg = []
528 for arch in sorted(arch_results):
529 (status, run_id, log_url) = arch_results[arch]
530 artifact_url = None
531 retry_url = None
532 reference_url = None
533 reference_retry_url = None
534 history_url = None
535 if self.options.adt_ppas:
536 if log_url.endswith('log.gz'):
537 artifact_url = log_url.replace('log.gz', 'artifacts.tar.gz')
538 else:
539 history_url = cloud_url % {
540 'h': srchash(testsrc), 's': testsrc,
541 'r': self.options.series, 'a': arch}
542 if status in ('NEUTRAL', 'REGRESSION', 'RUNNING-REFERENCE'):
543 retry_url = self.format_retry_url(run_id, arch, testsrc, trigger)
545 baseline_result = self.result_in_baseline(testsrc, arch)
546 if baseline_result and baseline_result[0] != Result.NONE:
547 baseline_run_id = str(baseline_result[2])
548 reference_url = self.format_log_url(testsrc, arch, baseline_run_id)
549 if self.options.adt_baseline == 'reference':
550 reference_retry_url = self.format_retry_url(
551 baseline_run_id, arch, testsrc, REF_TRIG)
552 tests_info.setdefault(testname, {})[arch] = \
553 [status, log_url, history_url, artifact_url, retry_url]
555 # render HTML snippet for testsrc entry for current arch
556 if history_url:
557 message = '<a href="%s">%s</a>' % (history_url, arch)
558 else:
559 message = arch
560 message += ': <a href="%s">%s</a>' % (log_url, EXCUSES_LABELS[status])
561 if retry_url:
562 message += '<a href="%s" style="text-decoration: none;"> ♻</a>' % retry_url
563 if reference_url:
564 message += ' (<a href="%s">reference</a>' % reference_url
565 if reference_retry_url:
566 message += '<a href="%s" style="text-decoration: none;"> ♻</a>' % reference_retry_url
567 message += ')'
568 if artifact_url:
569 message += ' <a href="%s">[artifacts]</a>' % artifact_url
570 html_archmsg.append(message)
572 # render HTML line for testsrc entry
573 # - if action is or may be required
574 # - for ones own package
575 if r - {'PASS', 'NEUTRAL', 'RUNNING-ALWAYSFAIL', 'ALWAYSFAIL', 'IGNORE-FAIL'} or \
576 testsrc == source_name:
577 if testver:
578 pkg = '<a href="#{0}">{0}</a>/{1}'.format(testsrc, testver)
579 else:
580 pkg = '<a href="#{0}">{0}</a>'.format(testsrc)
581 results_info.append("autopkgtest for %s: %s" % (pkg, ', '.join(html_archmsg)))
583 if verdict != PolicyVerdict.PASS:
584 # check for force-skiptest hint
585 hints = self.hints.search('force-skiptest', package=source_name, version=source_data_srcdist.version)
586 if hints:
587 excuse.addreason('skiptest')
588 excuse.addinfo("Should wait for tests relating to %s %s, but forced by %s" %
589 (source_name, source_data_srcdist.version, hints[0].user))
590 verdict = PolicyVerdict.PASS_HINTED
591 else:
592 excuse.addreason('autopkgtest')
594 if self.options.adt_success_bounty and verdict == PolicyVerdict.PASS and all_self_tests_pass:
595 excuse.add_bounty('autopkgtest', self.options.adt_success_bounty)
596 if self.options.adt_regression_penalty and \
597 verdict in {PolicyVerdict.REJECTED_PERMANENTLY, PolicyVerdict.REJECTED_TEMPORARILY}:
598 if self.options.adt_regression_penalty > 0: 598 ↛ 601line 598 didn't jump to line 601, because the condition on line 598 was never false
599 excuse.add_penalty('autopkgtest', self.options.adt_regression_penalty)
600 # In case we give penalties instead of blocking, we must always pass
601 verdict = PolicyVerdict.PASS
602 for i in results_info:
603 if verdict.is_rejected:
604 excuse.add_verdict_info(verdict, i)
605 else:
606 excuse.addinfo(i)
608 return verdict
610 #
611 # helper functions
612 #
614 @staticmethod
615 def has_autodep8(srcinfo: SourcePackage) -> bool:
616 '''Check if package is covered by autodep8
618 srcinfo is an item from self.britney.sources
619 '''
620 # autodep8?
621 for t in srcinfo.testsuite:
622 if t.startswith('autopkgtest-pkg'):
623 return True
625 return False
627 def request_tests_for_source(self, item: MigrationItem, arch: str, source_data_srcdist: SourcePackage,
628 pkg_arch_result, excuse) -> None:
629 pkg_universe = self.britney.pkg_universe
630 target_suite = self.suite_info.target_suite
631 source_suite = item.suite
632 sources_t = target_suite.sources
633 sources_s = item.suite.sources
634 packages_s_a = item.suite.binaries[arch]
635 source_name = item.package
636 source_version = source_data_srcdist.version
637 # request tests (unless they were already requested earlier or have a result)
638 tests = self.tests_for_source(source_name, source_version, arch, excuse)
639 is_huge = len(tests) > self.options.adt_huge
641 # Here we figure out what is required from the source suite
642 # for the test to install successfully.
643 #
644 # The ImplicitDependencyPolicy does a similar calculation, but
645 # if I (elbrus) understand correctly, only in the reverse
646 # dependency direction. We are doing something similar here
647 # but in the dependency direction (note: this code is older).
648 # We use the ImplicitDependencyPolicy result for the reverse
649 # dependencies and we keep the code below for the
650 # dependencies. Using the ImplicitDependencyPolicy results
651 # also in the reverse direction seems to require quite some
652 # reorganisation to get that information available here, as in
653 # the current state only the current excuse is available here
654 # and the required other excuses may not be calculated yet.
655 #
656 # Loop over all binary packages from trigger and
657 # recursively look up which *versioned* dependencies are
658 # only satisfied in the source suite.
659 #
660 # For all binaries found, look up which packages they
661 # break/conflict with in the target suite, but not in the
662 # source suite. The main reason to do this is to cover test
663 # dependencies, so we will check Testsuite-Triggers as
664 # well.
665 #
666 # OI: do we need to do the first check in a smart way
667 # (i.e. only for the packages that are actually going to be
668 # installed) for the breaks/conflicts set as well, i.e. do
669 # we need to check if any of the packages that we now
670 # enforce being from the source suite, actually have new
671 # versioned depends and new breaks/conflicts.
672 #
673 # For all binaries found, add the set of unique source
674 # packages to the list of triggers.
676 bin_triggers = set()
677 bin_new = set(source_data_srcdist.binaries)
678 for binary in iter_except(bin_new.pop, KeyError):
679 if binary in bin_triggers:
680 continue
681 bin_triggers.add(binary)
683 # Check if there is a dependency that is not
684 # available in the target suite.
685 # We add slightly too much here, because new binaries
686 # will also show up, but they are already properly
687 # installed. Nevermind.
688 depends = pkg_universe.dependencies_of(binary)
689 # depends is a frozenset{frozenset{BinaryPackageId, ..}}
690 for deps_of_bin in depends:
691 if target_suite.any_of_these_are_in_the_suite(deps_of_bin):
692 # if any of the alternative dependencies is already
693 # satisfied in the target suite, we can just ignore it
694 continue
695 # We'll figure out which version later
696 bin_new.update(added_pkgs_compared_to_target_suite(deps_of_bin, target_suite))
698 # Check if the package breaks/conflicts anything. We might
699 # be adding slightly too many source packages due to the
700 # check here as a binary package that is broken may be
701 # coming from a different source package in the source
702 # suite. Nevermind.
703 bin_broken = set()
704 for binary in bin_triggers:
705 # broken is a frozenset{BinaryPackageId, ..}
706 broken = pkg_universe.negative_dependencies_of(binary)
707 broken_in_target = {p.package_name for p in target_suite.which_of_these_are_in_the_suite(broken)}
708 broken_in_source = {p.package_name for p in source_suite.which_of_these_are_in_the_suite(broken)}
709 # We want packages with a newer version in the source suite that
710 # no longer has the conflict. This is an approximation
711 broken_filtered = set(
712 p for p in broken if
713 p.package_name in broken_in_target and
714 p.package_name not in broken_in_source)
715 # We add the version in the target suite, but the code below will
716 # change it to the version in the source suite
717 bin_broken.update(broken_filtered)
718 bin_triggers.update(bin_broken)
720 # The ImplicitDependencyPolicy also found packages that need
721 # to migrate together, so add them to the triggers too.
722 for bin_implicit in excuse.depends_packages_flattened:
723 if bin_implicit.architecture == arch:
724 bin_triggers.add(bin_implicit)
726 triggers = set()
727 for binary in bin_triggers:
728 if binary.architecture == arch:
729 try:
730 source_of_bin = packages_s_a[binary.package_name].source
731 # If the version in the target suite is the same, don't add a trigger.
732 # Note that we looked up the source package in the source suite.
733 # If it were a different source package in the target suite, however, then
734 # we would not have this source package in the same version anyway.
735 if (sources_t.get(source_of_bin, None) is None or
736 sources_s[source_of_bin].version != sources_t[source_of_bin].version):
737 triggers.add(
738 source_of_bin + '/' +
739 sources_s[source_of_bin].version)
740 except KeyError:
741 # Apparently the package was removed from
742 # unstable e.g. if packages are replaced
743 # (e.g. -dbg to -dbgsym)
744 pass
745 if binary not in source_data_srcdist.binaries:
746 for tdep_src in self.testsuite_triggers.get(binary.package_name, set()): 746 ↛ 747line 746 didn't jump to line 747, because the loop on line 746 never started
747 try:
748 # Only add trigger if versions in the target and source suites are different
749 if (sources_t.get(tdep_src, None) is None or
750 sources_s[tdep_src].version != sources_t[tdep_src].version):
751 triggers.add(
752 tdep_src + '/' +
753 sources_s[tdep_src].version)
754 except KeyError:
755 # Apparently the source was removed from
756 # unstable (testsuite_triggers are unified
757 # over all suites)
758 pass
759 trigger = source_name + '/' + source_version
760 triggers.discard(trigger)
761 triggers_list = sorted(list(triggers))
762 triggers_list.insert(0, trigger)
764 for (testsrc, testver) in tests:
765 self.pkg_test_request(testsrc, arch, triggers_list, huge=is_huge)
766 (result, real_ver, run_id, url) = self.pkg_test_result(testsrc, testver, arch, trigger)
767 pkg_arch_result[(testsrc, real_ver)][arch] = (result, run_id, url)
def tests_for_source(self, src: str, ver: str, arch: str, excuse) -> list[tuple[str, str]]:
    '''Iterate over all tests that should be run for given source and arch

    Returns a list of (test-source, version) pairs sorted by source name:
    the package itself (when it declares an autopkgtest/autodep8 test and
    built something on this arch), plus every reverse binary dependency
    and Testsuite-Triggers reverse relation whose source carries a test.
    '''

    source_suite = self.suite_info.primary_source_suite
    target_suite = self.suite_info.target_suite
    sources_info = target_suite.sources
    binaries_info = target_suite.binaries[arch]

    # sources already added to `tests`, to avoid duplicate entries
    reported_pkgs = set()

    tests = []

    # Debian doesn't have linux-meta, but Ubuntu does
    # for linux themselves we don't want to trigger tests -- these should
    # all come from linux-meta*. A new kernel ABI without a corresponding
    # -meta won't be installed and thus we can't sensibly run tests against
    # it.
    if src.startswith('linux') and src.replace('linux', 'linux-meta') in sources_info:
        return []

    # we want to test the package itself, if it still has a test in unstable
    # but only if the package actually exists on this arch
    srcinfo = source_suite.sources[src]
    if ('autopkgtest' in srcinfo.testsuite or self.has_autodep8(srcinfo)) and \
            len(excuse.packages[arch]) > 0:
        reported_pkgs.add(src)
        tests.append((src, ver))

    extra_bins = []
    # Debian doesn't have linux-meta, but Ubuntu does
    # Hack: For new kernels trigger all DKMS packages by pretending that
    # linux-meta* builds a "dkms" binary as well. With that we ensure that we
    # don't regress DKMS drivers with new kernel versions.
    if src.startswith('linux-meta'):
        # does this have any image on this arch?
        for pkg_id in srcinfo.binaries:
            if pkg_id.architecture == arch and '-image' in pkg_id.package_name:
                try:
                    extra_bins.append(binaries_info['dkms'].pkg_id)
                except KeyError:
                    pass

    # no arch-specific binaries anywhere -> nothing can be tested here
    if not self.has_built_on_this_arch_or_is_arch_all(srcinfo, arch):
        return []

    pkg_universe = self.britney.pkg_universe
    # plus all direct reverse dependencies and test triggers of its
    # binaries which have an autopkgtest
    for binary in itertools.chain(srcinfo.binaries, extra_bins):
        rdeps = pkg_universe.reverse_dependencies_of(binary)
        for rdep in rdeps:
            try:
                rdep_src = binaries_info[rdep.package_name].source
                # Don't re-trigger the package itself here; this should
                # have been done above if the package still continues to
                # have an autopkgtest in unstable.
                if rdep_src == src:
                    continue
            except KeyError:
                # reverse dependency is not in the target suite on this arch
                continue

            rdep_src_info = sources_info[rdep_src]
            if 'autopkgtest' in rdep_src_info.testsuite or self.has_autodep8(rdep_src_info):
                if rdep_src not in reported_pkgs:
                    tests.append((rdep_src, rdep_src_info.version))
                    reported_pkgs.add(rdep_src)

        # sources that declare this binary in their Testsuite-Triggers
        for tdep_src in self.testsuite_triggers.get(binary.package_name, set()):
            if tdep_src not in reported_pkgs:
                try:
                    tdep_src_info = sources_info[tdep_src]
                except KeyError:
                    continue
                if 'autopkgtest' in tdep_src_info.testsuite or self.has_autodep8(tdep_src_info):
                    # only test if the triggered source built on this arch
                    for pkg_id in tdep_src_info.binaries:
                        if pkg_id.architecture == arch:
                            tests.append((tdep_src, tdep_src_info.version))
                            reported_pkgs.add(tdep_src)
                            break

    tests.sort(key=lambda s_v: s_v[0])
    return tests
def read_pending_tests(self) -> None:
    '''Read pending test requests from previous britney runs

    Initialize self.pending_tests with that data.
    '''
    assert self.pending_tests is None, 'already initialized'
    if not os.path.exists(self.pending_tests_file):
        self.logger.info('No %s, starting with no pending tests', self.pending_tests_file)
        self.pending_tests = {}
        return

    with open(self.pending_tests_file) as f:
        pending = json.load(f)

    if VERSION_KEY in pending:
        # Current file format: strip the version marker, then expire
        # requests that have been pending longer than adt_pending_max_age
        # and drop any dicts that become empty as a consequence.
        del pending[VERSION_KEY]
        max_age = self.options.adt_pending_max_age
        for trigger in list(pending):
            pkg_map = pending[trigger]
            for pkg in list(pkg_map):
                arch_map = pkg_map[pkg]
                stale = [a for a, requested in arch_map.items()
                         if self._now - requested > max_age]
                for arch in stale:
                    del arch_map[arch]
                if not arch_map:
                    del pkg_map[pkg]
            if not pkg_map:
                del pending[trigger]
    else:
        # Migration code: the old format stored a plain list of
        # architectures per package; convert to {arch: timestamp}
        # using the current time as the request time.
        for trigger_data in pending.values():
            for (pkg, arch_list) in trigger_data.items():
                trigger_data[pkg] = {arch: self._now for arch in arch_list}

    self.pending_tests = pending
    self.logger.info('Read pending requested tests from %s', self.pending_tests_file)
    self.logger.debug('%s', self.pending_tests)
# Scanning every trigger's result map is expensive, so memoize per
# (self, src, arch).  NOTE: lru_cache on a method keeps the instance
# alive for the cache lifetime; acceptable here as the policy object
# lives for a single britney run anyway.
@lru_cache(None)
def latest_run_for_package(self, src: str, arch) -> str:
    '''Return latest run ID for src on arch'''
    known_ids = []
    for srcmap in self.test_results.values():
        try:
            known_ids.append(srcmap[src][arch][2])
        except KeyError:
            pass
    # run IDs are timestamp strings, so lexicographic max == latest;
    # '' when no result exists at all
    return max(known_ids, default='')
# memoized: at most one swift listing per (src, arch) per britney run
@lru_cache(None)
def fetch_swift_results(self, swift_url: str, src: str, arch: str) -> None:
    '''Download new results for source package/arch from swift

    Lists result directories for src/arch newer than the latest known
    run (or all of them when adt_shared_results_cache is set) and
    downloads each run's result.tar via fetch_one_result().

    On IOError other than 401 this calls sys.exit(1): ignoring a
    transient fetch failure could cause tests we already have results
    for to be re-requested, so the whole run is aborted and retried.
    '''

    # prepare query: get all runs with a timestamp later than the latest
    # run_id for this package/arch; '@' is at the end of each run id, to
    # mark the end of a test run directory path
    # example: <autopkgtest-wily>wily/amd64/libp/libpng/20150630_054517@/result.tar
    query = {'delimiter': '@',
             'prefix': '%s/%s/%s/%s/' % (self.options.series, arch, srchash(src), src)}

    # determine latest run_id from results
    if not self.options.adt_shared_results_cache:
        latest_run_id = self.latest_run_for_package(src, arch)
        if latest_run_id:
            # swift 'marker' skips everything up to and including this path
            query['marker'] = query['prefix'] + latest_run_id

    # request new results from swift
    url = os.path.join(swift_url, self.swift_container)
    url += '?' + urllib.parse.urlencode(query)
    f = None
    try:
        f = urlopen(url, timeout=30)
        if f.getcode() == 200:
            result_paths = f.read().decode().strip().splitlines()
        elif f.getcode() == 204:  # No content
            result_paths = []
        else:
            # we should not ever end up here as we expect a HTTPError in
            # other cases; e. g. 3XX is something that tells us to adjust
            # our URLS, so fail hard on those
            raise NotImplementedError('fetch_swift_results(%s): cannot handle HTTP code %i' %
                                      (url, f.getcode()))
    except IOError as e:
        # 401 "Unauthorized" is swift's way of saying "container does not exist"
        if getattr(e, 'code', -1) == 401:
            self.logger.info('fetch_swift_results: %s does not exist yet or is inaccessible', url)
            return
        # Other status codes are usually a transient
        # network/infrastructure failure. Ignoring this can lead to
        # re-requesting tests which we already have results for, so
        # fail hard on this and let the next run retry.
        self.logger.error('Failure to fetch swift results from %s: %s', url, str(e))
        sys.exit(1)
    finally:
        if f is not None:
            f.close()

    for p in result_paths:
        self.fetch_one_result(
            os.path.join(swift_url, self.swift_container, p, 'result.tar'), src, arch)
def fetch_one_result(self, url: str, src: str, arch: str) -> None:
    '''Download one result URL for source/arch

    Remove matching pending_tests entries.

    404 on fetch is tolerated (the result upload went wrong); any other
    IOError aborts britney via sys.exit(1) so the next run retries.
    Damaged tarballs and results lacking ADT_TEST_TRIGGERS are logged
    and skipped.
    '''
    f = None
    try:
        f = urlopen(url, timeout=30)
        if f.getcode() == 200:
            tar_bytes = io.BytesIO(f.read())
        else:
            raise NotImplementedError('fetch_one_result(%s): cannot handle HTTP code %i' %
                                      (url, f.getcode()))
    except IOError as err:
        self.logger.error('Failure to fetch %s: %s', url, str(err))
        # we tolerate "not found" (something went wrong on uploading the
        # result), but other things indicate infrastructure problems
        if getattr(err, 'code', -1) == 404:
            return
        sys.exit(1)
    finally:
        if f is not None:
            f.close()
    try:
        with tarfile.open(None, 'r', tar_bytes) as tar:
            exitcode = int(tar.extractfile('exitcode').read().strip())
            srcver = tar.extractfile('testpkg-version').read().decode().strip()
            (ressrc, ver) = srcver.split()
            testinfo = json.loads(tar.extractfile('testinfo.json').read().decode())
    except (KeyError, ValueError, tarfile.TarError) as err:
        self.logger.error('%s is damaged, ignoring: %s', url, str(err))
        # ignore this; this will leave an orphaned request in autopkgtest-pending.json
        # and thus require manual retries after fixing the tmpfail, but we
        # can't just blindly attribute it to some pending test.
        return

    if src != ressrc:
        self.logger.error('%s is a result for package %s, but expected package %s', url, ressrc, src)
        return

    # parse recorded triggers in test result
    for e in testinfo.get('custom_environment', []):
        if e.startswith('ADT_TEST_TRIGGERS='):
            result_triggers = [i for i in e.split('=', 1)[1].split() if '/' in i]
            break
    else:
        # bug fix: the %s placeholder previously had no argument, so this
        # log line printed a literal '%s' instead of the result URL
        self.logger.error('%s result has no ADT_TEST_TRIGGERS, ignoring', url)
        return

    run_id = os.path.basename(os.path.dirname(url))
    seen = round(calendar.timegm(time.strptime(run_id, '%Y%m%d_%H%M%S@')))
    # allow some skipped tests, but nothing else
    if exitcode in [0, 2]:
        result = Result.PASS
    elif exitcode == 8:
        result = Result.NEUTRAL
    else:
        result = Result.FAIL

    self.logger.info(
        'Fetched test result for %s/%s/%s %s (triggers: %s): %s',
        src, ver, arch, run_id, result_triggers, result.name.lower())

    # remove matching test requests
    for trigger in result_triggers:
        self.remove_from_pending(trigger, src, arch)

    # add this result
    for trigger in result_triggers:
        self.add_trigger_to_results(trigger, src, ver, arch, run_id, seen, result)
def remove_from_pending(self, trigger: str, src: str, arch: str, timestamp: int = sys.maxsize) -> None:
    '''Drop the pending-test entry for (trigger, src, arch), if present

    A result older than the moment the test was requested (timestamp
    earlier than the recorded request time) does not satisfy the
    pending entry and is ignored.  Empty sub-dicts are pruned.
    '''
    assert self.pending_tests is not None  # for type checking
    try:
        arch_dict = self.pending_tests[trigger][src]
        requested_at = arch_dict[arch]
    except KeyError:
        self.logger.info('-> does not match any pending request for %s/%s', src, arch)
        return
    if timestamp < requested_at:
        # The result is from before the moment of scheduling, so it's
        # not the one we're waiting for
        return
    del arch_dict[arch]
    if not arch_dict:
        del self.pending_tests[trigger][src]
        if not self.pending_tests[trigger]:
            del self.pending_tests[trigger]
    self.logger.info('-> matches pending request %s/%s for trigger %s', src, arch, trigger)
def add_trigger_to_results(self, trigger: str, src: str, ver: str, arch: str, run_id: str, timestamp: int,
                           status_to_add: Result) -> None:
    '''Merge one fetched result into self.test_results

    For the reference trigger (with a 'reference' baseline) the most
    recent result always wins; otherwise the best result seen so far is
    kept (Result ordering: lower value is better), with ties broken by
    taking the newer run.
    '''
    # Ensure that we got a new enough version
    try:
        (trigsrc, trigver) = trigger.split('/', 1)
    except ValueError:
        self.logger.info('Ignoring invalid test trigger %s', trigger)
        return
    if trigsrc == src and apt_pkg.version_compare(ver, trigver) < 0:
        self.logger.debug('test trigger %s, but run for older version %s, ignoring', trigger, ver)
        return

    stored_result = self.test_results.setdefault(trigger, {}).setdefault(
        src, {}).setdefault(arch, [Result.FAIL, None, '', 0])

    # reruns shouldn't flip the result from PASS or NEUTRAL to
    # FAIL, so remember the most recent version of the best result
    # we've seen. Except for reference updates, which we always
    # want to update with the most recent result. The result data
    # may not be ordered by timestamp, so we need to check time.
    if self.options.adt_baseline == 'reference' and trigger == REF_TRIG:
        update = stored_result[3] < timestamp
    else:
        update = (status_to_add < stored_result[0] or
                  (status_to_add == stored_result[0] and stored_result[3] < timestamp))

    if update:
        stored_result[0] = status_to_add
        stored_result[1] = ver
        stored_result[2] = run_id
        stored_result[3] = timestamp
def send_test_request(self, src: str, arch: str, triggers: list[str], huge: bool = False) -> None:
    '''Send out AMQP request for testing src/arch for triggers

    If huge is true, then the request will be put into the -huge instead of
    normal queue.

    No-op in dry-run mode.  Queue selection: PPA requests go to
    debci-ppa-<series>-<arch> (PPA takes precedence over huge), huge
    requests to debci-huge-<series>-<arch>, everything else to
    debci-<series>-<arch>.  Falls back to file-based submission via
    self.amqp_file_handle when no AMQP channel is configured.
    '''
    if self.options.dry_run:
        return

    params: dict[str, Any] = {'triggers': triggers}
    if self.options.adt_ppas:
        params['ppas'] = self.options.adt_ppas
        qname = 'debci-ppa-%s-%s' % (self.options.series, arch)
    elif huge:
        qname = 'debci-huge-%s-%s' % (self.options.series, arch)
    else:
        qname = 'debci-%s-%s' % (self.options.series, arch)
    params['submit-time'] = time.strftime('%Y-%m-%d %H:%M:%S%z', time.gmtime())

    if self.amqp_channel:
        self.amqp_channel.basic_publish(amqp.Message(src + '\n' + json.dumps(params),
                                                     delivery_mode=2),  # persistent
                                        routing_key=qname)
        # we save pending.json with every request, so that if britney
        # crashes we don't re-request tests. This is only needed when using
        # real amqp, as with file-based submission the pending tests are
        # returned by debci along with the results each run.
        self.save_pending_json()
    else:
        # for file-based submission, triggers are space separated
        params['triggers'] = [' '.join(params['triggers'])]
        assert self.amqp_file_handle
        self.amqp_file_handle.write('%s:%s %s\n' % (qname, src, json.dumps(params)))
def pkg_test_request(self, src: str, arch: str, all_triggers: list[str], huge: bool = False) -> None:
    '''Request one package test for a set of triggers

    all_triggers is a list of "pkgname/version". These are the packages
    that will be taken from the source suite. The first package in this
    list is the package that triggers the testing of src, the rest are
    additional packages required for installability of the test deps. If
    huge is true, then the request will be put into the -huge instead of
    normal queue.

    This will only be done if that test wasn't already requested in
    a previous run (i. e. if it's not already in self.pending_tests)
    or if there is already a fresh or a positive result for it. This
    ensures to download current results for this package before
    requesting any test.
    '''
    trigger = all_triggers[0]
    # file:// URLs mean results come from debci directly, not from swift
    uses_swift = not self.options.adt_swift_url.startswith('file://')
    try:
        result = self.test_results[trigger][src][arch]
        has_result = True
    except KeyError:
        has_result = False

    if has_result:
        result_state = result[0]
        if result_state in {Result.OLD_PASS, Result.OLD_FAIL, Result.OLD_NEUTRAL}:
            # stale results are not trusted as-is: fall through and look
            # for fresh results below
            pass
        elif result_state == Result.FAIL and \
                self.result_in_baseline(src, arch)[0] in {
                    Result.PASS, Result.NEUTRAL, Result.OLD_PASS, Result.OLD_NEUTRAL} and \
                self._now - result[3] > self.options.adt_retry_older_than:
            # We might want to retry this failure, so continue
            pass
        elif not uses_swift:
            # We're done if we don't retrigger and we're not using swift
            return
        elif result_state in {Result.PASS, Result.NEUTRAL}:
            self.logger.debug('%s/%s triggered by %s already known', src, arch, trigger)
            return

    # Without swift we don't expect new results
    if uses_swift:
        self.logger.info('Checking for new results for failed %s/%s for trigger %s', src, arch, trigger)
        self.fetch_swift_results(self.options.adt_swift_url, src, arch)
        # do we have one now?
        try:
            self.test_results[trigger][src][arch]
            return
        except KeyError:
            pass

    self.request_test_if_not_queued(src, arch, trigger, all_triggers, huge=huge)
def request_test_if_not_queued(self, src: str, arch: str, trigger: str,
                               all_triggers: Optional[list[str]] = None,
                               huge: bool = False) -> None:
    '''Queue a test of src/arch for trigger unless it is already pending

    all_triggers defaults to [trigger].  Records the request time in
    self.pending_tests and submits via send_test_request().

    Fix: the previous signature used a mutable default argument
    (all_triggers=[]); it was never mutated so this was latent, but it
    is replaced with the None-sentinel idiom to rule out aliasing bugs.
    Passing an explicit empty list behaves as before (treated as
    "default to [trigger]").
    '''
    assert self.pending_tests is not None  # for type checking
    if not all_triggers:
        all_triggers = [trigger]

    # Don't re-request if it's already pending
    arch_dict = self.pending_tests.setdefault(trigger, {}).setdefault(src, {})
    if arch in arch_dict:
        self.logger.debug('Test %s/%s for %s is already pending, not queueing', src, arch, trigger)
    else:
        self.logger.debug('Requesting %s autopkgtest on %s to verify %s', src, arch, trigger)
        arch_dict[arch] = self._now
        self.send_test_request(src, arch, all_triggers, huge=huge)
def result_in_baseline(self, src: str, arch: str):
    '''Get the result for src on arch in the baseline

    The baseline is optionally all data or a reference set)

    Returns a [Result, version, run_id, timestamp] list: in 'reference'
    baseline mode, the result recorded under the reference trigger (a
    NONE placeholder when absent or when src is not in the target
    suite); otherwise the best non-FAIL result ever seen across all
    triggers (a FAIL placeholder when none exists).
    '''

    # this requires iterating over all cached results and thus is expensive;
    # cache the results
    try:
        return self.result_in_baseline_cache[src][arch]
    except KeyError:
        pass

    result_reference = [Result.NONE, None, '', 0]
    if self.options.adt_baseline == 'reference':
        if src not in self.suite_info.target_suite.sources:
            # deliberately not cached: src may appear in the target
            # suite later in this run
            return result_reference

        try:
            result_reference = self.test_results[REF_TRIG][src][arch]
            self.logger.debug('Found result for src %s in reference: %s',
                              src, result_reference[0].name)
        except KeyError:
            self.logger.debug('Found NO result for src %s in reference: %s',
                              src, result_reference[0].name)
        # deepcopy: cache must not alias the live test_results entry
        self.result_in_baseline_cache[src][arch] = deepcopy(result_reference)
        return result_reference

    result_ever = [Result.FAIL, None, '', 0]
    for srcmap in self.test_results.values():
        try:
            if srcmap[src][arch][0] != Result.FAIL:
                result_ever = srcmap[src][arch]
            # If we are not looking at a reference run, We don't really
            # care about anything except the status, so we're done
            # once we find a PASS.
            if result_ever[0] == Result.PASS:
                break
        except KeyError:
            pass

    # deepcopy: cache must not alias the live test_results entry
    self.result_in_baseline_cache[src][arch] = deepcopy(result_ever)
    self.logger.debug('Result for src %s ever: %s', src, result_ever[0].name)
    return result_ever
def has_test_in_target(self, src: str) -> bool:
    '''Check whether src in the target suite declares an autopkgtest

    True when the target-suite source either lists 'autopkgtest' in its
    testsuite or is covered by autodep8; False when it does not, or
    when src is missing from the target suite.
    '''
    try:
        srcinfo = self.suite_info.target_suite.sources[src]
        if 'autopkgtest' in srcinfo.testsuite or self.has_autodep8(srcinfo):
            return True
    # AttributeError is only needed for the test suite as
    # srcinfo can be a NoneType
    except (KeyError, AttributeError):
        pass
    return False
def pkg_test_result(self, src: str, ver: str, arch: str, trigger: str) -> tuple[str, str, Optional[str], str]:
    '''Get current test status of a particular package

    Return (status, real_version, run_id, log_url) tuple; status is a key in
    EXCUSES_LABELS. run_id is None if the test is still running.
    '''
    assert self.pending_tests is not None  # for type checking
    # determine current test result status
    run_id = None
    try:
        r = self.test_results[trigger][src][arch]
        # the recorded result supersedes the caller's version
        ver = r[1]
        run_id = r[2]

        if r[0] in {Result.FAIL, Result.OLD_FAIL}:
            # determine current test result status
            baseline_result = self.result_in_baseline(src, arch)[0]

            # Special-case triggers from linux-meta*: we cannot compare
            # results against different kernels, as e. g. a DKMS module
            # might work against the default kernel but fail against a
            # different flavor; so for those, ignore the "ever
            # passed" check; FIXME: check against trigsrc only
            if self.options.adt_baseline != 'reference' and \
                    (trigger.startswith('linux-meta') or trigger.startswith('linux/')):
                baseline_result = Result.FAIL

            # Check if the autopkgtest (still) exists in the target suite
            test_in_target = self.has_test_in_target(src)

            # failure without a current baseline result: request a run
            # against the reference trigger for future classification
            if test_in_target and baseline_result in \
                    {Result.NONE, Result.OLD_FAIL, Result.OLD_NEUTRAL, Result.OLD_PASS}:
                self.request_test_if_not_queued(src, arch, REF_TRIG)

            result = 'REGRESSION'
            if baseline_result in {Result.FAIL, Result.OLD_FAIL}:
                result = 'ALWAYSFAIL'
            elif baseline_result == Result.NONE and test_in_target:
                result = 'RUNNING-REFERENCE'

            if self.options.adt_ignore_failure_for_new_tests and not test_in_target:
                # when configured, failures of tests absent from the
                # target suite are not treated as regressions
                result = 'ALWAYSFAIL'

            if self.has_force_badtest(src, ver, arch):
                result = 'IGNORE-FAIL'
        else:
            result = r[0].name

        url = self.format_log_url(src, arch, run_id)
    except KeyError:
        # no result for src/arch; still running?
        if arch in self.pending_tests.get(trigger, {}).get(src, {}).keys():
            baseline_result = self.result_in_baseline(src, arch)[0]
            if self.options.adt_ignore_failure_for_new_tests and not self.has_test_in_target(src):
                result = 'RUNNING-ALWAYSFAIL'
            elif baseline_result != Result.FAIL and not self.has_force_badtest(src, ver, arch):
                result = 'RUNNING'
            else:
                # known to fail in the baseline (or hinted bad), so a
                # still-running test cannot block migration
                result = 'RUNNING-ALWAYSFAIL'
            url = self.options.adt_ci_url + 'status/pending'
        else:
            raise RuntimeError('Result for %s/%s/%s (triggered by %s) is neither known nor pending!' %
                               (src, ver, arch, trigger))

    return (result, ver, run_id, url)
def has_force_badtest(self, src: str, ver: str, arch: str) -> bool:
    '''Check if src/ver/arch has a force-badtest hint

    A hint entry matches when its architecture is 'source' or the given
    arch, and its version is 'all' or not older than ver.
    '''
    hints = self.hints.search('force-badtest', package=src)
    if hints:
        self.logger.info('Checking hints for %s/%s/%s: %s', src, ver, arch, [str(h) for h in hints])
        for hint in hints:
            for mi in hint.packages:
                if mi.architecture not in ['source', arch]:
                    continue
                if mi.version == 'all' or apt_pkg.version_compare(ver, mi.version) <= 0:
                    return True
    return False
def has_built_on_this_arch_or_is_arch_all(self, src_data: SourcePackage, arch: str) -> bool:
    '''When a source builds arch:all binaries, those binaries are
    added to all architectures and thus the source 'exists'
    everywhere. This function checks if the source has any arch
    specific binaries on this architecture and if not, if it
    has them on any architecture.
    '''
    packages_s_a = self.suite_info.primary_source_suite.binaries[arch]
    all_binaries_known_here = True
    for binary_s in src_data.binaries:
        binary_u = packages_s_a.get(binary_s.package_name)
        if binary_u is None:
            # src_data.binaries has all the built binaries, so if
            # we get here, we know that at least one architecture
            # has architecture specific binaries
            all_binaries_known_here = False
            continue
        if binary_u.architecture == arch:
            # found an arch-specific build on this architecture
            return True
    # Only arch:all binaries were visible on this architecture; the
    # source counts as present here unless some binary exists solely
    # on another architecture.
    return all_binaries_known_here