Coverage for britney2/migration.py: 94%
267 statements
coverage.py v6.5.0, created at 2025-03-23 07:34 +0000
1import contextlib
2import copy
3import optparse
4from typing import TYPE_CHECKING, Optional, cast, Union
5from collections.abc import Iterator
7import apt_pkg
9from britney2.transaction import MigrationTransactionState, UndoItem
10from britney2.utils import (
11 MigrationConstraintException,
12 check_installability,
13 clone_nuninst,
14 compute_reverse_tree,
15 find_smooth_updateable_binaries,
16)
18if TYPE_CHECKING:  # coverage: 18 ↛ 19 (condition never true)
19 from . import BinaryPackage, BinaryPackageId, Suites
20 from .hints import HintCollection
21 from .installability.universe import BinaryPackageUniverse
22 from .migrationitem import MigrationItem, MigrationItemFactory
25def compute_eqv_set(
26 pkg_universe: "BinaryPackageUniverse",
27 updates: set["BinaryPackageId"],
28 rms: set["BinaryPackageId"],
29) -> set[tuple[str, str]]:
30 eqv_set: set[tuple[str, str]] = set()
31 # If we are removing *and* updating packages, then check for eqv. packages
32 if rms and updates:
33 eqv_table = {(x.package_name, x.architecture): x for x in rms}
35 for new_pkg_id in updates:
36 binary, _, parch = new_pkg_id
37 key = (binary, parch)
38 old_pkg_id = eqv_table.get(key)
39 if old_pkg_id is not None:
40 if pkg_universe.are_equivalent(new_pkg_id, old_pkg_id):
41 eqv_set.add(key)
42 return eqv_set
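# Example (illustrative sketch with hypothetical data, not part of the
# measured file): compute_eqv_set with a stub universe and plain
# (name, version, arch) tuples standing in for BinaryPackageId objects.
class _StubUniverse:
    def are_equivalent(self, a, b):
        # assume two builds are equivalent when only the version differs
        return (a[0], a[2]) == (b[0], b[2])

_rms = {("libfoo1", "1.0-1", "amd64")}
_updates = {("libfoo1", "1.0-2", "amd64")}
# removing *and* updating libfoo1 on amd64 marks the pair as equivalent:
assert compute_eqv_set(_StubUniverse(), _updates, _rms) == {("libfoo1", "amd64")}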
45def is_nuninst_worse(
46 must_be_installable: list[str],
47 nuninst_now_arch: set[str],
48 nuninst_after_arch: set[str],
49 allow_uninst: set[Optional[str]],
50) -> bool:
51 if len(nuninst_after_arch - allow_uninst) > len(nuninst_now_arch - allow_uninst):
52 return True
54 regression = nuninst_after_arch - nuninst_now_arch
55 if not regression.isdisjoint(must_be_installable):  # coverage: 55 ↛ 56 (condition never true)
56 return True
57 return False
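# Example (illustrative sketch with hypothetical package names): how
# is_nuninst_worse judges one architecture. A new uninstallable package on
# the "must be installable" list is a regression even when the total count
# of uninstallable packages did not grow.
_now = {"pkg-a", "pkg-b"}
_after = {"pkg-a", "build-essential"}
assert is_nuninst_worse(["build-essential"], _now, _after, set())
# a regression on an explicitly allowed package is tolerated:
assert not is_nuninst_worse([], {"pkg-a"}, {"pkg-a", "flaky"}, {"flaky"})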
60class MigrationManager(object):
62 def __init__(
63 self,
64 options: optparse.Values,
65 suite_info: "Suites",
66 all_binaries: dict["BinaryPackageId", "BinaryPackage"],
67 pkg_universe: "BinaryPackageUniverse",
68 constraints: dict[str, list[str]],
69 allow_uninst: dict[str, set[Optional[str]]],
70 migration_item_factory: "MigrationItemFactory",
71 hints: "HintCollection",
72 ) -> None:
73 self.options = options
74 self.suite_info = suite_info
75 self.all_binaries = all_binaries
76 self.pkg_universe = pkg_universe
77 self.constraints = constraints
78 self.allow_uninst = allow_uninst
79 self.hints = hints
80 self._transactions: list[MigrationTransactionState] = []
81 self._all_architectures: frozenset[str] = frozenset(self.options.architectures)
82 self._migration_item_factory = migration_item_factory
84 @property
85 def current_transaction(self) -> Optional[MigrationTransactionState]:
86 return self._transactions[-1] if self._transactions else None
88 def compute_groups(
89 self,
90 item: "MigrationItem",
91 allow_smooth_updates: bool = True,
92 removals: set["BinaryPackageId"] = cast(set["BinaryPackageId"], frozenset()),
93 ) -> tuple[
94 str, set["BinaryPackageId"], set["BinaryPackageId"], set["BinaryPackageId"]
95 ]:
96 """Compute the groups of binaries being migrated by item
98 This method will compute the binaries that will be added to,
99 replaced in or removed from the target suite and which of
100 the removals are smooth updatable.
102 Parameters:
103 * "allow_smooth_updates" is a boolean determining whether smooth-
104 updates are permitted in this migration. When set to False,
105 the "smoothbins" return value will always be the empty set.
106 Any value that would have been there will now be in "rms"
107 instead. (defaults: True)
108 * "removals" is a set of binaries that is assumed to be
109 removed at the same time as this migration (e.g. in the same
110 "easy"-hint). This may affect what if some binaries are
111 smooth updated or not. (defaults: empty-set)
112 - Binaries must be given as ("package-name", "version",
113 "architecture") tuples.
115 Returns a tuple (source_name, adds, rms, smoothbins). "adds" is a
116 set of binaries that will be updated in or appear in the target
117 suite after the migration. "rms" is a set of binaries that are not
118 smooth-updatable (or could be, but there is no reason to let them
119 be smooth updated). "smoothbins" is the set of binaries that are
120 to be smooth-updated.
122 Each "binary" in "adds", "rms" and "smoothbins" will be a
123 tuple of ("package-name", "version", "architecture") and are
124 thus tuples suitable for passing on to the
125 InstallabilityTester.
128 Unlike migrate_items_to_target_suite, this will not modify
129 any data structure.
130 """
131 # local copies for better performances
132 item_package = item.package
133 target_suite = self.suite_info.target_suite
134 binaries_t = target_suite.binaries
136 adds = set()
138 # remove all binary packages (if the source already exists)
139 if item.architecture == "source" or not item.is_removal:
140 source_name = item_package
141 if source_name in target_suite.sources:
142 rms, smoothbins = self._compute_removals(
143 item, allow_smooth_updates, removals
144 )
145 else:
146 rms = set()
147 smoothbins = set()
149 # single binary removal; used for clearing up after smooth
150 # updates but not supported as a manual hint
151 else:
152 assert item_package in binaries_t[item.architecture]
153 pkg_id_t = binaries_t[item.architecture][item_package].pkg_id
154 binary, ver, parch = pkg_id_t
155 if ver != item.version:
156 raise MigrationConstraintException(
157 "trying cruft removal item %s, while %s has %s/%s on %s"
158 % (item, target_suite.name, binary, ver, parch)
159 )
160 source_name = binaries_t[item.architecture][item_package].source
161 rms = {pkg_id_t}
162 smoothbins = set()
164 # add the new binary packages (if we are not removing)
165 if not item.is_removal:
166 source_suite = item.suite
167 binaries_s = source_suite.binaries
168 source_data = source_suite.sources[source_name]
169 source_ver_new = source_data.version
170 sources_t = target_suite.sources
171 if source_name in sources_t:
172 source_data_old = sources_t[source_name]
173 source_ver_old = source_data_old.version
174 if apt_pkg.version_compare(source_ver_old, source_ver_new) > 0:
175 raise MigrationConstraintException(
176 "trying src:%s %s, while %s has %s"
177 % (
178 source_name,
179 source_ver_new,
180 target_suite.name,
181 source_ver_old,
182 )
183 )
185 for pkg_id_s in source_data.binaries:
186 binary, ver, parch = pkg_id_s
187 if item.architecture not in ["source", parch]:
188 continue
190 if binaries_s[parch][binary].source != source_name:
191 # This binary package has been hijacked by some other source.
192 # So don't add it as part of this update.
193 #
194 # Also, if this isn't a source update, don't remove
195 # the package that's been hijacked if it's present.
196 if item.architecture != "source":  # coverage: 196 ↛ 200 (condition never false)
197 for rm_item in list(rms):
198 if (rm_item[0], rm_item[2]) == (binary, parch):
199 rms.remove(rm_item)
200 continue
202 # Don't add the binary if it is cruft; smooth updates will keep it if possible
203 if (
204 parch not in self.options.outofsync_arches
205 and source_data.version != binaries_s[parch][binary].source_version
206 ):
207 continue
209 if binary in binaries_t[parch]:
210 oldver = binaries_t[parch][binary].version
211 if apt_pkg.version_compare(oldver, ver) > 0:
212 raise MigrationConstraintException(
213 "trying %s %s from src:%s %s, while %s has %s"
214 % (
215 binary,
216 ver,
217 source_name,
218 source_ver_new,
219 target_suite.name,
220 oldver,
221 )
222 )
224 adds.add(pkg_id_s)
226 return (source_name, adds, rms, smoothbins)
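# Example (illustrative sketch; package names and versions are made up):
# the shape of compute_groups' return value for a source migration of
# src:foo 2.0-1 where libfoo1 is left behind for smooth updates.
_source_name = "foo"
_adds = {("foo", "2.0-1", "amd64"), ("libfoo2", "2.0-1", "amd64")}
_rms = {("foo", "1.0-1", "amd64")}             # old binaries removed outright
_smoothbins = {("libfoo1", "1.0-1", "amd64")}  # old binaries kept as cruft
# _rms and _smoothbins partition the old binaries; _adds come from the source suite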
228 def _compute_removals(
229 self,
230 item: "MigrationItem",
231 allow_smooth_updates: bool,
232 removals: set["BinaryPackageId"],
233 ) -> tuple[set["BinaryPackageId"], set["BinaryPackageId"]]:
234 pkg_universe = self.pkg_universe
235 source_suite = item.suite
236 target_suite = self.suite_info.target_suite
237 binaries_s = source_suite.binaries
238 binaries_t = target_suite.binaries
239 source_name = item.package
240 source_data = target_suite.sources[source_name]
242 bins: list["BinaryPackageId"] = []
243 # remove all the binaries
245 # first, build a list of eligible binaries
246 for pkg_id in source_data.binaries:
247 binary, _, parch = pkg_id
248 if item.architecture != "source" and parch != item.architecture:
249 continue
251 # Work around #815995
252 if (  # coverage: 252 ↛ 257
253 item.architecture == "source"
254 and item.is_removal
255 and binary not in binaries_t[parch]
256 ):
257 continue
259 bin_data = binaries_t[parch][binary]
260 # Do not include hijacked binaries nor cruft (cruft is handled separately)
261 if (
262 bin_data.source != source_name
263 or bin_data.source_version != source_data.version
264 ):
265 continue
266 bins.append(pkg_id)
268 if allow_smooth_updates and source_suite.suite_class.is_primary_source:
269 smoothbins = find_smooth_updateable_binaries(
270 bins,
271 source_suite.sources[source_name],
272 pkg_universe,
273 target_suite,
274 binaries_t,
275 binaries_s,
276 removals,
277 self.options.smooth_updates,
278 self.hints,
279 )
280 else:
281 smoothbins = set()
283 # remove all the binaries which aren't being smooth updated
284 if (
285 item.architecture != "source"
286 and source_suite.suite_class.is_additional_source
287 ):
288 # Special-case for pu/tpu:
289 # if this is a binary migration from *pu, only the arch:any
290 # packages will be present. ideally dak would also populate
291 # the arch-indep packages, but as that's not the case we
292 # must keep them around; they will not be re-added by the
293 # migration and would otherwise end up missing from testing
294 all_binaries = self.all_binaries
295 rms = {
296 pkg_id
297 for pkg_id in bins
298 if pkg_id not in smoothbins
299 and all_binaries[pkg_id].architecture != "all"
300 }
301 else:
302 rms = {pkg_id for pkg_id in bins if pkg_id not in smoothbins}
304 return rms, smoothbins
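# Example (illustrative sketch with hypothetical data): the *pu special
# case above, mirrored with plain sets. _arch_of stands in for
# all_binaries[pkg_id].architecture, which can be "all" even though the
# pkg_id itself carries a concrete architecture.
_bins = [("foo", "1.0-1", "amd64"), ("foo-doc", "1.0-1", "amd64")]
_smoothbins = set()
_arch_of = {"foo": "amd64", "foo-doc": "all"}
_rms = {p for p in _bins if p not in _smoothbins and _arch_of[p[0]] != "all"}
assert _rms == {("foo", "1.0-1", "amd64")}  # arch:all foo-doc is retained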
306 def _apply_item_to_target_suite(
307 self,
308 item: "MigrationItem",
309 removals: set["BinaryPackageId"] = cast(set["BinaryPackageId"], frozenset()),
310 ) -> tuple[set["BinaryPackageId"], set["BinaryPackageId"]]:
311 """Apply a change to the target suite as requested by `item`
313 An optional set of binaries may be passed in "removals". Binaries listed
314 in this set will be assumed to be removed at the same time as the "item"
315 will migrate. This may change which binaries will be smooth-updated.
316 - Binaries in this set must be instances of BinaryPackageId.
318 This method applies the changes required by the action `item`, tracking
319 them so that they can be reverted.
321 The method returns a tuple containing the set of binary packages
322 affected by the change (as BinaryPackageId tuples) and the set of
323 smooth-updated binaries; undo info is recorded in the current transaction.
324 """
325 undo: UndoItem = {"binaries": {}, "sources": {}, "virtual": {}}
327 affected_all = set()
328 updated_binaries = set()
330 # local copies for better performance
331 source_suite = item.suite
332 target_suite = self.suite_info.target_suite
333 packages_t = target_suite.binaries
334 provides_t = target_suite.provides_table
335 pkg_universe = self.pkg_universe
336 transaction = self.current_transaction
338 source_name, updates, rms, smooth_updates = self.compute_groups(
339 item, removals=removals
340 )
341 sources_t = target_suite.sources
342 # Handle the source package
343 old_source = sources_t.get(source_name)
345 # add/update the source package
346 if item.is_removal and item.architecture == "source":
347 del sources_t[source_name]
348 else:
349 # with OUTOFSYNC_ARCHES, the source can be removed before out-of-sync binaries are removed
350 if not item.is_removal or source_name in source_suite.sources:  # coverage: 350 ↛ 359 (condition never false)
351 # always create a copy of the SourcePackage object
352 sources_t[source_name] = copy.copy(source_suite.sources[source_name])
353 if old_source is not None:
354 # always create a new list of binaries
355 sources_t[source_name].binaries = copy.copy(old_source.binaries)
356 else:
357 sources_t[source_name].binaries = set()
359 undo["sources"][source_name] = old_source
361 eqv_set = compute_eqv_set(pkg_universe, updates, rms)
363 # remove all the binaries which aren't being smooth updated
364 for rm_pkg_id in rms:
365 binary, version, parch = rm_pkg_id
366 pkey = (binary, parch)
367 binaries_t_a = packages_t[parch]
368 provides_t_a = provides_t[parch]
370 pkg_data = binaries_t_a[binary]
371 # save the old binary for undo
372 undo["binaries"][pkey] = rm_pkg_id
373 if pkey not in eqv_set:
374 # all the reverse dependencies are affected by
375 # the change
376 affected_all.update(pkg_universe.reverse_dependencies_of(rm_pkg_id))
377 affected_all.update(pkg_universe.negative_dependencies_of(rm_pkg_id))
379 # remove the provided virtual packages
380 for provided_pkg, prov_version, _ in pkg_data.provides:
381 key = (provided_pkg, parch)
382 if key not in undo["virtual"]:
383 undo["virtual"][key] = provides_t_a[provided_pkg].copy()
384 provides_t_a[provided_pkg].remove((binary, prov_version))
385 if not provides_t_a[provided_pkg]:
386 del provides_t_a[provided_pkg]
387 # for source removal, the source is already gone
388 if source_name in sources_t:
389 sources_t[source_name].binaries.discard(rm_pkg_id)
390 # finally, remove the binary package
391 del binaries_t_a[binary]
392 target_suite.remove_binary(rm_pkg_id)
394 # Add/Update binary packages in testing
395 if updates:
396 packages_s = source_suite.binaries
398 for updated_pkg_id in updates:
399 binary, new_version, parch = updated_pkg_id
400 key = (binary, parch)
401 binaries_t_a = packages_t[parch]
402 provides_t_a = provides_t[parch]
403 equivalent_replacement = key in eqv_set
405 # obviously, added/modified packages are affected
406 if not equivalent_replacement:
407 affected_all.add(updated_pkg_id)
408 # if the binary already exists in testing, it is currently
409 # built by another source package. we therefore remove the
410 # version built by the other source package, after marking
411 # all of its reverse dependencies as affected
412 if binary in binaries_t_a:
413 old_pkg_data = binaries_t_a[binary]
414 old_pkg_id = old_pkg_data.pkg_id
415 # save the old binary package
416 undo["binaries"][key] = old_pkg_id
417 if not equivalent_replacement:  # coverage: 417 ↛ 422 (condition never false)
418 # all the reverse conflicts
419 affected_all.update(
420 pkg_universe.reverse_dependencies_of(old_pkg_id)
421 )
422 target_suite.remove_binary(old_pkg_id)
423 elif transaction and transaction.parent_transaction:
424 # the binary isn't in the target suite, but it may have been at
425 # the start of the current hint and have been removed
426 # by an earlier migration. if that's the case then we
427 # will have a record of the older instance of the binary
428 # in the undo information. we can use that to ensure
429 # that the reverse dependencies of the older binary
430 # package are also checked.
431 # reverse dependencies built from this source can be
432 # ignored as their reverse trees are already handled
433 # by this function
434 for tundo, tpkg in transaction.parent_transaction.undo_items:
435 if key in tundo["binaries"]:  # coverage: 435 ↛ 436 (condition never true)
436 tpkg_id = tundo["binaries"][key]
437 affected_all.update(
438 pkg_universe.reverse_dependencies_of(tpkg_id)
439 )
441 # add/update the binary package from the source suite
442 new_pkg_data = packages_s[parch][binary]
443 binaries_t_a[binary] = new_pkg_data
444 target_suite.add_binary(updated_pkg_id)
445 updated_binaries.add(updated_pkg_id)
446 # add the binary to the source package
447 sources_t[source_name].binaries.add(updated_pkg_id)
448 # register new provided packages
449 for provided_pkg, prov_version, _ in new_pkg_data.provides:
450 key = (provided_pkg, parch)
451 if key not in undo["virtual"]:
452 restore_as = (
453 provides_t_a[provided_pkg].copy()
454 if provided_pkg in provides_t_a
455 else None
456 )
457 undo["virtual"][key] = restore_as
458 provides_t_a[provided_pkg].add((binary, prov_version))
459 if not equivalent_replacement:
460 # all the reverse dependencies are affected by the change
461 affected_all.add(updated_pkg_id)
462 affected_all.update(
463 pkg_universe.negative_dependencies_of(updated_pkg_id)
464 )
466 # Also include the transitive rdeps of the packages found so far
467 compute_reverse_tree(pkg_universe, affected_all)
468 if transaction:
469 transaction.add_undo_item(undo, updated_binaries)
470 # return the affected packages (direct and transitive)
471 return (affected_all, smooth_updates)
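# Example (illustrative sketch with hypothetical data): the shape of the
# undo record built above. Each map stores the *previous* state so that a
# rollback can restore it; real "sources" values are SourcePackage objects
# (None when the source was not in the target suite before).
_undo_example = {
    "sources": {"foo": None},
    "binaries": {("libfoo1", "amd64"): ("libfoo1", "1.0-1", "amd64")},
    "virtual": {("libfoo", "amd64"): {("libfoo1", "1.0-1")}},
}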
473 def _apply_multiple_items_to_target_suite(
474 self, items: list["MigrationItem"]
475 ) -> tuple[
476 bool,
477 Union[frozenset[str], set[str]],
478 set["BinaryPackageId"],
479 set["BinaryPackageId"],
480 ]:
481 is_source_migration = False
482 if len(items) == 1:
483 item = items[0]
484 # apply the changes
485 affected_all, smooth_updates = self._apply_item_to_target_suite(item)
486 if item.architecture == "source":
487 affected_architectures: Union[frozenset[str], set[str]] = (
488 self._all_architectures
489 )
490 is_source_migration = True
491 else:
492 affected_architectures = {item.architecture}
493 else:
494 affected_architectures = set()
495 removals: set[BinaryPackageId] = set()
496 affected_all = set()
497 smooth_updates = set()
498 for item in items:
499 _, _, rms, _ = self.compute_groups(item, allow_smooth_updates=False)
500 removals.update(rms)
501 affected_architectures.add(item.architecture)
503 if "source" in affected_architectures:
504 affected_architectures = self._all_architectures
505 is_source_migration = True
507 for item in items:
508 item_affected_all, item_smooth = self._apply_item_to_target_suite(
509 item, removals=removals
510 )
511 affected_all.update(item_affected_all)
512 smooth_updates.update(item_smooth)
514 return is_source_migration, affected_architectures, affected_all, smooth_updates
516 def migrate_items_to_target_suite(
517 self,
518 items: list["MigrationItem"],
519 nuninst_now: dict[str, set[str]],
520 stop_on_first_regression: bool = True,
521 ) -> tuple[bool, dict[str, set[str]], str, set["MigrationItem"]]:
522 is_accepted = True
523 target_suite = self.suite_info.target_suite
524 packages_t = target_suite.binaries
526 nobreakall_arches = self.options.nobreakall_arches
527 new_arches = self.options.new_arches
528 break_arches = self.options.break_arches
529 arch = None
531 is_source_migration, affected_architectures, affected_all, smooth_updates = (
532 self._apply_multiple_items_to_target_suite(items)
533 )
535 # Copy nuninst_now - we have to deep clone affected
536 # architectures.
538 # NB: We do this *after* updating testing as we have to filter out
539 # removed binaries. Otherwise, uninstallable binaries that were
540 # removed by the item would still be counted.
542 nuninst_after = clone_nuninst(
543 nuninst_now, packages_s=packages_t, architectures=affected_architectures
544 )
545 must_be_installable = self.constraints["keep-installable"]
547 # check the affected packages on all the architectures
548 for arch in sorted(affected_architectures):
549 check_archall = arch in nobreakall_arches
551 check_installability(
552 target_suite,
553 packages_t,
554 arch,
555 affected_all,
556 check_archall,
557 nuninst_after,
558 )
560 # if the uninstallability counter is worse than before, break the loop
561 if stop_on_first_regression:
562 if is_nuninst_worse(
563 must_be_installable,
564 nuninst_now[arch],
565 nuninst_after[arch],
566 self.allow_uninst[arch],
567 ):
568 if arch not in break_arches:
569 is_accepted = False
570 break
571 # ... except for a few special cases:
572 elif is_source_migration or arch in new_arches:  # coverage: 572 ↛ 575 (condition never false)
573 pass
574 else:
575 is_accepted = False
576 break
578 new_cruft = {
579 self._migration_item_factory.generate_removal_for_cruft_item(x)
580 for x in smooth_updates
581 }
583 return (is_accepted, nuninst_after, arch, new_cruft)
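# Example (editor's illustration; _rejects is a hypothetical helper name):
# the per-architecture acceptance rule from the loop above, restated as a
# standalone predicate. It is only consulted when stop_on_first_regression
# is set.
def _rejects(regressed, arch, break_arches, new_arches, is_source_migration):
    if not regressed:
        return False
    if arch not in break_arches:
        return True
    # on a break arch, only source migrations and new arches are excused
    return not (is_source_migration or arch in new_arches)

assert _rejects(True, "amd64", set(), set(), False)
assert not _rejects(True, "hurd-i386", {"hurd-i386"}, set(), True)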
585 @contextlib.contextmanager
586 def start_transaction(self) -> Iterator[MigrationTransactionState]:
587 tmts = MigrationTransactionState(
588 self.suite_info, self.all_binaries, self.current_transaction
589 )
590 self._transactions.append(tmts)
591 try:
592 yield tmts
593 except Exception:
594 if not tmts.is_committed and not tmts.is_rolled_back:
595 tmts.rollback()
596 raise
597 finally:
598 self._transactions.pop()  # coverage: 598 ↛ exit (the raise on line 596 was never executed)
599 assert tmts.is_rolled_back or tmts.is_committed
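# Example (illustrative usage sketch; `mm` is assumed to be a configured
# MigrationManager): on an exception inside the with-block the transaction
# is rolled back automatically before the error propagates.
try:
    with mm.start_transaction() as tx:
        ...  # apply migration items against the target suite here
        raise MigrationConstraintException("simulated failure")
except MigrationConstraintException:
    assert tx.is_rolled_back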