Coverage for britney2/installability/solver.py: 98% (203 statements)
coverage.py v6.5.0, created at 2025-03-23 07:34 +0000

# -*- coding: utf-8 -*-

# Copyright (C) 2012 Niels Thykier <niels@thykier.net>
# - Includes code by Paul Harrison
#   (http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py)

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

import logging
from collections import deque
from itertools import chain
from typing import TYPE_CHECKING
from collections.abc import Iterable

from britney2.utils import ifilter_only, iter_except

if TYPE_CHECKING:
    from .. import BinaryPackageId
    from ..migrationitem import MigrationItem
    from .tester import InstallabilityTester
    from .universe import BinaryPackageUniverse


class OrderNode(object):
    __slots__ = ["before", "after"]

    def __init__(self) -> None:
        self.after: set[str] = set()
        self.before: set[str] = set()


def compute_scc(graph: dict[str, OrderNode]) -> list[tuple[str, ...]]:
    """Iterative algorithm for strongly-connected components

    Iterative variant of Tarjan's algorithm for finding strongly-connected
    components.

    :param graph: dict of all nodes along with their edges (in "before" and "after")
    :return: List of components (each component is a tuple of nodes)
    """
    result: list[tuple[str, ...]] = []
    low: dict[str, int] = {}
    node_stack: list[str] = []

    def _cannot_be_a_scc(graph_node: str) -> bool:
        if not graph[graph_node].before or not graph[graph_node].after:
            # Short-cut obviously isolated component
            result.append((graph_node,))
            # Set the item number so high that no other item might
            # mistakenly assume that they can form a component via
            # this item.
            # (Replaces the "is w on the stack" check for us from
            # the original algorithm)
            low[graph_node] = len(graph) + 1
            return True
        return False

    def _handle_succ(
        parent: str, parent_num: int, successors_remaining: list[str]
    ) -> bool:
        while successors_remaining:
            succ = successors_remaining.pop()
            succ_num = low.get(succ, None)
            if succ_num is not None:
                if succ_num < parent_num:
                    # These two nodes are probably part of the
                    # same SCC (or succ is isolated)
                    low[parent] = parent_num = succ_num
                continue
            # It cannot be a part of an SCC if it does not have depends
            # or reverse depends.
            if _cannot_be_a_scc(succ):
                continue
            succ_num = len(low)
            low[succ] = succ_num
            work_stack.append((succ, len(node_stack), succ_num, graph[succ].before))
            node_stack.append(succ)
            # "Recurse" into the child node first
            return True
        return False

    # graph is a dict, shouldn't need sorting
    for n in graph:
        if n in low:
            continue
        # It cannot be a part of an SCC if it does not have depends
        # or reverse depends.
        if _cannot_be_a_scc(n):
            continue

        root_num = len(low)
        low[n] = root_num
        # DFS work-stack needed to avoid call recursion. It (more or less)
        # replaces the variables on the call stack in Tarjan's algorithm
        work_stack = [(n, len(node_stack), root_num, graph[n].before)]
        node_stack.append(n)
        while work_stack:
            node, stack_idx, orig_node_num, successors = work_stack[-1]
            if successors and _handle_succ(node, low[node], sorted(successors)):
                # _handle_succ has pushed a new node on to work_stack
                # and we need to "restart" the loop to handle that first
                continue

            # This node is done; remove it from the work stack
            work_stack.pop()

            # This node is out of successors. Push up the "low" value
            # (Exception: the root node has no parent)
            node_num = low[node]
            if work_stack:
                parent = work_stack[-1][0]
                parent_num = low[parent]
                if node_num <= parent_num:
                    # This node is a part of a component with its parent.
                    # We update the parent's node number and push the
                    # responsibility of building the component onto the
                    # parent.
                    low[parent] = node_num
                    continue
                if node_num != orig_node_num:
                    # The node is a part of an SCC with an ancestor (and parent)
                    continue

            # We got a component
            component = tuple(node_stack[stack_idx:])
            del node_stack[stack_idx:]
            result.append(component)
            # Re-number all items, so no other item might
            # mistakenly assume that they can form a component via
            # one of these items.
            # (Replaces the "is w on the stack" check for us from
            # the original algorithm)
            new_num = len(graph) + 1
            for item in component:
                low[item] = new_num

    assert not node_stack

    return result
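
# Illustrative sketch (not part of the original module): compute_scc on a tiny
# hand-built graph. A three-node cycle collapses into a single component, while
# a node lacking either kind of edge is short-cut into a singleton. Component
# and member ordering below reflect one possible traversal order:
#
#     graph = {name: OrderNode() for name in ("a", "b", "c", "d")}
#     for first, second in (("a", "b"), ("b", "c"), ("c", "a")):
#         graph[first].before.add(second)
#         graph[second].after.add(first)
#     compute_scc(graph)  # => [("a", "b", "c"), ("d",)]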

def apply_order(
    key: str,
    other: str,
    order: dict[str, OrderNode],
    logger: logging.Logger,
    order_cause: str,
    invert: bool = False,
    order_sub_cause: str = "",
) -> None:
    if other == key:
        # "Self-relation" => ignore
        return
    order_key = order[key]
    if invert:
        order[other].after.add(key)
        order_set = order_key.before
    else:
        order[other].before.add(key)
        order_set = order_key.after
    if (
        logger.isEnabledFor(logging.DEBUG) and other not in order_set
    ):  # pragma: no cover
        if order_sub_cause:
            order_sub_cause = " (%s)" % order_sub_cause
        logger.debug(
            "%s induced order%s: %s before %s", order_cause, order_sub_cause, key, other
        )
    # Defer adding until the end to ensure we only log the first time a dependency order is introduced.
    order_set.add(other)
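
# Illustrative sketch (hypothetical item names): a single apply_order call
# records the ordering edge on both OrderNode endpoints:
#
#     log = logging.getLogger("demo")
#     order = {"pkg-a/1.0": OrderNode(), "pkg-b/2.0": OrderNode()}
#     apply_order("pkg-a/1.0", "pkg-b/2.0", order, log, "Dependency")
#     assert "pkg-b/2.0" in order["pkg-a/1.0"].after
#     assert "pkg-a/1.0" in order["pkg-b/2.0"].before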


class InstallabilitySolver(object):
    def __init__(
        self, universe: "BinaryPackageUniverse", inst_tester: "InstallabilityTester"
    ) -> None:
        """Create a new installability solver"""
        self._universe = universe
        self._inst_tester = inst_tester
        logger_name = ".".join((self.__class__.__module__, self.__class__.__name__))
        self.logger = logging.getLogger(logger_name)

    def _compute_group_order_rms(
        self,
        rms: Iterable["BinaryPackageId"],
        order: dict[str, OrderNode],
        key: str,
        ptable: dict["BinaryPackageId", str],
        going_out: set["BinaryPackageId"],
    ) -> None:
        sat_in_testing = self._inst_tester.any_of_these_are_in_the_suite
        universe = self._universe
        logger = self.logger
        for rdep in chain.from_iterable(
            universe.reverse_dependencies_of(r) for r in rms
        ):
            # The binaries have reverse dependencies in testing;
            # check if we can/should migrate them first.
            for depgroup in universe.dependencies_of(rdep):
                rigid = depgroup - going_out
                if sat_in_testing(rigid):
                    # (partly) satisfied by testing, assume it is okay
                    continue
                if rdep in ptable:
                    apply_order(key, ptable[rdep], order, logger, "Removal")

    def _compute_order_for_dependency(
        self,
        key: str,
        depgroup: frozenset["BinaryPackageId"],
        ptable: dict["BinaryPackageId", str],
        order: dict[str, OrderNode],
        going_in: set["BinaryPackageId"],
    ) -> None:
        # We have three cases:
        # - "swap" (replace an existing binary with a newer version)
        # - "addition" (add a new binary without removing any)
        # - "removal" (remove a binary without providing a new one)
        #
        # The problem is that only the latter two require
        # an ordering. A "swap" (in itself) should not
        # affect us.
        other_adds = set()
        other_rms = set()
        logger = self.logger
        for d in ifilter_only(ptable, depgroup):
            other = ptable[d]
            if d in going_in:
                # "other" provides something "key" needs,
                # schedule accordingly.
                other_adds.add(other)
            else:
                # "other" removes something "key" needs,
                # schedule accordingly.
                other_rms.add(other)

        for other in other_adds - other_rms:
            apply_order(key, other, order, logger, "Dependency", order_sub_cause="add")
        for other in other_rms - other_adds:
            apply_order(
                key,
                other,
                order,
                logger,
                "Dependency",
                order_sub_cause="remove",
                invert=True,
            )
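
    # Illustrative note (not part of the original module): the set differences
    # above are what make a "swap" ordering-neutral. If the same item both adds
    # and removes a provider of the dependency, it lands in both other_adds and
    # other_rms, cancels out of both differences, and no ordering edge is
    # recorded for it.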

    def _compute_group_order_adds(
        self,
        adds: Iterable["BinaryPackageId"],
        order: dict[str, OrderNode],
        key: str,
        ptable: dict["BinaryPackageId", str],
        going_out: set["BinaryPackageId"],
        going_in: set["BinaryPackageId"],
    ) -> None:
        sat_in_testing = self._inst_tester.any_of_these_are_in_the_suite
        universe = self._universe
        for depgroup in chain.from_iterable(universe.dependencies_of(a) for a in adds):
            # Check if this item should migrate before others
            # (e.g. because they depend on a new [version of a]
            # binary provided by this item).
            rigid = depgroup - going_out
            if sat_in_testing(rigid):
                # (partly) satisfied by testing, assume it is okay
                continue
            self._compute_order_for_dependency(key, depgroup, ptable, order, going_in)

    def _compute_group_order(
        self,
        groups: Iterable[
            tuple[
                "MigrationItem",
                Iterable["BinaryPackageId"],
                Iterable["BinaryPackageId"],
            ]
        ],
        key2item: dict[str, "MigrationItem"],
    ) -> dict[str, OrderNode]:
        universe = self._universe
        ptable = {}
        order: dict[str, OrderNode] = {}
        going_out: set["BinaryPackageId"] = set()
        going_in: set["BinaryPackageId"] = set()
        logger = self.logger
        debug_solver = logger.isEnabledFor(logging.DEBUG)

        # Build the tables
        for item, adds, rms in groups:
            key = str(item)
            key2item[key] = item
            order[key] = OrderNode()
            going_in.update(adds)
            going_out.update(rms)
            for x in chain(adds, rms):
                ptable[x] = key

        if debug_solver:  # pragma: no cover
            self._dump_groups(groups)

        # This large loop will add ordering constraints on each "item"
        # that migrates based on various rules.
        for item, adds, rms in groups:
            key = str(item)
            oldcons = set(
                chain.from_iterable(universe.negative_dependencies_of(r) for r in rms)
            )
            newcons = set(
                chain.from_iterable(universe.negative_dependencies_of(a) for a in adds)
            )
            oldcons -= newcons
            # Some of the old binaries have "conflicts" that will
            # be removed.
            for o in ifilter_only(ptable, oldcons):
                # "key" removes a conflict with one of
                # "other"'s binaries, so it is probably a good
                # idea to migrate "key" before "other"
                apply_order(key, ptable[o], order, logger, "Conflict", invert=True)

            self._compute_group_order_rms(rms, order, key, ptable, going_out)
            self._compute_group_order_adds(
                adds, order, key, ptable, going_out, going_in
            )

        return order

    def _merge_items_into_components(
        self, comps: list[tuple[str, ...]], order: dict[str, OrderNode]
    ) -> dict[str, tuple[str, ...]]:
        merged = {}
        scc: dict[str, tuple[str, ...]] = {}
        debug_solver = self.logger.isEnabledFor(logging.DEBUG)
        for com in comps:
            scc_id = com[0]
            scc[scc_id] = com
            merged[scc_id] = scc_id
            if len(com) < 2:
                # Trivial case
                continue
            so_before = order[scc_id].before
            so_after = order[scc_id].after
            for n in com:
                if n == scc_id:
                    continue
                so_before.update(order[n].before)
                so_after.update(order[n].after)
                merged[n] = scc_id
                del order[n]
            if debug_solver:  # pragma: no cover
                self.logger.debug("SCC: %s -- %s", scc_id, str(sorted(com)))

        for com in comps:
            node = com[0]
            nbefore = set(merged[b] for b in order[node].before)
            nafter = set(merged[b] for b in order[node].after)

            # Drop self-relations (usually caused by the merging)
            nbefore.discard(node)
            nafter.discard(node)
            order[node].before = nbefore
            order[node].after = nafter

        for com in comps:
            scc_id = com[0]

            for other_scc_id in order[scc_id].before:
                order[other_scc_id].after.add(scc_id)
            for other_scc_id in order[scc_id].after:
                order[other_scc_id].before.add(scc_id)

        return scc
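
    # Illustrative note (not part of the original module): if compute_scc
    # returned [("a", "b"), ("c",)], then after merging, scc maps
    # "a" -> ("a", "b") and "c" -> ("c",); "b" is folded into "a" (its
    # OrderNode is deleted and its edges are unioned into "a"'s), and the
    # remaining edges are rewritten to point at the representatives.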

    def solve_groups(
        self,
        groups: Iterable[
            tuple[
                "MigrationItem",
                Iterable["BinaryPackageId"],
                Iterable["BinaryPackageId"],
            ]
        ],
    ) -> list[list["MigrationItem"]]:
        """
        :param groups: iterable of tuples. The first element is a
        MigrationItem, the second element is a collection of BinaryPackageId
        reflecting requested updates, and the third element is a collection of
        BinaryPackageId reflecting requested removals.
        """
        result: list[list["MigrationItem"]] = []
        emitted: set[str] = set()
        queue: deque[str] = deque()
        key2item: dict[str, "MigrationItem"] = {}
        debug_solver = self.logger.isEnabledFor(logging.DEBUG)

        order = self._compute_group_order(groups, key2item)

        # === MILESTONE: Partial-order constraints computed ===

        # At this point, we have computed all the partial-order
        # constraints needed. Some of these may have created strongly
        # connected components (SCCs) [of size 2 or greater], each of
        # which represents a group of items that (we believe) must
        # migrate together.
        #
        # Each one of those components will become an "easy" hint.

        comps = compute_scc(order)
        # Now that we have the SCCs (in comps), we select one item from
        # each SCC to represent the group and serve as an ID for that
        # SCC.
        # * scc_keys[scc_id] => all the item-keys in that SCC
        #
        # We also "repair" the ordering, so we know in which order the
        # hints should be emitted.
        scc_keys = self._merge_items_into_components(comps, order)

        if debug_solver:  # pragma: no cover
            self.logger.debug("-- PARTIAL ORDER --")

        initial_round = []
        for com in sorted(order):
            if debug_solver and order[com].before:  # pragma: no cover
                self.logger.debug("N: %s <= %s", com, str(sorted(order[com].before)))
            if not order[com].after:
                # This component can be scheduled immediately, add it
                # to the queue
                initial_round.append(com)
            elif debug_solver:  # pragma: no cover
                self.logger.debug("N: %s >= %s", com, str(sorted(order[com].after)))

        queue.extend(sorted(initial_round, key=len))
        del initial_round

        if debug_solver:  # pragma: no cover
            self.logger.debug("-- END PARTIAL ORDER --")
            self.logger.debug("-- LINEARIZED ORDER --")

        for cur in iter_except(queue.popleft, IndexError):
            if order[cur].after <= emitted and cur not in emitted:
                # This item is ready to be emitted right now
                if debug_solver:  # pragma: no cover
                    self.logger.debug("%s -- %s", cur, sorted(scc_keys[cur]))
                emitted.add(cur)
                result.append([key2item[x] for x in scc_keys[cur]])
                if order[cur].before:
                    # There are components that come after this one.
                    # Add them to the queue:
                    # - if an item is ready, it will be emitted.
                    # - else, it will be dropped and re-added later.
                    queue.extend(sorted(order[cur].before - emitted, key=len))

        if debug_solver:  # pragma: no cover
            self.logger.debug("-- END LINEARIZED ORDER --")

        return result
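
    # Illustrative sketch (hypothetical objects): solve_groups consumes
    # (item, added-binaries, removed-binaries) tuples and yields batches of
    # MigrationItems in a safe migration order.
    #
    #     solver = InstallabilitySolver(universe, inst_tester)
    #     groups = [(item_a, {bin_a2}, {bin_a1}), (item_b, {bin_b1}, set())]
    #     for batch in solver.solve_groups(groups):
    #         # each batch is a list of MigrationItems that should migrate
    #         # together (a single-item batch migrates alone)
    #         ...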

    def _dump_groups(
        self,
        groups: Iterable[
            tuple[
                "MigrationItem",
                Iterable["BinaryPackageId"],
                Iterable["BinaryPackageId"],
            ]
        ],
    ) -> None:  # pragma: no cover
        self.logger.debug("=== Groups ===")
        for item, adds, rms in groups:
            self.logger.debug("%s => A: %s, R: %s", str(item), str(adds), str(rms))
        self.logger.debug("=== END Groups ===")