# Coverage report artifact (coverage.py v7.6.0, created 2025-10-17 17:32 +0000):
# britney2/installability/solver.py — 98% coverage, 203 statements.
1# Copyright (C) 2012 Niels Thykier <niels@thykier.net>
2# - Includes code by Paul Harrison
3# (http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py)
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU General Public License as published by
7# the Free Software Foundation; either version 2 of the License, or
8# (at your option) any later version.
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13# GNU General Public License for more details.
15import logging
16from collections import deque
17from collections.abc import Iterable
18from itertools import chain
19from typing import TYPE_CHECKING
21from britney2.utils import ifilter_only, iter_except
if TYPE_CHECKING:
24 from .. import BinaryPackageId
25 from ..migrationitem import MigrationItem
26 from .tester import InstallabilityTester
27 from .universe import BinaryPackageUniverse
class OrderNode:
    """A node in the migration ordering graph.

    Each node tracks two edge sets of migration-item keys: ``before``
    and ``after`` (the relation to this node's own scheduling position).
    Both start empty and are filled in by ``apply_order``.
    """

    __slots__ = ["before", "after"]

    def __init__(self) -> None:
        # Freshly created nodes have no ordering constraints at all.
        self.before: set[str] = set()
        self.after: set[str] = set()
def compute_scc(graph: dict[str, OrderNode]) -> list[tuple[str, ...]]:
    """Iterative algorithm for strongly-connected components

    Iterative variant of Tarjan's algorithm for finding strongly-connected
    components (SCCs).  The iterative form avoids Python's recursion limit
    on large graphs by keeping an explicit work stack.

    :param graph: dict of all nodes along with their edges (in "before" and "after")
    :return: List of components (each component is a tuple of items)
    """
    result: list[tuple[str, ...]] = []
    # "low" doubles as the visited-set and the Tarjan low-link numbering.
    low: dict[str, int] = {}
    node_stack: list[str] = []

    def _cannot_be_a_scc(graph_node: str) -> bool:
        # A node lacking either in- or out-edges is trivially its own
        # (singleton) component.
        if not graph[graph_node].before or not graph[graph_node].after:
            # Short-cut obviously isolated component
            result.append((graph_node,))
            # Set the item number so high that no other item might
            # mistakenly assume that they can form a component via
            # this item.
            # (Replaces the "is w on the stack check" for us from
            # the original algorithm)
            low[graph_node] = len(graph) + 1
            return True
        return False

    def _handle_succ(
        parent: str, parent_num: int, successors_remaining: list[str]
    ) -> bool:
        # Process successors of "parent"; returns True when a new node was
        # pushed onto work_stack (the caller must then "recurse" into it).
        while successors_remaining:
            succ = successors_remaining.pop()
            succ_num = low.get(succ, None)
            if succ_num is not None:
                if succ_num < parent_num:
                    # These two nodes are probably part of the
                    # same SCC (or succ is isolated)
                    low[parent] = parent_num = succ_num
                continue
            # It cannot be a part of a SCC if it does not have depends
            # or reverse depends.
            if _cannot_be_a_scc(succ):
                continue
            succ_num = len(low)
            low[succ] = succ_num
            work_stack.append((succ, len(node_stack), succ_num, graph[succ].before))
            node_stack.append(succ)
            # "Recurse" into the child node first
            return True
        return False

    # graph is a dict, shouldn't need sorting
    for n in graph:
        if n in low:
            continue
        # It cannot be a part of a SCC if it does not have depends
        # or reverse depends.
        if _cannot_be_a_scc(n):
            continue

        root_num = len(low)
        low[n] = root_num
        # DFS work-stack needed to avoid call recursion. It (more or less)
        # replaces the variables on the call stack in Tarjan's algorithm
        work_stack = [(n, len(node_stack), root_num, graph[n].before)]
        node_stack.append(n)
        while work_stack:
            node, stack_idx, orig_node_num, successors = work_stack[-1]
            if successors and _handle_succ(node, low[node], sorted(successors)):
                # _handle_succ has pushed a new node on to work_stack
                # and we need to "restart" the loop to handle that first
                continue

            # This node is done; remove it from the work stack
            work_stack.pop()

            # This node is out of successors. Push up the "low" value
            # (Exception: root node has no parent)
            node_num = low[node]
            if work_stack:
                parent = work_stack[-1][0]
                parent_num = low[parent]
                if node_num <= parent_num:
                    # This node is a part of a component with its parent.
                    # We update the parent's node number and push the
                    # responsibility of building the component unto the
                    # parent.
                    low[parent] = node_num
                    continue
                if node_num != orig_node_num:
                    # The node is a part of an SCC with an ancestor (and parent)
                    continue
            # We got a component
            component = tuple(node_stack[stack_idx:])
            del node_stack[stack_idx:]
            result.append(component)
            # Re-number all items, so no other item might
            # mistakenly assume that they can form a component via
            # one of these items.
            # (Replaces the "is w on the stack check" for us from
            # the original algorithm)
            new_num = len(graph) + 1
            for item in component:
                low[item] = new_num

    # Every node pushed during DFS must have been consumed by a component.
    assert not node_stack

    return result
def apply_order(
    key: str,
    other: str,
    order: dict[str, OrderNode],
    logger: logging.Logger,
    order_cause: str,
    invert: bool = False,
    order_sub_cause: str = "",
) -> None:
    """Record an ordering constraint between two migration items.

    Mutates both endpoints in ``order`` so the edge is registered
    symmetrically; with ``invert=True`` the direction of the constraint
    is reversed.  Self-relations (``key == other``) are ignored.

    :param key: key of the item triggering the constraint
    :param other: key of the item being ordered relative to ``key``
    :param order: the ordering graph (mutated in place)
    :param logger: logger used for debug output
    :param order_cause: short label for why the order exists (for logging)
    :param invert: reverse the direction of the constraint
    :param order_sub_cause: optional extra detail for the log message
    """
    if other == key:
        # A node never needs ordering against itself.
        return
    node = order[key]
    if invert:
        order[other].after.add(key)
        target_set = node.before
    else:
        order[other].before.add(key)
        target_set = node.after
    debug_enabled = logger.isEnabledFor(logging.DEBUG)
    if debug_enabled and other not in target_set:  # pragma: no cover
        suffix = " (%s)" % order_sub_cause if order_sub_cause else order_sub_cause
        logger.debug(
            "%s induced order%s: %s before %s", order_cause, suffix, key, other
        )
    # Add last so the debug message above fires only the first time this
    # particular edge is introduced.
    target_set.add(other)
class InstallabilitySolver:
    """Computes an order in which groups of migration items can migrate.

    The solver builds a partial-order graph from dependency/conflict
    relations between the binary packages each item adds or removes,
    collapses strongly-connected components (items that must migrate
    together), and linearizes the result.
    """

    def __init__(
        self, universe: "BinaryPackageUniverse", inst_tester: "InstallabilityTester"
    ) -> None:
        """Create a new installability solver

        :param universe: Universe describing (reverse/negative) dependency
            relations between binary packages.
        :param inst_tester: Tester used to check whether packages are
            present in the target suite.
        """
        self._universe = universe
        self._inst_tester = inst_tester
        logger_name = ".".join((self.__class__.__module__, self.__class__.__name__))
        self.logger = logging.getLogger(logger_name)

    def _compute_group_order_rms(
        self,
        rms: Iterable["BinaryPackageId"],
        order: dict[str, OrderNode],
        key: str,
        ptable: dict["BinaryPackageId", str],
        going_out: set["BinaryPackageId"],
    ) -> None:
        """Add ordering constraints caused by the removals of one item.

        :param rms: Binary packages removed by the item ``key``.
        :param order: The ordering graph being built (mutated in place).
        :param key: Key of the migration item being processed.
        :param ptable: Maps a binary package to the key of the item that
            adds or removes it.
        :param going_out: All binaries removed by any item in the batch.
        """
        sat_in_testing = self._inst_tester.any_of_these_are_in_the_suite
        universe = self._universe
        logger = self.logger
        for rdep in chain.from_iterable(
            universe.reverse_dependencies_of(r) for r in rms
        ):
            # The binaries have reverse dependencies in testing;
            # check if we can/should migrate them first.
            for depgroup in universe.dependencies_of(rdep):
                rigid = depgroup - going_out
                if sat_in_testing(rigid):
                    # (partly) satisfied by testing, assume it is okay
                    continue
                # The reverse dependency belongs to another item in this
                # batch; order the two items relative to each other.
                if rdep in ptable:
                    apply_order(key, ptable[rdep], order, logger, "Removal")

    def _compute_order_for_dependency(
        self,
        key: str,
        depgroup: frozenset["BinaryPackageId"],
        ptable: dict["BinaryPackageId", str],
        order: dict[str, OrderNode],
        going_in: set["BinaryPackageId"],
    ) -> None:
        """Order ``key`` relative to the items that satisfy one of its
        dependency groups.

        :param key: Key of the migration item whose dependency is examined.
        :param depgroup: One alternative-group of binary dependencies.
        :param ptable: Maps a binary package to the key of the item that
            adds or removes it.
        :param order: The ordering graph being built (mutated in place).
        :param going_in: All binaries added by any item in the batch.
        """
        # We got three cases:
        # - "swap" (replace existing binary with a newer version)
        # - "addition" (add new binary without removing any)
        # - "removal" (remove binary without providing a new)
        #
        # The problem is that only the two latter require
        # an ordering.  A "swap" (in itself) should not
        # affect us.
        other_adds = set()
        other_rms = set()
        logger = self.logger
        for d in ifilter_only(ptable, depgroup):
            other = ptable[d]
            if d in going_in:
                # "other" provides something "key" needs,
                # schedule accordingly.
                other_adds.add(other)
            else:
                # "other" removes something "key" needs,
                # schedule accordingly.
                other_rms.add(other)

        # An item in both sets is a "swap" and induces no ordering.
        for other in other_adds - other_rms:
            apply_order(key, other, order, logger, "Dependency", order_sub_cause="add")
        for other in other_rms - other_adds:
            apply_order(
                key,
                other,
                order,
                logger,
                "Dependency",
                order_sub_cause="remove",
                invert=True,
            )

    def _compute_group_order_adds(
        self,
        adds: Iterable["BinaryPackageId"],
        order: dict[str, OrderNode],
        key: str,
        ptable: dict["BinaryPackageId", str],
        going_out: set["BinaryPackageId"],
        going_in: set["BinaryPackageId"],
    ) -> None:
        """Add ordering constraints caused by the additions of one item.

        :param adds: Binary packages added by the item ``key``.
        :param order: The ordering graph being built (mutated in place).
        :param key: Key of the migration item being processed.
        :param ptable: Maps a binary package to the key of the item that
            adds or removes it.
        :param going_out: All binaries removed by any item in the batch.
        :param going_in: All binaries added by any item in the batch.
        """
        sat_in_testing = self._inst_tester.any_of_these_are_in_the_suite
        universe = self._universe
        for depgroup in chain.from_iterable(universe.dependencies_of(a) for a in adds):
            # Check if this item should migrate before others
            # (e.g. because they depend on a new [version of a]
            # binary provided by this item).
            rigid = depgroup - going_out
            if sat_in_testing(rigid):
                # (partly) satisfied by testing, assume it is okay
                continue
            self._compute_order_for_dependency(key, depgroup, ptable, order, going_in)

    def _compute_group_order(
        self,
        groups: Iterable[
            tuple[
                "MigrationItem",
                Iterable["BinaryPackageId"],
                Iterable["BinaryPackageId"],
            ]
        ],
        key2item: dict[str, "MigrationItem"],
    ) -> dict[str, OrderNode]:
        """Build the partial-order graph for a batch of migration groups.

        NOTE(review): ``groups`` (and its adds/rms members) is iterated
        more than once here, so it must not be a one-shot generator —
        confirm with callers.

        :param groups: Same shape as in :meth:`solve_groups`.
        :param key2item: Mutated in place: filled with str(item) -> item.
        :return: The ordering graph, one OrderNode per item key.
        """
        universe = self._universe
        ptable = {}
        order: dict[str, OrderNode] = {}
        going_out: set["BinaryPackageId"] = set()
        going_in: set["BinaryPackageId"] = set()
        logger = self.logger
        debug_solver = logger.isEnabledFor(logging.DEBUG)

        # Build the tables
        for item, adds, rms in groups:
            key = str(item)
            key2item[key] = item
            order[key] = OrderNode()
            going_in.update(adds)
            going_out.update(rms)
            for x in chain(adds, rms):
                ptable[x] = key

        if debug_solver:  # pragma: no cover
            self._dump_groups(groups)

        # This large loop will add ordering constraints on each "item"
        # that migrates based on various rules.
        for item, adds, rms in groups:
            key = str(item)
            oldcons = set(
                chain.from_iterable(universe.negative_dependencies_of(r) for r in rms)
            )
            newcons = set(
                chain.from_iterable(universe.negative_dependencies_of(a) for a in adds)
            )
            # Only consider conflicts that actually go away with this item.
            oldcons -= newcons
            # Some of the old binaries have "conflicts" that will
            # be removed.
            for o in ifilter_only(ptable, oldcons):
                # "key" removes a conflict with one of
                # "other"'s binaries, so it is probably a good
                # idea to migrate "key" before "other"
                apply_order(key, ptable[o], order, logger, "Conflict", invert=True)

            self._compute_group_order_rms(rms, order, key, ptable, going_out)
            self._compute_group_order_adds(
                adds, order, key, ptable, going_out, going_in
            )

        return order

    def _merge_items_into_components(
        self, comps: list[tuple[str, ...]], order: dict[str, OrderNode]
    ) -> dict[str, tuple[str, ...]]:
        """Collapse each SCC into a single representative node.

        The first item of a component becomes its ID; all edges of the
        other members are folded into that node and ``order`` is rewritten
        in terms of SCC IDs (``order`` is mutated in place).

        :param comps: Components as returned by :func:`compute_scc`.
        :param order: The ordering graph; mutated in place.
        :return: Maps an SCC ID to the tuple of all item keys in that SCC.
        """
        merged = {}
        scc: dict[str, tuple[str, ...]] = {}
        debug_solver = self.logger.isEnabledFor(logging.DEBUG)
        for com in comps:
            scc_id = com[0]
            scc[scc_id] = com
            merged[scc_id] = scc_id
            if len(com) < 2:
                # Trivial case
                continue
            so_before = order[scc_id].before
            so_after = order[scc_id].after
            for n in com:
                if n == scc_id:
                    continue
                # Fold n's edges into the representative and drop n.
                so_before.update(order[n].before)
                so_after.update(order[n].after)
                merged[n] = scc_id
                del order[n]
            if debug_solver:  # pragma: no cover
                self.logger.debug("SCC: %s -- %s", scc_id, str(sorted(com)))

        # Remap all remaining edges to SCC IDs.
        for com in comps:
            node = com[0]
            nbefore = {merged[b] for b in order[node].before}
            nafter = {merged[b] for b in order[node].after}

            # Drop self-relations (usually caused by the merging)
            nbefore.discard(node)
            nafter.discard(node)
            order[node].before = nbefore
            order[node].after = nafter

        # Re-establish the symmetry of every edge on both endpoints.
        for com in comps:
            scc_id = com[0]

            for other_scc_id in order[scc_id].before:
                order[other_scc_id].after.add(scc_id)
            for other_scc_id in order[scc_id].after:
                order[other_scc_id].before.add(scc_id)

        return scc

    def solve_groups(
        self,
        groups: Iterable[
            tuple[
                "MigrationItem",
                Iterable["BinaryPackageId"],
                Iterable["BinaryPackageId"],
            ]
        ],
    ) -> list[list["MigrationItem"]]:
        """
        Compute a migration order for the given groups.

        :param groups: iterable of tuples. The first element is a
        MigrationItem, the second element is a collection of BinaryPackageId
        reflecting requested updates and the third element is a collection of
        BinaryPackageId reflecting requested removals.
        :return: List of rounds; each round is a list of MigrationItems
            that (we believe) must migrate together.
        """
        result: list[list["MigrationItem"]] = []
        emitted: set[str] = set()
        queue: deque[str] = deque()
        key2item: dict[str, "MigrationItem"] = {}
        debug_solver = self.logger.isEnabledFor(logging.DEBUG)

        order = self._compute_group_order(groups, key2item)

        # === MILESTONE: Partial-order constraints computed ===

        # At this point, we have computed all the partial-order
        # constraints needed. Some of these may have created strongly
        # connected components (SCC) [of size 2 or greater], which
        # represents a group of items that (we believe) must migrate
        # together.
        #
        # Each one of those components will become an "easy" hint.

        comps = compute_scc(order)
        # Now that we got the SCCs (in comps), we select one item from
        # each SCC to represent the group and become an ID for that
        # SCC.
        # * scc_keys[scc_id] => All the item-keys in that SCC
        #
        # We also "repair" the ordering, so we know in which order the
        # hints should be emitted.
        scc_keys = self._merge_items_into_components(comps, order)

        if debug_solver:  # pragma: no cover
            self.logger.debug("-- PARTIAL ORDER --")

        initial_round = []
        for com in sorted(order):
            if debug_solver and order[com].before:  # pragma: no cover
                self.logger.debug("N: %s <= %s", com, str(sorted(order[com].before)))
            if not order[com].after:
                # This component can be scheduled immediately, add it
                # to the queue
                initial_round.append(com)
            elif debug_solver:  # pragma: no cover
                self.logger.debug("N: %s >= %s", com, str(sorted(order[com].after)))

        queue.extend(sorted(initial_round, key=len))
        del initial_round

        if debug_solver:  # pragma: no cover
            self.logger.debug("-- END PARTIAL ORDER --")
            self.logger.debug("-- LINEARIZED ORDER --")

        # Drain the queue; components are re-queued until all of their
        # predecessors (the "after" set) have been emitted.
        for cur in iter_except(queue.popleft, IndexError):
            if order[cur].after <= emitted and cur not in emitted:
                # This item is ready to be emitted right now
                if debug_solver:  # pragma: no cover
                    self.logger.debug("%s -- %s", cur, sorted(scc_keys[cur]))
                emitted.add(cur)
                result.append([key2item[x] for x in scc_keys[cur]])
                if order[cur].before:
                    # There are components that come after this one.
                    # Add it to queue:
                    # - if it is ready, it will be emitted.
                    # - else, it will be dropped and re-added later.
                    queue.extend(sorted(order[cur].before - emitted, key=len))

        if debug_solver:  # pragma: no cover
            self.logger.debug("-- END LINEARIZED ORDER --")

        return result

    def _dump_groups(
        self,
        groups: Iterable[
            tuple[
                "MigrationItem",
                Iterable["BinaryPackageId"],
                Iterable["BinaryPackageId"],
            ]
        ],
    ) -> None:  # pragma: no cover
        """Log every group's additions and removals (debug aid only)."""
        self.logger.debug("=== Groups ===")
        for item, adds, rms in groups:
            self.logger.debug("%s => A: %s, R: %s", str(item), str(adds), str(rms))
        self.logger.debug("=== END Groups ===")