Coverage for britney2/installability/solver.py: 100%

196 statements  

coverage.py v6.5.0, created at 2024-04-18 20:48 +0000

# -*- coding: utf-8 -*-

# Copyright (C) 2012 Niels Thykier <niels@thykier.net>
# - Includes code by Paul Harrison
# (http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py)

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

import logging
from collections import deque
from itertools import chain

from britney2.utils import (ifilter_only, iter_except)


class OrderNode(object):
    """Node in the ordering graph used to linearize a migration run

    `after` holds the keys this node has to wait for (they are emitted
    first); `before` holds the keys that in turn wait for this node
    (see InstallabilitySolver.solve_groups).
    """

    __slots__ = ['before', 'after']

    def __init__(self):
        self.after = set()
        self.before = set()


def compute_scc(graph):
    """Iterative algorithm for strongly-connected components

    Iterative variant of Tarjan's algorithm for finding strongly-connected
    components.

    :param graph: dict mapping each node to its edges (an OrderNode with
        "before" and "after" sets)
    :return: List of components (each component is a tuple of items)
    """
    result = []
    low = {}
    node_stack = []

    def _cannot_be_a_scc(graph_node):
        if not graph[graph_node].before or not graph[graph_node].after:
            # Short-circuit an obviously isolated component
            result.append((graph_node,))
            # Set the item number so high that no other item might
            # mistakenly assume that it can form a component via
            # this item.
            # (Replaces the "is w on the stack" check for us from
            # the original algorithm)
            low[graph_node] = len(graph) + 1
            return True
        return False

    def _handle_succ(parent, parent_num, successors_remaining):
        while successors_remaining:
            succ = successors_remaining.pop()
            succ_num = low.get(succ, None)
            if succ_num is not None:
                if succ_num < parent_num:
                    # These two nodes are probably part of the same SCC
                    # (or succ is isolated)
                    low[parent] = parent_num = succ_num
                continue
            # It cannot be part of an SCC if it has no dependencies
            # or no reverse dependencies.
            if _cannot_be_a_scc(succ):
                continue
            succ_num = len(low)
            low[succ] = succ_num
            work_stack.append((succ, len(node_stack), succ_num, graph[succ].before))
            node_stack.append(succ)
            # "Recurse" into the child node first
            return True
        return False

    # graph is a dict, shouldn't need sorting
    for n in graph:
        if n in low:
            continue
        # It cannot be part of an SCC if it has no dependencies
        # or no reverse dependencies.
        if _cannot_be_a_scc(n):
            continue

        root_num = len(low)
        low[n] = root_num
        # DFS work-stack needed to avoid call recursion. It (more or less)
        # replaces the variables on the call stack in Tarjan's algorithm.
        work_stack = [(n, len(node_stack), root_num, graph[n].before)]
        node_stack.append(n)
        while work_stack:
            node, stack_idx, orig_node_num, successors = work_stack[-1]
            if successors and _handle_succ(node, low[node], sorted(successors)):
                # _handle_succ has pushed a new node onto work_stack
                # and we need to "restart" the loop to handle that first
                continue

            # This node is done; remove it from the work stack
            work_stack.pop()

            # This node is out of successors. Push up the "low" value
            # (exception: the root node has no parent)
            node_num = low[node]
            if work_stack:
                parent = work_stack[-1][0]
                parent_num = low[parent]
                if node_num <= parent_num:
                    # This node is part of a component with its parent.
                    # We update the parent's node number and push the
                    # responsibility of building the component onto the
                    # parent.
                    low[parent] = node_num
                    continue
                if node_num != orig_node_num:
                    # The node is part of an SCC with an ancestor (and parent)
                    continue
            # We have found a component
            component = tuple(node_stack[stack_idx:])
            del node_stack[stack_idx:]
            result.append(component)
            # Re-number all items, so no other item might
            # mistakenly assume that it can form a component via
            # one of these items.
            # (Replaces the "is w on the stack" check for us from
            # the original algorithm)
            new_num = len(graph) + 1
            for item in component:
                low[item] = new_num

    assert not node_stack

    return result
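

# --- Illustrative example below is an editor's sketch, not part of the
# original module. It shows compute_scc() on a three-node graph where
# "a" and "b" order each other (forming one SCC) and "c" is isolated.
def _demo_compute_scc():  # pragma: no cover
    graph = {x: OrderNode() for x in ('a', 'b', 'c')}
    # "a" and "b" are mutually ordered, so they form one component.
    graph['a'].before.add('b')
    graph['a'].after.add('b')
    graph['b'].before.add('a')
    graph['b'].after.add('a')
    # "c" has no edges and is short-circuited as an isolated component.
    components = compute_scc(graph)
    # With dict insertion order (Python 3.7+) and sorted successors this
    # yields [('a', 'b'), ('c',)].
    assert components == [('a', 'b'), ('c',)]
    return components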


def apply_order(key, other, order, logger, order_cause, invert=False, order_sub_cause=''):
    if other == key:
        # "Self-relation" => ignore
        return
    order_key = order[key]
    if invert:
        order[other].after.add(key)
        order_set = order_key.before
    else:
        order[other].before.add(key)
        order_set = order_key.after
    if logger.isEnabledFor(logging.DEBUG) and other not in order_set:  # pragma: no cover
        if order_sub_cause:
            order_sub_cause = ' (%s)' % order_sub_cause
        logger.debug("%s induced order%s: %s before %s", order_cause, order_sub_cause, key, other)
    # Defer adding until the end to ensure we only log the first time a dependency order is introduced.
    order_set.add(other)
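

# --- Illustrative example below is an editor's sketch, not part of the
# original module. With invert=False, apply_order() records `other` in
# order[key].after and `key` in order[other].before; the item names are
# hypothetical.
def _demo_apply_order():  # pragma: no cover
    order = {'src-a': OrderNode(), 'src-b': OrderNode()}
    logger = logging.getLogger('demo')
    apply_order('src-a', 'src-b', order, logger, 'Dependency')
    assert 'src-b' in order['src-a'].after
    assert 'src-a' in order['src-b'].before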


class InstallabilitySolver(object):

    def __init__(self, universe, inst_tester):
        """Create a new installability solver

        universe is a BinaryPackageUniverse.
        """
        self._universe = universe
        self._inst_tester = inst_tester
        logger_name = ".".join((self.__class__.__module__, self.__class__.__name__))
        self.logger = logging.getLogger(logger_name)

    def _compute_group_order_rms(self, rms, order, key, ptable, going_out):
        sat_in_testing = self._inst_tester.any_of_these_are_in_the_suite
        universe = self._universe
        logger = self.logger
        for rdep in chain.from_iterable(universe.reverse_dependencies_of(r) for r in rms):
            # The binaries have reverse dependencies in testing;
            # check if we can/should migrate them first.
            for depgroup in universe.dependencies_of(rdep):
                rigid = depgroup - going_out
                if sat_in_testing(rigid):
                    # (partly) satisfied by testing, assume it is okay
                    continue
                if rdep in ptable:
                    apply_order(key, ptable[rdep], order, logger, 'Removal')

    def _compute_order_for_dependency(self, key, depgroup, ptable, order, going_in):
        # We have three cases:
        # - "swap" (replace existing binary with a newer version)
        # - "addition" (add new binary without removing any)
        # - "removal" (remove binary without providing a new one)
        #
        # The problem is that only the latter two require
        # an ordering. A "swap" (in itself) should not
        # affect us.
        other_adds = set()
        other_rms = set()
        logger = self.logger
        for d in ifilter_only(ptable, depgroup):
            other = ptable[d]
            if d in going_in:
                # "other" provides something "key" needs,
                # schedule accordingly.
                other_adds.add(other)
            else:
                # "other" removes something "key" needs,
                # schedule accordingly.
                other_rms.add(other)

        for other in other_adds - other_rms:
            apply_order(key, other, order, logger, 'Dependency', order_sub_cause='add')
        for other in other_rms - other_adds:
            apply_order(key, other, order, logger, 'Dependency', order_sub_cause='remove', invert=True)
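
    # --- Editor's note (not part of the original module): a "swap" cancels
    # out above. If another item replaces foo/1.0 with foo/2.0 and "key"
    # can depend on either, both binaries map to the same item in ptable,
    # so that item lands in other_adds as well as other_rms and the two set
    # differences drop it -- no order is recorded. Pure additions or pure
    # removals survive the differences and induce an ordering. (foo/* are
    # hypothetical package ids.)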

    def _compute_group_order_adds(self, adds, order, key, ptable, going_out, going_in):
        sat_in_testing = self._inst_tester.any_of_these_are_in_the_suite
        universe = self._universe
        for depgroup in chain.from_iterable(universe.dependencies_of(a) for a in adds):
            # Check if this item should migrate before others
            # (e.g. because they depend on a new [version of a]
            # binary provided by this item).
            rigid = depgroup - going_out
            if sat_in_testing(rigid):
                # (partly) satisfied by testing, assume it is okay
                continue
            self._compute_order_for_dependency(key, depgroup, ptable, order, going_in)

    def _compute_group_order(self, groups, key2item):
        universe = self._universe
        ptable = {}
        order = {}
        going_out = set()
        going_in = set()
        logger = self.logger
        debug_solver = logger.isEnabledFor(logging.DEBUG)

        # Build the tables
        for (item, adds, rms) in groups:
            key = str(item)
            key2item[key] = item
            order[key] = OrderNode()
            going_in.update(adds)
            going_out.update(rms)
            for x in chain(adds, rms):
                ptable[x] = key

        if debug_solver:  # pragma: no cover
            self._dump_groups(groups)

        # This large loop will add ordering constraints on each "item"
        # that migrates, based on various rules.
        for (item, adds, rms) in groups:
            key = str(item)
            oldcons = set(chain.from_iterable(universe.negative_dependencies_of(r) for r in rms))
            newcons = set(chain.from_iterable(universe.negative_dependencies_of(a) for a in adds))
            oldcons -= newcons
            # Some of the old binaries have "conflicts" that will
            # be removed.
            for o in ifilter_only(ptable, oldcons):
                # "key" removes a conflict with one of
                # "other"'s binaries, so it is probably a good
                # idea to migrate "key" before "other"
                apply_order(key, ptable[o], order, logger, 'Conflict', invert=True)

            self._compute_group_order_rms(rms, order, key, ptable, going_out)
            self._compute_group_order_adds(adds, order, key, ptable, going_out, going_in)

        return order

    def _merge_items_into_components(self, comps, order):
        merged = {}
        scc = {}
        debug_solver = self.logger.isEnabledFor(logging.DEBUG)
        for com in comps:
            scc_id = com[0]
            scc[scc_id] = com
            merged[scc_id] = scc_id
            if len(com) < 2:
                # Trivial case
                continue
            so_before = order[scc_id].before
            so_after = order[scc_id].after
            for n in com:
                if n == scc_id:
                    continue
                so_before.update(order[n].before)
                so_after.update(order[n].after)
                merged[n] = scc_id
                del order[n]
            if debug_solver:  # pragma: no cover
                self.logger.debug("SCC: %s -- %s", scc_id, str(sorted(com)))

        for com in comps:
            node = com[0]
            nbefore = set(merged[b] for b in order[node].before)
            nafter = set(merged[b] for b in order[node].after)

            # Drop self-relations (usually caused by the merging)
            nbefore.discard(node)
            nafter.discard(node)
            order[node].before = nbefore
            order[node].after = nafter

        for com in comps:
            scc_id = com[0]

            for other_scc_id in order[scc_id].before:
                order[other_scc_id].after.add(scc_id)
            for other_scc_id in order[scc_id].after:
                order[other_scc_id].before.add(scc_id)

        return scc
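
    # --- Editor's note (not part of the original module): a worked example
    # of the merge above. Given comps == [('a', 'b'), ('c',)] with
    # order['b'].before == {'c'} (and, symmetrically, 'b' in
    # order['c'].after), the first loop collapses 'b' into the
    # representative 'a', leaving order['a'].before == {'c'} and
    # order['c'].after == {'a'} after the rewrite and symmetry passes;
    # the returned mapping is {'a': ('a', 'b'), 'c': ('c',)}.
    # The keys are hypothetical.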

    def solve_groups(self, groups):
        '''
        :param groups: iterable of tuples. The first element is a
        MigrationItem, the second element is a set of BinaryPackageId
        reflecting requested updates and the third element is a set of
        BinaryPackageId reflecting requested removals.
        '''
        result = []
        emitted = set()
        queue = deque()
        key2item = {}
        debug_solver = self.logger.isEnabledFor(logging.DEBUG)

        order = self._compute_group_order(groups, key2item)

        # === MILESTONE: Partial-order constraints computed ===

        # At this point, we have computed all the partial-order
        # constraints needed. Some of these may have created strongly
        # connected components (SCCs) [of size 2 or greater], each of
        # which represents a group of items that (we believe) must
        # migrate together.
        #
        # Each one of those components will become an "easy" hint.

        comps = compute_scc(order)
        # Now that we have the SCCs (in comps), we select one item from
        # each SCC to represent the group and become an ID for that
        # SCC.
        # * scc_keys[scc_id] => all the item keys in that SCC
        #
        # We also "repair" the ordering, so we know in which order the
        # hints should be emitted.
        scc_keys = self._merge_items_into_components(comps, order)

        if debug_solver:  # pragma: no cover
            self.logger.debug("-- PARTIAL ORDER --")

        initial_round = []
        for com in sorted(order):
            if debug_solver and order[com].before:  # pragma: no cover
                self.logger.debug("N: %s <= %s", com, str(sorted(order[com].before)))
            if not order[com].after:
                # This component can be scheduled immediately; add it
                # to the queue
                initial_round.append(com)
            elif debug_solver:  # pragma: no cover
                self.logger.debug("N: %s >= %s", com, str(sorted(order[com].after)))

        queue.extend(sorted(initial_round, key=len))
        del initial_round

        if debug_solver:  # pragma: no cover
            self.logger.debug("-- END PARTIAL ORDER --")
            self.logger.debug("-- LINEARIZED ORDER --")

        for cur in iter_except(queue.popleft, IndexError):
            if order[cur].after <= emitted and cur not in emitted:
                # This item is ready to be emitted right now
                if debug_solver:  # pragma: no cover
                    self.logger.debug("%s -- %s", cur, sorted(scc_keys[cur]))
                emitted.add(cur)
                result.append([key2item[x] for x in scc_keys[cur]])
                if order[cur].before:
                    # There are components that come after this one.
                    # Add them to the queue:
                    # - if one is ready, it will be emitted.
                    # - else, it will be dropped and re-added later.
                    queue.extend(sorted(order[cur].before - emitted, key=len))

        if debug_solver:  # pragma: no cover
            self.logger.debug("-- END LINEARIZED ORDER --")

        return result

    def _dump_groups(self, groups):  # pragma: no cover
        self.logger.debug("=== Groups ===")
        for (item, adds, rms) in groups:
            self.logger.debug("%s => A: %s, R: %s", str(item), str(adds), str(rms))
        self.logger.debug("=== END Groups ===")
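

# --- Illustrative example below is an editor's sketch, not part of the
# original module. It drives InstallabilitySolver.solve_groups() with stub
# objects that model only the methods the solver calls; all names and
# package ids here are hypothetical.
def _demo_solve_groups():  # pragma: no cover

    class _StubUniverse(object):
        # Only the three lookups used by the solver are modelled; an empty
        # answer means "no (reverse/negative) dependencies".
        def dependencies_of(self, pkg_id):
            return []

        def reverse_dependencies_of(self, pkg_id):
            return []

        def negative_dependencies_of(self, pkg_id):
            return []

    class _StubInstTester(object):
        def any_of_these_are_in_the_suite(self, pkg_ids):
            return True

    solver = InstallabilitySolver(_StubUniverse(), _StubInstTester())
    # Two unrelated items: no ordering constraints arise, so each becomes
    # its own single-item component and they are emitted independently.
    groups = [('item-a', {'a/1.0'}, set()), ('item-b', {'b/1.0'}, set())]
    assert solver.solve_groups(groups) == [['item-a'], ['item-b']]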