Coverage for britney2/migration.py: 96% (255 statements)


import apt_pkg
import contextlib
import copy

from britney2.transaction import MigrationTransactionState
from britney2.utils import (
    MigrationConstraintException, compute_reverse_tree, check_installability, clone_nuninst,
    find_smooth_updateable_binaries,
)


def compute_eqv_set(pkg_universe, updates, rms):
    eqv_set = set()
    # If we are removing *and* updating packages, then check for eqv. packages
    if rms and updates:
        eqv_table = {(x.package_name, x.architecture): x for x in rms}

        for new_pkg_id in updates:
            binary, _, parch = new_pkg_id
            key = (binary, parch)
            old_pkg_id = eqv_table.get(key)
            if old_pkg_id is not None:
                if pkg_universe.are_equivalent(new_pkg_id, old_pkg_id):
                    eqv_set.add(key)
    return eqv_set
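
# Illustrative sketch (not part of the original module; package ids are
# written as ("name", "version", "arch") tuples for readability).  A
# removal/addition pair only lands in the eqv. set when name and
# architecture match and the package universe considers the two versions
# equivalent:
#   rms     = {('libfoo1', '1.0-1', 'amd64')}
#   updates = {('libfoo1', '1.0-2', 'amd64')}
#   # If pkg_universe.are_equivalent(new, old) holds for the pair, then
#   # compute_eqv_set(pkg_universe, updates, rms) == {('libfoo1', 'amd64')}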



def is_nuninst_worse(must_be_installable, nuninst_now_arch, nuninst_after_arch, allow_uninst):
    if len(nuninst_after_arch - allow_uninst) > \
            len(nuninst_now_arch - allow_uninst):
        return True

    regression = nuninst_after_arch - nuninst_now_arch
    if not regression.isdisjoint(must_be_installable):
        return True
    return False
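
# Illustrative sketch (not part of the original module): with hypothetical
# per-architecture sets
#   nuninst_now_arch   = {'a', 'b'}
#   nuninst_after_arch = {'a', 'b', 'c'}
#   allow_uninst       = {'c'}
# the counter does not grow once 'c' is filtered out, and as long as 'c'
# is not in must_be_installable the function returns False, i.e. the
# migration is not treated as a regression on this architecture.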



class MigrationManager(object):

    def __init__(self, options, suite_info, all_binaries, pkg_universe,
                 constraints, allow_uninst, migration_item_factory, hints):
        self.options = options
        self.suite_info = suite_info
        self.all_binaries = all_binaries
        self.pkg_universe = pkg_universe
        self.constraints = constraints
        self.allow_uninst = allow_uninst
        self.hints = hints
        self._transactions = []
        self._all_architectures = frozenset(self.options.architectures)
        self._migration_item_factory = migration_item_factory

    @property
    def current_transaction(self):
        return self._transactions[-1] if self._transactions else None

    def compute_groups(self,
                       item,
                       allow_smooth_updates=True,
                       removals=frozenset()):
        """Compute the groups of binaries being migrated by item

        This method will compute the binaries that will be added to,
        replaced in or removed from the target suite and which of
        the removals are smooth updatable.

        Parameters:
        * "item" is a MigrationItem
        * "allow_smooth_updates" is a boolean determining whether smooth
          updates are permitted in this migration.  When set to False,
          the "smoothbins" return value will always be the empty set.
          Any value that would have been there will now be in "rms"
          instead. (default: True)
        * "removals" is a set of binaries that are assumed to be
          removed at the same time as this migration (e.g. in the same
          "easy" hint).  This may affect whether some binaries are
          smooth updated or not. (default: empty set)
          - Binaries must be given as ("package-name", "version",
            "architecture") tuples.

        Returns a tuple (source_name, adds, rms, smoothbins).
        "source_name" is the name of the source package concerned.
        "adds" is a set of binaries that will be updated in or added to
        the target suite by the migration.  "rms" is a set of binaries
        that are not smooth-updatable (or binaries that could be, but
        there is no reason to let them be smooth updated).  "smoothbins"
        is a set of binaries that are to be smooth-updated.

        Each "binary" in "adds", "rms" and "smoothbins" will be a
        tuple of ("package-name", "version", "architecture") and is
        thus suitable for passing on to the InstallabilityTester.

        Unlike migrate_items_to_target_suite, this will not modify
        any data structure.
        """

        # local copies for better performance
        item_package = item.package
        target_suite = self.suite_info.target_suite
        binaries_t = target_suite.binaries

        adds = set()

        # remove all binary packages (if the source already exists)
        if item.architecture == 'source' or not item.is_removal:
            source_name = item_package
            if source_name in target_suite.sources:
                rms, smoothbins = self._compute_removals(item, allow_smooth_updates, removals)
            else:
                rms = set()
                smoothbins = set()

        # single binary removal; used for clearing up after smooth
        # updates but not supported as a manual hint
        else:
            assert item_package in binaries_t[item.architecture]
            pkg_id = binaries_t[item.architecture][item_package].pkg_id
            binary, ver, parch = pkg_id
            if ver != item.version:
                raise MigrationConstraintException(
                    "trying cruft removal item %s, while %s has %s/%s on %s" % (
                        item, target_suite.name,
                        binary, ver, parch))
            source_name = binaries_t[item.architecture][item_package].source
            rms = {pkg_id}
            smoothbins = set()

        # add the new binary packages (if we are not removing)
        if not item.is_removal:
            source_suite = item.suite
            binaries_s = source_suite.binaries
            source_data = source_suite.sources[source_name]
            source_ver_new = source_data.version
            sources_t = target_suite.sources
            if source_name in sources_t:
                source_data_old = sources_t[source_name]
                source_ver_old = source_data_old.version
                if apt_pkg.version_compare(source_ver_old, source_ver_new) > 0:
                    raise MigrationConstraintException("trying src:%s %s, while %s has %s" % (
                        source_name, source_ver_new, target_suite.name, source_ver_old))

            for pkg_id in source_data.binaries:
                binary, ver, parch = pkg_id
                if item.architecture not in ['source', parch]:
                    continue

                if binaries_s[parch][binary].source != source_name:
                    # This binary package has been hijacked by some other source.
                    # So don't add it as part of this update.
                    #
                    # Also, if this isn't a source update, don't remove
                    # the package that's been hijacked if it's present.
                    if item.architecture != 'source':
                        for rm_b, rm_v, rm_p in list(rms):
                            if (rm_b, rm_p) == (binary, parch):
                                rms.remove((rm_b, rm_v, rm_p))
                    continue

                # Don't add the binary if it is cruft; smooth updates will keep it if possible
                if (parch not in self.options.outofsync_arches and
                        source_data.version != binaries_s[parch][binary].source_version):
                    continue

                if binary in binaries_t[parch]:
                    oldver = binaries_t[parch][binary].version
                    if apt_pkg.version_compare(oldver, ver) > 0:
                        raise MigrationConstraintException("trying %s %s from src:%s %s, while %s has %s" % (
                            binary, ver, source_name, source_ver_new, target_suite.name, oldver))

                adds.add(pkg_id)

        return (source_name, adds, rms, smoothbins)

    def _compute_removals(self, item, allow_smooth_updates, removals):
        pkg_universe = self.pkg_universe
        source_suite = item.suite
        target_suite = self.suite_info.target_suite
        binaries_s = source_suite.binaries
        binaries_t = target_suite.binaries
        source_name = item.package
        source_data = target_suite.sources[source_name]

        bins = []
        # remove all the binaries

        # first, build a list of eligible binaries
        for pkg_id in source_data.binaries:
            binary, _, parch = pkg_id
            if item.architecture != 'source' and parch != item.architecture:
                continue

            # Work around #815995
            if item.architecture == 'source' and item.is_removal and binary not in binaries_t[parch]:
                continue

            bin_data = binaries_t[parch][binary]
            # Do not include hijacked binaries nor cruft (cruft is handled separately)
            if bin_data.source != source_name or bin_data.source_version != source_data.version:
                continue
            bins.append(pkg_id)

        if allow_smooth_updates and source_suite.suite_class.is_primary_source:
            smoothbins = find_smooth_updateable_binaries(bins,
                                                         source_suite.sources[source_name],
                                                         pkg_universe,
                                                         target_suite,
                                                         binaries_t,
                                                         binaries_s,
                                                         removals,
                                                         self.options.smooth_updates,
                                                         self.hints)
        else:
            smoothbins = set()

        # remove all the binaries which aren't being smooth updated
        if item.architecture != 'source' and source_suite.suite_class.is_additional_source:
            # Special-case for pu/tpu:
            # if this is a binary migration from *pu, only the arch:any
            # packages will be present.  ideally dak would also populate
            # the arch-indep packages, but as that's not the case we
            # must keep them around; they will not be re-added by the
            # migration so will end up missing from testing
            all_binaries = self.all_binaries
            rms = {pkg_id for pkg_id in bins
                   if pkg_id not in smoothbins and all_binaries[pkg_id].architecture != 'all'}
        else:
            rms = {pkg_id for pkg_id in bins if pkg_id not in smoothbins}

        return rms, smoothbins
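
    # Illustrative note (not part of the original module): the classic
    # smooth-update case is a library transition.  If src:foo 2.0 ships
    # libfoo2 while packages in the target suite still depend on libfoo1
    # built by src:foo 1.0, find_smooth_updateable_binaries() can keep
    # libfoo1 in "smoothbins"; it then lingers in the target suite until
    # its reverse dependencies have been rebuilt, after which it is
    # removed as cruft by a dedicated single-binary removal item.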

    def _apply_item_to_target_suite(self, item, removals=frozenset()):
        """Apply a change to the target suite as requested by `item`

        An optional set of binaries may be passed in "removals".  Binaries
        listed in this set will be assumed to be removed at the same time
        as the "item" migrates.  This may change what binaries will be
        smooth-updated.
        - Binaries in this set must be instances of BinaryPackageId.

        This method applies the changes required by the action `item`,
        tracking them so it will be possible to revert them.

        The method returns a tuple (affected_all, smooth_updates): the
        set of all packages (directly or transitively) affected by the
        change, and the set of binaries being smooth-updated.  The undo
        information needed to roll the change back is recorded in the
        current transaction.
        """

        undo = {'binaries': {}, 'sources': {}, 'virtual': {}}

        affected_all = set()
        updated_binaries = set()

        # local copies for better performance
        source_suite = item.suite
        target_suite = self.suite_info.target_suite
        packages_t = target_suite.binaries
        provides_t = target_suite.provides_table
        pkg_universe = self.pkg_universe
        transaction = self.current_transaction

        source_name, updates, rms, smooth_updates = self.compute_groups(item, removals=removals)
        sources_t = target_suite.sources
        # Handle the source package
        old_source = sources_t.get(source_name)

        # add/update the source package
        if item.is_removal and item.architecture == 'source':
            del sources_t[source_name]
        else:
            # with OUTOFSYNC_ARCHES, the source can be removed before out-of-sync binaries are removed
            if not item.is_removal or source_name in source_suite.sources:
                # always create a copy of the SourcePackage object
                sources_t[source_name] = copy.copy(source_suite.sources[source_name])
                if old_source is not None:
                    # always create a new list of binaries
                    sources_t[source_name].binaries = copy.copy(old_source.binaries)
                else:
                    sources_t[source_name].binaries = set()

        undo['sources'][source_name] = old_source

        eqv_set = compute_eqv_set(pkg_universe, updates, rms)

        # remove all the binaries which aren't being smooth updated
        for rm_pkg_id in rms:
            binary, version, parch = rm_pkg_id
            pkey = (binary, parch)
            binaries_t_a = packages_t[parch]
            provides_t_a = provides_t[parch]

            pkg_data = binaries_t_a[binary]
            # save the old binary for undo
            undo['binaries'][pkey] = rm_pkg_id
            if pkey not in eqv_set:
                # all the reverse dependencies are affected by
                # the change
                affected_all.update(pkg_universe.reverse_dependencies_of(rm_pkg_id))
                affected_all.update(pkg_universe.negative_dependencies_of(rm_pkg_id))

            # remove the provided virtual packages
            for provided_pkg, prov_version, _ in pkg_data.provides:
                key = (provided_pkg, parch)
                if key not in undo['virtual']:
                    undo['virtual'][key] = provides_t_a[provided_pkg].copy()
                provides_t_a[provided_pkg].remove((binary, prov_version))
                if not provides_t_a[provided_pkg]:
                    del provides_t_a[provided_pkg]
            # for source removal, the source is already gone
            if source_name in sources_t:
                sources_t[source_name].binaries.discard(rm_pkg_id)
            # finally, remove the binary package
            del binaries_t_a[binary]
            target_suite.remove_binary(rm_pkg_id)

        # Add/Update binary packages in testing
        if updates:
            packages_s = source_suite.binaries

            for updated_pkg_id in updates:
                binary, new_version, parch = updated_pkg_id
                key = (binary, parch)
                binaries_t_a = packages_t[parch]
                provides_t_a = provides_t[parch]
                equivalent_replacement = key in eqv_set

                # obviously, added/modified packages are affected
                if not equivalent_replacement:
                    affected_all.add(updated_pkg_id)
                # if the binary already exists in testing, it is currently
                # built by another source package.  we therefore remove the
                # version built by the other source package, after marking
                # all of its reverse dependencies as affected
                if binary in binaries_t_a:
                    old_pkg_data = binaries_t_a[binary]
                    old_pkg_id = old_pkg_data.pkg_id
                    # save the old binary package
                    undo['binaries'][key] = old_pkg_id
                    if not equivalent_replacement:
                        # all the reverse dependencies of the old binary
                        # are affected by the change
                        affected_all.update(pkg_universe.reverse_dependencies_of(old_pkg_id))
                    target_suite.remove_binary(old_pkg_id)
                elif transaction and transaction.parent_transaction:
                    # the binary isn't in the target suite, but it may have been at
                    # the start of the current hint and have been removed
                    # by an earlier migration.  if that's the case then we
                    # will have a record of the older instance of the binary
                    # in the undo information.  we can use that to ensure
                    # that the reverse dependencies of the older binary
                    # package are also checked.
                    # reverse dependencies built from this source can be
                    # ignored as their reverse trees are already handled
                    # by this function
                    for (tundo, tpkg) in transaction.parent_transaction.undo_items:
                        if key in tundo['binaries']:
                            tpkg_id = tundo['binaries'][key]
                            affected_all.update(pkg_universe.reverse_dependencies_of(tpkg_id))

                # add/update the binary package from the source suite
                new_pkg_data = packages_s[parch][binary]
                binaries_t_a[binary] = new_pkg_data
                target_suite.add_binary(updated_pkg_id)
                updated_binaries.add(updated_pkg_id)
                # add the binary to the source package
                sources_t[source_name].binaries.add(updated_pkg_id)
                # register new provided packages
                for provided_pkg, prov_version, _ in new_pkg_data.provides:
                    key = (provided_pkg, parch)
                    if key not in undo['virtual']:
                        restore_as = provides_t_a[provided_pkg].copy() if provided_pkg in provides_t_a else None
                        undo['virtual'][key] = restore_as
                    provides_t_a[provided_pkg].add((binary, prov_version))
                if not equivalent_replacement:
                    # the package itself and its negative dependencies
                    # are affected by the change
                    affected_all.add(updated_pkg_id)
                    affected_all.update(pkg_universe.negative_dependencies_of(updated_pkg_id))

        # Also include the transitive rdeps of the packages found so far
        compute_reverse_tree(pkg_universe, affected_all)
        if transaction:
            transaction.add_undo_item(undo, updated_binaries)
        # return the affected packages (direct and transitive)
        return (affected_all, smooth_updates)

    def _apply_multiple_items_to_target_suite(self, items):
        """
        :param items: list of MigrationItems
        """
        is_source_migration = False
        if len(items) == 1:
            item = items[0]
            # apply the changes
            affected_all, smooth_updates = self._apply_item_to_target_suite(item)
            if item.architecture == 'source':
                affected_architectures = self._all_architectures
                is_source_migration = True
            else:
                affected_architectures = {item.architecture}
        else:
            affected_architectures = set()
            removals = set()
            affected_all = set()
            smooth_updates = set()
            for item in items:
                _, _, rms, _ = self.compute_groups(item, allow_smooth_updates=False)
                removals.update(rms)
                affected_architectures.add(item.architecture)

            if 'source' in affected_architectures:
                affected_architectures = self._all_architectures
                is_source_migration = True

            for item in items:
                item_affected_all, item_smooth = self._apply_item_to_target_suite(item,
                                                                                  removals=removals)
                affected_all.update(item_affected_all)
                smooth_updates.update(item_smooth)

        return is_source_migration, affected_architectures, affected_all, smooth_updates

    def migrate_items_to_target_suite(self, items, nuninst_now, stop_on_first_regression=True):
        """Migrate "items" to the target suite and check the result

        Returns a tuple (is_accepted, nuninst_after, arch, new_cruft):
        whether the migration is acceptable, the uninstallability
        counters after the migration, the architecture on which the
        check stopped (when a regression caused a rejection), and the
        set of cruft removal items generated for the smooth-updated
        binaries.
        """
        is_accepted = True
        target_suite = self.suite_info.target_suite
        packages_t = target_suite.binaries

        nobreakall_arches = self.options.nobreakall_arches
        new_arches = self.options.new_arches
        break_arches = self.options.break_arches
        arch = None

        is_source_migration, affected_architectures, affected_all, smooth_updates = \
            self._apply_multiple_items_to_target_suite(items)

        # Copy nuninst_comp - we have to deep clone affected
        # architectures.

        # NB: We do this *after* updating testing as we have to filter out
        # removed binaries.  Otherwise, uninstallable binaries that were
        # removed by the item would still be counted.

        nuninst_after = clone_nuninst(nuninst_now, packages_s=packages_t, architectures=affected_architectures)
        must_be_installable = self.constraints['keep-installable']

        # check the affected packages on all the architectures
        for arch in sorted(affected_architectures):
            check_archall = arch in nobreakall_arches

            check_installability(target_suite, packages_t, arch, affected_all,
                                 check_archall, nuninst_after)

            # if the uninstallability counter is worse than before, break the loop
            if stop_on_first_regression:
                worse = is_nuninst_worse(must_be_installable, nuninst_now[arch], nuninst_after[arch],
                                         self.allow_uninst[arch])

                # ... except for a few special cases
                if worse and ((not is_source_migration and arch not in new_arches) or
                              (arch not in break_arches)):
                    is_accepted = False
                    break

        new_cruft = {self._migration_item_factory.generate_removal_for_cruft_item(x) for x in smooth_updates}

        return (is_accepted, nuninst_after, arch, new_cruft)

    @contextlib.contextmanager
    def start_transaction(self):
        tmts = MigrationTransactionState(self.suite_info, self.all_binaries, self.current_transaction)
        self._transactions.append(tmts)
        try:
            yield tmts
        except Exception:
            if not tmts.is_committed and not tmts.is_rolled_back:
                tmts.rollback()
            raise
        finally:
            self._transactions.pop()
        assert tmts.is_rolled_back or tmts.is_committed
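
# Illustrative usage sketch (not part of the original module; the caller
# setup is hypothetical, and transaction.commit() is assumed from the
# is_committed checks above).  A caller wraps a trial migration in a
# transaction and commits or rolls back depending on the outcome:
#
#   with mm.start_transaction() as transaction:
#       accepted, nuninst_after, arch, new_cruft = \
#           mm.migrate_items_to_target_suite(items, nuninst_now)
#       if accepted:
#           transaction.commit()
#       else:
#           transaction.rollback()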