3
# Copyright (c) 2004-2008 Canonical
5
# Author: Michael Vogt <michael.vogt@ubuntu.com>
7
# This program is free software; you can redistribute it and/or
8
# modify it under the terms of the GNU General Public License as
9
# published by the Free Software Foundation; either version 2 of the
10
# License, or (at your option) any later version.
12
# This program is distributed in the hope that it will be useful,
13
# but WITHOUT ANY WARRANTY; without even the implied warranty of
14
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15
# GNU General Public License for more details.
17
# You should have received a copy of the GNU General Public License
18
# along with this program; if not, write to the Free Software
19
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22
from __future__ import absolute_import, print_function
25
warnings.filterwarnings("ignore", "apt API not stable yet", FutureWarning)
37
import ConfigParser as configparser
38
from subprocess import Popen, PIPE
40
from .DistUpgradeGettext import gettext as _
41
from .DistUpgradeGettext import ngettext
43
from .utils import inside_chroot, estimate_kernel_size_in_boot
45
class CacheException(Exception):
47
class CacheExceptionLockingFailed(CacheException):
49
class CacheExceptionDpkgInterrupted(CacheException):
52
# the initrd/vmlinuz/abi space required in /boot for each kernel
53
# we estimate based on the current kernel size and add a safety marging
54
def _set_kernel_initrd_size():
55
size = estimate_kernel_size_in_boot()
57
logging.warn("estimate_kernel_size_in_boot() returned '0'?")
59
# add small safety buffer
62
KERNEL_INITRD_SIZE = _set_kernel_initrd_size()
64
class FreeSpaceRequired(object):
65
""" FreeSpaceRequired object:
68
- the total size required (size_total)
69
- the dir that requires the space (dir)
70
- the additional space that is needed (size_needed)
72
def __init__(self, size_total, dir, size_needed):
73
self.size_total = size_total
75
self.size_needed = size_needed
77
return "FreeSpaceRequired Object: Dir: %s size_total: %s size_needed: %s" % (self.dir, self.size_total, self.size_needed)
80
class NotEnoughFreeSpaceError(CacheException):
82
Exception if there is not enough free space for this operation
85
def __init__(self, free_space_required_list):
    """Remember the FreeSpaceRequired objects that triggered this error."""
    self.free_space_required_list = free_space_required_list
88
class MyCache(apt.Cache):
93
def __init__(self, config, view, quirks, progress=None, lock=True):
94
apt.Cache.__init__(self, progress)
100
self.partialUpgrade = False
102
self.metapkgs = self.config.getlist("Distro","MetaPkgs")
107
apt_pkg.pkgsystem_lock()
108
self.lock_lists_dir()
110
except SystemError as e:
111
# checking for this is ok, its not translatable
112
if "dpkg --configure -a" in str(e):
113
raise CacheExceptionDpkgInterrupted(e)
114
raise CacheExceptionLockingFailed(e)
115
# a list of regexp that are not allowed to be removed
116
self.removal_blacklist = config.getListFromFile("Distro","RemovalBlacklistFile")
117
self.uname = Popen(["uname","-r"], stdout=PIPE,
118
universal_newlines=True).communicate()[0].strip()
120
# from hardy on we use recommends by default, so for the
121
# transition to the new dist we need to enable them now
122
if (config.get("Sources","From") == "hardy" and
123
not "RELEASE_UPGRADE_NO_RECOMMENDS" in os.environ):
124
apt_pkg.config.set("APT::Install-Recommends","true")
126
def _apply_dselect_upgrade(self):
127
""" honor the dselect install state """
131
if pkg._pkg.selected_state == apt_pkg.SELSTATE_INSTALL:
132
# upgrade() will take care of this
133
pkg.mark_install(auto_inst=False, auto_fix=False)
136
def req_reinstall_pkgs(self):
137
" return the packages not downloadable packages in reqreinst state "
140
if ((not pkg.candidate or not pkg.candidate.downloadable)
142
(pkg._pkg.inst_state == self.ReInstReq or
143
pkg._pkg.inst_state == self.HoldReInstReq)):
144
reqreinst.add(pkg.name)
147
def fix_req_reinst(self, view):
148
" check for reqreinst state and offer to fix it "
149
reqreinst = self.req_reinstall_pkgs
150
if len(reqreinst) > 0:
151
header = ngettext("Remove package in bad state",
152
"Remove packages in bad state",
154
summary = ngettext("The package '%s' is in an inconsistent "
155
"state and needs to be reinstalled, but "
156
"no archive can be found for it. "
157
"Do you want to remove this package "
159
"The packages '%s' are in an inconsistent "
160
"state and need to be reinstalled, but "
161
"no archives can be found for them. Do you "
162
"want to remove these packages now to "
164
len(reqreinst)) % ", ".join(reqreinst)
165
if view.askYesNoQuestion(header, summary):
167
cmd = ["dpkg","--remove","--force-remove-reinstreq"] + list(reqreinst)
168
view.getTerminal().call(cmd)
174
def _initAptLog(self):
175
" init logging, create log file"
176
logdir = self.config.getWithDefault("Files","LogDir",
177
"/var/log/dist-upgrade")
178
if not os.path.exists(logdir):
180
apt_pkg.config.set("Dir::Log",logdir)
181
apt_pkg.config.set("Dir::Log::Terminal","apt-term.log")
182
self.logfd = os.open(os.path.join(logdir,"apt.log"),
183
os.O_RDWR|os.O_CREAT|os.O_APPEND, 0o644)
184
now = datetime.datetime.now()
185
header = "Log time: %s\n" % now
186
os.write(self.logfd, header.encode("utf-8"))
188
# turn on debugging in the cache
189
apt_pkg.config.set("Debug::pkgProblemResolver","true")
190
apt_pkg.config.set("Debug::pkgDepCache::AutoInstall","true")
191
def _startAptResolverLog(self):
    """Redirect file descriptors 1 and 2 into the apt log file.

    The previous stdout/stderr descriptors are saved in
    self.old_stdout/self.old_stderr so _stopAptResolverLog can
    restore them later.
    """
    # close descriptors saved by an earlier call so we do not leak them
    if hasattr(self, "old_stdout"):
        os.close(self.old_stdout)
        os.close(self.old_stderr)
    # keep copies of the current stdout/stderr ...
    self.old_stdout, self.old_stderr = os.dup(1), os.dup(2)
    # ... then point both at the apt log
    for fd in (1, 2):
        os.dup2(self.logfd, fd)
199
def _stopAptResolverLog(self):
202
os.dup2(self.old_stdout, 1)
203
os.dup2(self.old_stderr, 2)
204
# use this decorator instead of the _start/_stop stuff directly
205
# FIXME: this should probably be a decorator class where all
206
# logging is moved into?
207
def withResolverLog(f):
208
" decorator to ensure that the apt output is logged "
209
def wrapper(*args, **kwargs):
210
args[0]._startAptResolverLog()
211
res = f(*args, **kwargs)
212
args[0]._stopAptResolverLog()
218
def required_download(self):
    """Return how many bytes still need to be downloaded for the
    currently marked package changes.
    """
    manager = apt_pkg.PackageManager(self._depcache)
    acquire = apt_pkg.Acquire()
    manager.get_archives(acquire, self._list, self._records)
    return acquire.fetch_needed
225
def additional_required_space(self):
    """Extra filesystem space (in bytes) the planned changes will use."""
    return self._depcache.usr_size
230
""" is the cache broken """
231
return self._depcache.broken_count > 0
234
def lock_lists_dir(self):
    """Take the lock protecting the apt lists directory.

    Raises CacheExceptionLockingFailed if the lock can not be acquired.
    """
    lock_path = apt_pkg.config.find_dir("Dir::State::Lists") + "lock"
    self._listsLock = apt_pkg.get_lock(lock_path)
    if self._listsLock < 0:
        raise CacheExceptionLockingFailed("Can not lock '%s' " % lock_path)
240
def unlock_lists_dir(self):
241
if self._listsLock > 0:
242
os.close(self._listsLock)
244
def update(self, fprogress=None):
246
our own update implementation is required because we keep the lists
249
self.unlock_lists_dir()
250
res = apt.Cache.update(self, fprogress)
251
self.lock_lists_dir()
252
if fprogress and fprogress.release_file_download_error:
253
# FIXME: not ideal error message, but we just reuse a
254
# existing one here to avoid a new string
255
raise IOError(_("The server may be overloaded"))
257
raise IOError("apt.cache.update() returned False, but did not raise exception?!?")
259
def commit(self, fprogress, iprogress):
260
logging.info("cache.commit()")
263
apt.Cache.commit(self, fprogress, iprogress)
265
def release_lock(self, pkgSystemOnly=True):
268
apt_pkg.pkgsystem_unlock()
270
except SystemError as e:
271
logging.debug("failed to SystemUnLock() (%s) " % e)
273
def get_lock(self, pkgSystemOnly=True):
276
apt_pkg.pkgsystem_lock()
278
except SystemError as e:
279
logging.debug("failed to SystemLock() (%s) " % e)
281
def downloadable(self, pkg, useCandidate=True):
282
" check if the given pkg can be downloaded "
284
ver = self._depcache.get_candidate_ver(pkg._pkg)
286
ver = pkg._pkg.current_ver
288
logging.warning("no version information for '%s' (useCandidate=%s)" % (pkg.name, useCandidate))
290
return ver.downloadable
292
def pkg_auto_removable(self, pkg):
    """Return True if pkg is installed but no longer needed (garbage)."""
    installed = pkg.is_installed
    return installed and self._depcache.is_garbage(pkg._pkg)
297
def fix_broken(self):
    """Try to repair broken dependencies on the system.

    May raise SystemError when apt can not resolve them.
    """
    return self._depcache.fix_broken()
302
def create_snapshot(self):
303
""" create a snapshot of the current changes """
306
for pkg in self.get_changes():
307
if pkg.marked_install or pkg.marked_upgrade:
308
self.to_install.append(pkg.name)
309
if pkg.marked_delete:
310
self.to_remove.append(pkg.name)
313
self._depcache.init()
315
def restore_snapshot(self):
316
""" restore a snapshot """
317
actiongroup = apt_pkg.ActionGroup(self._depcache)
318
# just make pyflakes shut up, later we need to use
319
# with self.actiongroup():
322
for name in self.to_remove:
325
for name in self.to_install:
327
pkg.mark_install(auto_fix=False, auto_inst=False)
329
def need_server_mode(self):
331
This checks if we run on a desktop or a server install.
333
A server install has more freedoms, for a desktop install
334
we force a desktop meta package to be install on the upgrade.
336
We look for a installed desktop meta pkg and for key
337
dependencies, if none of those are installed we assume
340
#logging.debug("need_server_mode() run")
341
# check for the MetaPkgs (e.g. ubuntu-desktop)
342
metapkgs = self.config.getlist("Distro","MetaPkgs")
344
# if it is installed we are done
345
if key in self and self[key].is_installed:
346
logging.debug("need_server_mode(): run in 'desktop' mode, (because of pkg '%s')" % key)
348
# if it is not installed, but its key depends are installed
349
# we are done too (we auto-select the package later)
351
for pkg in self.config.getlist(key,"KeyDependencies"):
352
deps_found &= pkg in self and self[pkg].is_installed
354
logging.debug("need_server_mode(): run in 'desktop' mode, (because of key deps for '%s')" % key)
356
logging.debug("need_server_mode(): can not find a desktop meta package or key deps, running in server mode")
359
def sanity_check(self, view):
360
""" check if the cache is ok and if the required metapkgs
365
logging.debug("Have broken pkgs, trying to fix them")
368
view.error(_("Broken packages"),
369
_("Your system contains broken packages "
370
"that couldn't be fixed with this "
372
"Please fix them first using synaptic or "
373
"apt-get before proceeding."))
377
def mark_install(self, pkg, reason=""):
378
logging.debug("Installing '%s' (%s)" % (pkg, reason))
380
self[pkg].mark_install()
381
if not (self[pkg].marked_install or self[pkg].marked_upgrade):
382
logging.error("Installing/upgrading '%s' failed" % pkg)
383
#raise SystemError("Installing '%s' failed" % pkg)
386
def mark_upgrade(self, pkg, reason=""):
387
logging.debug("Upgrading '%s' (%s)" % (pkg, reason))
388
if pkg in self and self[pkg].is_installed:
389
self[pkg].mark_upgrade()
390
if not self[pkg].marked_upgrade:
391
logging.error("Upgrading '%s' failed" % pkg)
394
def mark_remove(self, pkg, reason=""):
395
logging.debug("Removing '%s' (%s)" % (pkg, reason))
397
self[pkg].mark_delete()
398
def mark_purge(self, pkg, reason=""):
399
logging.debug("Purging '%s' (%s)" % (pkg, reason))
401
self._depcache.mark_delete(self[pkg]._pkg,True)
403
def _keep_installed(self, pkgname, reason):
405
and self[pkgname].is_installed
406
and self[pkgname].marked_delete):
407
self.mark_install(pkgname, reason)
409
def keep_installed_rule(self):
410
""" run after the dist-upgrade to ensure that certain
411
packages are kept installed """
412
# first the global list
413
for pkgname in self.config.getlist("Distro","KeepInstalledPkgs"):
414
self._keep_installed(pkgname, "Distro KeepInstalledPkgs rule")
415
# the the per-metapkg rules
416
for key in self.metapkgs:
417
if key in self and (self[key].is_installed or
418
self[key].marked_install):
419
for pkgname in self.config.getlist(key,"KeepInstalledPkgs"):
420
self._keep_installed(pkgname, "%s KeepInstalledPkgs rule" % key)
422
# only enforce section if we have a network. Otherwise we run
423
# into CD upgrade issues for installed language packs etc
424
if self.config.get("Options","withNetwork") == "True":
425
logging.debug("Running KeepInstalledSection rules")
426
# now the KeepInstalledSection code
427
for section in self.config.getlist("Distro","KeepInstalledSection"):
429
if (pkg.candidate and pkg.candidate.downloadable
430
and pkg.marked_delete and pkg.section == section):
431
self._keep_installed(pkg.name, "Distro KeepInstalledSection rule: %s" % section)
432
for key in self.metapkgs:
433
if key in self and (self[key].is_installed or
434
self[key].marked_install):
435
for section in self.config.getlist(key,"KeepInstalledSection"):
437
if (pkg.candidate and pkg.candidate.downloadable
438
and pkg.marked_delete and
439
pkg.section == section):
440
self._keep_installed(pkg.name, "%s KeepInstalledSection rule: %s" % (key, section))
443
def post_upgrade_rule(self):
    """Apply the configured PostUpgrade{Install,Upgrade,Remove,Purge}
    rules after the upgrade has been calculated in the cache, then
    give the quirks handlers a chance to react.
    """
    rule_actions = (("Install", self.mark_install),
                    ("Upgrade", self.mark_upgrade),
                    ("Remove", self.mark_remove),
                    ("Purge", self.mark_purge))
    for (rule, action) in rule_actions:
        # distro-wide rules come first
        for pkg in self.config.getlist("Distro", "PostUpgrade%s" % rule):
            action(pkg, "Distro PostUpgrade%s rule" % rule)
        # then the per-metapackage rules for metapkgs that are
        # installed or selected for install
        for key in self.metapkgs:
            if key in self and (self[key].is_installed or
                                self[key].marked_install):
                for pkg in self.config.getlist(key, "PostUpgrade%s" % rule):
                    action(pkg, "%s PostUpgrade%s rule" % (key, rule))
    # quirks only run for full upgrades, not partial ones
    if not self.partialUpgrade:
        self.quirks.run("PostDistUpgradeCache")
461
def identifyObsoleteKernels(self):
462
# we have a funny policy that we remove security updates
463
# for the kernel from the archive again when a new ABI
464
# version hits the archive. this means that we have
466
# linux-image-2.6.24-15-generic
468
# linux-image-2.6.24-19-generic
471
# This code tries to identify the kernels that can be removed
472
logging.debug("identifyObsoleteKernels()")
473
obsolete_kernels = set()
474
version = self.config.get("KernelRemoval","Version")
475
basenames = self.config.getlist("KernelRemoval","BaseNames")
476
types = self.config.getlist("KernelRemoval","Types")
478
for base in basenames:
479
basename = "%s-%s-" % (base,version)
481
if (pkg.name.startswith(basename) and
482
pkg.name.endswith(type) and
484
if (pkg.name == "%s-%s" % (base,self.uname)):
485
logging.debug("skipping running kernel %s" % pkg.name)
487
logging.debug("removing obsolete kernel '%s'" % pkg.name)
488
obsolete_kernels.add(pkg.name)
489
logging.debug("identifyObsoleteKernels found '%s'" % obsolete_kernels)
490
return obsolete_kernels
492
def checkForNvidia(self):
494
this checks for nvidia hardware and checks what driver is needed
496
logging.debug("nvidiaUpdate()")
497
# if the free drivers would give us a equally hard time, we would
498
# never be able to release
500
from .NvidiaDetector.nvidiadetector import NvidiaDetection
501
except (ImportError, SyntaxError) as e:
502
# SyntaxError is temporary until the port of NvidiaDetector to
503
# Python 3 is in the archive.
504
logging.error("NvidiaDetector can not be imported %s" % e)
507
# get new detection module and use the modalises files
508
# from within the release-upgrader
509
nv = NvidiaDetection(obsolete="./ubuntu-drivers-obsolete.pkgs")
510
#nv = NvidiaDetection()
511
# check if a binary driver is installed now
512
for oldDriver in nv.oldPackages:
513
if oldDriver in self and self[oldDriver].is_installed:
514
self.mark_remove(oldDriver, "old nvidia driver")
517
logging.info("no old nvidia driver installed, installing no new")
519
# check which one to use
520
driver = nv.selectDriver()
521
logging.debug("nv.selectDriver() returned '%s'" % driver)
522
if not driver in self:
523
logging.warning("no '%s' found" % driver)
525
if not (self[driver].marked_install or self[driver].marked_upgrade):
526
self[driver].mark_install()
527
logging.info("installing %s as suggested by NvidiaDetector" % driver)
529
except Exception as e:
530
logging.error("NvidiaDetection returned a error: %s" % e)
534
def getKernelsFromBaseInstaller(self):
535
"""get the list of recommended kernels from base-installer"""
536
p = Popen(["/bin/sh", "./get_kernel_list.sh"],
537
stdout=PIPE, universal_newlines=True)
540
logging.warn("./get_kernel_list.sh returned non-zero exitcode")
542
kernels = p.communicate()[0]
543
kernels = [x.strip() for x in kernels.split("\n")]
544
kernels = [x for x in kernels if len(x) > 0]
545
logging.debug("./get_kernel_list.sh returns: %s" % kernels)
548
def _selectKernelFromBaseInstaller(self):
549
""" use the get_kernel_list.sh script (that uses base-installer)
550
to figure out what kernel is most suitable for the system
552
# check if we have a kernel from that list installed first
553
kernels = self.getKernelsFromBaseInstaller()
554
for kernel in kernels:
555
if not kernel in self:
556
logging.debug("%s not available in cache" % kernel)
558
# this can happen e.g. on cdrom -> cdrom only upgrades
559
# where on hardy we have linux-386 but on the lucid CD
560
# we only have linux-generic
561
if (not self[kernel].candidate or
562
not self[kernel].candidate.downloadable):
563
logging.debug("%s not downloadable" % kernel)
566
if self[kernel].is_installed or self[kernel].marked_install:
567
logging.debug("%s kernel already installed" % kernel)
568
if self[kernel].is_upgradable and not self[kernel].marked_upgrade:
569
self.mark_upgrade(kernel, "Upgrading kernel from base-installer")
571
# if we have not found a kernel yet, use the first one that installs
572
for kernel in kernels:
573
if self.mark_install(kernel,
574
"Selecting new kernel from base-installer"):
575
if self._has_kernel_headers_installed():
576
prefix, sep, postfix = kernel.partition("-")
577
headers = "%s-header-%s" % (prefix, postfix)
580
"Selecting new kernel headers from base-installer")
582
logging.debug("no kernel-headers installed")
585
def _has_kernel_headers_installed(self):
587
if (pkg.name.startswith("linux-headers-") and
592
def checkForKernel(self):
593
""" check for the running kernel and try to ensure that we have
596
logging.debug("Kernel uname: '%s' " % self.uname)
598
(version, build, flavour) = self.uname.split("-")
599
except Exception as e:
600
logging.warning("Can't parse kernel uname: '%s' (self compiled?)" % e)
602
# now check if we have a SMP system
603
dmesg = Popen(["dmesg"],stdout=PIPE).communicate()[0]
604
if b"WARNING: NR_CPUS limit" in dmesg:
605
logging.debug("UP kernel on SMP system!?!")
606
# use base-installer to get the kernel we want (if it exists)
607
if os.path.exists("./get_kernel_list.sh"):
608
self._selectKernelFromBaseInstaller()
610
logging.debug("skipping ./get_kernel_list.sh: not found")
613
def checkPriority(self):
614
# tuple of priorities we require to be installed
615
need = ('required', )
616
# stuff that its ok not to have
617
removeEssentialOk = self.config.getlist("Distro","RemoveEssentialOk")
620
# WORKADOUND bug on the CD/python-apt #253255
621
ver = pkg._pcache._depcache.get_candidate_ver(pkg._pkg)
622
if ver and ver.priority == 0:
623
logging.error("Package %s has no priority set" % pkg.name)
625
if (pkg.candidate and pkg.candidate.downloadable and
626
not (pkg.is_installed or pkg.marked_install) and
627
not pkg.name in removeEssentialOk and
628
# ignore multiarch priority required packages
629
not ":" in pkg.name and
630
pkg.candidate.priority in need):
631
self.mark_install(pkg.name, "priority in required set '%s' but not scheduled for install" % need)
633
# FIXME: make this a decorator (just like the withResolverLog())
634
def updateGUI(self, view, lock):
642
view.pulseProgress(finished=True)
646
def distUpgrade(self, view, serverMode, partialUpgrade):
648
lock = threading.Lock()
650
t = threading.Thread(target=self.updateGUI, args=(self.view, lock,))
653
# mvo: disabled as it casues to many errornous installs
654
#self._apply_dselect_upgrade()
656
# upgrade (and make sure this way that the cache is ok)
659
# check that everything in priority required is installed
662
# see if our KeepInstalled rules are honored
663
self.keep_installed_rule()
665
# check if we got a new kernel (if we are not inside a
668
logging.warn("skipping kernel checks because we run inside a chroot")
670
self.checkForKernel()
672
# check for nvidia stuff
673
self.checkForNvidia()
675
# and if we have some special rules
676
self.post_upgrade_rule()
678
# install missing meta-packages (if not in server upgrade mode)
679
self._keepBaseMetaPkgsInstalled(view)
681
# if this fails, a system error is raised
682
self._installMetaPkgs(view)
684
# see if it all makes sense, if not this function raises
685
self._verifyChanges()
687
except SystemError as e:
688
# this should go into a finally: line, see below for the
689
# rationale why it doesn't
692
# FIXME: change the text to something more useful
693
details = _("An unresolvable problem occurred while "
694
"calculating the upgrade:\n%s\n\n "
695
"This can be caused by:\n"
696
" * Upgrading to a pre-release version of Ubuntu\n"
697
" * Running the current pre-release version of Ubuntu\n"
698
" * Unofficial software packages not provided by Ubuntu\n"
700
# we never have partialUpgrades (including removes) on a stable system
701
# with only ubuntu sources so we do not recommend reporting a bug
703
details += _("This is most likely a transient problem, "
704
"please try again later.")
706
details += _("If none of this applies, then please report this bug using "
707
"the command 'ubuntu-bug update-manager' in a terminal.")
708
# make the error text available again on stdout for the
710
self._stopAptResolverLog()
711
view.error(_("Could not calculate the upgrade"), details)
712
# start the resolver log again because this is run with
713
# the withResolverLog decorator
714
self._startAptResolverLog()
715
logging.error("Dist-upgrade failed: '%s'", e)
717
# would be nice to be able to use finally: here, but we need
718
# to run on python2.4 too
720
# wait for the gui-update thread to exit
724
# check the trust of the packages that are going to change
726
for pkg in self.get_changes():
727
if pkg.marked_delete:
729
# special case because of a bug in pkg.candidate.origins
730
if pkg.marked_downgrade:
731
for ver in pkg._pkg.version_list:
732
# version is lower than installed one
733
if apt_pkg.version_compare(
734
ver.ver_str, pkg.installed.version) < 0:
735
for (verFileIter, index) in ver.file_list:
736
indexfile = pkg._pcache._list.find_index(verFileIter)
737
if indexfile and not indexfile.is_trusted:
738
untrusted.append(pkg.name)
741
origins = pkg.candidate.origins
743
for origin in origins:
745
trusted |= origin.trusted
747
untrusted.append(pkg.name)
748
# check if the user overwrote the unauthenticated warning
750
b = self.config.getboolean("Distro","AllowUnauthenticated")
752
logging.warning("AllowUnauthenticated set!")
754
except configparser.NoOptionError as e:
756
if len(untrusted) > 0:
758
logging.error("Unauthenticated packages found: '%s'" % \
760
# FIXME: maybe ask a question here? instead of failing?
761
self._stopAptResolverLog()
762
view.error(_("Error authenticating some packages"),
763
_("It was not possible to authenticate some "
764
"packages. This may be a transient network problem. "
765
"You may want to try again later. See below for a "
766
"list of unauthenticated packages."),
767
"\n".join(untrusted))
768
# start the resolver log again because this is run with
769
# the withResolverLog decorator
770
self._startAptResolverLog()
774
def _verifyChanges(self):
775
""" this function tests if the current changes don't violate
776
our constrains (blacklisted removals etc)
778
removeEssentialOk = self.config.getlist("Distro","RemoveEssentialOk")
780
for pkg in self.get_changes():
781
if pkg.marked_delete and self._inRemovalBlacklist(pkg.name):
782
logging.debug("The package '%s' is marked for removal but it's in the removal blacklist", pkg.name)
783
raise SystemError(_("The package '%s' is marked for removal but it is in the removal blacklist.") % pkg.name)
784
if pkg.marked_delete and (pkg._pkg.essential == True and
785
not pkg.name in removeEssentialOk):
786
logging.debug("The package '%s' is marked for removal but it's an ESSENTIAL package", pkg.name)
787
raise SystemError(_("The essential package '%s' is marked for removal.") % pkg.name)
788
# check bad-versions blacklist
789
badVersions = self.config.getlist("Distro","BadVersions")
790
for bv in badVersions:
791
(pkgname, ver) = bv.split("_")
792
if (pkgname in self and self[pkgname].candidate and
793
self[pkgname].candidate.version == ver and
794
(self[pkgname].marked_install or
795
self[pkgname].marked_upgrade)):
796
raise SystemError(_("Trying to install blacklisted version '%s'") % bv)
799
def _lookupPkgRecord(self, pkg):
801
helper to make sure that the pkg._records is pointing to the right
802
location - needed because python-apt 0.7.9 dropped the python-apt
803
version but we can not yet use the new version because on upgrade
804
the old version is still installed
806
ver = pkg._pcache._depcache.get_candidate_ver(pkg._pkg)
808
print("No candidate ver: ", pkg.name)
810
if ver.file_list is None:
811
print("No file_list for: %s " % self._pkg.name())
813
f, index = ver.file_list.pop(0)
814
pkg._pcache._records.lookup((f, index))
818
def installedTasks(self):
820
installed_tasks = set()
822
if not self._lookupPkgRecord(pkg):
823
logging.debug("no PkgRecord found for '%s', skipping " % pkg.name)
825
for line in pkg._pcache._records.record.split("\n"):
826
if line.startswith("Task:"):
827
for task in (line[len("Task:"):]).split(","):
829
if task not in tasks:
831
tasks[task].add(pkg.name)
834
for pkgname in tasks[task]:
835
if not self[pkgname].is_installed:
839
installed_tasks.add(task)
840
return installed_tasks
842
def installTasks(self, tasks):
843
logging.debug("running installTasks")
845
if pkg.marked_install or pkg.is_installed:
847
self._lookupPkgRecord(pkg)
848
if not (hasattr(pkg._pcache._records,"record") and pkg._pcache._records.record):
849
logging.warning("can not find Record for '%s'" % pkg.name)
851
for line in pkg._pcache._records.record.split("\n"):
852
if line.startswith("Task:"):
853
for task in (line[len("Task:"):]).split(","):
859
def _keepBaseMetaPkgsInstalled(self, view):
    """Pin every configured base meta package so it stays installed."""
    base_pkgs = self.config.getlist("Distro", "BaseMetaPkgs")
    for pkgname in base_pkgs:
        self._keep_installed(pkgname, "base meta package keep installed rule")
863
def _installMetaPkgs(self, view):
865
def metaPkgInstalled():
867
internal helper that checks if at least one meta-pkg is
868
installed or marked install
873
if pkg.is_installed and pkg.marked_delete:
874
logging.debug("metapkg '%s' installed but marked_delete" % pkg.name)
875
if ((pkg.is_installed and not pkg.marked_delete)
876
or self[key].marked_install):
880
# now check for ubuntu-desktop, kubuntu-desktop, edubuntu-desktop
881
metapkgs = self.config.getlist("Distro","MetaPkgs")
883
# we never go without ubuntu-base
884
for pkg in self.config.getlist("Distro","BaseMetaPkgs"):
885
self[pkg].mark_install()
887
# every meta-pkg that is installed currently, will be marked
888
# install (that result in a upgrade and removes a mark_delete)
892
self[key].is_installed and
893
self[key].is_upgradable):
894
logging.debug("Marking '%s' for upgrade" % key)
895
self[key].mark_upgrade()
896
except SystemError as e:
897
# warn here, but don't fail, its possible that meta-packages
898
# conflict (like ubuntu-desktop vs xubuntu-desktop) LP: #775411
899
logging.warn("Can't mark '%s' for upgrade (%s)" % (key,e))
901
# check if we have a meta-pkg, if not, try to guess which one to pick
902
if not metaPkgInstalled():
903
logging.debug("none of the '%s' meta-pkgs installed" % metapkgs)
906
for pkg in self.config.getlist(key,"KeyDependencies"):
907
deps_found &= pkg in self and self[pkg].is_installed
909
logging.debug("guessing '%s' as missing meta-pkg" % key)
911
self[key].mark_install()
912
except (SystemError, KeyError) as e:
913
logging.error("failed to mark '%s' for install (%s)" % (key,e))
914
view.error(_("Can't install '%s'") % key,
915
_("It was impossible to install a "
916
"required package. Please report "
917
"this as a bug using "
918
"'ubuntu-bug update-manager' in "
921
logging.debug("marked_install: '%s' -> '%s'" % (key, self[key].marked_install))
923
# check if we actually found one
924
if not metaPkgInstalled():
925
# FIXME: provide a list
926
view.error(_("Can't guess meta-package"),
927
_("Your system does not contain a "
928
"ubuntu-desktop, kubuntu-desktop, xubuntu-desktop or "
929
"edubuntu-desktop package and it was not "
930
"possible to detect which version of "
931
"Ubuntu you are running.\n "
932
"Please install one of the packages "
933
"above first using synaptic or "
934
"apt-get before proceeding."))
938
def _inRemovalBlacklist(self, pkgname):
939
for expr in self.removal_blacklist:
940
if re.compile(expr).match(pkgname):
941
logging.debug("blacklist expr '%s' matches '%s'" % (expr, pkgname))
946
def tryMarkObsoleteForRemoval(self, pkgname, remove_candidates, foreign_pkgs):
947
#logging.debug("tryMarkObsoleteForRemoval(): %s" % pkgname)
948
# sanity check, first see if it looks like a running kernel pkg
949
if pkgname.endswith(self.uname):
950
logging.debug("skipping running kernel pkg '%s'" % pkgname)
952
if self._inRemovalBlacklist(pkgname):
953
logging.debug("skipping '%s' (in removalBlacklist)" % pkgname)
955
# ensure we honor KeepInstalledSection here as well
956
for section in self.config.getlist("Distro","KeepInstalledSection"):
957
if pkgname in self and self[pkgname].section == section:
958
logging.debug("skipping '%s' (in KeepInstalledSection)" % pkgname)
960
# if we don't have the package anyway, we are fine (this can
961
# happen when forced_obsoletes are specified in the config file)
962
if pkgname not in self:
963
#logging.debug("package '%s' not in cache" % pkgname)
965
# check if we want to purge
967
purge = self.config.getboolean("Distro","PurgeObsoletes")
968
except configparser.NoOptionError as e:
971
# this is a delete candidate, only actually delete,
972
# if it dosn't remove other packages depending on it
973
# that are not obsolete as well
974
actiongroup = apt_pkg.ActionGroup(self._depcache)
975
# just make pyflakes shut up, later we should use
976
# with self.actiongroup():
978
self.create_snapshot()
980
self[pkgname].mark_delete(purge=purge)
981
self.view.processEvents()
982
#logging.debug("marking '%s' for removal" % pkgname)
983
for pkg in self.get_changes():
984
if (pkg.name not in remove_candidates or
985
pkg.name in foreign_pkgs or
986
self._inRemovalBlacklist(pkg.name)):
987
logging.debug("package '%s' has unwanted removals, skipping" % pkgname)
988
self.restore_snapshot()
990
except (SystemError, KeyError) as e:
991
logging.warning("_tryMarkObsoleteForRemoval failed for '%s' (%s: %s)" % (pkgname, repr(e), e))
992
self.restore_snapshot()
996
def _getObsoletesPkgs(self):
997
" get all package names that are not downloadable "
1000
if pkg.is_installed:
1001
# check if any version is downloadable. we need to check
1002
# for older ones too, because there might be
1003
# cases where e.g. firefox in gutsy-updates is newer
1005
if not self.anyVersionDownloadable(pkg):
1006
obsolete_pkgs.add(pkg.name)
1007
return obsolete_pkgs
1009
def anyVersionDownloadable(self, pkg):
1010
" helper that checks if any of the version of pkg is downloadable "
1011
for ver in pkg._pkg.version_list:
1012
if ver.downloadable:
1016
def _getUnusedDependencies(self):
1017
" get all package names that are not downloadable "
1018
unused_dependencies =set()
1020
if pkg.is_installed and self._depcache.is_garbage(pkg._pkg):
1021
unused_dependencies.add(pkg.name)
1022
return unused_dependencies
1024
def get_installed_demoted_packages(self):
1025
""" return list of installed and demoted packages
1027
If a demoted package is a automatic install it will be skipped
1030
demotions_file = self.config.get("Distro","Demotions")
1031
if os.path.exists(demotions_file):
1032
with open(demotions_file) as demotions_f:
1033
for line in demotions_f:
1034
if not line.startswith("#"):
1035
demotions.add(line.strip())
1036
installed_demotions = set()
1037
for demoted_pkgname in demotions:
1038
if demoted_pkgname not in self:
1040
pkg = self[demoted_pkgname]
1041
if (not pkg.is_installed or
1042
self._depcache.is_auto_installed(pkg._pkg) or
1045
installed_demotions.add(pkg)
1046
return list(installed_demotions)
1048
def _getForeignPkgs(self, allowed_origin, fromDist, toDist):
1049
""" get all packages that are installed from a foreign repo
1050
(and are actually downloadable)
1054
if pkg.is_installed and self.downloadable(pkg):
1055
if not pkg.candidate:
1057
# assume it is foreign and see if it is from the
1060
for origin in pkg.candidate.origins:
1061
# FIXME: use some better metric here
1062
if fromDist in origin.archive and \
1063
origin.origin == allowed_origin:
1065
if toDist in origin.archive and \
1066
origin.origin == allowed_origin:
1069
foreign_pkgs.add(pkg.name)
1072
def checkFreeSpace(self, snapshots_in_use=False):
    """
    this checks if we have enough free space on /var, /boot and /usr
    with the given cache

    Note: this can not be fully accurate if there are multiple
          mountpoints for /usr, /var, /boot

    :param snapshots_in_use: if True, additionally reserve space in
        /usr for the old versions of packages that get changed
    :raises NotEnoughFreeSpaceError: carrying a list of
        FreeSpaceRequired objects, one per short filesystem
    :return: True if all space checks pass
    """

    class FreeSpace(object):
        " helper class that represents the free space on each mounted fs "
        def __init__(self, initialFree):
            # free bytes still available; decremented while summing up
            # the requirements below
            self.free = initialFree
            # bytes the upgrade needs on this filesystem
            self.need = 0.0

    def make_fs_id(d):
        """ return 'id' of a directory so that directories on the
            same filesystem get the same id (simply the mount_point)
        """
        for mount_point in mounted:
            if d.startswith(mount_point):
                return mount_point
        return "/"

    # this is all a bit complicated
    # 1) check what is mounted (in mounted)
    # 2) create FreeSpace objects for the dirs we are interested in
    #    (mnt_map)
    # 3) use the mnt_map to check if we have enough free space and
    #    if not tell the user how much is missing
    mounted = []
    mnt_map = {}
    fs_free = {}
    with open("/proc/mounts") as mounts:
        for line in mounts:
            try:
                (what, where, fs, options, a, b) = line.split()
            except ValueError as e:
                logging.debug("line '%s' in /proc/mounts not understood (%s)" % (line, e))
                continue
            if where not in mounted:
                mounted.append(where)
    # make sure mounted is sorted by longest path
    mounted.sort(key=len, reverse=True)
    archivedir = apt_pkg.config.find_dir("Dir::Cache::archives")
    aufs_rw_dir = "/tmp/"
    if (hasattr(self, "config") and
            self.config.getWithDefault("Aufs", "Enabled", False)):
        aufs_rw_dir = self.config.get("Aufs", "RWDir")
        if not os.path.exists(aufs_rw_dir):
            os.makedirs(aufs_rw_dir)
    logging.debug("cache aufs_rw_dir: %s" % aufs_rw_dir)
    for d in ["/", "/usr", "/var", "/boot", archivedir, aufs_rw_dir,
              "/home", "/tmp/"]:
        # resolve symlinks so aliases of the same dir share one entry
        d = os.path.realpath(d)
        fs_id = make_fs_id(d)
        if os.path.exists(d):
            st = os.statvfs(d)
            free = st.f_bavail * st.f_frsize
        else:
            logging.warning("directory '%s' does not exists" % d)
            free = 0
        if fs_id in mnt_map:
            # same filesystem as an earlier dir: share the FreeSpace
            # object so requirements accumulate per mountpoint
            logging.debug("Dir %s mounted on %s" % (d, mnt_map[fs_id]))
            fs_free[d] = fs_free[mnt_map[fs_id]]
        else:
            logging.debug("Free space on %s: %s" % (d, free))
            mnt_map[fs_id] = d
            fs_free[d] = FreeSpace(free)
    logging.debug("fs_free contains: '%s'" % fs_free)

    # now calculate the space that is required on /boot
    # we do this by checking how many linux-image-$ver packages
    # are installed or going to be installed
    space_in_boot = 0
    for pkg in self:
        # we match against everything that looks like a kernel
        # and add space check to filter out metapackages
        if re.match("^linux-(image|image-debug)-[0-9.]*-.*", pkg.name):
            if pkg.marked_install:
                logging.debug("%s (new-install) added with %s to boot space" % (pkg.name, KERNEL_INITRD_SIZE))
                space_in_boot += KERNEL_INITRD_SIZE
            # mvo: jaunty does not create .bak files anymore
            #elif (pkg.marked_upgrade or pkg.is_installed):
            #    logging.debug("%s (upgrade|installed) added with %s to boot space" % (pkg.name, KERNEL_INITRD_SIZE))
            #    space_in_boot += KERNEL_INITRD_SIZE # creates .bak

    # we check for various sizes:
    # archivedir is were we download the debs
    # /usr is assumed to get *all* of the install space (incorrect,
    # but as good as we can do currently + safety buffer
    # / has a small safety buffer as well
    required_for_aufs = 0.0
    if (hasattr(self, "config") and
            self.config.getWithDefault("Aufs", "Enabled", False)):
        logging.debug("taking aufs overlay into space calculation")
        aufs_rw_dir = self.config.get("Aufs", "RWDir")
        # if we use the aufs rw overlay all the space is consumed
        # in the overlay dir
        for pkg in self:
            if pkg.marked_upgrade or pkg.marked_install:
                required_for_aufs += pkg.candidate.installed_size

    # add old size of the package if we use snapshots
    required_for_snapshots = 0.0
    if snapshots_in_use:
        for pkg in self:
            if (pkg.is_installed and
                    (pkg.marked_upgrade or pkg.marked_delete)):
                required_for_snapshots += pkg.installed.installed_size
        logging.debug("additional space for the snapshots: %s" % required_for_snapshots)

    # sum up space requirements
    for (dir, size) in [(archivedir, self.required_download),
                        # plus 50M safety buffer in /usr
                        ("/usr", self.additional_required_space),
                        ("/usr", 50*1024*1024),
                        ("/boot", space_in_boot),
                        ("/tmp", 5*1024*1024),   # /tmp for dkms LP: #427035
                        ("/", 10*1024*1024),     # small safety buffer /
                        (aufs_rw_dir, required_for_aufs),
                        # if snapshots are in use
                        ("/usr", required_for_snapshots),
                        ]:
        dir = os.path.realpath(dir)
        logging.debug("dir '%s' needs '%s' of '%s' (%f)" % (dir, size, fs_free[dir], fs_free[dir].free))
        fs_free[dir].free -= size
        fs_free[dir].need += size

    # check for space required violations
    required_list = {}
    for dir in fs_free:
        if fs_free[dir].free < 0:
            free_at_least = apt_pkg.size_to_str(float(abs(fs_free[dir].free)+1))
            # make_fs_id ensures we only get stuff on the same
            # mountpoint, so we report the requirements only once
            # per mountpoint
            required_list[make_fs_id(dir)] = FreeSpaceRequired(apt_pkg.size_to_str(fs_free[dir].need), make_fs_id(dir), free_at_least)
    # raise exception if free space check fails
    if len(required_list) > 0:
        logging.error("Not enough free space: %s" % [str(i) for i in required_list])
        raise NotEnoughFreeSpaceError(list(required_list.values()))
    return True
1219
if __name__ == "__main__":
    # manual smoke test: build a cache from the current directory's
    # config and exercise the free-space / snapshot machinery
    from .DistUpgradeConfigParser import DistUpgradeConfig
    from .DistUpgradeView import DistUpgradeView

    c = MyCache(DistUpgradeConfig("."), DistUpgradeView(), None)
    #print(c._identifyObsoleteKernels())
    print(c.checkFreeSpace())
    c.installTasks(["ubuntu-desktop"])
    print(c.get_changes())
    c.restore_snapshot()