Merge branch 'master' into master-new2

This commit is contained in:
Roman Vyalov 2012-10-31 12:58:12 +00:00
commit be12c9cebe
45 changed files with 551 additions and 16244 deletions

rpm5utils/synthesis.py (new file)

@@ -0,0 +1,466 @@
import rpm
import re
from urllib2 import urlopen, HTTPError, URLError
import subprocess
import platform
import sys
import os
import shutil
import gettext
gettext.install('urpm-tools')  # provides _() for the translated messages below
def eprint(text): sys.stderr.write(str(text) + "\n")  # error-output helper used below
ARCH = platform.machine()
def get_command_output(command, fatal_fails=True):
'''Execute command using subprocess.Popen and return [stdout, stderr, returncode]. If the return code is non-zero and fatal_fails is True, print an error message and exit'''
#vprint("Executing command: " + str(command))
res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = list(res.communicate())
if sys.stdout.encoding:
output[0] = output[0].decode(sys.stdout.encoding).encode("UTF-8")
output[1] = output[1].decode(sys.stdout.encoding).encode("UTF-8")
if(res.returncode != 0 and fatal_fails):
print(_("Error while calling command") + " '" + " ".join(command) + "'")
if(output[1] != None or output[0] != None):
print(_("Error message: \n")+ ((output[0].strip() + "\n") if output[0]!=None else "") +
(output[1].strip() if output[1]!=None else "") )
exit(1)
return [output[0], output[1], res.returncode]
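# Illustrative usage, not part of the original file (assumes urpmq is installed):
#   out, err, rc = get_command_output(['urpmq', '--list-media'], fatal_fails=False)
#   if rc == 0:
#       print(out)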
class MediaSet(object):
@staticmethod
def from_list(media):
ms = MediaSet()
ms.urls = []
ms.media = {}
ms.by_url = {}
for medium in media:
med, url = medium
ms.media[med] = url
ms.by_url[url] = med
ms.urls.append(url)
return ms
@staticmethod
def from_system(cmd):
ms = MediaSet()
ms.urls = []
ms.media = {}
ms.by_url = {}
lines = get_command_output(cmd + ["--list-url", "--list-media", 'active'])[0].strip().split("\n")
for line in lines:
parts = line.split(" ")
medium = ' '.join(parts[:-1])
url = parts[-1]
if(url.endswith("/")):
url = url[:-1]
if(url.find('/') != -1):
ms.media[medium] = url
ms.by_url[parts[-1]] = medium
ms.urls.append(url)
return ms
class NEVR(object):
EQUAL = rpm.RPMSENSE_EQUAL #8
GREATER = rpm.RPMSENSE_GREATER #4
LESS = rpm.RPMSENSE_LESS #2
#re_ver = re.compile('^([\d\.]+:)?([\w\d\.\-\[\]]+)(:[\d\.]+)?$')
re_dep_ver = re.compile('^([^ \[\]]+)\[([\>\<\=\!]*) ([^ ]+)\]$')
re_dep = re.compile('^([^ \[\]]+)$')
types = {None: 0,
'==' : EQUAL,
'' : EQUAL,
'=' : EQUAL,
'>=' : EQUAL|GREATER,
'<=' : EQUAL|LESS,
'>' : GREATER,
'<' : LESS,
'!=' : LESS|GREATER,
'<>' : LESS|GREATER}
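# These values compose rpm's RPMSENSE bit flags, e.g. '>=' maps to
# EQUAL|GREATER == 8|4 == 12, the same encoding rpm stores for dependencies.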
__slots__=['N', 'EVR', 'DE', 'DT', 'FL', 'E', 'VR']
def __init__(self, N, EVR, DE=None, DT=None, FL=None, E=None):
self.N = N
self.EVR = EVR
self.DE = DE
self.DT = DT
self.FL = FL
self.E = E
self.VR = EVR
if E:
if EVR.startswith(E + ':'):
self.VR = EVR[len(E)+1:]
else:
self.EVR = E + ':' + self.EVR
#try to get E
if not self.E and self.EVR and self.EVR.find(':') != -1:
items = self.EVR.split(':')
if items[0].find('.') == -1 and items[0].find('-') == -1:
self.E = items[0]
if not self.E and self.EVR:
self.E = '0'
self.EVR = '0:' + self.EVR
if self.DE == 'None':
self.DE = None
def __str__(self):
if self.FL:
for t in NEVR.types:
if not t:
continue
if NEVR.types[t] == self.FL:
return "%s %s %s" % (self.N, t, self.EVR)
if self.EVR:
return "%s == %s" % (self.N, self.EVR)
return "%s" % (self.N)
def __repr__(self):
return self.__str__()
def __eq__(self, val):
if not isinstance(val, NEVR):
raise Exception("Internal error: comparing between NEVR and " + str(type(val)))
return str(self) == str(val)
def __ne__(self, val):
return not (self == val)
@staticmethod
def from_depstring(s, DE_toremove=None):
s = s.replace('[*]', '')
if DE_toremove:
res = NEVR.re_dep_ver.match(s)
if res:
(name, t, val) = res.groups()
if val.endswith(':' + DE_toremove):
val = val[:-(len(DE_toremove) + 1)]
EVR = '%s[%s %s]' % (name, t, val)
res = NEVR.re_dep.match(s)
if res:
return NEVR(res.group(1), None)
res = NEVR.re_dep_ver.match(s)
if not res:
raise Exception('Incorrect requirement string: ' + s)
(name, t, val) = res.groups()
return NEVR(name, val, FL=NEVR.types[t])
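# Illustrative examples (not part of the original file):
#   from_depstring('libfoo.so.1')    -> NEVR('libfoo.so.1', None)
#   from_depstring('python[>= 2.7]') -> name 'python', FL == GREATER|EQUAL,
#     with the EVR normalized by __init__ to '0:2.7' (a default epoch is prepended).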
re_version = re.compile("(\.)?((alpha)|(cvs)|(svn)|(r))?\d+((mdv)|(mdk)|(mnb))")
@staticmethod
def from_filename(rpmname, E=None):
''' Return a NEVR built from the given rpm file or package name '''
suffix = ['.x86_64', '.noarch'] + ['.i%s86' % i for i in range(3,6)]
for s in suffix:
if(rpmname.endswith(s)):
rpmname = rpmname[:-len(s)]
sections = rpmname.split("-")
if(NEVR.re_version.search(sections[-1]) == None):
name = sections[:-3]
version = sections[-3:-1]
else:
name = sections[:-2]
version = sections[-2:]
return NEVR("-".join(name), "-".join(version), FL=NEVR.EQUAL, E=E)
def satisfies(self, val):
if self.N != val.N:
return False
if self.EVR == None or val.EVR == None:
return True
(pname, pt, pval) = (self.N, self.FL, self.EVR)
(rname, rt, rval) = (val.N, val.FL, val.EVR)
def cut_part(separator, val1, val2):
if val1 and val2 and val1.count(separator) != val2.count(separator):
n = max(val1.count(separator), val2.count(separator))
val1 = separator.join(val1.split(separator)[:n])
val2 = separator.join(val2.split(separator)[:n])
return (val1, val2)
(rval, pval) = cut_part(':', rval, pval)
(rval, pval) = cut_part('-', rval, pval)
res = rpm.evrCompare(rval, pval)
if res == 1: # >
if pt & NEVR.GREATER:
return True
elif pt & NEVR.LESS:
if rt & NEVR.LESS:
return True
else:
return False
else:
if rt & NEVR.LESS:
return True
else:
return False
elif res == 0:
if rt & NEVR.EQUAL and pt & NEVR.EQUAL:
return True
if rt & NEVR.LESS and pt & NEVR.LESS:
return True
if rt & NEVR.GREATER and pt & NEVR.GREATER:
return True
return False
else: # <
if rt & NEVR.GREATER:
return True
elif rt & NEVR.LESS:
if pt & NEVR.LESS:
return True
else:
return False
else:
if pt & NEVR.LESS:
return True
else:
return False
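# Worked example (not part of the original file): a provide 'python == 2.7-1'
# checked against a requirement 'python >= 2.5':
#   NEVR('python', '2.7-1', FL=NEVR.EQUAL).satisfies(
#       NEVR('python', '2.5', FL=NEVR.GREATER | NEVR.EQUAL))  # -> True
# cut_part() trims the release first, so rpm.evrCompare sees '0:2.5' vs '0:2.7'.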
class PackageSet:
tags = ['provides','requires','obsoletes','suggests', 'conflicts']
alltags = tags + ['nevr', 'arch']
def __init__(self):
self.what = {}
self.packages = {}
def load_from_system(self):
print(_("Loading the list of installed packages..."))
ts = rpm.TransactionSet()
mi = ts.dbMatch()
for tag in PackageSet.tags:
self.what[tag] = {}
for h in mi:
name = h['name']
if(name == 'gpg-pubkey'):
continue
if(name not in self.packages):
self.packages[h['name']] = {}
else:
print(_("Duplicating ") + name + '-' + h['version'] + '-' + h['release'])
print(_("Already found: ") + name + '-' + self.packages[name]["nevr"].EVR)
E = str(h['epoch'])
V = h['version']
R = h['release']
DE = h['distepoch']
DT = h['disttag']
if E == None or E == 'None':
E = '0'
EVR = "%s:%s-%s" % (E, V, R)
nevr = NEVR(name, EVR, FL=NEVR.EQUAL, DE=DE, DT=DT, E=E)
self.packages[name]['nevr'] = nevr
self.packages[name]['arch'] = h['arch']
for tag in PackageSet.tags:
if tag not in self.packages[name]:
self.packages[name][tag] = []
dss = h.dsFromHeader(tag[:-1] + 'name')
for s in dss:
fl = s.Flags()
#undocumented flag for special dependencies
if fl & 16777216:
continue
fl = fl % 16
_evr = s.EVR()
if _evr == '':
evr = NEVR(s.N(), None, FL=fl)
else:
evr = NEVR(s.N(), _evr, FL=fl)
self.packages[name][tag].append(evr)
if evr.N not in self.what[tag]:
self.what[tag][evr.N] = []
self.what[tag][evr.N].append((name, evr))
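# Resulting index shape, for illustration (not part of the original file):
#   self.packages['bash'] == {'nevr': NEVR(...), 'arch': 'x86_64',
#                             'provides': [...], 'requires': [...], ...}
#   self.what['provides']['libc.so.6'] == [('glibc', NEVR(...)), ...]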
def load_from_repository(self, ms):
url_by_synthesis_url = {}
global fields
def get_synthesis_by_url(url):
if url.startswith('file://'):
url = url[6:]
if url.startswith('/'):
medium = ms.by_url[url]
return '/var/lib/urpmi/%s/synthesis.hdlist.cz' % medium
else:
return url + "/media_info/synthesis.hdlist.cz"
medium_by_synth = {}
synthesis_lists = []
for url in ms.urls:
synth = get_synthesis_by_url(url)
synthesis_lists.append(synth)
url_by_synthesis_url[synth] = url
medium_by_synth[synth] = ms.by_url[url]
def clear_data():
'''Reset the 'fields' dictionary before reading the next package'''
global fields
fields = {"provides":[], "requires":[], "obsoletes":[], "suggests":[],
"conflicts":[], "info":[], "summary":[]}
arches32 = ['i%d86' % i for i in range(3,6)]
for tag in PackageSet.tags:
self.what[tag] = {}
#the following code is awful, I know. But it's easy-to-understand and clear.
# don't like it - write better and send me :)
for synthesis_list in synthesis_lists:
try:
#print synthesis_list
print(_("Processing medium ") + medium_by_synth[synthesis_list] + "...")
if(synthesis_list.startswith("http://") or synthesis_list.startswith("ftp://")):
r = urlopen(synthesis_list)
s = r.read()
r.close()
elif(synthesis_list.startswith("rsync://")):
tmppath = '/tmp/urpm-reposync.synthesis_lists'
if (not os.path.exists(tmppath)):
os.mkdir(tmppath)
filename = tmppath + '/' + os.path.basename(synthesis_list)
os.system("rsync --copy-links %s %s 1>/dev/null 2>&1" % (synthesis_list, filename))
r = open(filename)
s = r.read()
r.close()
shutil.rmtree(tmppath)
elif(synthesis_list.startswith("/")): #local file
if not os.path.exists(synthesis_list):
eprint(_('Could not read synthesis file. (File %s not found)') % synthesis_list)
continue
r = open(synthesis_list)
s = r.read()
r.close()
res = subprocess.Popen(['gzip', '-d'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = res.communicate(s)
clear_data()
for line in output[0].split('\n'):
if(line == ''): # there can be empty lines
continue
items = line.split("@")
data = [x.strip() for x in items[2:]]
fields[items[1]] = data
if(items[1] == "info"):
rpmname = items[2]
size = int(items[4])
nevr = NEVR.from_filename(items[2], E=items[3])
nevr.E = items[3]
disttagepoch = '-'
if(len(items)>6):
disttagepoch = items[6]
nevr.DT = items[6]
if(len(items)>7):
disttagepoch += items[7]
nevr.DE = items[7]
arch = items[2].split('.')[-1]
if arch in arches32 and ARCH in arches32:
arch = ARCH
in_repo = nevr.N in self.packages
new_arch_correct = arch == ARCH
if in_repo:
if nevr.DE == self.packages[nevr.N]['nevr'].DE:
ver_newer = rpm.evrCompare(nevr.EVR, self.packages[nevr.N]['nevr'].EVR) == 1
else:
ver_newer = (nevr.DE > self.packages[nevr.N]['nevr'].DE)
old_arch_correct = self.packages[nevr.N]['arch'] == ARCH
else:
ver_newer = None
old_arch_correct = None
toinst = not in_repo or (not old_arch_correct and new_arch_correct) or \
(ver_newer and old_arch_correct == new_arch_correct)
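# In words: take the candidate if the package is not indexed yet, if it fixes
# the arch (native arch wins over a foreign one), or if it is newer and no
# worse arch-wise than the entry stored so far.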
if toinst:
#remove old data
if nevr.N in self.packages:
for tag in PackageSet.tags:
for dep in self.packages[nevr.N][tag]:
self.what[tag][dep.N].remove((nevr.N, dep))
else:
self.packages[nevr.N] = {}
self.packages[nevr.N]['nevr'] = nevr
self.packages[nevr.N]["arch"] = arch
self.packages[nevr.N]["synthesis_list"] = synthesis_list
self.packages[nevr.N]["filename"] = rpmname
self.packages[nevr.N]["size"] = size
for tag in PackageSet.tags:
self.packages[nevr.N][tag] = []
for item in fields[tag]:
if item == '':
continue
dep = NEVR.from_depstring(item, DE_toremove=nevr.DE)
self.packages[nevr.N][tag].append(dep)
if dep.N not in self.what[tag]:
self.what[tag][dep.N] = []
self.what[tag][dep.N].append((nevr.N, dep))
self.packages[nevr.N]['medium'] = medium_by_synth[synthesis_list]
clear_data()
except (HTTPError,URLError):
eprint(_("File can not be processed! Url: ") + synthesis_list)
def whattag(self, tag, val):
if val.N not in self.what[tag]:
return []
found = []
for (pkg, dep) in self.what[tag][val.N]:
if dep.satisfies(val):
found.append(pkg)
return found
def whattag_revert(self, tag, val):
if val.N not in self.what[tag]:
return []
found = []
for (pkg, dep) in self.what[tag][val.N]:
if val.satisfies(dep):
found.append(pkg)
return found
def whatprovides(self, val):
return self.whattag('provides', val)
def whatobsoletes(self, val):
return self.whattag_revert('obsoletes', val)
def whatrequires(self, val):
return self.whattag_revert('requires', val)
def whatconflicts(self, val):
return self.whattag_revert('conflicts', val)
def whatrequires_pkg(self, pkg):
found = []
for req in self.packages[pkg]['provides']:
found += [(d, req) for d in self.whatrequires(req)]
return found
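The new module is driven from urpm-reposync's Main() roughly as below. A minimal
sketch, not part of the commit, assuming a urpmi-based system where urpmq is
available; the dependency string 'bash' is a placeholder:

from rpm5utils.synthesis import MediaSet, PackageSet, NEVR

ms = MediaSet.from_system(['urpmq'])    # active media names and URLs
installed = PackageSet()
installed.load_from_system()            # index the local RPM database
repository = PackageSet()
repository.load_from_repository(ms)     # parse each medium's synthesis.hdlist.cz
dep = NEVR.from_depstring('bash')
print(repository.whatprovides(dep))     # names of packages whose provides satisfy it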


@@ -43,11 +43,6 @@ import rpm
from urllib2 import urlopen, HTTPError, URLError
import shutil
import configparser
cp = ConfigParser.RawConfigParser()
exit()
import gettext
#gettext.install('urpm-tools', 'locale', unicode=True, names=['gettext'])
gettext.install('urpm-tools')


@@ -11,7 +11,7 @@ import sys
import subprocess
import re
import os
from urllib2 import urlopen, HTTPError, URLError
import zlib
import glob
import shutil
@@ -22,6 +22,8 @@ import unittest
import gettext
gettext.install('urpm-tools')
from rpm5utils.synthesis import *
ARCH = platform.machine()
downloaded_rpms_dir = '/tmp/urpm-reposync.rpms'
@@ -52,21 +54,7 @@ def oprint(text):
print(text)
def get_command_output(command, fatal_fails=True):
'''Execute command using subprocess.Popen and return [stdout, stderr, returncode]. If the return code is non-zero and fatal_fails is True, print an error message and exit'''
vprint("Executing command: " + str(command))
res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = list(res.communicate())
if sys.stdout.encoding:
output[0] = output[0].decode(sys.stdout.encoding).encode("UTF-8")
output[1] = output[1].decode(sys.stdout.encoding).encode("UTF-8")
if(res.returncode != 0 and fatal_fails):
eprint(_("Error while calling command") + " '" + " ".join(command) + "'")
if(output[1] != None or output[0] != None):
eprint(_("Error message: \n")+ ((output[0].strip() + "\n") if output[0]!=None else "") +
(output[1].strip() if output[1]!=None else "") )
exit(1)
return [output[0], output[1], res.returncode]
def parse_command_line():
@@ -100,437 +88,6 @@ def parse_command_line():
cmd = ['urpmq']
class MediaSet(object):
def __init__(self):
global cmd
self.urls = []
self.media = {}
self.by_url = {}
vprint("Loading media urls...")
lines = get_command_output(cmd + ["--list-url", "--list-media", 'active'])[0].strip().split("\n")
for line in lines:
parts = line.split(" ")
medium = ' '.join(parts[:-1])
url = parts[-1]
if(url.endswith("/")):
url = url[:-1]
if(url.find('/') != -1):
self.media[medium] = url
self.by_url[parts[-1]] = medium
self.urls.append(url)
vprint("Media urls: " + str(self.urls))
class NEVR:
EQUAL = rpm.RPMSENSE_EQUAL #8
GREATER = rpm.RPMSENSE_GREATER #4
LESS = rpm.RPMSENSE_LESS #2
#re_ver = re.compile('^([\d\.]+:)?([\w\d\.\-\[\]]+)(:[\d\.]+)?$')
re_dep_ver = re.compile('^([^ \[\]]+)\[([\>\<\=\!]*) ([^ ]+)\]$')
re_dep = re.compile('^([^ \[\]]+)$')
types = {None: 0,
'==' : EQUAL,
'' : EQUAL,
'=' : EQUAL,
'>=' : EQUAL|GREATER,
'<=' : EQUAL|LESS,
'>' : GREATER,
'<' : LESS,
'!=' : LESS|GREATER,
'<>' : LESS|GREATER}
def __init__(self, N, EVR, DE=None, DT=None, FL=None, E=None):
self.N = N
self.EVR = EVR
self.DE = DE
self.DT = DT
self.FL = FL
self.E = E
self.VR = EVR
if E:
if EVR.startswith(E + ':'):
self.VR = EVR[len(E)+1:]
else:
self.EVR = E + ':' + self.EVR
#try to get E
if not self.E and self.EVR and self.EVR.find(':') != -1:
items = self.EVR.split(':')
if items[0].find('.') == -1 and items[0].find('-') == -1:
self.E = items[0]
if not self.E and self.EVR:
self.E = '0'
self.EVR = '0:' + self.EVR
if self.DE == 'None':
self.DE = None
def __str__(self):
if self.FL:
for t in NEVR.types:
if not t:
continue
if NEVR.types[t] == self.FL:
return "%s %s %s" % (self.N, t, self.EVR)
if self.EVR:
return "%s == %s" % (self.N, self.EVR)
return "%s" % (self.N)
def __repr__(self):
return self.__str__()
def __eq__(self, val):
if not isinstance(val, NEVR):
raise Exception("Internal error: comparing between NEVR and " + str(type(val)))
return str(self) == str(val)
def __ne__(self, val):
return not (self == val)
@staticmethod
def from_depstring(s, DE_toremove=None):
s = s.replace('[*]', '')
if DE_toremove:
res = NEVR.re_dep_ver.match(s)
if res:
(name, t, val) = res.groups()
if val.endswith(':' + DE_toremove):
val = val[:-(len(DE_toremove) + 1)]
EVR = '%s[%s %s]' % (name, t, val)
res = NEVR.re_dep.match(s)
if res:
return NEVR(res.group(1), None)
res = NEVR.re_dep_ver.match(s)
if not res:
raise Exception('Incorrect requirement string: ' + s)
(name, t, val) = res.groups()
return NEVR(name, val, FL=NEVR.types[t])
re_version = re.compile("(\.)?((alpha)|(cvs)|(svn)|(r))?\d+((mdv)|(mdk)|(mnb))")
@staticmethod
def from_filename(rpmname, E=None):
''' Return a NEVR built from the given rpm file or package name '''
suffix = ['.x86_64', '.noarch'] + ['.i%s86' % i for i in range(3,6)]
for s in suffix:
if(rpmname.endswith(s)):
rpmname = rpmname[:-len(s)]
sections = rpmname.split("-")
if(NEVR.re_version.search(sections[-1]) == None):
name = sections[:-3]
version = sections[-3:-1]
else:
name = sections[:-2]
version = sections[-2:]
return NEVR("-".join(name), "-".join(version), FL=NEVR.EQUAL, E=E)
def satisfies(self, val):
if self.N != val.N:
return False
if self.EVR == None or val.EVR == None:
return True
(pname, pt, pval) = (self.N, self.FL, self.EVR)
(rname, rt, rval) = (val.N, val.FL, val.EVR)
def cut_part(separator, val1, val2):
if val1 and val2 and val1.count(separator) != val2.count(separator):
n = max(val1.count(separator), val2.count(separator))
val1 = separator.join(val1.split(separator)[:n])
val2 = separator.join(val2.split(separator)[:n])
return (val1, val2)
(rval, pval) = cut_part(':', rval, pval)
(rval, pval) = cut_part('-', rval, pval)
res = rpm.evrCompare(rval, pval)
if res == 1: # >
if pt & NEVR.GREATER:
return True
elif pt & NEVR.LESS:
if rt & NEVR.LESS:
return True
else:
return False
else:
if rt & NEVR.LESS:
return True
else:
return False
elif res == 0:
if rt & NEVR.EQUAL and pt & NEVR.EQUAL:
return True
if rt & NEVR.LESS and pt & NEVR.LESS:
return True
if rt & NEVR.GREATER and pt & NEVR.GREATER:
return True
return False
else: # <
if rt & NEVR.GREATER:
return True
elif rt & NEVR.LESS:
if pt & NEVR.LESS:
return True
else:
return False
else:
if pt & NEVR.LESS:
return True
else:
return False
class PackageSet:
tags = ['provides','requires','obsoletes','suggests', 'conflicts']
alltags = tags + ['nevr', 'arch']
def __init__(self):
self.what = {}
self.packages = {}
def load_from_system(self):
qprint(_("Loading the list of installed packages..."))
ts = rpm.TransactionSet()
mi = ts.dbMatch()
for tag in PackageSet.tags:
self.what[tag] = {}
for h in mi:
name = h['name']
if(name == 'gpg-pubkey'):
continue
if(name not in self.packages):
self.packages[h['name']] = {}
else:
qprint(_("Duplicating ") + name + '-' + h['version'] + '-' + h['release'])
qprint(_("Already found: ") + name + '-' + self.packages[name]["nevr"].EVR)
E = str(h['epoch'])
V = h['version']
R = h['release']
DE = h['distepoch']
DT = h['disttag']
if E == None or E == 'None':
E = '0'
EVR = "%s:%s-%s" % (E, V, R)
nevr = NEVR(name, EVR, FL=NEVR.EQUAL, DE=DE, DT=DT, E=E)
self.packages[name]['nevr'] = nevr
self.packages[name]['arch'] = h['arch']
for tag in PackageSet.tags:
if tag not in self.packages[name]:
self.packages[name][tag] = []
dss = h.dsFromHeader(tag[:-1] + 'name')
for s in dss:
fl = s.Flags()
#undocumented flag for special dependencies
if fl & 16777216:
continue
fl = fl % 16
_evr = s.EVR()
if _evr == '':
evr = NEVR(s.N(), None, FL=fl)
else:
evr = NEVR(s.N(), _evr, FL=fl)
self.packages[name][tag].append(evr)
if evr.N not in self.what[tag]:
self.what[tag][evr.N] = []
self.what[tag][evr.N].append((name, evr))
def load_from_repository(self):
url_by_synthesis_url = {}
global fields
def get_synthesis_by_url(url):
if url.startswith('file://'):
url = url[6:]
if url.startswith('/'):
medium = ms.by_url[url]
return '/var/lib/urpmi/%s/synthesis.hdlist.cz' % medium
else:
return url + "/media_info/synthesis.hdlist.cz"
medium_by_synth = {}
synthesis_lists = []
for url in ms.urls:
synth = get_synthesis_by_url(url)
synthesis_lists.append(synth)
url_by_synthesis_url[synth] = url
medium_by_synth[synth] = ms.by_url[url]
def clear_data():
'''Reset the 'fields' dictionary before reading the next package'''
global fields
fields = {"provides":[], "requires":[], "obsoletes":[], "suggests":[],
"conflicts":[], "info":[], "summary":[]}
arches32 = ['i%d86' % i for i in range(3,6)]
for tag in PackageSet.tags:
self.what[tag] = {}
#the following code is awful, I know. But it's easy-to-understand and clear.
# don't like it - write better and send me :)
for synthesis_list in synthesis_lists:
try:
#print synthesis_list
qprint(_("Processing medium ") + medium_by_synth[synthesis_list] + "...")
vprint(synthesis_list)
if(synthesis_list.startswith("http://") or synthesis_list.startswith("ftp://")):
r = urlopen(synthesis_list)
s = r.read()
r.close()
elif(synthesis_list.startswith("rsync://")):
tmppath = '/tmp/urpm-reposync.synthesis_lists'
if (not os.path.exists(tmppath)):
os.mkdir(tmppath)
filename = tmppath + '/' + os.path.basename(synthesis_list)
os.system("rsync --copy-links %s %s 1>/dev/null 2>&1" % (synthesis_list, filename))
r = open(filename)
s = r.read()
r.close()
shutil.rmtree(tmppath)
elif(synthesis_list.startswith("/")): #local file
if not os.path.exists(synthesis_list):
eprint(_('Could not read synthesis file. (File %s not found)') % synthesis_list)
continue
r = open(synthesis_list)
s = r.read()
r.close()
res = subprocess.Popen(['gzip', '-d'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = res.communicate(s)
clear_data()
for line in output[0].split('\n'):
if(line == ''): # there can be empty lines
continue
items = line.split("@")
data = [x.strip() for x in items[2:]]
fields[items[1]] = data
if(items[1] == "info"):
rpmname = items[2]
size = int(items[4])
nevr = NEVR.from_filename(items[2], E=items[3])
nevr.E = items[3]
disttagepoch = '-'
if(len(items)>6):
disttagepoch = items[6]
nevr.DT = items[6]
if(len(items)>7):
disttagepoch += items[7]
nevr.DE = items[7]
arch = items[2].split('.')[-1]
if arch in arches32 and ARCH in arches32:
arch = ARCH
in_repo = nevr.N in self.packages
new_arch_correct = arch == ARCH
if in_repo:
if nevr.DE == self.packages[nevr.N]['nevr'].DE:
ver_newer = rpm.evrCompare(nevr.EVR, self.packages[nevr.N]['nevr'].EVR) == 1
else:
ver_newer = (nevr.DE > self.packages[nevr.N]['nevr'].DE)
old_arch_correct = self.packages[nevr.N]['arch'] == ARCH
else:
ver_newer = None
old_arch_correct = None
toinst = not in_repo or (not old_arch_correct and new_arch_correct) or \
(ver_newer and old_arch_correct == new_arch_correct)
if toinst:
#remove old data
if nevr.N in self.packages:
for tag in PackageSet.tags:
for dep in self.packages[nevr.N][tag]:
self.what[tag][dep.N].remove((nevr.N, dep))
else:
self.packages[nevr.N] = {}
self.packages[nevr.N]['nevr'] = nevr
self.packages[nevr.N]["arch"] = arch
self.packages[nevr.N]["synthesis_list"] = synthesis_list
self.packages[nevr.N]["filename"] = rpmname
self.packages[nevr.N]["size"] = size
for tag in PackageSet.tags:
self.packages[nevr.N][tag] = []
for item in fields[tag]:
if item == '':
continue
dep = NEVR.from_depstring(item, DE_toremove=nevr.DE)
self.packages[nevr.N][tag].append(dep)
if dep.N not in self.what[tag]:
self.what[tag][dep.N] = []
self.what[tag][dep.N].append((nevr.N, dep))
self.packages[nevr.N]['medium'] = medium_by_synth[synthesis_list]
clear_data()
except (HTTPError,URLError):
eprint(_("File can not be processed! Url: ") + synthesis_list)
def whattag(self, tag, val):
if val.N not in self.what[tag]:
return []
found = []
for (pkg, dep) in self.what[tag][val.N]:
if dep.satisfies(val):
found.append(pkg)
return found
def whattag_revert(self, tag, val):
if val.N not in self.what[tag]:
return []
found = []
for (pkg, dep) in self.what[tag][val.N]:
if val.satisfies(dep):
found.append(pkg)
return found
def whatprovides(self, val):
return self.whattag('provides', val)
def whatobsoletes(self, val):
return self.whattag_revert('obsoletes', val)
def whatrequires(self, val):
return self.whattag_revert('requires', val)
def whatconflicts(self, val):
return self.whattag_revert('conflicts', val)
def whatrequires_pkg(self, pkg):
found = []
for req in self.packages[pkg]['provides']:
found += [(d, req) for d in self.whatrequires(req)]
return found
to_update = []
to_downgrade = []
to_remove = []
@@ -659,9 +216,51 @@ def resolve_dep_while_emulation(requirement, package):
to_append_bysource[package].append(pkgs[0])
def emulate_install(pkg):
''' Returns True if something was done, False if the package was not installed '''
global actions, not_provided_packages, conflicting_packages
vprint('Emulating package installation: ' + pkg)
conflicts = False
for confl in repository.packages[pkg]['conflicts']:
res = installed.whatprovides(confl)
for item in res[:]:
if item in to_remove_pre:
res.remove(item)
if res:
conflicts = True
conflicting_packages.append( (pkg, res) )
vprint("New conflict: %s, %s" % (str(pkg), str(res)))
for prov in repository.packages[pkg]['provides']:
res = installed.whatconflicts(prov)
for item in res[:]:
if item in to_remove_pre:
res.remove(item)
if res:
conflicts = True
conflicting_packages.append( (res, pkg) )
vprint("New conflict: %s, %s" % (str(res), str(pkg)))
if conflicts:
return False
# remove the previously added conflicts for this package
for item in conflicting_packages[:]:
pkg1, pkgs2 = item
if pkg1 == pkg:
conflicting_packages.remove(item)
vprint("Conflict %s have been resolved" % str(item))
if pkg in pkgs2:
pkgs2.remove(pkg)
if not pkgs2:
conflicting_packages.remove(item)
vprint("Conflict %s have been resolved" % str((pkg1, pkg)))
emptied = []
for p in not_provided_packages:
for req in not_provided_packages[p][:]:
@@ -678,24 +277,6 @@ def emulate_install(pkg):
conflicts = False
for confl in repository.packages[pkg]['conflicts']:
res = installed.whatprovides(confl)
if res:
conflicts = True
conflicting_packages.append( (pkg, res) )
vprint("New conflict: %s, %s" % (str(pkg), str(res)))
for prov in repository.packages[pkg]['provides']:
res = installed.whatconflicts(prov)
if res:
conflicts = True
conflicting_packages.append( (res, pkg) )
vprint("New conflict: %s, %s" % (str(res), str(pkg)))
if conflicts:
return
url = ms.media[repository.packages[pkg]['medium']]
url += '/' + repository.packages[pkg]['filename'] + '.rpm'
@@ -729,12 +310,16 @@ def emulate_install(pkg):
resolve_dep_while_emulation(req, pkg)
return True
def emulate_remove(pkg, updating=False):
''' Returns True if something was done, False if the package was not removed '''
global not_provided_packages
vprint("Emulating package removing: " + pkg)
if pkg not in installed.packages:
if pkg not in installed.packages or 'nevr' not in installed.packages[pkg]:
vprint("Nothing to remove")
return
return False
if pkg in not_provided_packages:
not_provided_packages.pop(pkg)
@@ -749,7 +334,7 @@ def emulate_remove(pkg, updating=False):
installed.packages[pkg]['old_package'] = P
if not actions: #do nothing while initial packages removing
return
return True
for dep in installed.packages[pkg]['old_package']['provides']:
if dep.N not in installed.what['requires']:
@@ -763,16 +348,32 @@ def emulate_remove(pkg, updating=False):
not_provided_packages[package].append(requirement)
resolve_dep_while_emulation(requirement, package)
return True
def have_to_be_removed(pkg):
to_remove_problems[pkg] = []
for dep in installed.packages[pkg]['requires']:
res = installed.whatprovides(dep)
if not res:
act_resolved = False
for act_pkg in actions:
for act_dep in repository.packages[act_pkg]['provides']:
if act_dep.satisfies(dep):
act_resolved = True
break
if act_resolved:
break
if not res and not act_resolved:
to_remove_problems[pkg].append(_("\tRequires %s, which will not be installed.") % (str(dep) ))
continue
for dep in installed.packages[pkg]['provides']:
res = installed.whatconflicts(dep)
if res:
@@ -815,8 +416,9 @@ def process_packages():
if problems[act]:
vprint ("\nPROBLEM: %s: %s" % (act, problems[act]))
if not problems[act]:
emulate_remove(act, updating=True)
emulate_install(act)
removed = emulate_remove(act, updating=True)
installed_ok = emulate_install(act)
if removed or installed_ok:
changed = True
for pr in problems:
if len(problems[pr])>0:
@@ -914,7 +516,7 @@ def install_packages():
ts = rpm.TransactionSet()
# turn all the checks off. They can cause segfault in RPM for now.
ts.setVSFlags(rpm.RPMVSF_NOHDRCHK|rpm.RPMVSF_NOSHA1HEADER|rpm.RPMVSF_NODSAHEADER|rpm.RPMVSF_NORSAHEADER|rpm.RPMVSF_NOMD5|rpm.RPMVSF_NODSA|rpm.RPMVSF_NORSA|rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
ts.setVSFlags(rpm.RPMVSF_NOHDRCHK|rpm.RPMVSF_NOSHA1HEADER|rpm.RPMVSF_NODSAHEADER|rpm.RPMVSF_NORSAHEADER|rpm.RPMVSF_NOMD5|rpm.RPMVSF_NODSA|rpm.RPMVSF_NORSA)
ts.setProbFilter(rpm.RPMPROB_FILTER_OLDPACKAGE)
#flags for ts.run execution. We need it to speed the process up
@@ -1163,12 +765,12 @@ def Main():
exclude_media.append(ii)
cmd = cmd + ['--excludemedia', media[1:]]
ms = MediaSet()
ms = MediaSet.from_system(cmd)
installed = PackageSet()
installed.load_from_system()
repository = PackageSet()
repository.load_from_repository()
repository.load_from_repository(ms)
installed_backup = copy.deepcopy(installed)
not_provided_packages = {}


@@ -1,6 +1,6 @@
Name: urpm-tools
Version: 2.1
Release: 1
Version: 2.2.1
Release: 0
Summary: Utilities that help to work with URPM-based repositories
Group: System/Configuration/Packaging
License: GPLv2
@@ -32,7 +32,7 @@ Provides: python-rpm5utils = %{version}-%{release}
Mostly taken from yum.
%prep
%setup -q -n %{name}-%{version}
%setup -q -n %{name}
%install
rm -rf %{buildroot}
@@ -57,7 +57,7 @@ make install DESTDIR=$RPM_BUILD_ROOT
%{_mandir}/man1/urpm-repograph.1.xz
%{_mandir}/man1/urpm-reposync.1.xz
%{_datadir}/locale/*/LC_MESSAGES/urpm-tools.mo
#%{_datadir}/locale/*/LC_MESSAGES/urpm-tools.mo
%doc COPYING
%files -n python-rpm5utils


@@ -1,9 +0,0 @@
-------------------
Urpm-tools Authors
-------------------
Anton Kirilenko
Andrey Ponomarenko
Denis Silakov
Vladimir Testov


@@ -1,339 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
675 Mass Ave, Cambridge, MA 02139, USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) 19yy <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) 19yy name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Library General
Public License instead of this License.


@@ -1,43 +0,0 @@
SUBDIRS = rpm5utils
PKGNAME = urpm-tools
PYTHON_UTILS = urpm-downloader urpm-package-cleanup urpm-repodiff urpm-repomanage urpm-repograph urpm-reposync
PERL_UTILS = urpm-repoclosure
PYTHON=python
PYFILES = $(wildcard *.py)
PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')
PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix')
PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER)
PKGDIR = $(PYLIBDIR)/site-packages
SHELL=/bin/bash
all:
@echo "Nothing to do. Run 'make install' or 'make clean'"
clean:
rm -f *.pyc *.pyo *~
rm -f test/*~
rm -f *.tar.gz
install:
mkdir -p $(DESTDIR)/usr/bin/
mkdir -p $(DESTDIR)/usr/share/man/man1
for util in $(PYTHON_UTILS); do \
install -m 755 $$util.py $(DESTDIR)/usr/bin/$$util; \
install -m 664 docs/$$util.1 $(DESTDIR)/usr/share/man/man1/$$util.1; \
done
for util in $(PERL_UTILS); do \
install -m 755 $$util.pl $(DESTDIR)/usr/bin/$$util; \
install -m 664 docs/$$util.1 $(DESTDIR)/usr/share/man/man1/$$util.1; \
done
for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR) -C $$d install; [ $$? = 0 ] || exit 1; done
install -m 644 urpmmisc.py $(DESTDIR)/$(PKGDIR)/urpmmisc.py;
for d in `python localizer.py --list`; do\
mkdir -p $(DESTDIR)/usr/share/locale/$$d/LC_MESSAGES;\
install -m 644 locale/$$d/LC_MESSAGES/urpm-tools.mo $(DESTDIR)/usr/share/locale/$$d/LC_MESSAGES/urpm-tools.mo;\
done


@@ -1,7 +0,0 @@
Urpm-tools - a set of utilities to work with Urpm repositories
They make URPM-based repositories easier and more powerful to use.
These tools include: urpm-downloader, urpm-package-cleanup,
urpm-repoclosure, urpm-repodiff, urpm-repomanage, urpm-repograph,
urpm-reposync
rpm5utils are based on rpmUtils from yum, http://yum.baseurl.org


@@ -1,85 +0,0 @@
.\" urpm-downloader
.TH "urpm-downloader" "1" "21 December 2011" "Anton Kirilenko" ""
.SH "NAME"
urpm-downloader - download RPMs from URPM-based linux repositories
.SH "SYNOPSIS"
\fBurpm-downloader\fP [options] package(s)
.SH "DESCRIPTION"
.PP
\fBurpm-downloader\fP is a tool for downloading RPMs and SRPMs from URPM-based Linux repositories.
.PP
\fBpackage\fP Package name(s) to download. Arguments may also be (S)RPM files; in that case the package name extracted from the file is used.
.PP
.SH "GENERAL OPTIONS"
.IP "\fB\-h, \-\-help\fP"
Help; display a help message and then quit.
.IP "\fB\-\-version\fP"
Report program version and exit.
.IP "\fB\-\-dest\-dir\fP"
Specify a destination directory for the download.
.IP "\fB\-v, \-\-verbose\fP"
Verbose (print additional info)
.IP "\fB-q, \-\-quiet\fP"
Quiet operation
.IP "\fB\-\-include\-media, \-\-media\fP"
Use only selected URPM media
.IP "\fB\-\-exclude\-media\fP"
Do not use selected URPM media
.IP "\fB\-\-fail\-broken\fP"
Exit if fail to resolve package dependencies.
.IP "\fB\-i, \-\-ignore-errors\fP"
Try to continue when error occurs
.PP
.SH "DOWNLOAD OPTIONS"
.IP "\fB\-s, \-\-source\fP"
Download the source RPMs (SRPMs)
.IP "\fB\-u, \-\-urls\fP"
Instead of downloading files, list the URLs that would be processed
.IP "\fB\-b, \-\-binary\fP"
Download binary RPMs
.IP "\fB\-s, \-\-source\fP"
Download the source RPMs (SRPMs)
.IP "\fB\-d, \-\-debug-info \fP"
Download debug RPMs
.IP "\fB\-r, \-\-resolve\fP"
When downloading RPMs, resolve dependencies and also download the required packages, if they are not already installed
.IP "\fB\-a, \-\-resolve\-all\fP"
When downloading RPMs, resolve dependencies and also download the required packages, even if they are already installed
.IP "\fB\-x, \-\-exclude\-packages\fP"
Exclude package(s) by regex
.IP "\fB\-o, \-\-overwrite\fP"
If the file already exists, download it again and overwrite the old one
.IP "\fB\-\-all\-alternatives\fP"
If package dependency can be satisfied by several packages, download all of them (by default, only the first one is downloaded)
.IP "\fB\-\-all\-versions\fP"
If different versions of a package are present in the repository, process them all
.PP
.SH "EXAMPLES"
.IP "Download RPMs for given packages (pk1, pk2, ...) into the directory 'path':"
\fBurpm-downloader --dest-dir path pkg1 pkg2\fP
.IP "Download SRPMs for given packages (pk1, pk2, ...) into the current directory:"
\fBurpm-downloader -s pkg1 pkg2\fP
.IP "Download the package with a whole dependency tree to the specified directory:"
\fBurpm-downloader -a --dest-dir path package-name\fP
.IP "You want to rebuild existing rpm. Download corresponding SRPM and all the packages missing for building:"
\fBurpm-downloader -sr --dest-dir path package.rpm\fP
.PP
.SH "EXIT CODES"
.IP \fB0\fP
Completed successfully
.IP \fB1\fP
Error calling external command (urpmq, rpm, etc.). This command output will be printed before exit
.IP \fB2\fP
Can not download SRPM
.IP \fB3\fP
Can not download RPM
.IP \fB4\fP
One or more of the specified rpm files do not exist
.PP
.SH "AUTHORS"
.nf
See the Authors file included with this program.
.fi


@@ -1,100 +0,0 @@
.\" package-cleanup
.TH "urpm-package-cleanup" "1" "21 December 2011" "Denis Silakov" ""
.SH "NAME"
urpm-package-cleanup - find and fix rpmdb problems
.SH "SYNOPSIS"
\fBurpm-package-cleanup\fP [options]
.SH "DESCRIPTION"
.PP
\fBurpm-package-cleanup\fP is a program for cleaning up the locally-installed RPMs.
.PP
.SH "GENERAL OPTIONS"
.IP "\fB\-h, \-\-help\fP"
Help; display a help message and then quit\&.
.IP "\fB\-v, \-\-version\fP"
Report program version and exit.
.IP "\fB\-\-leaves\fP"
List leaf nodes in the local RPM database. Leaf nodes are RPMs that
are not relied upon by any other RPM.
.IP "\fB\-\-orphans\fP"
List installed packages which are not available from currently configured
repositories. This is identical to "urpmq --not-available".
.IP "\fB\-\-oldkernels\fP"
Remove old kernel and kernel-devel packages.
.IP "\fB\-\-problems\fP"
List dependency problems in the local RPM database.
.IP "\fB\-\-dupes\fP"
Scan for duplicates in the local RPM database.
.PP
.SH "LEAVES OPTIONS"
.IP "\fB\-\-all\fP"
When listing leaf nodes also list leaf nodes that are
not libraries.
.IP "\fB\-\-leaf\-regex\fP"
A package name that matches this regular expression will be considered a leaf.
.IP "\fB\-\-exclude\-devel\fP"
When listing leaf nodes do not list development packages.
.IP "\fB\-\-exclude\-bin\fP"
When listing leaf nodes do not list packages with files in bin directories.
.PP
.SH "OLDKERNELS OPTIONS"
.IP "\fB\-\-count <COUNT>\fP"
Number of kernel packages to keep on the system (default 2)
.IP "\fB\-\-keepdevel\fP"
Do not remove kernel-devel packages when removing kernels
.PP
.SH "DUPLICATE PACKAGE OPTIONS"
.IP "\fB\-\-cleandupes\fP"
Scan for duplicates in the local RPM database and clean out the
older versions.
.IP "\fB\-\-noscripts\fP"
Disable rpm scriptlets from running when cleaning duplicates
.PP
.SH "DEPENDENCY PROBLEMS OPTIONS"
.IP "\fB\-\-suggests\fP"
List missing suggestions of installed packages
.SH "EXAMPLES"
.IP "List all dependency problems:"
\fBurpm-package-cleanup --problems\fP
.IP "List all packages that are not in any configured repository:"
\fBurpm-package-cleanup --orphans\fP
.IP "Remove old kernels keeping 3 and leaving old kernel-devel packages installed:"
\fBurpm-package-cleanup --oldkernels --count=3 --keepdevel\fP
.PP
.IP "List all leaf packages with no files in a bin directory whose name begins with either 'perl' or 'python':"
\fBurpm-package-cleanup --leaves --exclude-bin --leaf-regex="^(perl)|(python)"\fP
.PP
.SH "FILES"
For some actions urpm-package-cleanup invokes urpmi and relies on its
configuration file:
.PP
.nf
/etc/urpmi/urpmi.cfg
.fi
.PP
.SH "EXIT CODES"
.IP \fB0\fP
Completed successfully
.IP \fB1\fP
Script execution error (wrong option, insufficient permissions, etc.)
.IP \fB2\fP
Unsatisfied dependencies detected
.IP \fB3\fP
Unsatisfied soft dependencies detected
.IP \fB100\fP
Illegal option value
.PP
.SH "SEE ALSO"
.nf
.I urpmi.cfg (1)
.fi
.PP
.SH "AUTHORS"
.nf
See the Authors file included with this program.
.fi


@@ -1,77 +0,0 @@
.\" urpm-repoclosure
.TH "urpm-repoclosure" "1" "21 February 2012" "Andrey Ponomarenko" ""
.SH "NAME"
urpm-repoclosure - check closure of a set of RPM packages
.SH "SYNOPSIS"
\fBurpm-repoclosure\fP [options]
.SH "DESCRIPTION"
.PP
\fBurpm-repoclosure\fP is a tool for checking the closure of a set of RPM packages
.PP
.SH "GENERAL OPTIONS"
.IP "\fB\-h, \-help\fP"
Print this help.
.IP "\fB\-v, \-version\fP"
Print version information.
.IP "\fB\-hdlist <path>\fP"
Path or URL of HDlist (synthesis) to check.
.IP "\fB\-d, \-dir <path>\fP"
The directory with RPM packages to check.
.IP "\fB\-l, \-list <path>\fP"
The list of packages to check.
.IP "\fB\-add, \-update <path>\fP"
The directory with RPM packages that should
be added to the repository or updated.
.IP "\fB\-file\-deps <path>\fP"
Read file\-deps to ignore some unresolved
dependencies.
.IP "\fB\-s, \-static\fP"
Check statically if all required dependencies are
satisfied by provided dependencies in the set of
RPM packages.
.IP "\fB\-dynamic\fP"
Install a set of RPM packages to the local chroot
and check if extra packages were installed.
.IP "\fB\-r, \-check\-release\fP"
Check installation media (DVD).
.IP "\fB\-sign, \-check\-signature\fP"
Validate package signatures.
.IP "\fB\-noclean\fP"
Do not clean urpmi cache.
.IP "\fB\-root <path>\fP"
Where to install packages.
Default: /tmp/...
.PP
.SH "EXIT CODES"
.IP "0 \- Suceess. The tool has run without any errors and has not discover any issues."
.IP "non\-zero \- Failed or the tool has run with errors. In particular:"
.IP "1 \- Failed to run the tool"
.IP "2 \- Discovered dependency problems"
.SH "EXAMPLES"
.IP "Run a static test using an hdlist:"
\fBurpm-repoclosure --hdlist=hdlist.txt\fP
\fBurpm-repoclosure --hdlist=http://mirror.yandex.ru/mandriva/.../synthesis.hdlist.cz\fP
.IP "Check closure of a local set of RPMs:"
\fBurpm-repoclosure --dir=rpms/ --static\fP
.IP "Check a set of RPMs, specified in list.txt:"
\fBurpm-repoclosure --list=list.txt --dynamic\fP
.PP
.SH "AUTHORS"
.nf
See the Authors file included with this program.
.fi


@@ -1,49 +0,0 @@
.\" urpm-repodiff
.TH "urpm-repodiff" "1" "21 December 2011" "Vladimir Testov" "Mandriva Package Management"
.SH "NAME"
urpm-repodiff - diff for urpmi repositories
.SH "SYNOPSIS"
\fBurpm-repodiff\fP [options] --old old_repo_baseurl [old_repo_baseurl ...] --new new_repo_baseurl [new_repo_baseurl ...]
.SH "DESCRIPTION"
.PP
\fBurpm-repodiff\fP is a program which will list differences between two sets of
repositories.
.PP
.SH "GENERAL OPTIONS"
.IP "\fB\-\-old, -o\fP"
"Old" repository or list of "old" repositories if several present.
.IP "\fB\-\-new, -n\fP"
"New" repository or list of "new" repositories if several present.
.IP "\fB\-\-quiet, -q\fP"
Quiet mode: hide service messages.
.PP
.SH "USUAL OUTPUT OPTIONS"
.IP "\fB\-\-size, -s\fP"
Show differences in package sizes.
.IP "\fB\-\-simple\fP"
Simple output format.
.IP "\fB\-\-changelog, -s\fP"
Show changelog difference.
.PP
.SH "HTML OUTPUT OPTION"
.IP "\fB\-\-html\fP"
Output the difference as an HTML page. When this option is used, the \fB--size, -s\fP, \fB--simple\fP and \fB--changelog\fP options are ignored.
If the \fB--output, -o\fP option is not present, the page is written to the file 'repodiff.html' in the current directory.
.PP
.SH "OUTPUT OPTION"
.IP "\fB\-\-output, -o OUTPUT_FILE\fP"
Redirect standard output to OUTPUT_FILE.
.SH "EXAMPLES"
.IP "Compare packages in two local repositories:"
\fBurpm-repodiff --old /tmp/repo-old --new /tmp/repo-new\fP
.IP "Compare packages in two remote repositories, and two local ones:"
\fBurpm-repodiff --old http://example.com/repo1-old --old /tmp/repo-old --new http://example.com/repo1-new --new /tmp/repo-new\fP
.IP "Compare packages, use simple report format (no chanlog difference), but report difference in package size:"
\fBurpm-repodiff --old /tmp/repo-old --new /tmp/repo-new --size --simple\fP
.PP
.PP
.SH "AUTHORS"
.nf
See the Authors file included with this program.
.fi

View file

@ -1,106 +0,0 @@
.\" urpm-repograph
.TH "urpm-repograph" "1" "21 December 2011" "Vladimir Testov" "Mandriva Package Management"
.SH "NAME"
urpm-repograph - build dependency graph of repository
.SH "SYNOPSIS"
\fBurpm-repograph\fP [options] REPOSITORY
.SH "DESCRIPTION"
.PP
\fBurpm-repograph\fP is a tool for generating a dependency graph of REPOSITORY packages.
The output is in the DOT graph-description language. Along the way it can check for
missing dependencies, track cross-repository dependencies, search for and display dependency cycles
(A needs B, B needs C, C needs A), and search for and display alternatives ("word" is provided by
A, B and C). With the \fB--whatrequires\fP and \fB--requires-recursive\fP options the tool can
select only the part of the graph that is provided (in the recursive sense) by PKG or
that requires (also in the recursive sense) PKG. Note that warnings about repository mismatches
are not shown in the latter case.
.PP
.SH "GENERAL OPTIONS"
.IP "\fBREPOSITORY\fP"
The only required argument: a URL (starting with "http://" or "ftp://")
or a path (absolute or relative, optionally starting with "file://")
to the repository, i.e. a URL or path that contains packages and includes the directory "media_info",
which is the only thing this tool actually reads. You can also download the files
"synthesis.hdlist.cz" and "files.xml.lzma" separately into a directory such as
"./A/media_info" and run the tool on "./A": "urpm-repograph ./A";
"files.xml.lzma" is needed only if the \fB--file, -f\fP option is present.
.IP "\fB\-\-cross, -c CROSS_REPO [CROSS_REPO ...]\fP"
Check \fBCROSS_REPO(s)\fP for cross-repository dependencies. Note that dependencies inside \fBCROSS_REPO(s)\fP
(PKG1 from CROSS_REPO(s) needs PKG2 from CROSS_REPO(s)) are not shown, while dependencies inside \fBREPOSITORY\fP are.
.IP "\fB\-\-quiet, -q\fP"
Hide service messages (all kinds of status messages).
Note that you cannot use \fB--quiet, -q\fP together with \fB--nograph, -n\fP unless \fB--verbose, -v\fP is also given.
.IP "\fB\-\-verbose, -v\fP"
Show extended, more detailed information, including warnings
about missing dependencies, self-dependencies and cross-repository dependencies.
Note that you cannot use \fB--quiet, -q\fP together with \fB--nograph, -n\fP unless \fB--verbose, -v\fP is also given.
.IP "\fB\-\-requires, -r\fP"
This description also covers \fB--suggests, -s\fP. These two options declare for which types of dependencies
the graph should be built and which dependencies should be checked and processed:
\fB--requires, -r\fP selects required dependencies, as in an RPM spec file; \fB--suggests, -s\fP selects suggested dependencies, as in an RPM spec file.
If neither option is present, the tool works as if \fB--requires, -r\fP were given.
.IP "\fB\-\-suggest, -s\fP"
See \fB--requires, -r\fP description.
.IP "\fB\-\-file, -f\fP"
Process file dependencies. If not given, the tool skips both checking and processing
dependencies on files. If given, "files.xml.lzma" must be present.
.IP "\fB\-\-unprovided, -u\fP"
Show unprovided dependencies: entries in the requires (and/or suggests) sections of synthesis.hdlist that nothing provides are shown in the final graph.
Do not use this with the \fB--broken, -b\fP option: an error is reported and the run is terminated.
\fB--broken, -b\fP already does what \fB--unprovided, -u\fP does, so there is no point in using the two together.
.PP
.SH "PACKAGE SPECIFIC OPTIONS"
Only one option in this group can be present. PKG is either a package name (e.g. urpm-tools)
or a full package name (with version, release etc). Note that if an option from this group is
present, PKG is checked first: if there is no package named PKG in \fBREPOSITORY\fP and
(when the \fB--cross, -c\fP option is present) no package named PKG in \fBCROSS_REPO(s)\fP
(or no cross-repository dependencies to or from PKG are actually present in \fBCROSS_REPO(s)\fP),
then the sub-graph is not selected; a warning is shown and the tool stops.
Also note that repository-mismatch warnings are not shown in this mode (even if \fB--verbose, -v\fP is present).
If \fB--verbose, -v\fP is present, the list of selected packages, along with some types of warnings, is written to STDOUT.
.IP "\fB\-\-requires-recursive PKG\fP"
Search for packages that are required by package PKG
(in the recursive sense: for example, if PKG needs PKG2 and PKG2 needs PKG3,
then PKG3 is also checked and processed, and so on).
.IP "\fB\-\-whatrequires PKG\fP"
Search for packages that require package PKG
(in the recursive sense: for example, if PKG is needed by PKG2 and PKG2 is needed by PKG3,
then PKG3 is also checked and processed, and so on).
.PP
.SH "ALTERNATIVE TASK OPTIONS"
Only one option from this group can be present. Note that \fB--requires-recursive\fP and \fB--whatrequires\fP are processed first (if present);
so, for example, you can select the subgraph connected with a specific package and then select the subgraph of broken packages from it.
If \fB--loops, -l\fP, \fB--alternatives, -a\fP or \fB--broken, -b\fP is present, a different graph is shown and additional algorithms are run.
.IP "\fB\-\-loops, -l\fP"
Search for all simple loops of cycled dependencies.
.IP "\fB\-\-alternatives, -a\fP"
Search for alternative packages providing the same feature.
.IP "\fB\-\-broken, -b\fP"
Search for broken packages and those that depend on broken packages.
.IP "\fB\-\-different, -d\fP"
Output each loop or each alternative to a separate file. \fBOUTPUT_FILE\fP is treated as a directory name for the new files in that case.
Ignored if both \fB--loops, -l\fP and \fB--alternatives, -a\fP are absent, and also if \fB--output, -o\fP is not present.
.PP
.SH "OUTPUT OPTIONS"
Only one option in this group can be present. If none is specified, the graph is written to STDOUT.
.IP "\fB\-\-output, -o OUTPUT_FILE\fP"
Output graph to a specified file OUTPUT_FILE. OUTPUT_FILE is treated as directory name if \fB--different, -d\fP option is present.
.IP "\fB\-\-nograph, -n\fP"
Do not output graph.
Note that you cannot use \fB--quiet, -q\fP together with \fB--nograph, -n\fP unless \fB--verbose, -v\fP is also given.
.PP
.SH "EXAMPLES"
.IP "Analyze local repository and output graph to file './full-graph.dot', show service messages:"
\fBurpm-repograph /tmp/repo -v -o ./full-graph.dot\fP
.IP "Analyze external repository, hide service messages, show warnings and save them into 'warnings.txt':"
\fBurpm-repograph http://example.com/repo -qvn > warnings.txt\fP
.IP "Analyze two external repository - 'http://example.com/main/release' and additional 'http://example.com/contrib/release'. Select only packages that requires 'example-pkg' (in recursive sense). Search for loops in this group of packages and output every loop in different file in directory '/tmp/tmp-forever':"
\fBurpm-repograph http://example.com/main/release -c http://example.com/contrib/release --whatrequires example-pkg -qad -o /tmp/tmp-forever/\fP
.PP
.PP
.SH "AUTHORS"
.nf
See the Authors file included with this program.
.fi

View file

@ -1,56 +0,0 @@
.\" urpm-repomanage
.TH "urpm-repomanage" "1" "21 December 2011" "Denis Silakov" "Mandriva Package Management"
.SH "NAME"
urpm-repomanage - report newest and oldest packages in a given set
.SH "SYNOPSIS"
\fBurpm-repomanage\fP [-h] [-o | -n] [-r] [-s] [-k KEEP] [-c] [-q | -V] path
.SH "DESCRIPTION"
.PP
\fBurpm-repomanage\fP is a program that scans a directory of RPM packages and reports the newest or oldest packages.
.PP
.SH "ARGUMENTS"
.IP "\fBpath\fP"
Path to a directory with RPM packages. The tool traverses the directory recursively
and analyzes all RPM packages found.
.PP
.SH "OPTIONS"
.IP "\fB\-\-help, -h\fP"
show help message and exit
.IP "\fB\-\-old, -o\fP"
print the older packages
.IP "\fB\-\-new, -n\fP"
print the newest packages (this is the default behavior)
.IP "\fB\-\-remove-old, -r\fP"
remove older packages
.IP "\fB\-\-space, -s\fP"
space separated output, not newline
.IP "\fB\-\-keep KEEP, -k KEEP\fP"
number of newest packages to keep; defaults to 1
.IP "\fB\-\-nocheck, -c\fP"
do not check package payload signatures/digests
.IP "\fB\-\-quiet, -q\fP"
be completely quiet
.IP "\fB\-\-verbose, -V\fP"
be verbose - say which packages are decided to be old
and why (this info is dumped to STDERR)
.SH "EXIT CODES"
.IP "0 \- Suceess. The tool has run without any errors and old packages were not found."
.IP "1 \- No packages were found"
.IP "2 \- Illegal option or missing argument"
.IP "3 \- The tool has run successfully and detected old packages"
.SH "EXAMPLES"
.IP "Scan local directory with packages and for every package name print only file with the latest version:"
\fBurpm-repomanage /tmp/repo\fP
.IP "Scan local directory with packages, for every package detect two latest versions and print older versions. For every old package, print names of newer packages:"
\fBurpm-repomanage --old -V -k 2 /tmp/repo\fP
.IP "Remove older packages in a local directory without printing anything to terminal:"
\fBurpm-repomanage --remove-old -q /tmp/repo\fP
.PP
.PP
.SH "AUTHORS"
.nf
See the Authors file included with this program.
.fi

View file

@ -1,69 +0,0 @@
.\" urpm-reposync
.TH "urpm-reposync" "1" "21 December 2011" "Anton Kirilenko" ""
.SH "NAME"
urpm-reposync - synchronize packages on your computer with the repository
.SH "SYNOPSIS"
\fBurpm-reposync\fP [options]
.SH "DESCRIPTION"
.PP
\fBurpm-reposync\fP is used to synchronize a set of packages on the local computer with the remote repository
.PP
.SH "OPTIONS"
.IP "\fB\-h, \-\-help\fP"
Help; display a help message and then quit.
.IP "\fB\-v, \-\-verbose\fP"
Verbose (print additional info)
.IP "\fB\-q, \-\-quiet\fP"
Quiet operation
.IP "\fB\-\-include\-media, \-\-media\fP"
Use only selected URPM media
.IP "\fB\-\-exclude\-media\fP"
Do not use selected URPM media
.IP "\fB\-a, \-\-auto\fP"
Do not ask questions, just do it!
.IP "\fB\-\-include-media, \-\-media\fP"
Use only selected URPM media
.IP "\fB\-\-exclude-media\fP"
Do not use selected URPM media
.IP "\fB\-v, \-\-verbose\fP"
Verbose (print additional info)
.IP "\fB\-q, \-\-quiet\fP"
Quiet operation. Senseless without --auto
.IP "\fB\-a, \-\-auto\fP"
Do not ask questions, just do it!
.IP "\fB\-p, \-\-printonly\fP"
Only print the list of actions to be done and do nothing more!
.IP "\fB\-d, \-\-download\fP"
Only download the rpm files, but install or remove nothing.
.IP "\fB\-n, \-\-noremove\fP"
Do not remove packages at all. If some installed package prevents another package from being updated, do not update it.
.IP "\fB\-c, \-\-check\fP"
Download packages and check whether they can be installed on your system, but do not install them.
.IP "\fB\-k, \-\-nokernel\fP"
Do nothing with kernels.
.IP "\fB\-\-runselftests\fP"
Run self-tests and exit.
.IP "\fB\-\-detailed\fP"
Show detailed information about the packages that are going to be removed or installed (and why this has to be done)
.SH "EXIT CODES"
.IP \fB0\fP
Completed successfully
.IP \fB1\fP
Error calling an external command (urpmq, rpm, etc.). The command's output is printed before exiting
.IP \fB2\fP
Incorrect command line option combination; for example, trying to execute with --quiet but without --auto
.IP \fB3\fP
Dependencies were resolved incorrectly. Please contact the tool developer and provide the full program output.
.IP \fB4\fP
Inconsistent repository. Please contact the distribution maintainers and show them the output.
.IP \fB5\fP
Error while downloading rpm file.
.PP
.SH "AUTHORS"
.nf
See the Authors file included with this program.
.fi

View file

@ -1,62 +0,0 @@
#!/usr/bin/python2.7
# -*- coding: UTF-8 -*-
import os, sys
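# This helper regenerates urpm-tools.pot with xgettext, merges it into
# every .po file found under the current directory with msgmerge, and
# compiles the matching .mo files with msgfmt. With --list, status
# messages go to stderr and the names of the updated .po files are
# printed to stdout.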
quiet = False
if '--list' in sys.argv:
    quiet = True
def qprint(text):
    if quiet:
        sys.stderr.write(text + '\n')
        sys.stderr.flush()
        return
    print text
def dumb(cmd):
    if quiet:
        return cmd + ' 1>&2'
    else:
        return cmd
walkres = os.walk('.')
fls = []
pos = []
for path, dirs, files in walkres:
    for file in files:
        p = os.path.join(path, file)
        if p.endswith(".py"):
            fls.append(p)
        if p.endswith(".pl"):
            fls.append(p)
        if p.endswith(".po"):
            pos.append(p)
if not fls:
    qprint("No python modules found!")
    exit(1)
FN = 'urpm-tools.pot'
qprint("Generating " + FN)
cmd = "xgettext -d urpm-tools -o " + FN + ' -c --no-wrap ' + ' '.join(fls)
os.system(dumb(cmd))
LIST_OUT = []
for po in pos:
    qprint("Updating " + po)
    LIST_OUT.append(po.split('/')[2])
    cmd = "msgmerge --no-wrap -U " + po + ' ' + FN
    os.system(dumb(cmd))
    mo = po[:-2] + 'mo'
    qprint("Compiling " + po)
    cmd = "msgfmt -o " + mo + ' ' + po
    os.system(dumb(cmd))
if quiet:
    print ' '.join(LIST_OUT)

View file

@ -1,339 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
675 Mass Ave, Cambridge, MA 02139, USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) 19yy <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) 19yy name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Library General
Public License instead of this License.

View file

@ -1,27 +0,0 @@
PYTHON=python
PACKAGE = $(shell basename `pwd`)
PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')
PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix')
PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER)
SITEDIR = $(PYLIBDIR)/site-packages
PKGDIR = $(SITEDIR)/$(PACKAGE)
all:
	echo "Nothing to do"
clean:
	rm -f *.pyc *.pyo *~
install:
	mkdir -p $(DESTDIR)/$(PKGDIR)
	#copy urpmgraph dir and set permissions for files and folders
	cp -rf . $(DESTDIR)/$(PKGDIR)
	#don't copy these files
	rm -f $(DESTDIR)/$(PKGDIR)/Makefile
	rm -f $(DESTDIR)/$(PKGDIR)/COPYING
	find $(DESTDIR)/$(PKGDIR) -type f |xargs -l chmod 644 $1
	find $(DESTDIR)/$(PKGDIR) -type d |xargs -l chmod 775 $1
	#compile python sources
	python -m compileall $(DESTDIR)/$(PKGDIR)

View file

@ -1,10 +0,0 @@
import rpm5utils.urpmgraphs
from rpm5utils.urpmgraphs import *
class Rpm5UtilsError(Exception):
    """ Exception thrown for anything rpm5utils related. """
    def __init__(self, args=None):
        Exception.__init__(self, args)

View file

@ -1,423 +0,0 @@
import os
# dict mapping arch -> ( multicompat, best personality, biarch personality )
multilibArches = { "x86_64": ( "athlon", "x86_64", "athlon" ),
"sparc64v": ( "sparcv9v", "sparcv9v", "sparc64v" ),
"sparc64": ( "sparcv9", "sparcv9", "sparc64" ),
"ppc64": ( "ppc", "ppc", "ppc64" ),
"s390x": ( "s390", "s390x", "s390" ),
}
arches = {
# ia32
"athlon": "i686",
"i686": "i586",
"geode": "i586",
"i586": "i486",
"i486": "i386",
"i386": "noarch",
# amd64
"x86_64": "athlon",
"amd64": "x86_64",
"ia32e": "x86_64",
# ppc
"ppc64pseries": "ppc64",
"ppc64iseries": "ppc64",
"ppc64": "ppc",
"ppc": "noarch",
# s390{,x}
"s390x": "s390",
"s390": "noarch",
# sparc
"sparc64v": "sparcv9v",
"sparc64": "sparcv9",
"sparcv9v": "sparcv9",
"sparcv9": "sparcv8",
"sparcv8": "sparc",
"sparc": "noarch",
# alpha
"alphaev7": "alphaev68",
"alphaev68": "alphaev67",
"alphaev67": "alphaev6",
"alphaev6": "alphapca56",
"alphapca56": "alphaev56",
"alphaev56": "alphaev5",
"alphaev5": "alphaev45",
"alphaev45": "alphaev4",
"alphaev4": "alpha",
"alpha": "noarch",
# arm
"armv7l": "armv6l",
"armv6l": "armv5tejl",
"armv5tejl": "armv5tel",
"armv5tel": "noarch",
# super-h
"sh4a": "sh4",
"sh4": "noarch",
"sh3": "noarch",
#itanium
"ia64": "noarch",
}
def legitMultiArchesInSameLib(arch=None):
# this is completely crackrock - if anyone has a better way I
# am all ears
arch = getBestArch(arch)
if isMultiLibArch(arch):
arch = getBaseArch(myarch=arch)
results = [arch]
if arch == 'x86_64' or arch.startswith('sparcv9'):
for (k, v) in arches.items():
if v == arch:
results.append(k)
return results
def canCoinstall(arch1, arch2):
"""Take two arches and return True if it is possible that they can be
installed together with the same nevr. Ex: arch1=i386 and arch2=i686 then
it will return False. arch1=i386 and arch2=x86_64 will return True.
It does not determine whether or not the arches make any sense. Just whether
they could possibly install w/o conflict"""
# if both are a multilib arch then we can't coinstall (x86_64, ia32e)
# if both are not multilibarches then we can't coinstall (i386, i686)
if 'noarch' in [arch1, arch2]: # noarch can never coinstall
return False
if isMultiLibArch(arch=arch1) == isMultiLibArch(arch=arch2):
return False
# this section keeps arch1=x86_64 arch2=ppc from returning True
if arch1 in getArchList(arch2) or arch2 in getArchList(arch1):
return True
return False
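# Illustrative, restating the docstring's own examples:
#   canCoinstall('i386', 'i686')   -> False
#   canCoinstall('i386', 'x86_64') -> True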
# this computes the difference between myarch and targetarch
def archDifference(myarch, targetarch):
if myarch == targetarch:
return 1
if myarch in arches:
ret = archDifference(arches[myarch], targetarch)
if ret != 0:
return ret + 1
return 0
return 0
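# Illustrative, assuming the arches table above:
#   archDifference('i686', 'i386') == 4 (three hops i686->i586->i486->i386,
#   plus one), and 0 when targetarch is unreachable from myarch.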
def score(arch):
return archDifference(canonArch, arch)
def isMultiLibArch(arch=None):
"""returns true if arch is a multilib arch, false if not"""
if arch is None:
arch = canonArch
if arch not in arches: # or we could check if it is noarch
return 0
if arch in multilibArches:
return 1
if arches[arch] in multilibArches:
return 1
return 0
def getBestArchFromList(archlist, myarch=None):
"""
return the best arch from the list for myarch if - myarch is not given,
then return the best arch from the list for the canonArch.
"""
if len(archlist) == 0:
return None
if myarch is None:
myarch = canonArch
mybestarch = getBestArch(myarch)
bestarch = getBestArch(myarch)
if bestarch != myarch:
bestarchchoice = getBestArchFromList(archlist, bestarch)
if bestarchchoice != None and bestarchchoice != "noarch":
return bestarchchoice
thisarch = archlist[0]
for arch in archlist[1:]:
val1 = archDifference(myarch, thisarch)
val2 = archDifference(myarch, arch)
if val1 == 0 and val2 == 0:
continue
if val1 < val2:
if val1 == 0:
thisarch = arch
if val2 < val1:
if val2 != 0:
thisarch = arch
if val1 == val2:
pass
# thisarch should now be our bestarch
# one final check to make sure we're not returning a bad arch
val = archDifference(myarch, thisarch)
if val == 0:
return None
return thisarch
def getArchList(thisarch=None):
# this returns a list of archs that are compatible with arch given
if not thisarch:
thisarch = canonArch
archlist = [thisarch]
while thisarch in arches:
thisarch = arches[thisarch]
archlist.append(thisarch)
# hack hack hack
# sparc64v is also sparc64 compat
if archlist[0] == "sparc64v":
archlist.insert(1,"sparc64")
# if we're a weirdo arch - add noarch on there.
if len(archlist) == 1 and archlist[0] == thisarch:
archlist.append('noarch')
return archlist
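# Illustrative, assuming the arches table above:
#   getArchList('i686') -> ['i686', 'i586', 'i486', 'i386', 'noarch']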
def _try_read_cpuinfo():
""" Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not
mounted). """
try:
lines = open("/proc/cpuinfo", "r").readlines()
return lines
except:
return []
def getCanonX86Arch(arch):
#
if arch == "i586":
for line in _try_read_cpuinfo():
if line.startswith("model name") and line.find("Geode(TM)") != -1:
return "geode"
return arch
# only athlon vs i686 isn't handled with uname currently
if arch != "i686":
return arch
# if we're i686 and AuthenticAMD, then we should be an athlon
for line in _try_read_cpuinfo():
if line.startswith("vendor") and line.find("AuthenticAMD") != -1:
return "athlon"
# i686 doesn't guarantee cmov, but we depend on it
elif line.startswith("flags") and line.find("cmov") == -1:
return "i586"
return arch
def getCanonPPCArch(arch):
# FIXME: should I do better handling for mac, etc?
if arch != "ppc64":
return arch
machine = None
for line in _try_read_cpuinfo():
if line.find("machine") != -1:
machine = line.split(':')[1]
break
if machine is None:
return arch
if machine.find("CHRP IBM") != -1:
return "ppc64pseries"
if machine.find("iSeries") != -1:
return "ppc64iseries"
return arch
def getCanonSPARCArch(arch):
# Deal with sun4v, sun4u, sun4m cases
SPARCtype = None
for line in _try_read_cpuinfo():
if line.startswith("type"):
SPARCtype = line.split(':')[1]
break
if SPARCtype is None:
return arch
if SPARCtype.find("sun4v") != -1:
if arch.startswith("sparc64"):
return "sparc64v"
else:
return "sparcv9v"
if SPARCtype.find("sun4u") != -1:
if arch.startswith("sparc64"):
return "sparc64"
else:
return "sparcv9"
if SPARCtype.find("sun4m") != -1:
return "sparcv8"
return arch
def getCanonX86_64Arch(arch):
if arch != "x86_64":
return arch
vendor = None
for line in _try_read_cpuinfo():
if line.startswith("vendor_id"):
vendor = line.split(':')[1]
break
if vendor is None:
return arch
if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1:
return "amd64"
if vendor.find("GenuineIntel") != -1:
return "ia32e"
return arch
def getCanonArch(skipRpmPlatform = 0):
if not skipRpmPlatform and os.access("/etc/rpm/platform", os.R_OK):
try:
f = open("/etc/rpm/platform", "r")
line = f.readline()
f.close()
(arch, vendor, opersys) = line.split("-", 2)
return arch
except:
pass
arch = os.uname()[4]
if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"):
return getCanonX86Arch(arch)
if arch.startswith("ppc"):
return getCanonPPCArch(arch)
if arch.startswith("sparc"):
return getCanonSPARCArch(arch)
if arch == "x86_64":
return getCanonX86_64Arch(arch)
return arch
canonArch = getCanonArch()
# this gets you the "compat" arch of a biarch pair
def getMultiArchInfo(arch = canonArch):
if arch in multilibArches:
return multilibArches[arch]
if arch in arches and arches[arch] != "noarch":
return getMultiArchInfo(arch = arches[arch])
return None
# get the best usual userspace arch for the arch we're on. this is
# our arch unless we're on an arch that uses the secondary as its
# userspace (eg ppc64, sparc64)
def getBestArch(myarch=None):
if myarch:
arch = myarch
else:
arch = canonArch
if arch.startswith("sparc64"):
arch = multilibArches[arch][1]
if arch.startswith("ppc64"):
arch = 'ppc'
return arch
def getBaseArch(myarch=None):
"""returns 'base' arch for myarch, if specified, or canonArch if not.
base arch is the arch before noarch in the arches dict if myarch is not
a key in the multilibArches."""
if not myarch:
myarch = canonArch
if myarch not in arches: # this is dumb, but <shrug>
return myarch
if myarch.startswith("sparc64"):
return "sparc"
elif myarch.startswith("ppc64"):
return "ppc"
elif myarch.startswith("arm"):
return "arm"
if isMultiLibArch(arch=myarch):
if myarch in multilibArches:
return myarch
else:
return arches[myarch]
if myarch in arches:
basearch = myarch
value = arches[basearch]
while value != 'noarch':
basearch = value
value = arches[basearch]
return basearch
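# Illustrative, assuming the tables above:
#   getBaseArch('i686')   -> 'i386'
#   getBaseArch('x86_64') -> 'x86_64' (multilib arches return themselves)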
class ArchStorage(object):
"""class for keeping track of what arch we have set and doing various
permutations based on it"""
def __init__(self):
self.canonarch = None
self.basearch = None
self.bestarch = None
self.compatarches = []
self.archlist = []
self.multilib = False
self.setup_arch()
def setup_arch(self, arch=None, archlist_includes_compat_arch=True):
if arch:
self.canonarch = arch
else:
self.canonarch = getCanonArch()
self.basearch = getBaseArch(myarch=self.canonarch)
self.archlist = getArchList(thisarch=self.canonarch)
if not archlist_includes_compat_arch: # - do we bother including i686 and below on x86_64
limit_archlist = []
for a in self.archlist:
if isMultiLibArch(a) or a == 'noarch':
limit_archlist.append(a)
self.archlist = limit_archlist
self.bestarch = getBestArch(myarch=self.canonarch)
self.compatarches = getMultiArchInfo(arch=self.canonarch)
self.multilib = isMultiLibArch(arch=self.canonarch)
self.legit_multi_arches = legitMultiArchesInSameLib(arch = self.canonarch)
def get_best_arch_from_list(self, archlist, fromarch=None):
if not fromarch:
fromarch = self.canonarch
return getBestArchFromList(archlist, myarch=fromarch)
def score(self, arch):
return archDifference(self.canonarch, arch)
def get_arch_list(self, arch):
if not arch:
return self.archlist
return getArchList(thisarch=arch)

View file

@ -1,455 +0,0 @@
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2003 Duke University
import rpm
import types
import gzip
import os
import sys
import locale
import signal
import rpm5utils.transaction
def rpmOutToStr(arg):
if type(arg) != types.StringType:
# and arg is not None:
arg = str(arg)
return arg
def compareEVR((e1, v1, r1), (e2, v2, r2)):
# return 1: a is newer than b
# 0: a and b are the same version
# -1: b is newer than a
if e1 is None:
e1 = '0'
else:
e1 = str(e1)
if v1 is None:
v1 = '0'
else:
v1 = str(v1)
if r1 is None:
r1 = '0'
else:
r1 = str(r1)
if e2 is None:
e2 = '0'
else:
e2 = str(e2)
if v2 is None:
v2 = '0'
else:
v2 = str(v2)
if r2 is None:
r2 = '0'
else:
r2 = str(r2)
#~ print '%s, %s, %s vs %s, %s, %s' % (e1, v1, r1, e2, v2, r2)
rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
#~ print '%s, %s, %s vs %s, %s, %s = %s' % (e1, v1, r1, e2, v2, r2, rc)
return rc
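# e.g. compareEVR(('0', '1.0', '1'), ('0', '1.0', '2')) == -1:
# the second EVR is newer than the first.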
def compareDEVR((d1, e1, v1, r1), (d2, e2, v2, r2)):
# return 1: a is newer than b
# 0: a and b are the same version
# -1: b is newer than a
if d1 is None:
d1 = '0'
if d2 is None:
d2 = '0'
if d1 > d2:
return 1
if d1 < d2:
return -1
rc = compareEVR((e1, v1, r1), (e2, v2, r2))
return rc
def compareVerOnly(v1, v2):
"""compare version strings only using rpm vercmp"""
return compareEVR(('', v1, ''), ('', v2, ''))
def checkSig(ts, package):
"""Takes a transaction set and a package, check it's sigs,
return 0 if they are all fine
return 1 if the gpg key can't be found
return 2 if the header is in someway damaged
return 3 if the key is not trusted
return 4 if the pkg is not gpg or pgp signed"""
value = 0
currentflags = ts.setVSFlags(0)
fdno = os.open(package, os.O_RDONLY)
try:
hdr = ts.hdrFromFdno(fdno)
except rpm.error, e:
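# some rpm versions misspelled this message ("availaiable"),
# so both spellings are checked below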
if str(e) == "public key not availaiable":
value = 1
if str(e) == "public key not available":
value = 1
if str(e) == "public key not trusted":
value = 3
if str(e) == "error reading package header":
value = 2
else:
error, siginfo = getSigInfo(hdr)
if error == 101:
os.close(fdno)
del hdr
value = 4
else:
del hdr
try:
os.close(fdno)
except OSError, e: # if we're not opened, don't scream about it
pass
ts.setVSFlags(currentflags) # put things back like they were before
return value
def getSigInfo(hdr):
"""checks signature from an hdr hand back signature information and/or
an error code"""
locale.setlocale(locale.LC_ALL, 'C')
string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
siginfo = hdr.sprintf(string)
if siginfo != '(none)':
error = 0
sigtype, sigdate, sigid = siginfo.split(',')
else:
error = 101
sigtype = 'MD5'
sigdate = 'None'
sigid = 'None'
infotuple = (sigtype, sigdate, sigid)
return error, infotuple
def pkgTupleFromHeader(hdr):
"""return a pkgtuple (n, a, e, v, r) from a hdr object, converts
None epoch to 0, as well."""
name = hdr['name']
# RPMTAG_SOURCEPACKAGE: RPMTAG_SOURCERPM is not necessarily there for
# e.g. gpg-pubkeys imported with older rpm versions
# http://lists.baseurl.org/pipermail/yum/2009-January/022275.html
if hdr[rpm.RPMTAG_SOURCERPM] or hdr[rpm.RPMTAG_SOURCEPACKAGE] != 1:
arch = hdr['arch']
else:
arch = 'src'
ver = hdr['version']
rel = hdr['release']
epoch = hdr['epoch']
if epoch is None:
epoch = '0'
pkgtuple = (name, arch, epoch, ver, rel)
return pkgtuple
def pkgDistTupleFromHeader(hdr):
"""the same as above, but appends DistEpoch to the tuple"""
(n,a,e,v,r) = pkgTupleFromHeader(hdr)
d = hdr['distepoch']
if d is None:
d = '0'
pkgtuple = (n,a,e,v,r,d)
return pkgtuple
def rangeCheck(reqtuple, pkgtuple):
"""returns true if the package epoch-ver-rel satisfy the range
requested in the reqtuple:
ex: foo >= 2.1-1"""
# we only ever get here if we have a versioned prco
# nameonly shouldn't ever raise it
#(reqn, reqf, (reqe, reqv, reqr)) = reqtuple
(n, a, e, v, r) = pkgtuple
return rangeCompare(reqtuple, (n, rpm.RPMSENSE_EQUAL, (e, v, r)))
def rangeCompare(reqtuple, provtuple):
"""returns true if provtuple satisfies reqtuple"""
(reqn, reqf, (reqe, reqv, reqr)) = reqtuple
(n, f, (e, v, r)) = provtuple
if reqn != n:
return 0
# unversioned satisfies everything
if not f or not reqf:
return 1
# and you thought we were done having fun
# if the requested release is left out then we have
# to remove release from the package prco to make sure the match
# is a success - ie: if the request is EQ foo 1:3.0.0 and we have
# foo 1:3.0.0-15 then we have to drop the 15 so we can match
if reqr is None:
r = None
if reqe is None:
e = None
if reqv is None: # just for the record if ver is None then we're going to segfault
v = None
# if we just require foo-version, then foo-version-* will match
if r is None:
reqr = None
rc = compareEVR((e, v, r), (reqe, reqv, reqr))
# does not match unless
if rc >= 1:
if (reqf & rpm.RPMSENSE_GREATER) or (reqf & rpm.RPMSENSE_EQUAL):
return 1
if rc == 0:
if (reqf & rpm.RPMSENSE_EQUAL):
return 1
if rc <= -1:
if (reqf & rpm.RPMSENSE_LESS) or (reqf & rpm.RPMSENSE_EQUAL):
return 1
return 0
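# Illustrative: a requirement ('foo', GREATER|EQUAL, ('0', '1.0', None)) is
# satisfied by a provide ('foo', EQUAL, ('0', '1.2', '1')), because the
# release is dropped from the provide when the requirement omits it.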
###########
# Title: Remove duplicates from a sequence
# Submitter: Tim Peters
# From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
def unique(s):
"""Return a list of the elements in s, but without duplicates.
For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
unique("abcabc") some permutation of ["a", "b", "c"], and
unique(([1, 2], [2, 3], [1, 2])) some permutation of
[[2, 3], [1, 2]].
For best speed, all sequence elements should be hashable. Then
unique() will usually work in linear time.
If not possible, the sequence elements should enjoy a total
ordering, and if list(s).sort() doesn't raise TypeError it's
assumed that they do enjoy a total ordering. Then unique() will
usually work in O(N*log2(N)) time.
If that's not possible either, the sequence elements must support
equality-testing. Then unique() will usually work in quadratic
time.
"""
n = len(s)
if n == 0:
return []
# Try using a dict first, as that's the fastest and will usually
# work. If it doesn't work, it will usually fail quickly, so it
# usually doesn't cost much to *try* it. It requires that all the
# sequence elements be hashable, and support equality comparison.
u = {}
try:
for x in s:
u[x] = 1
except TypeError:
del u # move on to the next method
else:
return u.keys()
# We can't hash all the elements. Second fastest is to sort,
# which brings the equal elements together; then duplicates are
# easy to weed out in a single pass.
# NOTE: Python's list.sort() was designed to be efficient in the
# presence of many duplicate elements. This isn't true of all
# sort functions in all languages or libraries, so this approach
# is more effective in Python than it may be elsewhere.
try:
t = list(s)
t.sort()
except TypeError:
del t # move on to the next method
else:
assert n > 0
last = t[0]
lasti = i = 1
while i < n:
if t[i] != last:
t[lasti] = last = t[i]
lasti += 1
i += 1
return t[:lasti]
# Brute force is all that's left.
u = []
for x in s:
if x not in u:
u.append(x)
return u
def splitFilename(filename):
"""
Pass in a standard style rpm fullname
Return a name, version, release, epoch, arch, e.g.::
foo-1.0-1.i386.rpm returns foo, 1.0, 1, '', i386
1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
"""
if filename[-4:] == '.rpm':
filename = filename[:-4]
archIndex = filename.rfind('.')
arch = filename[archIndex+1:]
relIndex = filename[:archIndex].rfind('-')
rel = filename[relIndex+1:archIndex]
verIndex = filename[:relIndex].rfind('-')
ver = filename[verIndex+1:relIndex]
epochIndex = filename.find(':')
if epochIndex == -1:
epoch = ''
else:
epoch = filename[:epochIndex]
name = filename[epochIndex + 1:verIndex]
return name, ver, rel, epoch, arch
def rpm2cpio(fdno, out=sys.stdout, bufsize=2048):
"""Performs roughly the equivalent of rpm2cpio(8).
Reads the package from fdno, and dumps the cpio payload to out,
using bufsize as the buffer size."""
ts = rpm5utils.transaction.initReadOnlyTransaction()
hdr = ts.hdrFromFdno(fdno)
del ts
compr = hdr[rpm.RPMTAG_PAYLOADCOMPRESSOR] or 'gzip'
#XXX FIXME
#if compr == 'bzip2':
# TODO: someone implement me!
#el
if compr != 'gzip':
raise rpm5utils.Rpm5UtilsError, \
'Unsupported payload compressor: "%s"' % compr
f = gzip.GzipFile(None, 'rb', None, os.fdopen(fdno, 'rb', bufsize))
while 1:
tmp = f.read(bufsize)
if tmp == "": break
out.write(tmp)
f.close()
def formatRequire (name, version, flags):
'''
Return a human readable requirement string (ex. foobar >= 2.0)
@param name: requirement name (ex. foobar)
@param version: requirement version (ex. 2.0)
@param flags: binary flags ( 0010 = equal, 0100 = greater than, 1000 = less than )
'''
s = name
if flags and (type(flags) == type(0) or type(flags) == type(0L)): # Flag must be set and a int (or a long, now)
if flags & (rpm.RPMSENSE_LESS | rpm.RPMSENSE_GREATER |
rpm.RPMSENSE_EQUAL):
s = s + " "
if flags & rpm.RPMSENSE_LESS:
s = s + "<"
if flags & rpm.RPMSENSE_GREATER:
s = s + ">"
if flags & rpm.RPMSENSE_EQUAL:
s = s + "="
if version:
s = "%s %s" %(s, version)
return s
def flagToString(flags):
flags = flags & 0xf
if flags == 0: return None
elif flags == 2: return 'LT'
elif flags == 4: return 'GT'
elif flags == 8: return 'EQ'
elif flags == 10: return 'LE'
elif flags == 12: return 'GE'
return flags
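# Illustrative examples for the parser below:
#   stringToVersion('1:2.0-3') -> ('1', '2.0', '3')
#   stringToVersion('2.0')     -> ('0', '2.0', None)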
def stringToVersion(verstring):
if verstring in [None, '']:
return (None, None, None)
i = verstring.find(':')
if i != -1:
try:
epoch = str(long(verstring[:i]))
except ValueError:
# look, garbage in the epoch field, how fun, kill it
epoch = '0' # this is our fallback, deal
else:
epoch = '0'
j = verstring.find('-')
if j != -1:
if verstring[i + 1:j] == '':
version = None
else:
version = verstring[i + 1:j]
release = verstring[j + 1:]
else:
if verstring[i + 1:] == '':
version = None
else:
version = verstring[i + 1:]
release = None
return (epoch, version, release)
def hdrFromPackage(ts, package):
"""hand back the rpm header or raise an Error if the pkg is fubar"""
try:
fdno = os.open(package, os.O_RDONLY)
except OSError, e:
raise rpm5utils.Rpm5UtilsError, 'Unable to open file'
# XXX: We should start a readonly ts here, so we don't get the options
# from the other one (sig checking, etc)
try:
hdr = ts.hdrFromFdno(fdno)
except rpm.error, e:
os.close(fdno)
raise rpm5utils.Rpm5UtilsError, "RPM Error opening Package"
if type(hdr) != rpm.hdr:
os.close(fdno)
raise rpm5utils.Rpm5UtilsError, "RPM Error opening Package (type)"
os.close(fdno)
return hdr
def checkSignals():
if hasattr(rpm, "checkSignals") and hasattr(rpm, 'signalsCaught'):
if rpm.signalsCaught([signal.SIGINT,
signal.SIGTERM,
signal.SIGPIPE,
signal.SIGQUIT,
signal.SIGHUP]):
sys.exit(1)

View file

@ -1,63 +0,0 @@
import rpm5utils.updates
import rpm5utils.arch
instlist = [('foo', 'i386', '0', '1', '1'),
('do', 'i386', '0', '2', '3'),
('glibc', 'i386', '0', '1', '1'),
('bar', 'noarch', '0', '2', '1'),
('baz', 'i686', '0', '2', '3'),
('baz', 'x86_64', '0','1','4'),
('foo', 'i686', '0', '1', '1'),
('cyrus-sasl','sparcv9', '0', '1', '1')]
availlist = [('foo', 'i686', '0', '1', '3'),
('do', 'noarch', '0', '3', '3'),
('do', 'noarch', '0', '4', '3'),
('foo', 'i386', '0', '1', '3'),
('foo', 'i686', '0', '1', '2'),
('glibc', 'i686', '0', '1', '2'),
('glibc', 'i386', '0', '1', '2'),
('bar', 'noarch', '0', '2', '2'),
('baz', 'noarch', '0', '2', '4'),
('baz', 'i686', '0', '2', '4'),
('baz', 'x86_64', '0', '1', '5'),
('baz', 'ppc', '0', '1', '5'),
('cyrus-sasl','sparcv9', '0', '1', '2'),
('cyrus-sasl','sparc64', '0', '1', '2'),]
obslist = {('quux', 'noarch', '0', '1', '3'): [('bar', None, (None, None, None))],
('quuxish', 'noarch', '0', '1', '3'):[('foo', 'GE', ('0', '1', None))],
}
up = rpm5utils.updates.Updates(instlist, availlist)
up.debug=1
up.exactarch=1
#up.myarch = 'sparc64'
up._is_multilib = rpm5utils.arch.isMultiLibArch(up.myarch)
up._archlist = rpm5utils.arch.getArchList(up.myarch)
print up._archlist
up._multilib_compat_arches = rpm5utils.arch.getMultiArchInfo(up.myarch)
up.doUpdates()
up.condenseUpdates()
for tup in up.updatesdict.keys():
    (old_n, old_a, old_e, old_v, old_r) = tup
    for (n, a, e, v, r) in up.updatesdict[tup]:
        print '%s.%s %s:%s-%s updated by %s.%s %s:%s-%s' % (old_n,
            old_a, old_e, old_v, old_r, n, a, e, v, r)
up.rawobsoletes = obslist
up.doObsoletes()
for tup in up.obsoletes.keys():
    (old_n, old_a, old_e, old_v, old_r) = tup
    for (n, a, e, v, r) in up.obsoletes[tup]:
        print '%s.%s %s:%s-%s obsoletes %s.%s %s:%s-%s' % (old_n,
            old_a, old_e, old_v, old_r, n, a, e, v, r)

View file

@ -1,192 +0,0 @@
#
# Client code for Update Agent
# Copyright (c) 1999-2002 Red Hat, Inc. Distributed under GPL.
#
# Adrian Likins <alikins@redhat.com>
# Some Edits by Seth Vidal <skvidal@phy.duke.edu>
#
# a couple of classes wrapping up transactions so that we
# can share transactions instead of creating new ones all over
#
import rpm
import miscutils
read_ts = None
ts = None
# wrapper/proxy class for rpm.Transaction so we can
# instrument it, etc easily
class TransactionWrapper:
def __init__(self, root='/'):
self.ts = rpm.TransactionSet(root)
self._methods = ['check',
'order',
'addErase',
'addInstall',
'run',
'pgpImportPubkey',
'pgpPrtPkts',
'problems',
'setFlags',
'setVSFlags',
'setProbFilter',
'hdrFromFdno',
'next',
'clean']
self.tsflags = []
self.open = True
def __del__(self):
# Automatically close the rpm transaction when the reference is lost
self.close()
def close(self):
if self.open:
self.ts.closeDB()
self.ts = None
self.open = False
def dbMatch(self, *args, **kwds):
if 'patterns' in kwds:
patterns = kwds.pop('patterns')
else:
patterns = []
mi = self.ts.dbMatch(*args, **kwds)
for (tag, tp, pat) in patterns:
mi.pattern(tag, tp, pat)
return mi
def __getattr__(self, attr):
if attr in self._methods:
return self.getMethod(attr)
else:
raise AttributeError, attr
def __iter__(self):
return self.ts
def getMethod(self, method):
# in theory, we can override this with
# profile/etc info
return getattr(self.ts, method)
# push/pop methods so we don't lose the previous
# set value, and we can potentially debug a bit
# easier
def pushVSFlags(self, flags):
self.tsflags.append(flags)
self.ts.setVSFlags(self.tsflags[-1])
def popVSFlags(self):
del self.tsflags[-1]
self.ts.setVSFlags(self.tsflags[-1])
def addTsFlag(self, flag):
curflags = self.ts.setFlags(0)
self.ts.setFlags(curflags | flag)
def getTsFlags(self):
curflags = self.ts.setFlags(0)
self.ts.setFlags(curflags)
return curflags
def isTsFlagSet(self, flag):
val = self.getTsFlags()
return bool(flag & val)
def setScriptFd(self, fd):
self.ts.scriptFd = fd.fileno()
# def addProblemFilter(self, filt):
# curfilter = self.ts.setProbFilter(0)
# self.ts.setProbFilter(curfilter | filt)
def test(self, cb, conf={}):
"""tests the ts we've setup, takes a callback function and a conf dict
for flags and what not"""
origflags = self.getTsFlags()
self.addTsFlag(rpm.RPMTRANS_FLAG_TEST)
# FIXME GARBAGE - remove once this is reimplemented elsewhere
# KEEPING FOR API COMPLIANCE ONLY
if conf.get('diskspacecheck') == 0:
self.ts.setProbFilter(rpm.RPMPROB_FILTER_DISKSPACE)
tserrors = self.ts.run(cb.callback, '')
self.ts.setFlags(origflags)
reserrors = []
if tserrors:
for (descr, (etype, mount, need)) in tserrors:
reserrors.append(descr)
return reserrors
def returnLeafNodes(self, headers=False):
"""returns a list of package tuples (n,a,e,v,r) that are not required by
any other package on the system
If headers is True then it will return a list of (header, index) tuples
"""
req = {}
orphan = []
mi = self.dbMatch()
if mi is None: # this is REALLY unlikely but let's just say it for the moment
return orphan
# prebuild the req dict
for h in mi:
if h['name'] == 'gpg-pubkey':
continue
if not h[rpm.RPMTAG_REQUIRENAME]:
continue
tup = miscutils.pkgTupleFromHeader(h)
for r in h[rpm.RPMTAG_REQUIRENAME]:
if r not in req:
req[r] = set()
req[r].add(tup)
mi = self.dbMatch()
if mi is None:
return orphan
def _return_all_provides(hdr):
""" Return all the provides, via yield. """
# These are done one by one, so that we get lazy loading
for prov in hdr[rpm.RPMTAG_PROVIDES]:
yield prov
for prov in hdr[rpm.RPMTAG_FILENAMES]:
yield prov
for h in mi:
if h['name'] == 'gpg-pubkey':
continue
preq = 0
tup = miscutils.pkgTupleFromHeader(h)
for p in _return_all_provides(h):
if p in req:
# Don't count a package that provides its own requirement
s = req[p]
if len(s) > 1 or tup not in s:
preq = preq + 1
break
if preq == 0:
if headers:
orphan.append((h, mi.instance()))
else:
orphan.append(h)
#~ orphan.append(tup)
return orphan
def initReadOnlyTransaction(root='/'):
read_ts = TransactionWrapper(root=root)
read_ts.pushVSFlags((rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS))
return read_ts
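# A minimal usage sketch (illustration only, not part of the original
# module): open a read-only transaction, walk the installed packages,
# and temporarily relax verification flags around reading an unsigned
# header.
#
#     ts = initReadOnlyTransaction()
#     for hdr in ts.dbMatch():
#         print hdr['name']
#     orphans = ts.returnLeafNodes()    # headers nothing else requires
#     ts.pushVSFlags(rpm._RPMVSF_NOSIGNATURES)
#     # ... read the untrusted header here ...
#     ts.popVSFlags()                   # restore the flags pushed before
#     ts.close()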

View file

@ -1,723 +0,0 @@
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2004 Duke University
import rpm5utils
import rpm5utils.miscutils
import rpm5utils.arch
def _vertup_cmp(tup1, tup2):
return rpm5utils.miscutils.compareEVR(tup1, tup2)
class Updates:
"""
This class computes and keeps track of updates and obsoletes.
initialize, add installed packages, add available packages (both as
unique lists of (name, arch, epoch, ver, rel) tuples), and add an optional
dict of obsoleting packages mapped to what they obsolete, e.g.::
foo, i386, 0, 1.1, 1: bar >= 1.1.
"""
def __init__(self, instlist, availlist):
self.installed = instlist # list of installed pkgs (n, a, e, v, r)
self.available = availlist # list of available pkgs (n, a, e, v, r)
self.rawobsoletes = {} # dict of obsoleting package->[what it obsoletes]
self._obsoletes_by_name = None
self.obsoleted_dict = {} # obsoleted pkgtup -> [ obsoleting pkgtups ]
self.obsoleting_dict = {} # obsoleting pkgtup -> [ obsoleted pkgtups ]
self.exactarch = 1 # don't change archs by default
self.exactarchlist = set(['kernel', 'kernel-smp', 'glibc',
'kernel-hugemem',
'kernel-enterprise', 'kernel-bigmem',
'kernel-BOOT'])
self.myarch = rpm5utils.arch.canonArch # set this if you want to
# test on some other arch
# otherwise leave it alone
self._is_multilib = rpm5utils.arch.isMultiLibArch(self.myarch)
self._archlist = rpm5utils.arch.getArchList(self.myarch)
self._multilib_compat_arches = rpm5utils.arch.getMultiArchInfo(self.myarch)
# make some dicts from installed and available
self.installdict = self.makeNADict(self.installed, 1)
self.availdict = self.makeNADict(self.available, 0, # Done in doUpdate
filter=self.installdict)
# holder for our updates dict
self.updatesdict = {}
self.updating_dict = {}
#debug, ignore me
self.debug = 0
self.obsoletes = {}
def _delFromDict(self, dict_, keys, value):
for key in keys:
if key not in dict_:
continue
dict_[key] = filter(value.__ne__, dict_[key])
if not dict_[key]:
del dict_[key]
def _delFromNADict(self, dict_, pkgtup):
(n, a, e, v, r) = pkgtup
for aa in (a, None):
if (n, aa) in dict_:
dict_[(n, aa)] = filter((e,v,r).__ne__, dict_[(n, aa)])
if not dict_[(n, aa)]:
del dict_[(n, aa)]
def delPackage(self, pkgtup):
"""remove available pkgtup that is no longer available"""
if pkgtup not in self.available:
return
self.available.remove(pkgtup)
self._delFromNADict(self.availdict, pkgtup)
self._delFromDict(self.updating_dict, self.updatesdict.get(pkgtup, []), pkgtup)
self._delFromDict(self.updatesdict, self.updating_dict.get(pkgtup, []), pkgtup)
if pkgtup in self.rawobsoletes:
if self._obsoletes_by_name:
for name, flag, version in self.rawobsoletes[pkgtup]:
self._delFromDict(self._obsoletes_by_name, [name], (flag, version, pkgtup))
del self.rawobsoletes[pkgtup]
self._delFromDict(self.obsoleted_dict, self.obsoleting_dict.get(pkgtup, []), pkgtup)
self._delFromDict(self.obsoleting_dict, self.obsoleted_dict.get(pkgtup, []), pkgtup)
def debugprint(self, msg):
if self.debug:
print msg
def makeNADict(self, pkglist, Nonelists, filter=None):
"""return lists of (e,v,r) tuples as value of a dict keyed on (n, a)
optionally will return a (n, None) entry with all the a for that
n in tuples of (a,e,v,r)"""
returndict = {}
for (n, a, e, v, r) in pkglist:
if filter and (n, None) not in filter:
continue
if (n, a) not in returndict:
returndict[(n, a)] = []
if (e,v,r) in returndict[(n, a)]:
continue
returndict[(n, a)].append((e,v,r))
if Nonelists:
if (n, None) not in returndict:
returndict[(n, None)] = []
if (a,e,v,r) in returndict[(n, None)]:
continue
returndict[(n, None)].append((a, e, v, r))
return returndict
def returnNewest(self, evrlist):
"""takes a list of (e, v, r) tuples and returns the newest one"""
if len(evrlist)==0:
raise rpm5utils.Rpm5UtilsError, "Zero Length List in returnNewest call"
if len(evrlist)==1:
return evrlist[0]
(new_e, new_v, new_r) = evrlist[0] # we'll call the first ones 'newest'
for (e, v, r) in evrlist[1:]:
rc = rpm5utils.miscutils.compareEVR((e, v, r), (new_e, new_v, new_r))
if rc > 0:
new_e = e
new_v = v
new_r = r
return (new_e, new_v, new_r)
def returnHighestVerFromAllArchsByName(self, name, archlist, pkglist):
"""returns a list of package tuples in a list (n, a, e, v, r)
takes a package name, a list of archs, and a list of pkgs in
(n, a, e, v, r) form."""
returnlist = []
high_vertup = None
for pkgtup in pkglist:
(n, a, e, v, r) = pkgtup
# FIXME: returnlist used to _possibly_ contain things not in
# archlist ... was that desired?
if name == n and a in archlist:
vertup = (e, v, r)
if (high_vertup is None or
(_vertup_cmp(high_vertup, vertup) < 0)):
high_vertup = vertup
returnlist = []
if vertup == high_vertup:
returnlist.append(pkgtup)
return returnlist
def condenseUpdates(self):
"""remove any accidental duplicates in updates"""
for tup in self.updatesdict:
if len(self.updatesdict[tup]) > 1:
mylist = self.updatesdict[tup]
self.updatesdict[tup] = rpm5utils.miscutils.unique(mylist)
def checkForObsolete(self, pkglist, newest=1):
"""accept a list of packages to check to see if anything obsoletes them
return an obsoleted_dict in the format of makeObsoletedDict"""
if self._obsoletes_by_name is None:
self._obsoletes_by_name = {}
for pkgtup, obsoletes in self.rawobsoletes.iteritems():
for name, flag, version in obsoletes:
self._obsoletes_by_name.setdefault(name, []).append(
(flag, version, pkgtup) )
obsdict = {} # obsoleting package -> [obsoleted package]
for pkgtup in pkglist:
name = pkgtup[0]
for obs_flag, obs_version, obsoleting in self._obsoletes_by_name.get(name, []):
if obs_flag in [None, 0] and name == obsoleting[0]: continue
if rpm5utils.miscutils.rangeCheck( (name, obs_flag, obs_version), pkgtup):
obsdict.setdefault(obsoleting, []).append(pkgtup)
if not obsdict:
return {}
obslist = obsdict.keys()
if newest:
obslist = self._reduceListNewestByNameArch(obslist)
returndict = {}
for new in obslist:
for old in obsdict[new]:
if old not in returndict:
returndict[old] = []
returndict[old].append(new)
return returndict
def doObsoletes(self):
"""figures out what things available obsolete things installed, returns
them in a dict attribute of the class."""
obsdict = {} # obseleting package -> [obsoleted package]
# this needs to keep arch in mind
# if foo.i386 obsoletes bar
# it needs to obsolete bar.i386 preferentially, not bar.x86_64
# if there is only one bar and only one foo then obsolete it, but try to
# match the arch.
# look through all the obsoleting packages look for multiple archs per name
# if you find it look for the packages they obsolete
#
obs_arches = {}
for (n, a, e, v, r) in self.rawobsoletes:
if n not in obs_arches:
obs_arches[n] = []
obs_arches[n].append(a)
for pkgtup in self.rawobsoletes:
(name, arch, epoch, ver, rel) = pkgtup
for (obs_n, flag, (obs_e, obs_v, obs_r)) in self.rawobsoletes[(pkgtup)]:
if (obs_n, None) in self.installdict:
for (rpm_a, rpm_e, rpm_v, rpm_r) in self.installdict[(obs_n, None)]:
if flag in [None, 0] or \
rpm5utils.miscutils.rangeCheck((obs_n, flag, (obs_e, obs_v, obs_r)),
(obs_n, rpm_a, rpm_e, rpm_v, rpm_r)):
# make sure the obsoleting pkg is not already installed
willInstall = 1
if (name, None) in self.installdict:
for (ins_a, ins_e, ins_v, ins_r) in self.installdict[(name, None)]:
pkgver = (epoch, ver, rel)
installedver = (ins_e, ins_v, ins_r)
if self.returnNewest((pkgver, installedver)) == installedver:
willInstall = 0
break
if rpm_a != arch and rpm_a in obs_arches[name]:
willInstall = 0
if willInstall:
if pkgtup not in obsdict:
obsdict[pkgtup] = []
obsdict[pkgtup].append((obs_n, rpm_a, rpm_e, rpm_v, rpm_r))
self.obsoletes = obsdict
self.makeObsoletedDict()
def makeObsoletedDict(self):
"""creates a dict of obsoleted packages -> [obsoleting package], this
is to make it easier to look up what package obsoletes what item in
the rpmdb"""
self.obsoleted_dict = {}
for new in self.obsoletes:
for old in self.obsoletes[new]:
if old not in self.obsoleted_dict:
self.obsoleted_dict[old] = []
self.obsoleted_dict[old].append(new)
self.obsoleting_dict = {}
for obsoleted, obsoletings in self.obsoleted_dict.iteritems():
for obsoleting in obsoletings:
self.obsoleting_dict.setdefault(obsoleting, []).append(obsoleted)
def doUpdates(self):
"""check for key lists as populated then commit acts of evil to
determine what is updated and/or obsoleted, populate self.updatesdict
"""
# best bet is to chew through the pkgs and throw out the new ones early
# then deal with the ones where there are a single pkg installed and a
# single pkg available
# then deal with the multiples
# we should take the whole list as a 'newlist' and remove those entries
# which are clearly:
# 1. updates
# 2. identical to the ones in ourdb
# 3. not in our archdict at all
simpleupdate = []
complexupdate = []
updatedict = {} # (old n, a, e, v, r) : [(new n, a, e, v, r)]
# make the new ones a list b/c while we _shouldn't_
# have multiple updaters, we might, and it needs
# to be solved one way or the other <sigh>
newpkgs = self.availdict
archlist = self._archlist
for (n, a) in newpkgs.keys():
if a not in archlist:
# high log here
del newpkgs[(n, a)]
continue
# remove the older stuff - if we're doing an update we only want the
# newest evrs
for (n, a) in newpkgs:
(new_e,new_v,new_r) = self.returnNewest(newpkgs[(n, a)])
for (e, v, r) in newpkgs[(n, a)][:]:
if (new_e, new_v, new_r) != (e, v, r):
newpkgs[(n, a)].remove((e, v, r))
for (n, a) in newpkgs:
# simple ones - look for exact matches or older stuff
if (n, a) in self.installdict:
for (rpm_e, rpm_v, rpm_r) in self.installdict[(n, a)]:
try:
(e, v, r) = self.returnNewest(newpkgs[(n,a)])
except rpm5utils.Rpm5UtilsError:
continue
else:
rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r))
if rc <= 0:
try:
newpkgs[(n, a)].remove((e, v, r))
except ValueError:
pass
# Now we add the (n, None) entries back...
for na in newpkgs.keys():
all_arches = map(lambda x: (na[1], x[0], x[1], x[2]), newpkgs[na])
newpkgs.setdefault((na[0], None), []).extend(all_arches)
# get rid of all the empty dict entries:
for nakey in newpkgs.keys():
if len(newpkgs[nakey]) == 0:
del newpkgs[nakey]
# ok at this point our newpkgs list should be thinned, we should have only
# the newest e,v,r's and only archs we can actually use
for (n, a) in newpkgs:
if a is None: # the None archs are only for lookups
continue
if (n, None) in self.installdict:
installarchs = []
availarchs = []
for (a, e, v ,r) in newpkgs[(n, None)]:
availarchs.append(a)
for (a, e, v, r) in self.installdict[(n, None)]:
installarchs.append(a)
if len(availarchs) > 1 or len(installarchs) > 1:
self.debugprint('putting %s in complex update' % n)
complexupdate.append(n)
else:
#log(4, 'putting %s in simple update list' % name)
self.debugprint('putting %s in simple update' % n)
simpleupdate.append((n, a))
# we have our lists to work with now
# simple cases
for (n, a) in simpleupdate:
# try to be as precise as possible
if n in self.exactarchlist:
if (n, a) in self.installdict:
(rpm_e, rpm_v, rpm_r) = self.returnNewest(self.installdict[(n, a)])
if (n, a) in newpkgs:
(e, v, r) = self.returnNewest(newpkgs[(n, a)])
rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r))
if rc > 0:
# this is definitely an update - put it in the dict
if (n, a, rpm_e, rpm_v, rpm_r) not in updatedict:
updatedict[(n, a, rpm_e, rpm_v, rpm_r)] = []
updatedict[(n, a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r))
else:
# we could only have 1 arch in our rpmdb and 1 arch of pkg
# available - so we shouldn't have to worry about the lists, here
# we just need to find the arch of the installed pkg so we can
# check its (e, v, r)
(rpm_a, rpm_e, rpm_v, rpm_r) = self.installdict[(n, None)][0]
if (n, None) in newpkgs:
for (a, e, v, r) in newpkgs[(n, None)]:
rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r))
if rc > 0:
# this is definitely an update - put it in the dict
if (n, rpm_a, rpm_e, rpm_v, rpm_r) not in updatedict:
updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)] = []
updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r))
# complex cases
# we're multilib/biarch
# we need to check the name.arch in two different trees
# one for the multiarch itself and one for the compat arch
# ie: x86_64 and athlon(i686-i386) - we don't want to descend
# x86_64->i686
# however, we do want to descend x86_64->noarch, sadly.
archlists = []
if self._is_multilib:
if self.myarch in rpm5utils.arch.multilibArches:
biarches = [self.myarch]
else:
biarches = [self.myarch, rpm5utils.arch.arches[self.myarch]]
biarches.append('noarch')
multicompat = self._multilib_compat_arches[0]
multiarchlist = rpm5utils.arch.getArchList(multicompat)
archlists = [ set(biarches), set(multiarchlist) ]
# archlists = [ biarches, multiarchlist ]
else:
archlists = [ set(archlist) ]
# archlists = [ archlist ]
for n in complexupdate:
for thisarchlist in archlists:
# we need to get the highest version and the archs that have it
# of the installed pkgs
tmplist = []
for (a, e, v, r) in self.installdict[(n, None)]:
tmplist.append((n, a, e, v, r))
highestinstalledpkgs = self.returnHighestVerFromAllArchsByName(n,
thisarchlist, tmplist)
hipdict = self.makeNADict(highestinstalledpkgs, 0)
if n in self.exactarchlist:
tmplist = []
for (a, e, v, r) in newpkgs[(n, None)]:
tmplist.append((n, a, e, v, r))
highestavailablepkgs = self.returnHighestVerFromAllArchsByName(n,
thisarchlist, tmplist)
hapdict = self.makeNADict(highestavailablepkgs, 0)
for (n, a) in hipdict:
if (n, a) in hapdict:
self.debugprint('processing %s.%s' % (n, a))
# we've got a match - get our versions and compare
(rpm_e, rpm_v, rpm_r) = hipdict[(n, a)][0] # only ever going to be first one
(e, v, r) = hapdict[(n, a)][0] # there can be only one
rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r))
if rc > 0:
# this is definitely an update - put it in the dict
if (n, a, rpm_e, rpm_v, rpm_r) not in updatedict:
updatedict[(n, a, rpm_e, rpm_v, rpm_r)] = []
updatedict[(n, a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r))
else:
self.debugprint('processing %s' % n)
# this is where we have to have an arch contest if there
# is more than one arch updating with the highest ver
instarchs = []
for (n,a) in hipdict:
instarchs.append(a)
rpm_a = rpm5utils.arch.getBestArchFromList(instarchs, myarch=self.myarch)
if rpm_a is None:
continue
tmplist = []
for (a, e, v, r) in newpkgs[(n, None)]:
tmplist.append((n, a, e, v, r))
highestavailablepkgs = self.returnHighestVerFromAllArchsByName(n,
thisarchlist, tmplist)
hapdict = self.makeNADict(highestavailablepkgs, 0)
availarchs = []
for (n,a) in hapdict:
availarchs.append(a)
a = rpm5utils.arch.getBestArchFromList(availarchs, myarch=self.myarch)
if a is None:
continue
(rpm_e, rpm_v, rpm_r) = hipdict[(n, rpm_a)][0] # there can be just one
(e, v, r) = hapdict[(n, a)][0] # just one, I'm sure, I swear!
rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r))
if rc > 0:
# this is definitely an update - put it in the dict
if (n, rpm_a, rpm_e, rpm_v, rpm_r) not in updatedict:
updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)] = []
updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r))
self.updatesdict = updatedict
self.makeUpdatingDict()
def makeUpdatingDict(self):
"""creates a dict of available packages -> [installed package], this
is to make it easier to look up what package will be updating what
in the rpmdb"""
self.updating_dict = {}
for old in self.updatesdict:
for new in self.updatesdict[old]:
if new not in self.updating_dict:
self.updating_dict[new] = []
self.updating_dict[new].append(old)
def reduceListByNameArch(self, pkglist, name=None, arch=None):
"""returns a set of pkg naevr tuples reduced based on name or arch"""
returnlist = []
if name or arch:
for (n, a, e, v, r) in pkglist:
if name:
if name == n:
returnlist.append((n, a, e, v, r))
continue
if arch:
if arch == a:
returnlist.append((n, a, e, v, r))
continue
else:
returnlist = pkglist
return returnlist
def getUpdatesTuples(self, name=None, arch=None):
"""returns updates for packages in a list of tuples of:
(updating naevr, installed naevr)"""
returnlist = []
for oldtup in self.updatesdict:
for newtup in self.updatesdict[oldtup]:
returnlist.append((newtup, oldtup))
# self.reduceListByNameArch() for double tuples
tmplist = []
if name:
for ((n, a, e, v, r), oldtup) in returnlist:
if name != n:
tmplist.append(((n, a, e, v, r), oldtup))
if arch:
for ((n, a, e, v, r), oldtup) in returnlist:
if arch != a:
tmplist.append(((n, a, e, v, r), oldtup))
for item in tmplist:
try:
returnlist.remove(item)
except ValueError:
pass
return returnlist
def getUpdatesList(self, name=None, arch=None):
"""returns updating packages in a list of (naevr) tuples"""
returnlist = []
for oldtup in self.updatesdict:
for newtup in self.updatesdict[oldtup]:
returnlist.append(newtup)
returnlist = self.reduceListByNameArch(returnlist, name, arch)
return returnlist
# NOTE: This returns obsoleters and obsoletees, but narrows based on
# _obsoletees_ (unlike getObsoletesList). Look at getObsoletersTuples
def getObsoletesTuples(self, newest=0, name=None, arch=None):
"""returns obsoletes for packages in a list of tuples of:
(obsoleting naevr, installed naevr). You can specify name and/or
arch of the installed package to narrow the results.
You can also specify newest=1 to get only the newest pkgs, grouped
by (name, arch), that obsolete something"""
tmplist = []
obslist = self.obsoletes.keys()
if newest:
obslist = self._reduceListNewestByNameArch(obslist)
for obstup in obslist:
for rpmtup in self.obsoletes[obstup]:
tmplist.append((obstup, rpmtup))
# self.reduceListByNameArch() for double tuples
returnlist = []
if name or arch:
for (obstup, (n, a, e, v, r)) in tmplist:
if name:
if name == n:
returnlist.append((obstup, (n, a, e, v, r)))
continue
if arch:
if arch == a:
returnlist.append((obstup, (n, a, e, v, r)))
continue
else:
returnlist = tmplist
return returnlist
# NOTE: This returns obsoleters and obsoletees, but narrows based on
# _obsoleters_ (like getObsoletesList).
def getObsoletersTuples(self, newest=0, name=None, arch=None):
"""returns obsoletes for packages in a list of tuples of:
(obsoleting naevr, installed naevr). You can specify name and/or
arch of the obsoleting package to narrow the results.
You can also specify newest=1 to get only the newest pkgs, grouped
by (name, arch), that obsolete something"""
tmplist = []
obslist = self.obsoletes.keys()
if newest:
obslist = self._reduceListNewestByNameArch(obslist)
for obstup in obslist:
for rpmtup in self.obsoletes[obstup]:
tmplist.append((obstup, rpmtup))
# self.reduceListByNameArch() for double tuples
returnlist = []
if name or arch:
for ((n, a, e, v, r), insttup) in tmplist:
if name:
if name == n:
returnlist.append(((n, a, e, v, r), insttup))
continue
if arch:
if arch == a:
returnlist.append(((n, a, e, v, r), insttup))
continue
else:
returnlist = tmplist
return returnlist
# NOTE: This returns _obsoleters_, and narrows based on that (unlike
# getObsoletesTuples, but like getObsoletersTuples)
def getObsoletesList(self, newest=0, name=None, arch=None):
"""returns obsoleting packages in a list of naevr tuples of just the
packages that obsolete something that is installed. You can specify
name and/or arch of the obsoleting packaging to narrow the results.
You can also specify newest=1 to get the set of newest pkgs (name, arch)
sorted, that obsolete something"""
tmplist = self.obsoletes.keys()
if newest:
tmplist = self._reduceListNewestByNameArch(tmplist)
returnlist = self.reduceListByNameArch(tmplist, name, arch)
return returnlist
def getObsoletedList(self, newest=0, name=None):
"""returns a list of pkgtuples obsoleting the package in name"""
returnlist = []
for new in self.obsoletes:
for obstup in self.obsoletes[new]:
(n, a, e, v, r) = obstup
if n == name:
returnlist.append(new)
continue
return returnlist
def getOthersList(self, name=None, arch=None):
"""returns a naevr tuple of the packages that are neither installed
nor an update - this may include something that obsoletes an installed
package"""
updates = {}
inst = {}
tmplist = []
for pkgtup in self.getUpdatesList():
updates[pkgtup] = 1
for pkgtup in self.installed:
inst[pkgtup] = 1
for pkgtup in self.available:
if pkgtup not in updates and pkgtup not in inst:
tmplist.append(pkgtup)
returnlist = self.reduceListByNameArch(tmplist, name, arch)
return returnlist
def _reduceListNewestByNameArch(self, tuplelist):
"""return list of newest packages based on name, arch matching
this means(in name.arch form): foo.i386 and foo.noarch are not
compared to each other for highest version only foo.i386 and
foo.i386 will be compared"""
highdict = {}
done = False
for pkgtup in tuplelist:
(n, a, e, v, r) = pkgtup
if (n, a) not in highdict:
highdict[(n, a)] = pkgtup
else:
pkgtup2 = highdict[(n, a)]
done = True
(n2, a2, e2, v2, r2) = pkgtup2
rc = rpm5utils.miscutils.compareEVR((e,v,r), (e2, v2, r2))
if rc > 0:
highdict[(n, a)] = pkgtup
if not done:
return tuplelist
return highdict.values()
# def getProblems(self):
# """return list of problems:
# - Packages that are both obsoleted and updated.
# - Packages that have multiple obsoletes.
# - Packages that _still_ have multiple updates
# """

View file

@ -1,66 +0,0 @@
"""
NetworkX
========
NetworkX (NX) is a Python package for the creation, manipulation, and
study of the structure, dynamics, and functions of complex networks.
https://networkx.lanl.gov/
Using
-----
Just write in Python
>>> import networkx as nx
>>> G=nx.Graph()
>>> G.add_edge(1,2)
>>> G.add_node("spam")
>>> print(G.nodes())
[1, 2, 'spam']
>>> print(G.edges())
[(1, 2)]
"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Add platform dependent shared library path to sys.path
#
from __future__ import absolute_import
import sys
if sys.version_info[:2] < (2, 6):
m = "Python version 2.6 or later is required for NetworkX (%d.%d detected)."
raise ImportError(m % sys.version_info[:2])
del sys
# Release data
# these packages work with Python >= 2.6
from rpm5utils.urpmgraphs.exception import *
import rpm5utils.urpmgraphs.classes
from rpm5utils.urpmgraphs.classes import *
import rpm5utils.urpmgraphs.convert
from rpm5utils.urpmgraphs.convert import *
#import urpmgraphs.relabel
#from urpmgraphs.relabel import *
#import urpmgraphs.generators
#from urpmgraphs.generators import *
#from urpmgraphs.readwrite import *
#import urpmgraphs.readwrite
#Need to test with SciPy, when available
import rpm5utils.urpmgraphs.algorithms
from rpm5utils.urpmgraphs.algorithms import *
#import urpmgraphs.linalg
#from urpmgraphs.linalg import *
#from urpmgraphs.tests.test import run as test
#import urpmgraphs.utils
#import urpmgraphs.drawing
#from urpmgraphs.drawing import *

View file

@ -1,2 +0,0 @@
from rpm5utils.urpmgraphs.algorithms.components import *
from rpm5utils.urpmgraphs.algorithms.cycles import *

View file

@ -1,2 +0,0 @@
#from urpmgraphs.algorithms.components.connected import *
from rpm5utils.urpmgraphs.algorithms.components.strongly_connected import *

View file

@ -1,321 +0,0 @@
# -*- coding: utf-8 -*-
"""
Strongly connected components.
"""
__authors__ = "\n".join(['Eben Kenah',
'Aric Hagberg (hagberg@lanl.gov)',
'Christopher Ellison'])
# Copyright (C) 2004-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['number_strongly_connected_components',
'strongly_connected_components',
'strongly_connected_component_subgraphs',
'is_strongly_connected',
'strongly_connected_components_recursive',
'kosaraju_strongly_connected_components',
'condensation',
]
import rpm5utils as nx
def strongly_connected_components(G):
"""Return nodes in strongly connected components of graph.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
comp : list of lists
A list of nodes for each component of G.
The list is ordered from largest connected component to smallest.
See Also
--------
connected_components
Notes
-----
Uses Tarjan's algorithm with Nuutila's modifications.
Nonrecursive version of algorithm.
References
----------
.. [1] Depth-first search and linear graph algorithms, R. Tarjan
SIAM Journal of Computing 1(2):146-160, (1972).
.. [2] On finding the strongly connected components in a directed graph.
E. Nuutila and E. Soisalon-Soinen
Information Processing Letters 49(1): 9-14, (1994).
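Examples
--------
Illustrative sketch only; node order inside each component depends on
traversal order.
>>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3)])
>>> strongly_connected_components(G)
[[0, 1, 2], [3]]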
"""
preorder={}
lowlink={}
scc_found={}
scc_queue = []
scc_list=[]
i=0 # Preorder counter
for source in G:
if source not in scc_found:
queue=[source]
while queue:
v=queue[-1]
if v not in preorder:
i=i+1
preorder[v]=i
done=1
v_nbrs=G[v]
for w in v_nbrs:
if w not in preorder:
queue.append(w)
done=0
break
if done==1:
lowlink[v]=preorder[v]
for w in v_nbrs:
if w not in scc_found:
if preorder[w]>preorder[v]:
lowlink[v]=min([lowlink[v],lowlink[w]])
else:
lowlink[v]=min([lowlink[v],preorder[w]])
queue.pop()
if lowlink[v]==preorder[v]:
scc_found[v]=True
scc=[v]
while scc_queue and preorder[scc_queue[-1]]>preorder[v]:
k=scc_queue.pop()
scc_found[k]=True
scc.append(k)
scc_list.append(scc)
else:
scc_queue.append(v)
scc_list.sort(key=len,reverse=True)
return scc_list
def kosaraju_strongly_connected_components(G,source=None):
"""Return nodes in strongly connected components of graph.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
comp : list of lists
A list of nodes for each component of G.
The list is ordered from largest connected component to smallest.
See Also
--------
connected_components
Notes
-----
Uses Kosaraju's algorithm.
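Examples
--------
Illustrative sketch only; node order inside each component depends on
traversal order.
>>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3)])
>>> kosaraju_strongly_connected_components(G)
[[0, 1, 2], [3]]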
"""
components=[]
G=G.reverse(copy=False)
post=list(nx.dfs_postorder_nodes(G,source=source))
G=G.reverse(copy=False)
seen={}
while post:
r=post.pop()
if r in seen:
continue
c=nx.dfs_preorder_nodes(G,r)
new=[v for v in c if v not in seen]
seen.update([(u,True) for u in new])
components.append(new)
components.sort(key=len,reverse=True)
return components
def strongly_connected_components_recursive(G):
"""Return nodes in strongly connected components of graph.
Recursive version of algorithm.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
comp : list of lists
A list of nodes for each component of G.
The list is ordered from largest connected component to smallest.
See Also
--------
connected_components
Notes
-----
Uses Tarjan's algorithm with Nuutila's modifications.
References
----------
.. [1] Depth-first search and linear graph algorithms, R. Tarjan
SIAM Journal of Computing 1(2):146-160, (1972).
.. [2] On finding the strongly connected components in a directed graph.
E. Nuutila and E. Soisalon-Soinen
Information Processing Letters 49(1): 9-14, (1994).
"""
def visit(v,cnt):
root[v]=cnt
visited[v]=cnt
cnt+=1
stack.append(v)
for w in G[v]:
if w not in visited: visit(w,cnt)
if w not in component:
root[v]=min(root[v],root[w])
if root[v]==visited[v]:
component[v]=root[v]
tmpc=[v] # hold nodes in this component
while stack[-1]!=v:
w=stack.pop()
component[w]=root[v]
tmpc.append(w)
stack.remove(v)
scc.append(tmpc) # add to scc list
scc=[]
visited={}
component={}
root={}
cnt=0
stack=[]
for source in G:
if source not in visited:
visit(source,cnt)
scc.sort(key=len,reverse=True)
return scc
def strongly_connected_component_subgraphs(G):
"""Return strongly connected components as subgraphs.
Parameters
----------
G : NetworkX Graph
A graph.
Returns
-------
glist : list
A list of graphs, one for each strongly connected component of G.
See Also
--------
connected_component_subgraphs
Notes
-----
The list is ordered from largest strongly connected component to smallest.
"""
cc=strongly_connected_components(G)
graph_list=[]
for c in cc:
graph_list.append(G.subgraph(c))
return graph_list
def number_strongly_connected_components(G):
"""Return number of strongly connected components in graph.
Parameters
----------
G : NetworkX graph
A directed graph.
Returns
-------
n : integer
Number of strongly connected components
See Also
--------
connected_components
Notes
-----
For directed graphs only.
"""
return len(strongly_connected_components(G))
def is_strongly_connected(G):
"""Test directed graph for strong connectivity.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
connected : bool
True if the graph is strongly connected, False otherwise.
See Also
--------
strongly_connected_components
Notes
-----
For directed graphs only.
"""
if not G.is_directed():
raise nx.NetworkXError("""Not allowed for undirected graph G.
See is_connected() for connectivity test.""")
if len(G)==0:
raise nx.NetworkXPointlessConcept(
"""Connectivity is undefined for the null graph.""")
return len(strongly_connected_components(G)[0])==len(G)
def condensation(G):
"""Returns the condensation of G.
The condensation of G is the graph with each of the strongly connected
components contracted into a single node.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
cG : NetworkX DiGraph
The condensation of G.
Notes
-----
After contracting all strongly connected components to a single node,
the resulting graph is a directed acyclic graph.
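Examples
--------
Illustrative sketch; each condensation node is the sorted tuple of the
original nodes in its component.
>>> G = nx.DiGraph([(1, 2), (2, 1), (2, 3)])
>>> sorted(condensation(G).nodes())
[(1, 2), (3,)]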
"""
scc = strongly_connected_components(G)
mapping = dict([(n,tuple(sorted(c))) for c in scc for n in c])
cG = nx.DiGraph()
for u in mapping:
cG.add_node(mapping[u])
for _,v,d in G.edges_iter(u, data=True):
if v not in mapping[u]:
cG.add_edge(mapping[u], mapping[v])
return cG

View file

@ -1,122 +0,0 @@
"""
========================
Cycle finding algorithms
========================
"""
# Copyright (C) 2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import rpm5utils as nx
from collections import defaultdict
__all__ = ['simple_cycles']
__author__ = "\n".join(['Jon Olav Vik <jonovik@gmail.com>',
'Aric Hagberg <hagberg@lanl.gov>'])
def simple_cycles(G):
"""Find simple cycles (elementary circuits) of a directed graph.
A simple cycle, or elementary circuit, is a closed path where no
node appears twice, except that the first and last node are the same.
Two elementary circuits are distinct if they are not cyclic permutations
of each other.
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
A list of circuits, where each circuit is a list of nodes, with the first
and last node being the same.
Examples
--------
>>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
>>> nx.simple_cycles(G)
[[0, 0], [0, 1, 2, 0], [0, 2, 0], [1, 2, 1], [2, 2]]
See Also
--------
cycle_basis (for undirected graphs)
Notes
-----
The implementation follows pp. 79-80 in [1]_.
The time complexity is O((n+e)(c+1)) for n nodes, e edges and c
elementary circuits.
References
----------
.. [1] Finding all the elementary circuits of a directed graph.
D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
http://dx.doi.org/10.1137/0204007
See Also
--------
cycle_basis
"""
# Jon Olav Vik, 2010-08-09
def _unblock(thisnode):
"""Recursively unblock and remove nodes from B[thisnode]."""
if blocked[thisnode]:
blocked[thisnode] = False
while B[thisnode]:
_unblock(B[thisnode].pop())
def circuit(thisnode, startnode, component):
closed = False # set to True if elementary path is closed
path.append(thisnode)
blocked[thisnode] = True
for nextnode in component[thisnode]: # direct successors of thisnode
if nextnode == startnode:
result.append(path + [startnode])
closed = True
elif not blocked[nextnode]:
if circuit(nextnode, startnode, component):
closed = True
if closed:
_unblock(thisnode)
else:
for nextnode in component[thisnode]:
if thisnode not in B[nextnode]: # TODO: use set for speedup?
B[nextnode].append(thisnode)
path.pop() # remove thisnode from path
return closed
if not G.is_directed():
raise nx.NetworkXError(\
"simple_cycles() not implemented for undirected graphs.")
path = [] # stack of nodes in current path
blocked = defaultdict(bool) # vertex: blocked from search?
B = defaultdict(list) # graph portions that yield no elementary circuit
result = [] # list to accumulate the circuits found
# Johnson's algorithm requires some ordering of the nodes.
# They might not be sortable so we assign an arbitrary ordering.
ordering=dict(zip(G,range(len(G))))
for s in ordering:
# Build the subgraph induced by s and following nodes in the ordering
subgraph = G.subgraph(node for node in G
if ordering[node] >= ordering[s])
# Find the strongly connected component in the subgraph
# that contains the least node according to the ordering
strongcomp = nx.strongly_connected_components(subgraph)
mincomp=min(strongcomp,
key=lambda nodes: min(ordering[n] for n in nodes))
component = G.subgraph(mincomp)
if component:
# smallest node in the component according to the ordering
startnode = min(component,key=ordering.__getitem__)
for node in component:
blocked[node] = False
B[node][:] = []
dummy=circuit(startnode, startnode, component)
return result

View file

@ -1,3 +0,0 @@
from rpm5utils.urpmgraphs.classes.graph import Graph
from rpm5utils.urpmgraphs.classes.digraph import DiGraph
from rpm5utils.urpmgraphs.classes.function import *

View file

@ -1,996 +0,0 @@
"""Base class for directed graphs."""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from copy import deepcopy
import rpm5utils as nx
from rpm5utils.urpmgraphs.classes.graph import Graph
from rpm5utils.urpmgraphs.exception import NetworkXError
from rpm5utils.urpmgraphs import convert  # needed by __init__ when data is not None
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
class DiGraph(Graph):
"""
Base class for directed graphs.
A DiGraph stores nodes and edges with optional data, or attributes.
DiGraphs hold directed edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
Graph
MultiGraph
MultiDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.DiGraph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2,3])
>>> G.add_nodes_from(range(100,110))
>>> H=nx.Graph()
>>> H.add_path([0,1,2,3,4,5,6,7,8,9])
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
>>> G.add_edges_from(H.edges())
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.DiGraph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.node
>>> G.add_node(1, time='5pm')
>>> G.add_nodes_from([3], time='2pm')
>>> G.node[1]
{'time': '5pm'}
>>> G.node[1]['room'] = 714
>>> G.nodes(data=True)
[(1, {'room': 714, 'time': '5pm'}), (3, {'time': '2pm'})]
Warning: adding a node to G.node does not add it to the graph.
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
>>> G.add_edge(1, 2, weight=4.7 )
>>> G.add_edges_from([(3,4),(4,5)], color='red')
>>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2]['weight'] = 4.7
>>> G.edge[1][2]['weight'] = 4
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n<3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
>>> G[1] # adjacency dict keyed by neighbor to edge attributes
... # Note: you should not change this dict manually!
{2: {'color': 'blue', 'weight': 4}}
The fastest way to traverse all edges of a graph is via
adjacency_iter(), but the edges() method is often more convenient.
>>> for n,nbrsdict in G.adjacency_iter():
... for nbr,eattr in nbrsdict.items():
... if 'weight' in eattr:
... (n,nbr,eattr['weight'])
(1, 2, 4)
(2, 3, 8)
>>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ]
[(1, 2, 4), (2, 3, 8)]
**Reporting:**
Simple graph information is obtained using methods.
Iterator versions of many reporting methods exist for efficiency.
Methods exist for reporting nodes(), edges(), neighbors() and degree()
as well as the number of nodes and edges.
For details on these and other miscellaneous methods, see below.
"""
def __init__(self, data=None, **attr):
"""Initialize a graph with edges, name, graph attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
name : string, optional (default='')
An optional name for the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
convert
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name='my graph')
>>> e = [(1,2),(2,3),(3,4)] # list of edges
>>> G = nx.Graph(e)
Arbitrary graph attribute pairs (key=value) may be assigned
>>> G=nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
"""
self.graph = {} # dictionary for graph attributes
self.node = {} # dictionary for node attributes
# We store two adjacency lists:
# the predecessors of node n are stored in the dict self.pred
# the successors of node n are stored in the dict self.succ=self.adj
self.adj = {} # empty adjacency dictionary
self.pred = {} # predecessor
self.succ = self.adj # successor
# attempt to load graph with data
if data is not None:
convert.to_networkx_graph(data,create_using=self)
# load graph attributes (must be after convert)
self.graph.update(attr)
self.edge=self.adj
def add_node(self, n, attr_dict=None, **attr):
"""Add a single node n and update node attributes.
Parameters
----------
n : node
A node can be any hashable Python object except None.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of node attributes. Key/value pairs will
update existing data associated with the node.
attr : keyword arguments, optional
Set or change attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1,size=10)
>>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
if n not in self.succ:
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr_dict
else: # update attr even if node already exists
self.node[n].update(attr_dict)
def add_nodes_from(self, nodes, **attr):
"""Add multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple
take precedence over attributes specified generally.
See Also
--------
add_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_nodes_from('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_nodes_from(K3)
>>> sorted(G.nodes(),key=str)
[0, 1, 2, 'H', 'e', 'l', 'o']
Use keywords to update specific node attributes for every node.
>>> G.add_nodes_from([1,2], size=10)
>>> G.add_nodes_from([3,4], weight=0.4)
Use (node, attrdict) tuples to update attributes for specific
nodes.
>>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})])
>>> G.node[1]['size']
11
>>> H = nx.Graph()
>>> H.add_nodes_from(G.nodes(data=True))
>>> H.node[1]['size']
11
"""
for n in nodes:
try:
newnode=n not in self.succ
except TypeError:
nn,ndict = n
if nn not in self.succ:
self.succ[nn] = {}
self.pred[nn] = {}
newdict = attr.copy()
newdict.update(ndict)
self.node[nn] = newdict
else:
olddict = self.node[nn]
olddict.update(attr)
olddict.update(ndict)
continue
if newnode:
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr.copy()
else:
self.node[n].update(attr)
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by providing
a dictionary with key/value pairs. See examples below.
Parameters
----------
u,v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with the edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
Adding an edge that already exists updates the edge data.
NetworkX algorithms designed for weighted graphs use as
the edge weight a numerical value assigned to the keyword
'weight'.
Examples
--------
The following all add the edge e=(1,2) to graph G:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = (1,2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
# add nodes
if u not in self.succ:
self.succ[u]={}
self.pred[u]={}
self.node[u] = {}
if v not in self.succ:
self.succ[v]={}
self.pred[v]={}
self.node[v] = {}
# add the edge
datadict=self.adj[u].get(v,{})
datadict.update(attr_dict)
self.succ[u][v]=datadict
self.pred[v][u]=datadict
def has_successor(self, u, v):
"""Return True if node u has successor v.
This is true if graph has the edge u->v.
"""
return (u in self.succ and v in self.succ[u])
def has_predecessor(self, u, v):
"""Return True if node u has predecessor v.
This is true if graph has the edge u<-v.
"""
return (u in self.pred and v in self.pred[u])
def successors_iter(self,n):
"""Return an iterator over successor nodes of n.
neighbors_iter() and successors_iter() are the same.
"""
try:
return iter(self.succ[n].keys())
except KeyError:
raise NetworkXError("The node %s is not in the digraph."%(n,))
def predecessors_iter(self,n):
"""Return an iterator over predecessor nodes of n."""
try:
return iter(self.pred[n].keys())
except KeyError:
raise NetworkXError("The node %s is not in the digraph."%(n,))
def successors(self, n):
"""Return a list of successor nodes of n.
neighbors() and successors() are the same function.
"""
return list(self.successors_iter(n))
def predecessors(self, n):
"""Return a list of predecessor nodes of n."""
return list(self.predecessors_iter(n))
# digraph definitions
neighbors = successors
neighbors_iter = successors_iter
def edges_iter(self, nbunch=None, data=False):
"""Return an iterator over the edges.
Edges are returned as tuples with optional data
in the order (node, neighbor, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict in 3-tuple (u,v,data).
Returns
-------
edge_iter : iterator
An iterator of (u,v) or (u,v,d) tuples of edges.
See Also
--------
edges : return a list of edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> [e for e in G.edges_iter()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
[(0, 1, {}), (1, 2, {}), (2, 3, {})]
>>> list(G.edges_iter([0,2]))
[(0, 1), (2, 3)]
>>> list(G.edges_iter(0))
[(0, 1)]
"""
if nbunch is None:
nodes_nbrs=iter(self.adj.items())
else:
nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
if data:
for n,nbrs in nodes_nbrs:
for nbr,data in nbrs.items():
yield (n,nbr,data)
else:
for n,nbrs in nodes_nbrs:
for nbr in nbrs:
yield (n,nbr)
# alias out_edges to edges
out_edges_iter=edges_iter
out_edges=Graph.edges
def in_edges_iter(self, nbunch=None, data=False):
"""Return an iterator over the incoming edges.
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict in 3-tuple (u,v,data).
Returns
-------
in_edge_iter : iterator
An iterator of (u,v) or (u,v,d) tuples of incoming edges.
See Also
--------
edges_iter : return an iterator of edges
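Examples
--------
Illustrative sketch:
>>> G = nx.DiGraph([(0, 1), (1, 2)])
>>> list(G.in_edges_iter([2]))
[(1, 2)]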
"""
if nbunch is None:
nodes_nbrs=iter(self.pred.items())
else:
nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
if data:
for n,nbrs in nodes_nbrs:
for nbr,data in nbrs.items():
yield (nbr,n,data)
else:
for n,nbrs in nodes_nbrs:
for nbr in nbrs:
yield (nbr,n)
def in_edges(self, nbunch=None, data=False):
"""Return a list of the incoming edges.
See Also
--------
edges : return a list of edges
"""
return list(self.in_edges_iter(nbunch, data))
def degree_iter(self, nbunch=None, weighted=False):
"""Return an iterator for (node, degree).
The node degree is the number of edges adjacent to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weighted : bool, optional (default=False)
If True return the sum of edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree, in_degree, out_degree, in_degree_iter, out_degree_iter
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> G.add_path([0,1,2,3])
>>> list(G.degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.degree_iter([0,1]))
[(0, 1), (1, 2)]
"""
if nbunch is None:
nodes_nbrs=zip(iter(self.succ.items()),iter(self.pred.items()))
else:
nodes_nbrs=zip(
((n,self.succ[n]) for n in self.nbunch_iter(nbunch)),
((n,self.pred[n]) for n in self.nbunch_iter(nbunch)))
if weighted:
# edge weighted graph - degree is sum of edge weights
for (n,succ),(n2,pred) in nodes_nbrs:
yield (n,
sum((succ[nbr].get('weight',1) for nbr in succ))+
sum((pred[nbr].get('weight',1) for nbr in pred)))
else:
for (n,succ),(n2,pred) in nodes_nbrs:
yield (n,len(succ)+len(pred))
def in_degree_iter(self, nbunch=None, weighted=False):
"""Return an iterator for (node, in-degree).
The node in-degree is the number of edges pointing in to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weighted : bool, optional (default=False)
If True return the sum of edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, in-degree).
See Also
--------
degree, in_degree, out_degree, out_degree_iter
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_path([0,1,2,3])
>>> list(G.in_degree_iter(0)) # node 0 with degree 0
[(0, 0)]
>>> list(G.in_degree_iter([0,1]))
[(0, 0), (1, 1)]
"""
if nbunch is None:
nodes_nbrs=iter(self.pred.items())
else:
nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
if weighted:
# edge weighted graph - degree is sum of edge weights
for n,nbrs in nodes_nbrs:
yield (n, sum(data.get('weight',1) for data in nbrs.values()))
else:
for n,nbrs in nodes_nbrs:
yield (n,len(nbrs))
def out_degree_iter(self, nbunch=None, weighted=False):
"""Return an iterator for (node, out-degree).
The node out-degree is the number of edges pointing out of the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weighted : bool, optional (default=False)
If True return the sum of edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, out-degree).
See Also
--------
degree, in_degree, out_degree, in_degree_iter
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_path([0,1,2,3])
>>> list(G.out_degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.out_degree_iter([0,1]))
[(0, 1), (1, 1)]
"""
if nbunch is None:
nodes_nbrs=iter(self.succ.items())
else:
nodes_nbrs=((n,self.succ[n]) for n in self.nbunch_iter(nbunch))
if weighted:
# edge weighted graph - degree is sum of edge weights
for n,nbrs in nodes_nbrs:
yield (n, sum(data.get('weight',1) for data in nbrs.values()))
else:
for n,nbrs in nodes_nbrs:
yield (n,len(nbrs))
def in_degree(self, nbunch=None, weighted=False):
"""Return the in-degree of a node or nodes.
The node in-degree is the number of edges pointing in to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weighted : bool, optional (default=False)
If True return the sum of edge weights adjacent to the node.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and in-degree as values or
a number if a single node is specified.
See Also
--------
degree, out_degree, in_degree_iter
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> G.add_path([0,1,2,3])
>>> G.in_degree(0)
0
>>> G.in_degree([0,1])
{0: 0, 1: 1}
>>> list(G.in_degree([0,1]).values())
[0, 1]
"""
if nbunch in self: # return a single node
return next(self.in_degree_iter(nbunch,weighted=weighted))[1]
else: # return a dict
return dict(self.in_degree_iter(nbunch,weighted=weighted))
def out_degree(self, nbunch=None, weighted=False):
"""Return the out-degree of a node or nodes.
The node out-degree is the number of edges pointing out of the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weighted : bool, optional (default=False)
If True return the sum of edge weights adjacent to the node.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and out-degree as values or
a number if a single node is specified.
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> G.add_path([0,1,2,3])
>>> G.out_degree(0)
1
>>> G.out_degree([0,1])
{0: 1, 1: 1}
>>> list(G.out_degree([0,1]).values())
[1, 1]
"""
if nbunch in self: # return a single node
return next(self.out_degree_iter(nbunch,weighted=weighted))[1]
else: # return a dict
return dict(self.out_degree_iter(nbunch,weighted=weighted))
def clear(self):
"""Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.clear()
>>> G.nodes()
[]
>>> G.edges()
[]
"""
self.succ.clear()
self.pred.clear()
self.node.clear()
self.graph.clear()
def is_multigraph(self):
"""Return True if graph is a multigraph, False otherwise."""
return False
def is_directed(self):
"""Return True if graph is directed, False otherwise."""
return True
def to_directed(self):
"""Return a directed copy of the graph.
Returns
-------
G : DiGraph
A deepcopy of the graph.
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1)]
"""
return deepcopy(self)
def to_undirected(self, reciprocal=False):
"""Return an undirected representation of the digraph.
Parameters
----------
reciprocal : bool (optional)
If True only keep edges that appear in both directions
in the original digraph.
Returns
-------
G : Graph
An undirected graph with the same name and nodes and
with edge (u,v,data) if either (u,v,data) or (v,u,data)
is in the digraph. If both edges exist in digraph and
their edge data is different, only one edge is created
with an arbitrary choice of which edge data to use.
You must check and correct for this manually if desired.
Notes
-----
If edges in both directions (u,v) and (v,u) exist in the
graph, attributes for the new undirected edge will be a combination of
the attributes of the directed edges. The edge data is updated
in the (arbitrary) order that the edges are encountered. For
more customized control of the edge attributes use add_edge().
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar G=DiGraph(D) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
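Examples
--------
A minimal doctest-style sketch (assuming the add_path helper used in
the other examples in this class):
>>> G = nx.DiGraph()
>>> G.add_path([0,1])
>>> H = G.to_undirected()
>>> H.edges()
[(0, 1)]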
"""
H=Graph()
H.name=self.name
H.add_nodes_from(self)
if reciprocal is True:
H.add_edges_from( (u,v,deepcopy(d))
for u,nbrs in self.adjacency_iter()
for v,d in nbrs.items()
if v in self.pred[u])
else:
H.add_edges_from( (u,v,deepcopy(d))
for u,nbrs in self.adjacency_iter()
for v,d in nbrs.items() )
H.graph=deepcopy(self.graph)
H.node=deepcopy(self.node)
return H
def reverse(self, copy=True):
"""Return the reverse of the graph.
The reverse is a graph with the same nodes and edges
but with the directions of the edges reversed.
Parameters
----------
copy : bool optional (default=True)
If True, return a new DiGraph holding the reversed edges.
If False, the reverse is created in place using the original
graph (this changes the original graph).
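Examples
--------
A minimal sketch of the copy semantics (assuming the add_path helper
used in the other examples in this class):
>>> G = nx.DiGraph()
>>> G.add_path([0,1,2])
>>> H = G.reverse()
>>> sorted(H.edges())
[(1, 0), (2, 1)]
>>> sorted(G.edges())   # the original is untouched when copy=True
[(0, 1), (1, 2)]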
"""
if copy:
H = self.__class__(name="Reverse of (%s)"%self.name)
H.pred=self.succ.copy()
H.adj=self.pred.copy()
H.succ=H.adj
H.graph=self.graph.copy()
H.node=self.node.copy()
else:
self.pred,self.succ=self.succ,self.pred
self.adj=self.succ
H=self
return H
def subgraph(self, nbunch):
"""Return the subgraph induced on nodes in nbunch.
The induced subgraph of the graph contains the nodes in nbunch
and the edges between those nodes.
Parameters
----------
nbunch : list, iterable
A container of nodes which will be iterated through once.
Returns
-------
G : Graph
A subgraph of the graph with the same edge attributes.
Notes
-----
The graph, edge or node attributes just point to the original graph.
So changes to the node or edge structure will not be reflected in
the original graph while changes to the attributes will.
To create a subgraph with its own copy of the edge/node attributes use:
nx.Graph(G.subgraph(nbunch))
If edge attributes are containers, a deep copy can be obtained using:
G.subgraph(nbunch).copy()
For an inplace reduction of a graph to a subgraph you can remove nodes:
G.remove_nodes_from([n for n in G if n not in set(nbunch)])
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> H = G.subgraph([0,1,2])
>>> H.edges()
[(0, 1), (1, 2)]
"""
bunch = self.nbunch_iter(nbunch)
# create new graph and copy subgraph into it
H = self.__class__()
# namespace shortcuts for speed
H_succ=H.succ
H_pred=H.pred
self_succ=self.succ
# add nodes
for n in bunch:
H_succ[n]={}
H_pred[n]={}
# add edges
for u in H_succ:
Hnbrs=H_succ[u]
for v,datadict in self_succ[u].items():
if v in H_succ:
# add both representations of edge: u-v and v-u
Hnbrs[v]=datadict
H_pred[v][u]=datadict
# copy node and attribute dictionaries
for n in H:
H.node[n]=self.node[n]
H.graph=self.graph
return H

View file

@ -1,375 +0,0 @@
"""
Functional interface to graph methods and assorted utilities.
"""
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
# Copyright (C) 2004-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
import rpm5utils as nx
# functional style helpers
__all__ = ['nodes', 'edges', 'degree', 'degree_histogram', 'neighbors',
'number_of_nodes', 'number_of_edges', 'density',
'nodes_iter', 'edges_iter', 'is_directed','info',
'freeze','is_frozen','subgraph','create_empty_copy',
'set_node_attributes','get_node_attributes',
'set_edge_attributes','get_edge_attributes']
def nodes(G):
"""Return a copy of the graph nodes in a list."""
return G.nodes()
def nodes_iter(G):
"""Return an iterator over the graph nodes."""
return G.nodes_iter()
def edges(G,nbunch=None):
"""Return list of edges adjacent to nodes in nbunch.
Return all edges if nbunch is unspecified or nbunch=None.
For digraphs, edges=out_edges
"""
return G.edges(nbunch)
def edges_iter(G,nbunch=None):
"""Return iterator over edges adjacent to nodes in nbunch.
Return all edges if nbunch is unspecified or nbunch=None.
For digraphs, edges=out_edges
"""
return G.edges_iter(nbunch)
def degree(G,nbunch=None,weighted=False):
"""Return degree of single node or of nbunch of nodes.
If nbunch is omitted, then return degrees of *all* nodes.
"""
return G.degree(nbunch,weighted=weighted)
def neighbors(G,n):
"""Return a list of nodes connected to node n. """
return G.neighbors(n)
def number_of_nodes(G):
"""Return the number of nodes in the graph."""
return G.number_of_nodes()
def number_of_edges(G):
"""Return the number of edges in the graph. """
return G.number_of_edges()
def density(G):
r"""Return the density of a graph.
The density for undirected graphs is
.. math::
d = \frac{2m}{n(n-1)},
and for directed graphs is
.. math::
d = \frac{m}{n(n-1)},
where `n` is the number of nodes and `m` is the number of edges in `G`.
Notes
-----
The density is 0 for a graph without edges and 1.0 for a complete graph.
The density of multigraphs can be higher than 1.
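Examples
--------
For an undirected path on four nodes, n=4 and m=3, so d = 2*3/(4*3) = 0.5:
>>> G = nx.Graph()
>>> G.add_path([0,1,2,3])
>>> nx.density(G)
0.5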
"""
n=number_of_nodes(G)
m=number_of_edges(G)
if m==0: # includes cases n==0 and n==1
d=0.0
else:
if G.is_directed():
d=m/float(n*(n-1))
else:
d= m*2.0/float(n*(n-1))
return d
def degree_histogram(G):
"""Return a list of the frequency of each degree value.
Parameters
----------
G : Networkx graph
A graph
Returns
-------
hist : list
A list of frequencies of degrees.
The degree values are the index in the list.
Notes
-----
Note: the bins are width one, hence len(list) can be large
(Order(number_of_edges))
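Examples
--------
A path on four nodes has two endpoints of degree 1 and two inner nodes
of degree 2, so the histogram is [0, 2, 2]:
>>> G = nx.Graph()
>>> G.add_path([0,1,2,3])
>>> nx.degree_histogram(G)
[0, 2, 2]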
"""
degseq=list(G.degree().values())
dmax=max(degseq)+1
freq= [ 0 for d in range(dmax) ]
for d in degseq:
freq[d] += 1
return freq
def is_directed(G):
""" Return True if graph is directed."""
return G.is_directed()
def freeze(G):
"""Modify graph to prevent addition of nodes or edges.
Parameters
----------
G : graph
A NetworkX graph
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([0,1,2,3])
>>> G=nx.freeze(G)
>>> try:
... G.add_edge(4,5)
... except nx.NetworkXError as e:
... print(str(e))
Frozen graph can't be modified
Notes
-----
This does not prevent modification of edge data.
To "unfreeze" a graph you must make a copy.
See Also
--------
is_frozen
"""
def frozen(*args):
raise nx.NetworkXError("Frozen graph can't be modified")
G.add_node=frozen
G.add_nodes_from=frozen
G.remove_node=frozen
G.remove_nodes_from=frozen
G.add_edge=frozen
G.add_edges_from=frozen
G.remove_edge=frozen
G.remove_edges_from=frozen
G.clear=frozen
G.frozen=True
return G
def is_frozen(G):
"""Return True if graph is frozen.
Parameters
----------
G : graph
A NetworkX graph
See Also
--------
freeze
"""
try:
return G.frozen
except AttributeError:
return False
def subgraph(G, nbunch):
"""Return the subgraph induced on nodes in nbunch.
Parameters
----------
G : graph
A NetworkX graph
nbunch : list, iterable
A container of nodes that will be iterated through once (thus
it should be an iterator or be iterable). Each element of the
container should be a valid node type: any hashable type except
None. If nbunch is None, the subgraph induced on all nodes is returned.
Nodes in nbunch that are not in the graph will be (quietly)
ignored.
Notes
-----
subgraph(G) calls G.subgraph()
"""
return G.subgraph(nbunch)
def create_empty_copy(G,with_nodes=True):
"""Return a copy of the graph G with all of the edges removed.
Parameters
----------
G : graph
A NetworkX graph
with_nodes : bool (default=True)
Include nodes.
Notes
-----
Graph, node, and edge data is not propagated to the new graph.
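Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> H = nx.create_empty_copy(G)
>>> sorted(H.nodes())
[0, 1, 2]
>>> H.edges()
[]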
"""
H=G.__class__()
if with_nodes:
H.add_nodes_from(G)
return H
def info(G, n=None):
"""Print short summary of information for the graph G or the node n.
Parameters
----------
G : Networkx graph
A graph
n : node (any hashable)
A node in the graph G
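Examples
--------
A rough sketch of the output (exact spacing may differ, hence the skip):
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(nx.info(G)) # doctest: +SKIP
Name:
Type: Graph
Number of nodes: 3
Number of edges: 2
Average degree:   1.3333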
"""
info='' # append this all to a string
if n is None:
info+="Name: %s\n"%G.name
type_name = [type(G).__name__]
info+="Type: %s\n"%",".join(type_name)
info+="Number of nodes: %d\n"%G.number_of_nodes()
info+="Number of edges: %d\n"%G.number_of_edges()
nnodes=G.number_of_nodes()
if len(G) > 0:
if G.is_directed():
info+="Average in degree: %8.4f\n"%\
(sum(G.in_degree().values())/float(nnodes))
info+="Average out degree: %8.4f"%\
(sum(G.out_degree().values())/float(nnodes))
else:
s=sum(G.degree().values())
info+="Average degree: %8.4f"%\
(float(s)/float(nnodes))
else:
if n not in G:
raise nx.NetworkXError("node %s not in graph"%(n,))
info+="Node % s has the following properties:\n"%n
info+="Degree: %d\n"%G.degree(n)
info+="Neighbors: "
info+=' '.join(str(nbr) for nbr in G.neighbors(n))
return info
def set_node_attributes(G,name,attributes):
"""Set node attributes from dictionary of nodes and values
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
attributes: dict
Dictionary of attributes keyed by node.
Examples
--------
>>> G=nx.path_graph(3)
>>> bb=nx.betweenness_centrality(G)
>>> nx.set_node_attributes(G,'betweenness',bb)
>>> G.node[1]['betweenness']
1.0
"""
for node,value in attributes.items():
G.node[node][name]=value
def get_node_attributes(G,name):
"""Get node attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by node.
Examples
--------
>>> G=nx.Graph()
>>> G.add_nodes_from([1,2,3],color='red')
>>> color=nx.get_node_attributes(G,'color')
>>> color[1]
'red'
"""
return dict( (n,d[name]) for n,d in G.node.items() if name in d)
def set_edge_attributes(G,name,attributes):
"""Set edge attributes from dictionary of edge tuples and values
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
attributes: dict
Dictionary of attributes keyed by edge (tuple).
Examples
--------
>>> G=nx.path_graph(3)
>>> bb=nx.edge_betweenness_centrality(G)
>>> nx.set_edge_attributes(G,'betweenness',bb)
>>> G[1][2]['betweenness']
4.0
"""
for (u,v),value in attributes.items():
G[u][v][name]=value
def get_edge_attributes(G,name):
"""Get edge attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by node.
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([1,2,3],color='red')
>>> color=nx.get_edge_attributes(G,'color')
>>> color[(1,2)]
'red'
"""
return dict( ((u,v),d[name]) for u,v,d in G.edges(data=True) if name in d)

File diff suppressed because it is too large

View file

@ -1,708 +0,0 @@
"""
This module provides functions to convert
NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph
is through the graph constructor. The constructor calls
the to_networkx_graph() function which attempts to guess the
input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a=numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D=nx.DiGraph(a)
or equivalently
>>> D=nx.to_networkx_graph(a,create_using=nx.DiGraph())
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_pygraphviz, nx_pydot
"""
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
# Copyright (C) 2006-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import warnings
import rpm5utils as nx
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist',
'from_numpy_matrix', 'to_numpy_matrix',
'to_numpy_recarray'
]
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
If create_using is None return the default (just networkx.Graph())
If create_using.clear() works, assume it is a graph object.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
G=nx.Graph()
else:
G=create_using
try:
G.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return G
def to_networkx_graph(data,create_using=None,multigraph_input=False):
"""Make a NetworkX graph from a known data structure.
The preferred way to call this is automatically
from the class constructor
>>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
instead of the equivalent
>>> G=nx.from_dict_of_dicts(d)
Parameters
----------
data : a object to be converted
Current known types are:
any NetworkX graph
dict-of-dicts
dict-of-lists
list of edges
numpy matrix
numpy ndarray
scipy sparse matrix
pygraphviz agraph
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
If True and data is a dict_of_dicts,
try to create a multigraph assuming dict_of_dict_of_lists.
If data and create_using are both multigraphs then create
a multigraph from a multigraph.
"""
# NX graph
if hasattr(data,"adj"):
try:
result= from_dict_of_dicts(data.adj,\
create_using=create_using,\
multigraph_input=data.is_multigraph())
if hasattr(data,'graph') and isinstance(data.graph,dict):
result.graph=data.graph.copy()
if hasattr(data,'node') and isinstance(data.node,dict):
result.node=dict( (n,dd.copy()) for n,dd in data.node.items() )
return result
except:
raise nx.NetworkXError("Input is not a correct NetworkX graph.")
# pygraphviz agraph
if hasattr(data,"is_strict"):
try:
return nx.from_agraph(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
# dict of dicts/lists
if isinstance(data,dict):
try:
return from_dict_of_dicts(data,create_using=create_using,\
multigraph_input=multigraph_input)
except:
try:
return from_dict_of_lists(data,create_using=create_using)
except:
raise TypeError("Input is not known type.")
# list or generator of edges
if (isinstance(data,list)
or hasattr(data,'next')
or hasattr(data, '__next__')):
try:
return from_edgelist(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a valid edge list")
# numpy matrix or ndarray
try:
import numpy
if isinstance(data,numpy.matrix) or \
isinstance(data,numpy.ndarray):
try:
return from_numpy_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct numpy matrix or array.")
except ImportError:
warnings.warn('numpy not found, skipping conversion test.',
ImportWarning)
# scipy sparse matrix - any format
try:
import scipy
if hasattr(data,"format"):
try:
return from_scipy_sparse_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct scipy sparse matrix type.")
except ImportError:
warnings.warn('scipy not found, skipping conversion test.',
ImportWarning)
raise nx.NetworkXError(\
"Input is not a known data type for conversion.")
def convert_to_undirected(G):
"""Return a new undirected representation of the graph G.
"""
return G.to_undirected()
def convert_to_directed(G):
"""Return a new directed representation of the graph G.
"""
return G.to_directed()
def to_dict_of_lists(G,nodelist=None):
"""Return adjacency representation of graph as a dictionary of lists.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
Notes
-----
Completely ignores edge data for MultiGraph and MultiDiGraph.
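Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> nx.to_dict_of_lists(G)
{0: [1], 1: [0, 2], 2: [1]}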
"""
if nodelist is None:
nodelist=G
d = {}
for n in nodelist:
d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist]
return d
def from_dict_of_lists(d,create_using=None):
"""Return a graph from a dictionary of lists.
Parameters
----------
d : dictionary of lists
A dictionary of lists adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> dol= {0:[1]} # single edge (0,1)
>>> G=nx.from_dict_of_lists(dol)
or
>>> G=nx.Graph(dol) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
if G.is_multigraph() and not G.is_directed():
# a dict_of_lists can't show multiedges. BUT for undirected graphs,
# each edge shows up twice in the dict_of_lists.
# So we need to treat this case separately.
seen={}
for node,nbrlist in d.items():
for nbr in nbrlist:
if nbr not in seen:
G.add_edge(node,nbr)
seen[node]=1 # don't allow reverse edge to show up
else:
G.add_edges_from( ((node,nbr) for node,nbrlist in d.items()
for nbr in nbrlist) )
return G
def to_dict_of_dicts(G,nodelist=None,edge_data=None):
"""Return adjacency representation of graph as a dictionary of dictionaries.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
edge_data : list, optional
If provided, the value of the dictionary will be
set to edge_data for all edges. This is useful to make
an adjacency matrix type representation with 1 as the edge data.
If edge_data is None, the edge data in G is used to fill the values.
If G is a multigraph, the edge data is a dict for each pair (u,v).
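Examples
--------
A small sketch of the edge_data parameter, which produces an
adjacency-matrix style representation:
>>> G = nx.Graph()
>>> G.add_edge(0,1)
>>> nx.to_dict_of_dicts(G, edge_data=1)
{0: {1: 1}, 1: {0: 1}}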
"""
dod={}
if nodelist is None:
if edge_data is None:
for u,nbrdict in G.adjacency_iter():
dod[u]=nbrdict.copy()
else: # edge_data is not None
for u,nbrdict in G.adjacency_iter():
dod[u]=dod.fromkeys(nbrdict, edge_data)
else: # nodelist is not None
if edge_data is None:
for u in nodelist:
dod[u]={}
for v,data in ((v,data) for v,data in G[u].items() if v in nodelist):
dod[u][v]=data
else: # nodelist and edge_data are not None
for u in nodelist:
dod[u]={}
for v in ( v for v in G[u] if v in nodelist):
dod[u][v]=edge_data
return dod
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
"""Return a graph from a dictionary of dictionaries.
Parameters
----------
d : dictionary of dictionaries
A dictionary of dictionaries adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
When True, the values of the inner dict are assumed
to be containers of edge data for multiple edges.
Otherwise this routine assumes the edge data are singletons.
Examples
--------
>>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
>>> G=nx.from_dict_of_dicts(dod)
or
>>> G=nx.Graph(dod) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
# is dict a MultiGraph or MultiDiGraph?
if multigraph_input:
# make a copy of the list of edge data (but not the edge data)
if G.is_directed():
if G.is_multigraph():
G.add_edges_from( (u,v,key,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else:
G.add_edges_from( (u,v,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else: # Undirected
if G.is_multigraph():
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,key,data)
for key,data in datadict.items()
)
seen.add((v,u))
else:
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,data)
for key,data in datadict.items() )
seen.add((v,u))
else: # not a multigraph to multigraph transfer
if G.is_multigraph() and not G.is_directed():
# d can have both representations u-v, v-u in dict. Only add one.
# We don't need this check for digraphs since we add both directions,
# or for Graph() since it is done implicitly (parallel edges not allowed)
seen=set()
for u,nbrs in d.items():
for v,data in nbrs.items():
if (u,v) not in seen:
G.add_edge(u,v,attr_dict=data)
seen.add((v,u))
else:
G.add_edges_from( ( (u,v,data)
for u,nbrs in d.items()
for v,data in nbrs.items()) )
return G
def to_edgelist(G,nodelist=None):
"""Return a list of edges in the graph.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
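Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(0,1,weight=7)
>>> nx.to_edgelist(G)
[(0, 1, {'weight': 7})]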
"""
if nodelist is None:
return G.edges(data=True)
else:
return G.edges(nodelist,data=True)
def from_edgelist(edgelist,create_using=None):
"""Return a graph from a list of edges.
Parameters
----------
edgelist : list or iterator
Edge tuples
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> edgelist= [(0,1)] # single edge (0,1)
>>> G=nx.from_edgelist(edgelist)
or
>>> G=nx.Graph(edgelist) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_edges_from(edgelist)
return G
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight'):
"""Return the graph adjacency matrix as a NumPy matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data type, optional
A valid single NumPy data type used to initialize the array.
This must be a simple type such as int or numpy.float64 and
not a compound data type (see to_numpy_recarray)
If None, then the NumPy default is used.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight: string, optional
Edge data key corresponding to the edge weight.
Returns
-------
M : NumPy matrix
Graph adjacency matrix.
See Also
--------
to_numpy_recarray, from_numpy_matrix
Notes
-----
The matrix entries are assigned with weight edge attribute. When
an edge does not have the weight attribute, the value of the entry is 1.
For multiple edges, the values of the entries are the sums of the edge
attributes for each edge.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
[ 0., 0., 4.]])
"""
try:
import numpy as np
except ImportError:
raise ImportError(\
"to_numpy_matrix() requires numpy: http://scipy.org/ ")
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
if G.is_multigraph():
# Handle MultiGraphs and MultiDiGraphs
# array of nans to start with; any leftover nans will be converted to 0
# nans are used so we can use sum, min, max for multigraphs
M = np.zeros((nlen,nlen), dtype=dtype, order=order)+np.nan
# use numpy nan-aware operations
operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
try:
op=operator[multigraph_weight]
except:
raise ValueError('multigraph_weight must be sum, min, or max')
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
e_weight = attrs.get(weight, 1)
M[i,j] = op([e_weight,M[i,j]])
if undirected:
M[j,i] = M[i,j]
# convert any nans to zeros
M = np.asmatrix(np.nan_to_num(M))
else:
# Graph or DiGraph, this is much faster than above
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
for u,nbrdict in G.adjacency_iter():
for v,d in nbrdict.items():
try:
M[index[u],index[v]]=d.get(weight,1)
except KeyError:
pass
M = np.asmatrix(M)
return M
def from_numpy_matrix(A,create_using=None):
"""Return a graph from numpy matrix.
The numpy matrix is interpreted as an adjacency matrix for the graph.
Parameters
----------
A : numpy matrix
An adjacency matrix representation of a graph
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Notes
-----
If the numpy matrix has a single data type for each matrix entry it
will be converted to an appropriate Python data type.
If the numpy matrix has a user-specified compound data type the names
of the data fields will be used as attribute keys in the resulting
NetworkX graph.
See Also
--------
to_numpy_matrix, to_numpy_recarray
Examples
--------
Simple integer weights on edges:
>>> import numpy
>>> A=numpy.matrix([[1,1],[2,1]])
>>> G=nx.from_numpy_matrix(A)
User defined compound data type on edges:
>>> import numpy
>>> dt=[('weight',float),('cost',int)]
>>> A=numpy.matrix([[(1.0,2)]],dtype=dt)
>>> G=nx.from_numpy_matrix(A)
>>> G.edges(data=True)
[(0, 0, {'cost': 2, 'weight': 1.0})]
"""
kind_to_python_type={'f':float,
'i':int,
'u':int,
'b':bool,
'c':complex,
'S':str,
'V':'void'}
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
kind_to_python_type['U']=str
except ValueError: # Python 2.6+
kind_to_python_type['U']=unicode
# This should never fail if you have created a numpy matrix with numpy...
try:
import numpy as np
except ImportError:
raise ImportError(\
"from_numpy_matrix() requires numpy: http://scipy.org/ ")
G=_prep_create_using(create_using)
n,m=A.shape
if n!=m:
raise nx.NetworkXError("Adjacency matrix is not square.",
"nx,ny=%s"%(A.shape,))
dt=A.dtype
try:
python_type=kind_to_python_type[dt.kind]
except:
raise TypeError("Unknown numpy data type: %s"%dt)
# make sure we get isolated nodes
G.add_nodes_from(range(n))
# get a list of edges
x,y=np.asarray(A).nonzero()
# handle numpy constructed data type
if python_type == 'void':
fields=sorted([(offset,dtype,name) for name,(dtype,offset) in
A.dtype.fields.items()])
for (u,v) in zip(x,y):
attr={}
for (offset,dtype,name),val in zip(fields,A[u,v]):
attr[name]=kind_to_python_type[dtype.kind](val)
G.add_edge(u,v,attr)
else: # basic data type
G.add_edges_from( ((u,v,{'weight':python_type(A[u,v])})
for (u,v) in zip(x,y)) )
return G
def to_numpy_recarray(G,nodelist=None,
dtype=[('weight',float)],
order=None):
"""Return the graph adjacency matrix as a NumPy recarray.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy named dtype used to initialize the NumPy recarray.
The data type names are assumed to be keys in the graph edge attribute
dictionary.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
Returns
-------
M : NumPy recarray
The graph with specified edge data as a Numpy recarray
Notes
-----
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(1,2,weight=7.0,cost=5)
>>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
>>> print(A.weight)
[[ 0. 7.]
[ 7. 0.]]
>>> print(A.cost)
[[0 5]
[5 0]]
"""
try:
import numpy as np
except ImportError:
raise ImportError(\
"to_numpy_matrix() requires numpy: http://scipy.org/ ")
if G.is_multigraph():
raise nx.NetworkXError("Not implemented for multigraphs.")
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
names=M.dtype.names
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
values=tuple([attrs[n] for n in names])
M[i,j] = values
if undirected:
M[j,i] = M[i,j]
return M.view(np.recarray)

View file

@ -1,49 +0,0 @@
# -*- coding: utf-8 -*-
"""
**********
Exceptions
**********
Base exceptions and errors for NetworkX.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult(dschult@colgate.edu)\nLoïc Séguin-C. <loicseguin@gmail.com>"""
# Copyright (C) 2004-2008 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Exception handling
# the root of all Exceptions
class NetworkXException(Exception):
"""Base class for exceptions in NetworkX."""
class NetworkXError(NetworkXException):
"""Exception for a serious error in NetworkX"""
class NetworkXPointlessConcept(NetworkXException):
"""Harary, F. and Read, R. "Is the Null Graph a Pointless Concept?"
In Graphs and Combinatorics Conference, George Washington University.
New York: Springer-Verlag, 1973.
"""
class NetworkXAlgorithmError(NetworkXException):
"""Exception for unexpected termination of algorithms."""
class NetworkXUnfeasible(NetworkXAlgorithmError):
"""Exception raised by algorithms trying to solve a problem
instance that has no feasible solution."""
class NetworkXNoPath(NetworkXUnfeasible):
"""Exception for algorithms that should return a path when running
on graphs where such a path does not exist."""
class NetworkXUnbounded(NetworkXAlgorithmError):
"""Exception raised by algorithms trying to solve a maximization
or a minimization problem instance that is unbounded."""

View file

@ -1,675 +0,0 @@
#!/usr/bin/python2.7
# -*- coding: UTF-8 -*-
'''
" urpm-downloader for URPM-based linux
" A tool for downloading RPMs from URPM-based linux repositories.
"
" Copyright (C) 2011 ROSA Laboratory.
" Written by Anton Kirilenko <anton.kirilenko@rosalab.ru>
"
" PLATFORMS
" =========
" Linux
"
" REQUIREMENTS
" ============
" - python 2.7
" - python-rpm 5.3
" - urpmi 6.68
"
" This program is free software: you can redistribute it and/or modify
" it under the terms of the GNU General Public License or the GNU Lesser
" General Public License as published by the Free Software Foundation,
" either version 2 of the Licenses, or (at your option) any later version.
"
" This program is distributed in the hope that it will be useful,
" but WITHOUT ANY WARRANTY; without even the implied warranty of
" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
" GNU General Public License for more details.
"
" You should have received a copy of the GNU General Public License
" and the GNU Lesser General Public License along with this program.
" If not, see <http://www.gnu.org/licenses/>.
'''
import argparse
import sys
import subprocess
import os
import re
from urllib import urlretrieve
import rpm
from urllib2 import urlopen, HTTPError, URLError
import shutil
import ConfigParser
cp = ConfigParser.RawConfigParser()
import gettext
#gettext.install('urpm-tools', 'locale', unicode=True, names=['gettext'])
gettext.install('urpm-tools')
#t = gettext.translation('urpm-tools', 'locale', fallback=True)
#_ = t.ugettext
def vprint(text):
'''Print the message only if verbose mode is on'''
if(command_line_arguments.verbose):
print(text)
def qprint(text):
'''Print the message only if quiet mode is off'''
if(not command_line_arguments.quiet):
print(text)
def eprint(text, fatal=False, code=1):
'''Print the message to stderr. Exit if fatal'''
print >> sys.stderr, text
if (fatal):
exit(code)
def url_exists(url):
'''Return True if the given url or local path exists. Otherwise, return False.'''
if(url.startswith("file://") or url.startswith("/")):
return os.path.isfile(url)
#try to open file
try:
r = urlopen(url)
return True
except (HTTPError,URLError):
return False
def parse_command_line():
''' Parse command line, adjust some flags and warn in some cases'''
global command_line_arguments
arg_parser = argparse.ArgumentParser(description=_('A tool for downloading RPMs and SRPMs from URPM-based linux repositories'),
epilog=_("If none of the options -b, -s, -d turned on, it will be treated as -b"))
arg_parser.add_argument('packages', action='store', nargs='+', help=_("Package name(s) to download. It can contain not only package names, but (S)RPM files too. In this case, the package name extracted from the file will be used"))
arg_parser.add_argument('-u', '--urls', action='store_true', help=_("Instead of downloading files, list the URLs that would be processed"))
arg_parser.add_argument('-r', '--resolve', action='store_true', help=_("When downloading RPMs, resolve dependencies and also download the required packages, if they are not already installed"))
arg_parser.add_argument('-a', '--resolve-all', action='store_true', help=_("When downloading RPMs, resolve dependencies and also download the required packages, even if they are already installed"))
arg_parser.add_argument('-b', '--binary', action='store_true', help=_("Download binary RPMs"))
arg_parser.add_argument('-s', '--source', action='store_true', help=_("Download the source RPMs (SRPMs)"))
arg_parser.add_argument('-d', '--debug-info', action='store_true', help=_("Download debug RPMs"))
arg_parser.add_argument('-D', '--debug-info-install', action='store_true', help=_("Download debug RPMs and install"))
arg_parser.add_argument('--version', action='version', version=VERSION)
arg_parser.add_argument('-v', '--verbose', action='store_true', help=_("Verbose (print additional info)"))
arg_parser.add_argument('-q', '--quiet', action='store_true', help=_("Quiet operation."))
arg_parser.add_argument('--include-media', '--media', action='append',nargs = '+', help=_("Use only selected URPM media"))
arg_parser.add_argument('--exclude-media', action='append',nargs = '+', help=_("Do not use selected URPM media"))
arg_parser.add_argument('-x', '--exclude-packages', action='store',nargs = '+', help=_("Exclude package(s) by regex"))
arg_parser.add_argument('-i', '--ignore-errors', action='store_true', help=_("Try to continue when error occurs"))
arg_parser.add_argument('-o', '--overwrite', action='store_true', help=_("If the file already exists, download it again and overwrite the old one"))
arg_parser.add_argument('--all-alternatives', action='store_true', help=_("If package dependency can be satisfied by several packages, download all of them (by default, only the first one is downloaded)"))
arg_parser.add_argument('--all-versions', action='store_true', help=_("If different versions of package present in repository, process them all"))
#arg_parser.add_argument('--self-test', action='store_true', help="Test urpm-downloader end exit")
arg_parser.add_argument('--dest-dir', action='store', help=_("Specify a destination directory for the download"))
command_line_arguments = arg_parser.parse_args(sys.argv[1:])
if(command_line_arguments.debug_info_install):
command_line_arguments.debug_info = True
if(not command_line_arguments.debug_info and not command_line_arguments.source):
command_line_arguments.binary = True
if(command_line_arguments.resolve_all):
command_line_arguments.resolve = True
if(command_line_arguments.exclude_packages is None):
command_line_arguments.exclude_packages = []
if(command_line_arguments.verbose and command_line_arguments.quiet):
eprint(_("Use of --verbose with --quiet is senseless. Turning verbose mode off."))
command_line_arguments.verbose = False
if(command_line_arguments.resolve and command_line_arguments.source and command_line_arguments.urls):
eprint(_("Note that resolving of SRPM dependencies is not possible until SRPM downloaded. So, it will be done despite --urls"))
if(command_line_arguments.dest_dir is not None):
if(not os.path.exists(command_line_arguments.dest_dir) or not os.path.isdir(command_line_arguments.dest_dir)):
os.mkdir(command_line_arguments.dest_dir)
else:
command_line_arguments.dest_dir = os.getcwd()
def get_command_output(command, fatal_fails=True):
'''Execute command using subprocess.Popen and return its stdout output string. If
the return code is not 0, print an error message and exit'''
vprint("Executing command: " + str(command))
res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = list(res.communicate())
vprint('Output: ' + str(output))
if sys.stdout.encoding:
if output[0]:
output[0] = output[0].decode(sys.stdout.encoding).encode("UTF-8")
if output[1]:
output[1] = output[1].decode(sys.stdout.encoding).encode("UTF-8")
if(res.returncode != 0 and fatal_fails): # if not fatal_fails, do nothing; the caller has to deal with it
eprint(_("Error while calling command") + " '" + " ".join(command) + "'")
if(output[1] != None or output[0] != None):
eprint(_("Error message: \n")+ ((output[0].strip() + "\n") if output[0]!=None else "") +
(output[1].strip() if output[1]!=None else "") )
exit(1)
return [output[0], output[1], res.returncode]
def parse_packages(pkgs_list, toresolve):
''' Takes a list of package names, some of which are alternatives (like 'pkg1|pkg2'),
and returns a list of package names without '|' '''
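# For example, with default options:
#   parse_packages(['pkg1|pkg2', 'pkg3'], []) -> ['pkg1', 'pkg3']
# and with --all-alternatives:
#   parse_packages(['pkg1|pkg2', 'pkg3'], []) -> ['pkg1', 'pkg2', 'pkg3']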
output = []
for pkg in pkgs_list:
pkgs = pkg.split("|")
if(len(pkgs)>1):
vprint("Aternatives found: " + str(pkgs))
if(command_line_arguments.all_alternatives): # download all the alternatives
for p in pkgs:
output.append(p)
else: # download only the first package (first in alphabetical order)
#check if one of the packages is already in the 'toresolve' list
already_presents = False
for p in pkgs:
if(p in toresolve or p in output):
already_presents = True
break
#if not - add the first package
if(not already_presents):
output.append(sorted(pkgs)[0])
if(len(pkgs)>1):
vprint("Selected: " + sorted(pkgs)[0])
return output
def get_installed_packages():
'''Fill 'installed_packages' with data about installed packages so that it looks like
{pkg_name: [[version1, release1], [version2, release2], ...], ...} '''
global installed_packages, installed_loaded
if(installed_loaded):
return
installed_loaded = True
installed_packages = {}
ts = rpm.TransactionSet()
mi = ts.dbMatch()
for h in mi:
if(h['name'] not in installed_packages):
installed_packages[h['name']] = []
installed_packages[h['name']].append( [h['version'], h['release']] )
vprint("The list of installed packages loaded")
def check_what_to_skip(package_names):
''' Take a list of package names and return the subset that does not have to be downloaded '''
def should_be_excluded(pkg):
for line in command_line_arguments.exclude_packages:
if(re.search(line, pkg) is not None):
return True
return False
vprint("Check package to skip...")
pkgs = package_names[:]
to_skip = []
# remove packages that have to be excluded due to command line arguments
for pkg in pkgs[:]:
if(should_be_excluded(pkg)):
pkgs.remove(pkg)
to_skip.append(pkg)
if(command_line_arguments.resolve_all):
return to_skip
# Skip packages, that are already installed and have the same version
get_installed_packages()
#remove from to_skip candidates all the packages, which are not installed
for pkg in pkgs[:]:
if(pkg not in installed_packages):
pkgs.remove(pkg)
vprint("Retrieving possible downloading package versions...")
res = get_command_output(cmd + ['--sources'] + pkgs)
urls = res[0].strip().split('\n')
vprint("A list of urls retrieved: " + str(urls))
to_download = {}
rpms = {}
for url in urls: # collect data
res = get_package_fields(url)
if(res[0] not in rpms):
rpms[res[0]] = []
rpms[res[0]].append(res[1:4])
if(not command_line_arguments.all_versions):
vprint("Removing urls of the older versions...")
for pkg in rpms.keys()[:]: # filter
L = rpms[pkg]
while(len(L) > 1):
if(rpm.evrCompare(L[0][0], L[1][0]) == 1):
del L[1]
else:
del L[0]
# regroup data: to_download[pkg_name] = [ver-rel1, ver-rel2, ...]
for pkg in rpms:
if(pkg not in to_download):
to_download[pkg] = []
for item in rpms[pkg]:
to_download[pkg].append(item[0]) # item[0] == version
vprint("Checking what to skip...")
for pkg in pkgs:
installed_versions = ['-'.join(i) for i in installed_packages[pkg]]
#print pkg, str(installed_versions)
for ver in to_download[pkg][:]:
if (ver in installed_versions):
to_download[pkg].remove(ver)
if(len(to_download[pkg]) == 0):
to_download.pop(pkg)
to_skip.append(pkg)
vprint("Skipping " + pkg)
return to_skip
def resolve_packages(package_names):
'''Returns a list of packages recursively resolved from the given list'''
global installed_packages
resolved_packages = []
def _resolve_packages(pkg_names):
toresolve = []
pkgs = parse_packages(pkg_names, toresolve)
to_skip = check_what_to_skip(pkgs)
for pkg in pkgs[:]:
if(pkg in resolved_packages or (pkg in to_skip and (pkg not in package_names or resolve_source))):
# don't resolve its dependencies.
pkgs.remove(pkg)
else:
resolved_packages.append(pkg)
toresolve.append(pkg)
if (len(toresolve) == 0):
return
vprint ("Resolving " + str(toresolve))
names = get_command_output(['urpmq', "--requires-recursive"] + toresolve)[0].strip().split("\n")
_resolve_packages(names)
_resolve_packages(package_names)
return resolved_packages
def get_srpm_names(pkgs):
'''Get a list of SRPM names for every given package name. Returns a dictionary {package_name_1: [srpm_name_1, srpm_name_2, ...], ...}'''
srpms = {}
cmd_tmp = cmd[:] + ['--sourcerpm'] + pkgs
names = get_command_output(cmd_tmp)[0]
for line in names.split("\n"):
line = line.strip()
if(line == ''):
continue
n = line.split(":")[0].strip()
v = ":".join((line.split(":")[1:])).strip()
if(n not in srpms):
srpms[n] = []
srpms[n].append(v)
return srpms
def get_srpm_url(url):
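# Derive the SRPM medium URL from a binary medium URL by rewriting the
# arch/media path components, e.g. (typical Mandriva-style layout):
#   .../2011/x86_64/media/contrib/release -> .../2011/SRPMS/contrib/release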
if(url.startswith("file://") or url.startswith("/")):
return url
tmp = url.split("/")
tmp[-4] = "SRPMS"
del tmp[-3]
return "/".join(tmp)
def list_srpm_urls():
global cmd, srpm_urls_loaded, srpm_urls
try:
srpm_urls_loaded
return srpm_urls
except NameError:
srpm_urls_loaded = True
vprint("Loading list of SRPM URLs...")
re_slash = re.compile("/")
lines = get_command_output(cmd + ["--list-url"])[0].strip().split("\n")
media = get_command_output(cmd + ["--list-media", 'active'])[0].strip().split("\n")
srpm_urls = []
for line in lines:
parts = line.split(" ")
medium = ' '.join(parts[:-1])
if medium not in media:
continue
url = parts[-1]
if(url.endswith("/")):
url = url[:-1]
if(re_slash.search(url) is not None):
srpm_urls.append(get_srpm_url(url))
return srpm_urls
def try_download(url):
''' Try to download the file. Return None on success, or the IOError instance on failure '''
path = os.path.join(command_line_arguments.dest_dir, os.path.basename(url))
vprint("Trying to download file " + url)
try:
if(not os.path.exists(path) or command_line_arguments.overwrite):
#(path, msg) = urlretrieve(url, path)
if(url.startswith('/')): # local file
shutil.copyfile(url, path)
else:
fd = urlopen(url)
file = open(path, 'w')
file.write(fd.read())
file.close()
fd.close()
qprint (_("* Downloaded: ") + url)
else:
qprint (_("* File exists, skipping: ") + url)
return None
except IOError, e:
return e
def get_package_fields(rpmname):
''' Return [name, version, suffix, path(prefix)] for given rpm file or package name '''
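# For example (hypothetical file name):
#   get_package_fields('liboil-tools-0.3.17-2mdv2011.0.x86_64.rpm')
#   -> ['liboil-tools', '0.3.17-2mdv2011.0', '.x86_64.rpm', '']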
suffix = ""
path = os.path.dirname(rpmname)
if(path):
path += "/"
filename = False
rpmname = os.path.basename(rpmname)
if(rpmname.endswith(".rpm")):
suffix = ".rpm"
rpmname = rpmname[:-4]
filename = True
if(rpmname.endswith(".src")):
suffix = ".src" + suffix
rpmname = rpmname[:-4]
name = rpmname.split("-")[:-2]
version = rpmname.split("-")[-2:]
else:
re_version = re.compile("(\.)?((alpha)|(cvs)|(svn)|(r))?\d+((mdv)|(mdk)|(mnb))")
if(filename):
parts = rpmname.split('.')
suffix = "." + parts[-1] + suffix
rpmname = '.'.join(parts[:-1]) # remove the architecture part
sections = rpmname.split("-")
if(re_version.search(sections[-1]) == None):
name = sections[:-3]
version = sections[-3:-1]
suffix = "-" + sections[-1] + suffix
else:
name = sections[:-2]
version = sections[-2:]
return ["-".join(name), "-".join(version), suffix, path]
#url = 'ftp://ftp.sunet.se/pub/Linux/distributions/mandrakelinux/official/2011/x86_64/media/contrib/release/lib64oil0.3_0-0.3.17-2mdv2011.0.x86_64.rpm'
#url = 'ftp://ftp.sunet.se/pub/Linux/distributions/mandrakelinux/official/2011/x86_64/media/contrib/release/liboil-tools-0.3.17-2mdv2011.0.x86_64.rpm'
#res = get_package_fields(url)
#print res
#exit()
def filter_versions(rpm_list):
''' When different versions of one package are given, remove the older versions and return only the newest one for every package. '''
if(command_line_arguments.all_versions):
return rpm_list
rpms = {}
vprint("Filtering input: " + str(rpm_list))
for srpm in rpm_list: # collect data
res = get_package_fields(srpm)
if(res[0] not in rpms):
rpms[res[0]] = []
rpms[res[0]].append(res[1:4])
for pkg in rpms.keys()[:]: # filter
L = rpms[pkg]
while(len(L)> 1):
if(rpm.evrCompare(L[0][0], L[1][0]) == 1):
del L[1]
else:
del L[0]
output = []
for pkg in rpms: # assembling package names
output.append ( rpms[pkg][0][2] + pkg + "-" + rpms[pkg][0][0] + rpms[pkg][0][1])
vprint ("Filtering output: " + str(output))
return output
def download_srpm(package, srpms):
'''Download the SRPM(s) with the given name, trying to find them in the repository. Returns a list of downloaded file names'''
vprint("downloading srpm(s) for package " + package)
srpm_urls = list_srpm_urls()
downloaded = []
for srpm in filter_versions(srpms[package]):
count = 0
for srpm_url in srpm_urls:
url = srpm_url + "/" + srpm
if(command_line_arguments.urls): # a correct url has to be printed!
if(not url_exists(url)):
continue
qprint(url)
if(not command_line_arguments.resolve):
count += 1
break
if(try_download(url) == None):
count += 1
downloaded.append(os.path.join(command_line_arguments.dest_dir, os.path.basename(url)))
break
if(count == 0):
eprint(_("Can not download SRPM for package") + srpm)
if(not command_line_arguments.ignore_errors):
exit(2)
return downloaded
def download_rpm(pkgs_to_download):
global resolve_source, downloaded_debug_pkgs
vprint("downloading packages " + ", ".join (pkgs_to_download))
cmd_bin = cmd[:] + ['--sources'] + pkgs_to_download
urls = get_command_output(cmd_bin)[0].strip().split("\n")
urls = filter_versions(urls)
if(command_line_arguments.binary or resolve_source):
for url in urls:
if(command_line_arguments.urls):
qprint(url)
continue
res = try_download(url)
if(res != None):
eprint(_("Can not download RPM") + "%s\n(%s)" % (url, res) )
if(not command_line_arguments.ignore_errors):
exit(3)
if(command_line_arguments.debug_info):
pkgs_to_download_debug = [p+"-debug" for p in pkgs_to_download[:]]
qprint(_("Resolving debug-info packages..."))
cmd_debug = ['urpmq', '--media', 'debug', '--sources'] + pkgs_to_download_debug
res = get_command_output(cmd_debug, fatal_fails=False)
# urpmq output prefix (in the Russian locale: "Нет пакета с названием", i.e. "No package named")
text = _("No package named ")
vprint("Removing missed debug packages from query...")
removed = []
if(res[2] != 0): # return code is not 0
for line in res[1].split("\n"):
if line.startswith(text):
pkg = line[len(text):]
pkgs_to_download_debug.remove(pkg)
removed.append(pkg)
vprint("Removed %d packages" % len(removed))
vprint(removed)
cmd_debug = ['urpmq', '--media', 'debug', '--sources'] + pkgs_to_download_debug
urls = get_command_output(cmd_debug)[0].strip().split("\n")
urls = filter_versions(urls)
for url in urls:
if(command_line_arguments.urls):
qprint(url)
continue
res = try_download(url)
if(res != None):
eprint(_("Can not download RPM") + "%s:\n(%s)\n" % (os.path.basename(url), res) +
_("Maybe you need to update urpmi database (urpmi.update -a)?") )
if(not command_line_arguments.ignore_errors):
exit(2)
else:
path = os.path.join(command_line_arguments.dest_dir, os.path.basename(url))
downloaded_debug_pkgs.append(path)
if(command_line_arguments.debug_info_install):
for pkg in downloaded_debug_pkgs:
qprint(_('Installing ') + os.path.basename(str(pkg)) + "...")
command = ['rpm', '-i', pkg]
res = get_command_output(command,fatal_fails=False)
if(res[2] != 0): # rpm return code is not 0
qprint(_('Error while calling command') + ' "' + ' '.join(command) + '":\n' + res[1].strip())
def filter_debug_rpm_urls(pkg_name):
command = ['urpmq', '--media', 'debug', '--sources', pkg_name + "-debug"]
res = get_command_output(command, fatal_fails=False)
if(res[2] != 0): # return code is not 0
qprint(_("Debug package for '%s' not found") % pkg_name)
return []
names = res[0].strip().split("\n")
if(command_line_arguments.all_versions):
return names
get_installed_packages()
#print names
#print installed_packages[pkg_name]
urls = []
for n in names:
res = get_package_fields(os.path.basename(n))
version = "-".join(res[1].split("-")[0:2] )
if(pkg_name not in installed_packages):
break
for inst_pkg in installed_packages[pkg_name]:
if(version == inst_pkg[0] + "-" + inst_pkg[1]):
urls.append(n)
break
return urls
def Main():
global cmd, resolve_source
resolve_source = False # variable that makes download_rpm download resolved build-deps
cmd = ['urpmq']
if(command_line_arguments.include_media != None):
media = ''
for i in command_line_arguments.include_media:
media = ",".join([media]+i)
cmd = cmd + ['--media', media[1:]]
if(command_line_arguments.exclude_media != None):
media = ''
for i in command_line_arguments.exclude_media:
media = ",".join([media]+i)
cmd = cmd + ['--excludemedia', media[1:]]
missing_files = []
for pkg in command_line_arguments.packages[:]:
if(pkg.endswith(".rpm")):
if(not os.path.exists(pkg) or not os.path.isfile(pkg)):
missing_files.append(pkg)
continue
name = get_rpm_tag_from_file("name", pkg)
command_line_arguments.packages.remove(pkg)
command_line_arguments.packages.append(name)
if(missing_files):
eprint(_("Parameters that end with '.rpm' seem to be local files, but the folowing files do not exist: ") + ", ".join(missing_files))
if(not command_line_arguments.ignore_errors):
exit(4)
if(command_line_arguments.source):
download(command_line_arguments.packages, True)
if(command_line_arguments.binary or (not command_line_arguments.source and command_line_arguments.debug_info)):
download(command_line_arguments.packages, False)
def get_rpm_tag_from_file(tag, file):
rpm_ts = rpm.TransactionSet()
fd = os.open(file, os.O_RDONLY)
rpm_hdr = rpm_ts.hdrFromFdno(fd)
os.close(fd)
return rpm_hdr.sprintf("%{" + tag + "}").strip()
def download(packages, src):
global resolve_source
pkgs_to_download = packages
if(src):
if(command_line_arguments.urls):
qprint(_("Searching src.rpm file(s) in repository..."))
else:
qprint(_("Downloading src.rpm file(s)..."))
srpms = get_srpm_names(packages)
#for pkg in packages[:]:
#if (pkg not in srpms:
#eprint("Package " + pkg + " not fond!")
#if(not command_line_arguments.ignore_errors):
# exit(1)
#else:
# eprint ("Package is dequeued.")
#packages.remove(pkg)
srpms_list= []
for package in packages:
srpms_list = srpms_list + download_srpm(package, srpms)
if(len(srpms_list) == 0):
return
if(command_line_arguments.resolve):
resolve_source = True
pkgs = []
lines = get_command_output(cmd + ['--requires-recursive'] + srpms_list)[0].strip().split("\n")
pkgs = parse_packages(lines, [])
download(pkgs, False)
resolve_source = False
else:
pkgs_to_download = packages
if(command_line_arguments.resolve):
if(resolve_source):
qprint(_("Resolving build dependencies..."))
else:
qprint(_("Resolving dependencies..."))
pkgs_to_download = resolve_packages(packages)
qprint (_("Resolved %d packages") % len(pkgs_to_download))
if(len(pkgs_to_download) == 0):
qprint(_("Nothing to download"))
return
download_rpm(pkgs_to_download)
downloaded_debug_pkgs = []
installed_loaded=False
VERSION = "urpm-downloader 2.2.4"
if __name__ == '__main__':
parse_command_line()
Main()

View file

@ -1,556 +0,0 @@
#!/usr/bin/python
'''
" Package cleanup utility for distributions using urpm
" Based on package-cleanup from yum-utils
"
" Copyright (C) 2011 ROSA Laboratory.
" Written by Denis Silakov <denis.silakov@rosalab.ru>
"
" This program is free software: you can redistribute it and/or modify
" it under the terms of the GNU General Public License or the GNU Lesser
" General Public License as published by the Free Software Foundation,
" either version 2 of the Licenses, or (at your option) any later version.
"
" This program is distributed in the hope that it will be useful,
" but WITHOUT ANY WARRANTY; without even the implied warranty of
" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
" GNU General Public License for more details.
"
" You should have received a copy of the GNU General Public License
" and the GNU Lesser General Public License along with this program.
" If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import logging
import os
import re
import subprocess
import string
import urpmmisc
import types
from rpm5utils import miscutils, arch, transaction
import argparse
import rpm
import gettext
gettext.install('urpm-tools')
def exactlyOne(l):
return len(filter(None, l)) == 1
class PackageCleanup():
NAME = 'urpm-package-cleanup'
VERSION = '0.1'
USAGE = """
urpm-package-cleanup: helps find problems in the system rpmdb and correct them
usage: urpm-package-cleanup --problems or --leaves or --orphans or --oldkernels
"""
def __init__(self):
self.addCmdOptions()
self.main()
def addCmdOptions(self):
self.ArgParser = argparse.ArgumentParser(description=_('Find problems in the system rpmdb and correct them'))
self.ArgParser.add_argument("--qf", "--queryformat", dest="qf",
action="store",
default='%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}',
help=_("Query format to use for output."))
self.ArgParser.add_argument("--auto", default=False,
dest="auto",action="store_true",
help=_('Use non-interactive mode'))
self.ArgParser.add_argument("--version", action='version', version=self.VERSION)
probgrp = self.ArgParser.add_argument_group(_('Orphans Options'))
probgrp.add_argument("--orphans", default=False,
dest="orphans",action="store_true",
help=_('List installed packages which are not available from'\
' currently configured repositories'))
probgrp.add_argument("--update", default=False,
dest="update",action="store_true",
help=_('Use only update media. This means that urpmq will search'\
' and resolve dependencies only in media marked as containing updates'\
' (e.g. which have been created with "urpmi.addmedia --update").'))
probgrp.add_argument("--media", metavar='media', nargs='+',
help=_('Select specific media to be used, instead of defaulting to all available '\
'media (or all update media if --update is used). No rpm will be found in '
'other media.'))
probgrp.add_argument("--excludemedia", metavar='media', nargs='+',
help=_('Do not use the specified media.'))
probgrp = self.ArgParser.add_argument_group(_('Dependency Problems Options'))
probgrp.add_argument("--problems", default=False,
dest="problems", action="store_true",
help=_('List dependency problems in the local RPM database'))
probgrp.add_argument("--suggests", default=False,
dest="suggests", action="store_true",
help=_('List missing suggestions of installed packages'))
dupegrp = self.ArgParser.add_argument_group(_('Duplicate Package Options'))
dupegrp.add_argument("--dupes", default=False,
dest="dupes", action="store_true",
help=_('Scan for duplicates in your rpmdb'))
dupegrp.add_argument("--cleandupes", default=False,
dest="cleandupes", action="store_true",
help=_('Scan for duplicates in your rpmdb and remove the older ones'))
dupegrp.add_argument("--noscripts", default=False,
dest="noscripts", action="store_true",
help=_("disable rpm scriptlets from running when cleaning duplicates"))
leafgrp = self.ArgParser.add_argument_group(_('Leaf Node Options'))
leafgrp.add_argument("--leaves", default=False, dest="leaves",
action="store_true",
help=_('List leaf nodes in the local RPM database'))
leafgrp.add_argument("--all", default=False, dest="all_nodes",
action="store_true",
                         help=_('list all leaf nodes, even those that do not match'\
                                ' leaf-regex'))
leafgrp.add_argument("--leaf-regex",
default="(^(compat-)?lib(?!reoffice).+|.*libs?[\d-]*|.*-data$)",
help=_('A package name that matches this regular expression' \
' (case insensitively) is a leaf'))
leafgrp.add_argument("--exclude-devel", default=False,
action="store_true",
help=_('do not list development packages as leaf nodes'))
leafgrp.add_argument("--exclude-bin", default=False,
action="store_true",
help=_('do not list packages with files in a bin dirs as '\
'leaf nodes'))
kernelgrp = self.ArgParser.add_argument_group(_('Old Kernel Options'))
kernelgrp.add_argument("--oldkernels", default=False,
dest="kernels",action="store_true",
help=_("Remove old kernel and kernel-devel packages"))
kernelgrp.add_argument("--count",default=2,dest="kernelcount",
action="store",
help=_('Number of kernel packages to keep on the '\
'system (default 2)'))
kernelgrp.add_argument("--keepdevel", default=False, dest="keepdevel",
action="store_true",
help=_('Do not remove kernel-devel packages when '
'removing kernels'))
    def _removePkg(self, pkg):
        """remove given package"""
        # No smart behavior yet, simply call urpme for the package
        pkgName = pkg['name'] + "-" + pkg['version']
        if pkg['release']:
            pkgName += '-' + pkg['release']
        # Pass each erase flag as its own argv element; joining the flags
        # into one string would hand urpme a single malformed option
        subprocess.call(['urpme', pkgName] + self.tsflags)
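    # Resulting call, sketched with hypothetical values: a header with name
    # 'foo', version '1.2', release '3' and tsflags ['--auto'] runs the
    # equivalent of subprocess.call(['urpme', 'foo-1.2-3', '--auto']).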
@staticmethod
def _genDeptup(name, flags, version):
""" Given random stuff, generate a usable dep tuple. """
if flags == 0:
flags = None
if type(version) is types.StringType:
(r_e, r_v, r_r) = miscutils.stringToVersion(version)
# would this ever be a ListType?
elif type(version) in (types.TupleType, types.ListType):
(r_e, r_v, r_r) = version
else:
# FIXME: This isn't always type(version) is types.NoneType:
# ...not sure what it is though, come back to this
r_e = r_v = r_r = None
deptup = (name, urpmmisc.share_data(flags),
(urpmmisc.share_data(r_e), urpmmisc.share_data(r_v),
urpmmisc.share_data(r_r)))
return urpmmisc.share_data(deptup)
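    # Example shape, assuming yum-compatible stringToVersion semantics
    # (hypothetical input):
    #   _genDeptup('foo', rpm.RPMSENSE_GREATER | rpm.RPMSENSE_EQUAL, '1:2.0-3')
    # returns ('foo', 12, ('1', '2.0', '3')) -- 12 is GREATER|EQUAL and the
    # EVR string is split into (epoch, version, release).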
    def _getProvides(self, req, flags, ver):
        """searches the rpmdb for what provides the arguments;
        returns a dict mapping providing package headers to dep tuples,
        possibly empty"""
        deptup = self._genDeptup(req, flags, ver)
        # Consult the cache before opening a transaction and running dbMatch
        if deptup in self._get_pro_cache:
            return self._get_pro_cache[deptup]
        ts = rpm.TransactionSet()
        mi = ts.dbMatch('provides', req)
r_v = deptup[2][1]
result = { }
for po in mi:
prov_idx = 0
for prov in po['provides']:
if prov != req:
prov_idx += 1
continue
prov_ver = po['provideversion'][prov_idx]
prov_flags = po['provideflags'][prov_idx]
prov_idx += 1
if req[0] == '/' and r_v is None:
result[po] = [(req, None, (None, None, None))]
continue
if deptup[2][1] is None and deptup[2][2] is None and deptup[2][0] is None:
result[po] = [(req, None, (None, None, None))]
else:
provtup = (req, prov_flags, (po['epoch'], po['version'], po['release']))
matched = miscutils.rangeCompare(deptup, provtup)
if not matched:
print "NOT MATCHED " + str(deptup) + " VS " + str(provtup)
if matched:
result[po] = [(req, None, (None, None, None))]
self._get_pro_cache[deptup] = result
# Check if we have dependency on file not listed
# directly in PROVIDES
if not result and req[0] == '/' and r_v is None:
mi = ts.dbMatch('filepaths', req)
for po in mi:
result[po] = [(req, None, (None, None, None))]
return result
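    # Usage sketch (hypothetical requirement): _getProvides('libfoo.so.1', 0, '')
    # yields a dict keyed by every installed header providing 'libfoo.so.1';
    # an empty dict means the requirement is unresolvable in the rpmdb.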
def _find_missing_deps(self, pkgs):
"""find any missing dependencies for any installed package in pkgs"""
providers = {} # To speed depsolving, don't recheck deps that have
# already been checked
problems = []
missing_suggests = []
for po in pkgs:
            req_idx = 0
for req in po['requires']:
ver = po['requireversion'][req_idx]
flags = po['requireflags'][req_idx]
req_idx += 1
if req.startswith('rpmlib'): continue # ignore rpmlib deps
if (req,flags,ver) not in providers:
resolve_sack = self._getProvides(req,flags,ver)
else:
resolve_sack = providers[(req,flags,ver)]
if len(resolve_sack) < 1:
#~ flags = yum.depsolve.flags.get(flags, flags)
missing = miscutils.formatRequire(req,ver,flags)
# RPMSENSE_MISSINGOK == (1 << 19)
if req in po['suggests'] or flags & (1 << 19):
missing_suggests.append((po, "suggests %s" % missing))
else:
problems.append((po, "requires %s" % missing))
else:
# Store the resolve_sack so that we can re-use it if another
# package has the same requirement
providers[(req,flags,ver)] = resolve_sack
return [problems, missing_suggests]
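    # Both returned lists pair a header with a message, e.g. (hypothetical):
    #   problems         -> [(<hdr foo>, "requires libbar.so.1")]
    #   missing_suggests -> [(<hdr foo>, "suggests baz")]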
def _find_installed_duplicates(self, ignore_kernel=True):
"""find installed duplicate packages returns a dict of
pkgname = [[dupe1, dupe2], [dupe3, dupe4]] """
multipkgs = {}
singlepkgs = {}
results = {}
ts = rpm.TransactionSet()
mi = ts.dbMatch()
for pkg in mi:
# just skip kernels and everyone is happier
if ignore_kernel:
if 'kernel' in pkg['provides_names']:
continue
if pkg['name'].startswith('kernel'):
continue
# public keys from different repos may have different versions
if pkg['name'].startswith('gpg-pubkey'):
continue
name = pkg['name']
if name in multipkgs or name in singlepkgs:
continue
pkgs = ts.dbMatch( 'name', name )
for po in pkgs:
if name not in multipkgs:
multipkgs[name] = []
if name not in singlepkgs:
singlepkgs[name] = []
if arch.isMultiLibArch(arch=po['arch']):
multipkgs[name].append(po)
elif po['arch'] == 'noarch':
multipkgs[name].append(po)
singlepkgs[name].append(po)
elif not arch.isMultiLibArch(arch=po['arch']):
singlepkgs[name].append(po)
else:
print _("Warning: neither single nor multi lib arch: %s ") % po['arch']
for (name, pkglist) in multipkgs.items() + singlepkgs.items():
if len(pkglist) <= 1:
continue
if name not in results:
results[name] = []
if pkglist not in results[name]:
results[name].append(pkglist)
return results
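    # Shape of the result (hypothetical packages):
    #   {'foo': [[<foo-1.0-1 header>, <foo-1.1-1 header>]]}
    # one inner list per set of co-installed versions of the same name.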
def _remove_old_dupes(self):
"""add older duplicate pkgs to be removed in the transaction"""
dupedict = self._find_installed_duplicates()
removedupes = []
for (name,dupelists) in dupedict.items():
for dupelist in dupelists:
dupelist.sort()
for lowpo in dupelist[0:-1]:
removedupes.append(lowpo)
# No smart behavior yet, simply call urpme for every package
for po in removedupes:
self._removePkg(po)
def _should_show_leaf(self, po, leaf_regex, exclude_devel, exclude_bin):
"""
Determine if the given pkg should be displayed as a leaf or not.
Return True if the pkg should be shown, False if not.
"""
if po['name'] == 'gpg-pubkey':
return False
name = po['name']
if exclude_devel and name.endswith('devel'):
return False
if exclude_bin:
for file_name in po['filepaths']:
if file_name.find('bin') != -1:
return False
if leaf_regex.match(name):
return True
return False
def _get_kernels(self):
"""return a list of all installed kernels, sorted newest to oldest"""
ts = rpm.TransactionSet()
mi = ts.dbMatch('provides','kernel')
kernlist = []
for h in mi:
kernlist.append(h)
kernlist.sort()
kernlist.reverse()
return kernlist
def _get_old_kernel_devel(self, kernels, removelist):
""" List all kernel devel packages that either belong to kernel versions that
are no longer installed or to kernel version that are in the removelist"""
devellist = []
ts = rpm.TransactionSet()
mi = ts.dbMatch('provides','kernel-devel')
for po in mi:
# For all kernel-devel packages see if there is a matching kernel
# in kernels but not in removelist
keep = False
for kernel in kernels:
if kernel in removelist:
continue
(kname,karch,kepoch,kver,krel) = (kernel['name'],kernel['arch'],kernel['epoch'],kernel['version'],kernel['release'])
(dname,darch,depoch,dver,drel) = (po['name'],po['arch'],po['epoch'],po['version'],po['release'])
if (karch,kepoch,kver,krel) == (darch,depoch,dver,drel):
keep = True
if not keep:
devellist.append(po)
return devellist
def _remove_old_kernels(self, count, keepdevel):
"""Remove old kernels, keep at most count kernels (and always keep the running
kernel"""
count = int(count)
kernels = self._get_kernels()
runningkernel = os.uname()[2]
        # Vanilla kernels don't have a release, only a version
if '-' in runningkernel:
splt = runningkernel.split('-')
if len(splt) == 2:
(kver,krel) = splt
            else: # Handle cases where a custom-built kernel has an extra '-' in the release
                kver = splt[0]
                krel = "-".join(splt[1:])
if krel.split('.')[-1] == os.uname()[-1]:
krel = ".".join(krel.split('.')[:-1])
else:
kver = runningkernel
krel = ""
remove = kernels[count:]
toremove = []
# Remove running kernel from remove list
for kernel in remove:
if kernel['version'] == kver and krel.startswith(kernel['release']):
print _("Not removing kernel %(kver)s-%(krel)s because it is the running kernel") % {'kver': kver, 'krel': krel}
else:
toremove.append(kernel)
        # Now extend the list with all kernel-devel packages that either
# have no matching kernel installed or belong to a kernel that is to
# be removed
if not keepdevel:
toremove.extend(self._get_old_kernel_devel(kernels, toremove))
for po in toremove:
self._removePkg(po)
def main(self):
opts = self.ArgParser.parse_args(sys.argv[1:])
if not exactlyOne([opts.problems, opts.dupes, opts.leaves, opts.kernels,
opts.orphans, opts.cleandupes]):
print self.ArgParser.format_help()
sys.exit(1)
self.tsflags = []
if opts.problems:
ts = rpm.TransactionSet()
mi = ts.dbMatch()
self._get_pro_cache = {}
(issues, missing_suggests) = self._find_missing_deps(mi)
for (pkg, prob) in issues:
print _('Package %(qf)s %(prob)s') % {'qf': pkg.sprintf(opts.qf), 'prob': prob}
            if opts.suggests:
                print _("Missing suggests:")
                for (pkg, prob) in missing_suggests:
                    print _('Package %(qf)s %(prob)s') % {'qf': pkg.sprintf(opts.qf), 'prob': prob}
if issues:
sys.exit(2)
else:
if (not opts.suggests) or (len(missing_suggests) == 0):
print _('No Problems Found')
sys.exit(0)
else:
sys.exit(3)
if opts.dupes:
dupes = self._find_installed_duplicates()
for name, pkglists in dupes.items():
for pkglist in pkglists:
for pkg in pkglist:
print '%s' % pkg.sprintf(opts.qf)
sys.exit(0)
if opts.kernels:
if os.geteuid() != 0:
print _("Error: Cannot remove kernels as a user, must be root")
sys.exit(1)
if int(opts.kernelcount) < 1:
print _("Error: should keep at least 1 kernel!")
sys.exit(100)
if opts.auto:
self.tsflags.append('--auto')
self._remove_old_kernels(opts.kernelcount, opts.keepdevel)
sys.exit(0)
#~ self.run_with_package_names.add('yum-utils')
#~ if hasattr(self, 'doUtilBuildTransaction'):
#~ errc = self.doUtilBuildTransaction()
#~ if errc:
#~ sys.exit(errc)
#~ else:
#~ try:
#~ self.buildTransaction()
#~ except yum.Errors.YumBaseError, e:
#~ self.logger.critical("Error building transaction: %s" % e)
#~ sys.exit(1)
#~
#~ if len(self.tsInfo) < 1:
#~ print 'No old kernels to remove'
#~ sys.exit(0)
#~
#~ sys.exit(self.doUtilTransaction())
if opts.leaves:
self._ts = transaction.TransactionWrapper()
leaves = self._ts.returnLeafNodes()
leaf_reg = re.compile(opts.leaf_regex, re.IGNORECASE)
for po in sorted(leaves):
if opts.all_nodes or \
self._should_show_leaf(po, leaf_reg, opts.exclude_devel,
opts.exclude_bin):
print po.sprintf(opts.qf)
sys.exit(0)
if opts.orphans:
""" Just a wrapper that invokes urpmq """
aux_opts = ""
if opts.excludemedia:
aux_opts = " --excludemedia " + " ".join(opts.excludemedia)
if opts.media:
aux_opts += " --media " + " ".join(opts.media)
if opts.update:
aux_opts += " --update "
subprocess.call(["urpmq", "--not-available", aux_opts])
sys.exit(0)
if opts.cleandupes:
if os.geteuid() != 0:
print _("Error: Cannot remove packages as a user, must be root")
sys.exit(1)
if opts.noscripts:
self.tsflags.append('--noscripts')
if opts.auto:
self.tsflags.append('--auto')
self._remove_old_dupes()
#~ self.run_with_package_names.add('yum-utils')
#~ if hasattr(self, 'doUtilBuildTransaction'):
#~ errc = self.doUtilBuildTransaction()
#~ if errc:
#~ sys.exit(errc)
#~ else:
#~ try:
#~ self.buildTransaction()
#~ except yum.Errors.YumBaseError, e:
#~ self.logger.critical("Error building transaction: %s" % e)
#~ sys.exit(1)
#~ if len(self.tsInfo) < 1:
#~ print 'No duplicates to remove'
#~ sys.exit(0)
if __name__ == '__main__':
# setup_locale()
util = PackageCleanup()
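# Typical invocations, sketched from the options defined above:
#   urpm-package-cleanup --problems
#   urpm-package-cleanup --leaves --exclude-devel
#   urpm-package-cleanup --oldkernels --count 1 --auto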

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,239 +0,0 @@
#!/usr/bin/python
'''
" Repomanage utility for distributions using urpm
"
" The tool traverses a directory, build a dict of
" foo[(name, arch)] = [/path/to/file/that/is/highest, /path/to/equalfile]
" and then reports newest/old packages
"
" Based on repomanage from yum-utils
"
" Copyright (C) 2011 ROSA Laboratory.
" Written by Denis Silakov <denis.silakov@rosalab.ru>
"
" This program is free software: you can redistribute it and/or modify
" it under the terms of the GNU General Public License or the GNU Lesser
" General Public License as published by the Free Software Foundation,
" either version 2 of the Licenses, or (at your option) any later version.
"
" This program is distributed in the hope that it will be useful,
" but WITHOUT ANY WARRANTY; without even the implied warranty of
" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
" GNU General Public License for more details.
"
" You should have received a copy of the GNU General Public License
" and the GNU Lesser General Public License along with this program.
" If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
import rpm
import fnmatch
import subprocess
import string
from rpm5utils import miscutils, arch, transaction, Rpm5UtilsError
import urpmmisc
import argparse
import gettext
gettext.install('urpm-tools')
def errorprint(stuff):
print >> sys.stderr, stuff
def getFileList(path, ext, filelist):
"""Return all files in path matching ext, store them in filelist, recurse dirs
return list object"""
extlen = len(ext)
try:
dir_list = os.listdir(path)
except OSError, e:
errorprint(_('Error accessing directory %(path)s, %(e)s') % {"path": path,"e": str(e)})
return []
for d in dir_list:
if os.path.isdir(path + '/' + d):
filelist = getFileList(path + '/' + d, ext, filelist)
else:
            if d[-extlen:].lower() == ext:
newpath = os.path.normpath(path + '/' + d)
filelist.append(newpath)
return filelist
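# Illustration (hypothetical tree): getFileList('/srv/repo', '.rpm', [])
# recurses into subdirectories and returns every path ending in '.rpm',
# e.g. ['/srv/repo/x/foo-1.0-1.noarch.rpm', '/srv/repo/bar-2.0-1.x86_64.rpm'].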
def trimRpms(rpms, excludeGlobs):
badrpms = []
for fn in rpms:
for glob in excludeGlobs:
if fnmatch.fnmatch(fn, glob):
#~ print 'excluded: %s' % fn
if fn not in badrpms:
badrpms.append(fn)
for fn in badrpms:
if fn in rpms:
rpms.remove(fn)
return rpms
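# e.g. (hypothetical): trimRpms(['foo.rpm', 'foo-debuginfo.rpm'],
# ['*debuginfo*']) returns ['foo.rpm'].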
def parseargs(args):
parser = argparse.ArgumentParser(description=_('manage a directory of rpm packages and report newest or oldest packages'))
# new is only used to make sure that the user is not trying to get both
# new and old, after this old and not old will be used.
# (default = not old = new)
parser.add_argument("path", metavar="path",
help=_('path to directory with rpm packages'))
    group = parser.add_mutually_exclusive_group()
group.add_argument("-o", "--old", default=False, action="store_true",
help=_('print the older packages'))
group.add_argument("-n", "--new", default=False, action="store_true",
help=_('print the newest packages (this is the default behavior)'))
parser.add_argument("-r", "--remove-old", default=False, action="store_true",
help=_('remove older packages'))
parser.add_argument("-s", "--space", default=False, action="store_true",
help=_('space separated output, not newline'))
parser.add_argument("-k", "--keep", default=1, dest='keep', action="store",
help=_('number of newest packages to keep - defaults to 1'))
parser.add_argument("-c", "--nocheck", default=0, action="store_true",
help=_('do not check package payload signatures/digests'))
    group_log = parser.add_mutually_exclusive_group()
group_log.add_argument("-q", "--quiet", default=0, action="store_true",
help=_('be completely quiet'))
group_log.add_argument("-V", "--verbose", default=False, action="store_true",
help=_('be verbose - say which packages are decided to be old and why \
(this info is dumped to STDERR)'))
opts = parser.parse_args()
return opts
def main(args):
options = parseargs(args)
mydir = options.path
rpmList = []
rpmList = getFileList(mydir, '.rpm', rpmList)
verfile = {}
pkgdict = {} # hold all of them - put them in (n,a) = [(e,v,r),(e1,v1,r1)]
keepnum = int(options.keep)*(-1) # the number of items to keep
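    # e.g. --keep 2 gives keepnum == -2, so evrlist[:keepnum] are the old
    # EVRs and evrlist[keepnum:] the two newest after the sort below.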
if len(rpmList) == 0:
errorprint(_('No files to process'))
sys.exit(1)
ts = rpm.TransactionSet()
if options.nocheck:
ts.setVSFlags(~(rpm._RPMVSF_NOPAYLOAD))
else:
ts.setVSFlags(~(rpm.RPMVSF_NOMD5|rpm.RPMVSF_NEEDPAYLOAD))
for pkg in rpmList:
try:
hdr = miscutils.hdrFromPackage(ts, pkg)
except Rpm5UtilsError, e:
msg = _("Error opening pkg %(pkg)s: %(err)s") % {"pkg": pkg, "err": str(e)}
errorprint(msg)
continue
pkgtuple = miscutils.pkgDistTupleFromHeader(hdr)
(n,a,e,v,r,d) = pkgtuple
del hdr
if (n,a) not in pkgdict:
pkgdict[(n,a)] = []
pkgdict[(n,a)].append((e,v,r,d))
if pkgtuple not in verfile:
verfile[pkgtuple] = []
verfile[pkgtuple].append(pkg)
for natup in pkgdict.keys():
evrlist = pkgdict[natup]
if len(evrlist) > 1:
evrlist = urpmmisc.unique(evrlist)
evrlist.sort(miscutils.compareDEVR)
pkgdict[natup] = evrlist
del ts
# now we have our dicts - we can return whatever by iterating over them
outputpackages = []
# a flag indicating that old packages were found
old_found = 0
#if new
if not options.old:
for (n,a) in pkgdict.keys():
evrlist = pkgdict[(n,a)]
if len(evrlist) < abs(keepnum):
newevrs = evrlist
else:
newevrs = evrlist[keepnum:]
if len(evrlist[:keepnum]) > 0:
old_found = 1
            if options.remove_old:
                for dropped in evrlist[:keepnum]:
                    (e,v,r,d) = dropped
                    # Iterate the recorded paths instead of stringifying the
                    # list, and force removal as the --old branch does
                    for pkg in verfile[(n,a,e,v,r,d)]:
                        subprocess.call(["rm", "-f", pkg])
if options.verbose:
for dropped in evrlist[:keepnum]:
(e,v,r,d) = dropped
print >> sys.stderr, _("Dropped ") + str(verfile[(n,a,e,v,r,d)])
print >> sys.stderr, _(" superseded by: ")
for left in newevrs:
(e,v,r,d) = left
print >> sys.stderr, " " + str(verfile[(n,a,e,v,r,d)])
for (e,v,r,d) in newevrs:
for pkg in verfile[(n,a,e,v,r,d)]:
outputpackages.append(pkg)
if options.old:
for (n,a) in pkgdict.keys():
evrlist = pkgdict[(n,a)]
if len(evrlist) < abs(keepnum):
continue
oldevrs = evrlist[:keepnum]
if len(oldevrs) > 0:
old_found = 1
for (e,v,r,d) in oldevrs:
for pkg in verfile[(n,a,e,v,r,d)]:
outputpackages.append(pkg)
if options.remove_old:
subprocess.call(["rm", "-f", pkg])
if options.verbose:
print >> sys.stderr, _("Dropped ") + pkg
print >> sys.stderr, _(" superseded by: ")
for left in evrlist[keepnum:]:
(e,v,r,d) = left
print >> sys.stderr, " " + str(verfile[(n,a,e,v,r,d)])
if not options.quiet:
outputpackages.sort()
for pkg in outputpackages:
if options.space:
print '%s' % pkg,
else:
print pkg
if old_found==1:
sys.exit(3)
if __name__ == "__main__":
main(sys.argv)
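# Example runs (illustrative):
#   urpm-repomanage -n /srv/repo        # print only the newest packages
#   urpm-repomanage -o -V /srv/repo     # print old packages, explain on stderr
#   urpm-repomanage -o -r /srv/repo     # remove old packages from disk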

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,80 +0,0 @@
Name: urpm-tools
Version: 2.1
Release: 1
Summary: Utilities that help to work with URPM-based repositories
Group: System/Configuration/Packaging
License: GPLv2
URL: http://wiki.rosalab.ru/index.php/Urpm-tools
Source0: %{name}-%{version}.tar.gz
BuildArch: noarch
BuildRoot: %{name}-%{version}
Requires: urpmi >= 6.68
Requires: python-rpm >= 5.3
Requires: libxml2-python >= 2.7
Requires: gzip
Requires: python-rpm5utils = %{version}
%description
%{name} is a collection of utilities for URPM-based repositories.
They make URPM-based repositories easier and more powerful to use.
These tools include: urpm-downloader, urpm-package-cleanup,
urpm-repoclosure, urpm-repodiff, urpm-repomanage, urpm-repograph,
urpm-reposync
%package -n python-rpm5utils
Group: Development/Python
Summary: Auxiliary modules to work with rpm
Provides: python-rpm5utils = %{version}-%{release}
%description -n python-rpm5utils
%{name} contains some useful modules that are used by %{name}.
Mostly taken from yum.
%prep
%setup -q -n %{name}-%{version}
%install
rm -rf %{buildroot}
make install DESTDIR=$RPM_BUILD_ROOT
%find_lang %{name}
%files -f %{name}.lang
%defattr(-,root,root,-)
%{_bindir}/urpm-downloader
%{_bindir}/urpm-package-cleanup
%{_bindir}/urpm-repoclosure
%{_bindir}/urpm-repodiff
%{_bindir}/urpm-repomanage
%{_bindir}/urpm-repograph
%{_bindir}/urpm-reposync
%{_mandir}/man1/urpm-downloader.1.xz
%{_mandir}/man1/urpm-package-cleanup.1.xz
%{_mandir}/man1/urpm-repoclosure.1.xz
%{_mandir}/man1/urpm-repodiff.1.xz
%{_mandir}/man1/urpm-repomanage.1.xz
%{_mandir}/man1/urpm-repograph.1.xz
%{_mandir}/man1/urpm-reposync.1.xz
%{_datadir}/locale/*/LC_MESSAGES/urpm-tools.mo
%doc COPYING
%files -n python-rpm5utils
%defattr(-,root,root,-)
%dir %{py_puresitedir}/rpm5utils
%dir %{py_puresitedir}/rpm5utils/tests
%dir %{py_puresitedir}/rpm5utils/urpmgraphs
%dir %{py_puresitedir}/rpm5utils/urpmgraphs/algorithms
%dir %{py_puresitedir}/rpm5utils/urpmgraphs/algorithms/components
%dir %{py_puresitedir}/rpm5utils/urpmgraphs/classes
%{py_puresitedir}/urpmmisc.py
%{py_puresitedir}/rpm5utils/*.py*
%{py_puresitedir}/rpm5utils/tests/*.py*
%{py_puresitedir}/rpm5utils/urpmgraphs/*.py*
%{py_puresitedir}/rpm5utils/urpmgraphs/algorithms/*.py*
%{py_puresitedir}/rpm5utils/urpmgraphs/algorithms/components/*.py*
%{py_puresitedir}/rpm5utils/urpmgraphs/classes/*.py*
%doc rpm5utils/COPYING


@ -1,182 +0,0 @@
'''
" Miscellaneous routines used by urpm-tools
"
" Taken from yum's misc.py
'''
import types
_share_data_store = {}
_share_data_store_u = {}
def share_data(value):
""" Take a value and use the same value from the store,
if the value isn't in the store this one becomes the shared version. """
# We don't want to change the types of strings, between str <=> unicode
# and hash('a') == hash(u'a') ... so use different stores.
# In theory eventually we'll have all of one type, but don't hold your breath.
store = _share_data_store
if isinstance(value, unicode):
store = _share_data_store_u
# hahahah, of course the above means that:
# hash(('a', 'b')) == hash((u'a', u'b'))
# ...which we have in deptuples, so just screw sharing those atm.
if type(value) == types.TupleType:
return value
return store.setdefault(value, value)
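# Sketch: two equal strings read from different headers come back as one
# shared object, i.e. share_data(a) is share_data(b) holds whenever a == b;
# tuples pass through unshared for the hashing reason noted above.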
def string_to_prco_tuple(prcoString):
"""returns a prco tuple (name, flags, (e, v, r)) for a string"""
if type(prcoString) == types.TupleType:
(n, f, v) = prcoString
else:
n = prcoString
f = v = None
# We love GPG keys as packages, esp. awesome provides like:
# gpg(Fedora (13) <fedora@fedoraproject.org>)
if n[0] != '/' and not n.startswith("gpg("):
# not a file dep - look at it for being versioned
prco_split = n.split()
if len(prco_split) == 3:
n, f, v = prco_split
# now we have 'n, f, v' where f and v could be None and None
if f is not None and f not in constants.LETTERFLAGS:
if f not in constants.SYMBOLFLAGS:
try:
f = flagToString(int(f))
except (ValueError,TypeError), e:
raise Errors.MiscError, 'Invalid version flag: %s' % f
else:
f = constants.SYMBOLFLAGS[f]
if type(v) in (types.StringType, types.NoneType, types.UnicodeType):
(prco_e, prco_v, prco_r) = stringToVersion(v)
elif type(v) in (types.TupleType, types.ListType):
(prco_e, prco_v, prco_r) = v
#now we have (n, f, (e, v, r)) for the thing specified
return (n, f, (prco_e, prco_v, prco_r))
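# Note: this function is carried over from yum's misc.py and assumes yum-style
# helpers (constants.LETTERFLAGS, constants.SYMBOLFLAGS, flagToString,
# stringToVersion, Errors.MiscError) are available; they are not imported in
# this module, so treat the following as a sketch of the intended behavior:
#   string_to_prco_tuple('foo >= 1.0-1') -> ('foo', 'GE', ('0', '1.0', '1'))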
###########
# Title: Remove duplicates from a sequence
# Submitter: Tim Peters
# From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
def unique(s):
"""Return a list of the elements in s, but without duplicates.
For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
unique("abcabc") some permutation of ["a", "b", "c"], and
unique(([1, 2], [2, 3], [1, 2])) some permutation of
[[2, 3], [1, 2]].
For best speed, all sequence elements should be hashable. Then
unique() will usually work in linear time.
If not possible, the sequence elements should enjoy a total
ordering, and if list(s).sort() doesn't raise TypeError it's
assumed that they do enjoy a total ordering. Then unique() will
usually work in O(N*log2(N)) time.
If that's not possible either, the sequence elements must support
equality-testing. Then unique() will usually work in quadratic
time.
"""
n = len(s)
if n == 0:
return []
# Try using a set first, as that's the fastest and will usually
# work. If it doesn't work, it will usually fail quickly, so it
# usually doesn't cost much to *try* it. It requires that all the
# sequence elements be hashable, and support equality comparison.
try:
u = set(s)
except TypeError:
pass
else:
return list(u)
# We can't hash all the elements. Second fastest is to sort,
# which brings the equal elements together; then duplicates are
# easy to weed out in a single pass.
# NOTE: Python's list.sort() was designed to be efficient in the
# presence of many duplicate elements. This isn't true of all
# sort functions in all languages or libraries, so this approach
# is more effective in Python than it may be elsewhere.
try:
t = list(s)
t.sort()
except TypeError:
del t # move on to the next method
else:
assert n > 0
last = t[0]
lasti = i = 1
while i < n:
if t[i] != last:
t[lasti] = last = t[i]
lasti += 1
i += 1
return t[:lasti]
# Brute force is all that's left.
u = []
for x in s:
if x not in u:
u.append(x)
return u
def GetUrlFromRepoName(reponame):
    # open() raises IOError on failure rather than returning a false value,
    # so catch the exception instead of testing the file object
    try:
        urpmi = open("/etc/urpmi/urpmi.cfg")
    except IOError:
        print "cannot open file urpmi.cfg"
        return None
repo_dict = {}
name = ''
isignore = 0
isupdate = 0
mirrorlist = ''
withdir = ''
for line in urpmi:
line = line.strip()
if line.endswith('{'):
line = line[:-1].strip()
line = line.lower()
line = line.split("\ ")
line = ' '.join(line)
name = line
elif line.startswith("ignore"):
isignore = 1
elif line.startswith("update"):
isupdate = 1
elif line.startswith("mirrorlist: "):
line = line[12:]
if not line.startswith('$'):
if not line.endswith('/'):
line = line + '/'
mirrorlist = line
elif line.startswith("with-dir: "):
line = line[10:]
withdir = line
elif line.startswith('}'):
if mirrorlist == '':
path = None
else:
path = mirrorlist + withdir
if (name) and (path):
repo_dict[name]=(isignore, isupdate, path)
name = ''
isignore = 0
isupdate = 0
mirrorlist = ''
withdir = ''
urpmi.close()
name2 = reponame.lower()
if name2 not in repo_dict:
return (None, None, None)
else:
return repo_dict[name2]
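# Parsing sketch for a hypothetical /etc/urpmi/urpmi.cfg entry:
#   Main\ Updates updates {
#     update
#     mirrorlist: http://mirror.example.com/updates
#   }
# GetUrlFromRepoName('Main Updates updates') would return
#   (0, 1, 'http://mirror.example.com/updates/')
# i.e. (isignore, isupdate, mirrorlist + with-dir).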