From fe4d955a906d90ceb9f06ba12c21ecfbbf46d410 Mon Sep 17 00:00:00 2001 From: Anton Kirilenko Date: Wed, 5 Sep 2012 15:45:44 +0400 Subject: [PATCH] Initial commit. Unstable urpm-reposync. --- AUTHORS | 9 + COPYING | 339 ++++ Makefile | 43 + README | 7 + docs/urpm-downloader.1 | 85 + docs/urpm-package-cleanup.1 | 100 + docs/urpm-repoclosure.1 | 77 + docs/urpm-repodiff.1 | 49 + docs/urpm-repograph.1 | 106 + docs/urpm-repomanage.1 | 56 + docs/urpm-reposync.1 | 69 + locale/ru/LC_MESSAGES/urpm-tools.po | 1422 +++++++++++++ localizer.py | 62 + rpm5utils/COPYING | 339 ++++ rpm5utils/Makefile | 27 + rpm5utils/__init__.py | 10 + rpm5utils/arch.py | 423 ++++ rpm5utils/miscutils.py | 455 +++++ rpm5utils/tests/updates-test.py | 63 + rpm5utils/transaction.py | 192 ++ rpm5utils/updates.py | 723 +++++++ rpm5utils/urpmgraphs/__init__.py | 66 + rpm5utils/urpmgraphs/algorithms/__init__.py | 2 + .../algorithms/components/__init__.py | 2 + .../components/strongly_connected.py | 321 +++ rpm5utils/urpmgraphs/algorithms/cycles.py | 122 ++ rpm5utils/urpmgraphs/classes/__init__.py | 3 + rpm5utils/urpmgraphs/classes/digraph.py | 996 +++++++++ rpm5utils/urpmgraphs/classes/function.py | 375 ++++ rpm5utils/urpmgraphs/classes/graph.py | 1804 +++++++++++++++++ rpm5utils/urpmgraphs/convert.py | 708 +++++++ rpm5utils/urpmgraphs/exception.py | 49 + urpm-downloader.py | 675 ++++++ urpm-package-cleanup.py | 556 +++++ urpm-repoclosure.pl | 1167 +++++++++++ urpm-repodiff.py | 1379 +++++++++++++ urpm-repograph.py | 1472 ++++++++++++++ urpm-repomanage.py | 239 +++ urpm-reposync.py | 1332 ++++++++++++ urpm-tools.pot | 1101 ++++++++++ urpm-tools.spec | 80 + urpm-tools/AUTHORS | 9 + urpm-tools/COPYING | 339 ++++ urpm-tools/Makefile | 43 + urpm-tools/README | 7 + urpm-tools/docs/urpm-downloader.1 | 85 + urpm-tools/docs/urpm-package-cleanup.1 | 100 + urpm-tools/docs/urpm-repoclosure.1 | 77 + urpm-tools/docs/urpm-repodiff.1 | 49 + urpm-tools/docs/urpm-repograph.1 | 106 + urpm-tools/docs/urpm-repomanage.1 | 56 + urpm-tools/docs/urpm-reposync.1 | 69 + .../locale/ru/LC_MESSAGES/urpm-tools.po | 1422 +++++++++++++ urpm-tools/localizer.py | 62 + urpm-tools/rpm5utils/COPYING | 339 ++++ urpm-tools/rpm5utils/Makefile | 27 + urpm-tools/rpm5utils/__init__.py | 10 + urpm-tools/rpm5utils/arch.py | 423 ++++ urpm-tools/rpm5utils/miscutils.py | 455 +++++ urpm-tools/rpm5utils/tests/updates-test.py | 63 + urpm-tools/rpm5utils/transaction.py | 192 ++ urpm-tools/rpm5utils/updates.py | 723 +++++++ urpm-tools/rpm5utils/urpmgraphs/__init__.py | 66 + .../urpmgraphs/algorithms/__init__.py | 2 + .../algorithms/components/__init__.py | 2 + .../components/strongly_connected.py | 321 +++ .../rpm5utils/urpmgraphs/algorithms/cycles.py | 122 ++ .../rpm5utils/urpmgraphs/classes/__init__.py | 3 + .../rpm5utils/urpmgraphs/classes/digraph.py | 996 +++++++++ .../rpm5utils/urpmgraphs/classes/function.py | 375 ++++ .../rpm5utils/urpmgraphs/classes/graph.py | 1804 +++++++++++++++++ urpm-tools/rpm5utils/urpmgraphs/convert.py | 708 +++++++ urpm-tools/rpm5utils/urpmgraphs/exception.py | 49 + urpm-tools/urpm-downloader.py | 675 ++++++ urpm-tools/urpm-package-cleanup.py | 556 +++++ urpm-tools/urpm-repoclosure.pl | 1167 +++++++++++ urpm-tools/urpm-repodiff.py | 1379 +++++++++++++ urpm-tools/urpm-repograph.py | 1472 ++++++++++++++ urpm-tools/urpm-repomanage.py | 239 +++ urpm-tools/urpm-reposync.py | 1223 +++++++++++ urpm-tools/urpm-tools.pot | 1101 ++++++++++ urpm-tools/urpm-tools.spec | 80 + urpm-tools/urpmmisc.py | 182 ++ urpmmisc.py | 182 ++ 84 files changed, 34465 insertions(+) 
create mode 100644 AUTHORS create mode 100644 COPYING create mode 100644 Makefile create mode 100644 README create mode 100644 docs/urpm-downloader.1 create mode 100644 docs/urpm-package-cleanup.1 create mode 100644 docs/urpm-repoclosure.1 create mode 100644 docs/urpm-repodiff.1 create mode 100644 docs/urpm-repograph.1 create mode 100644 docs/urpm-repomanage.1 create mode 100644 docs/urpm-reposync.1 create mode 100644 locale/ru/LC_MESSAGES/urpm-tools.po create mode 100755 localizer.py create mode 100644 rpm5utils/COPYING create mode 100644 rpm5utils/Makefile create mode 100644 rpm5utils/__init__.py create mode 100644 rpm5utils/arch.py create mode 100644 rpm5utils/miscutils.py create mode 100644 rpm5utils/tests/updates-test.py create mode 100644 rpm5utils/transaction.py create mode 100644 rpm5utils/updates.py create mode 100644 rpm5utils/urpmgraphs/__init__.py create mode 100644 rpm5utils/urpmgraphs/algorithms/__init__.py create mode 100644 rpm5utils/urpmgraphs/algorithms/components/__init__.py create mode 100644 rpm5utils/urpmgraphs/algorithms/components/strongly_connected.py create mode 100644 rpm5utils/urpmgraphs/algorithms/cycles.py create mode 100644 rpm5utils/urpmgraphs/classes/__init__.py create mode 100644 rpm5utils/urpmgraphs/classes/digraph.py create mode 100644 rpm5utils/urpmgraphs/classes/function.py create mode 100644 rpm5utils/urpmgraphs/classes/graph.py create mode 100644 rpm5utils/urpmgraphs/convert.py create mode 100644 rpm5utils/urpmgraphs/exception.py create mode 100755 urpm-downloader.py create mode 100755 urpm-package-cleanup.py create mode 100755 urpm-repoclosure.pl create mode 100755 urpm-repodiff.py create mode 100755 urpm-repograph.py create mode 100755 urpm-repomanage.py create mode 100755 urpm-reposync.py create mode 100644 urpm-tools.pot create mode 100644 urpm-tools.spec create mode 100644 urpm-tools/AUTHORS create mode 100644 urpm-tools/COPYING create mode 100644 urpm-tools/Makefile create mode 100644 urpm-tools/README create mode 100644 urpm-tools/docs/urpm-downloader.1 create mode 100644 urpm-tools/docs/urpm-package-cleanup.1 create mode 100644 urpm-tools/docs/urpm-repoclosure.1 create mode 100644 urpm-tools/docs/urpm-repodiff.1 create mode 100644 urpm-tools/docs/urpm-repograph.1 create mode 100644 urpm-tools/docs/urpm-repomanage.1 create mode 100644 urpm-tools/docs/urpm-reposync.1 create mode 100644 urpm-tools/locale/ru/LC_MESSAGES/urpm-tools.po create mode 100755 urpm-tools/localizer.py create mode 100644 urpm-tools/rpm5utils/COPYING create mode 100644 urpm-tools/rpm5utils/Makefile create mode 100644 urpm-tools/rpm5utils/__init__.py create mode 100644 urpm-tools/rpm5utils/arch.py create mode 100644 urpm-tools/rpm5utils/miscutils.py create mode 100644 urpm-tools/rpm5utils/tests/updates-test.py create mode 100644 urpm-tools/rpm5utils/transaction.py create mode 100644 urpm-tools/rpm5utils/updates.py create mode 100644 urpm-tools/rpm5utils/urpmgraphs/__init__.py create mode 100644 urpm-tools/rpm5utils/urpmgraphs/algorithms/__init__.py create mode 100644 urpm-tools/rpm5utils/urpmgraphs/algorithms/components/__init__.py create mode 100644 urpm-tools/rpm5utils/urpmgraphs/algorithms/components/strongly_connected.py create mode 100644 urpm-tools/rpm5utils/urpmgraphs/algorithms/cycles.py create mode 100644 urpm-tools/rpm5utils/urpmgraphs/classes/__init__.py create mode 100644 urpm-tools/rpm5utils/urpmgraphs/classes/digraph.py create mode 100644 urpm-tools/rpm5utils/urpmgraphs/classes/function.py create mode 100644 urpm-tools/rpm5utils/urpmgraphs/classes/graph.py 
create mode 100644 urpm-tools/rpm5utils/urpmgraphs/convert.py create mode 100644 urpm-tools/rpm5utils/urpmgraphs/exception.py create mode 100755 urpm-tools/urpm-downloader.py create mode 100755 urpm-tools/urpm-package-cleanup.py create mode 100755 urpm-tools/urpm-repoclosure.pl create mode 100755 urpm-tools/urpm-repodiff.py create mode 100755 urpm-tools/urpm-repograph.py create mode 100755 urpm-tools/urpm-repomanage.py create mode 100755 urpm-tools/urpm-reposync.py create mode 100644 urpm-tools/urpm-tools.pot create mode 100644 urpm-tools/urpm-tools.spec create mode 100644 urpm-tools/urpmmisc.py create mode 100644 urpmmisc.py diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..91a101f --- /dev/null +++ b/AUTHORS @@ -0,0 +1,9 @@ +------------------- +Urpm-tools Authors +------------------- + + Anton Kirilenko + Andrey Ponomarenko + Denis Silakov + Vladimir Testov + diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..e77696a --- /dev/null +++ b/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 675 Mass Ave, Cambridge, MA 02139, USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. 
We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. 
+However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. 
Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) 19yy + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) 19yy name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..61fb63b
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,43 @@
+SUBDIRS = rpm5utils
+PKGNAME = urpm-tools
+PYTHON_UTILS = urpm-downloader urpm-package-cleanup urpm-repodiff urpm-repomanage urpm-repograph urpm-reposync
+PERL_UTILS = urpm-repoclosure
+
+PYTHON=python
+PYFILES = $(wildcard *.py)
+PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')
+PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix')
+PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER)
+PKGDIR = $(PYLIBDIR)/site-packages
+SHELL=/bin/bash
+all:
+	@echo "Nothing to do. Run 'make install' or 'make clean'"
+
+clean:
+	rm -f *.pyc *.pyo *~
+	rm -f test/*~
+	rm -f *.tar.gz
+
+install:
+	mkdir -p $(DESTDIR)/usr/bin/
+	mkdir -p $(DESTDIR)/usr/share/man/man1
+	for util in $(PYTHON_UTILS); do \
+		install -m 755 $$util.py $(DESTDIR)/usr/bin/$$util; \
+		install -m 664 docs/$$util.1 $(DESTDIR)/usr/share/man/man1/$$util.1; \
+	done
+
+	for util in $(PERL_UTILS); do \
+		install -m 755 $$util.pl $(DESTDIR)/usr/bin/$$util; \
+		install -m 664 docs/$$util.1 $(DESTDIR)/usr/share/man/man1/$$util.1; \
+	done
+
+	for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR) -C $$d install; [ $$? = 0 ] || exit 1; done
+
+	install -m 644 urpmmisc.py $(DESTDIR)/$(PKGDIR)/urpmmisc.py;
+
+	for d in `python localizer.py --list`; do\
+		mkdir -p $(DESTDIR)/usr/share/locale/$$d/LC_MESSAGES;\
+		install -m 644 locale/$$d/LC_MESSAGES/urpm-tools.mo $(DESTDIR)/usr/share/locale/$$d/LC_MESSAGES/urpm-tools.mo;\
+	done
+
\ No newline at end of file
diff --git a/README b/README
new file mode 100644
index 0000000..0fd7377
--- /dev/null
+++ b/README
@@ -0,0 +1,7 @@
+Urpm-tools - a set of utilities to work with Urpm repositories
+They make URPM-based repositories easier and more powerful to use.
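+
+A typical installation is a single make invocation (a sketch; the install
+target assumes GNU make and a POSIX shell, expects compiled
+locale/*/LC_MESSAGES/urpm-tools.mo catalogs to exist, and with DESTDIR
+omitted installs directly under /usr):
+
+    make install DESTDIR=/tmp/staging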
+These tools include: urpm-downloader, urpm-package-cleanup,
+urpm-repoclosure, urpm-repodiff, urpm-repomanage, urpm-repograph,
+urpm-reposync
+
+rpm5utils is based on rpmUtils from yum, http://yum.baseurl.org
diff --git a/docs/urpm-downloader.1 b/docs/urpm-downloader.1
new file mode 100644
index 0000000..9524489
--- /dev/null
+++ b/docs/urpm-downloader.1
@@ -0,0 +1,85 @@
+.\" urpm-downloader
+.TH "urpm-downloader" "1" "21 December 2011" "Anton Kirilenko" ""
+.SH "NAME"
+urpm-downloader - download RPMs from URPM-based Linux repositories
+.SH "SYNOPSIS"
+\fBurpm-downloader\fP [options] package(s)
+.SH "DESCRIPTION"
+.PP
+\fBurpm-downloader\fP is a tool for downloading RPMs and SRPMs from URPM-based Linux repositories.
+.PP
+\fBpackage\fP Package name(s) to download. The list can contain not only package names but also (S)RPM files; in that case the package name extracted from the file is used.
+.PP
+.SH "GENERAL OPTIONS"
+.IP "\fB\-h, \-\-help\fP"
+Help; display a help message and then quit.
+.IP "\fB\-\-version\fP"
+Report program version and exit.
+.IP "\fB\-\-dest\-dir\fP"
+Specify a destination directory for the download.
+.IP "\fB\-v, \-\-verbose\fP"
+Verbose (print additional info)
+.IP "\fB\-q, \-\-quiet\fP"
+Quiet operation
+.IP "\fB\-\-include\-media, \-\-media\fP"
+Use only selected URPM media
+.IP "\fB\-\-exclude\-media\fP"
+Do not use selected URPM media
+.IP "\fB\-\-fail\-broken\fP"
+Exit if package dependencies cannot be resolved.
+.IP "\fB\-i, \-\-ignore-errors\fP"
+Try to continue when errors occur
+
+.PP
+.SH "DOWNLOAD OPTIONS"
+.IP "\fB\-u, \-\-urls\fP"
+Instead of downloading files, list the URLs that would be processed
+
+.IP "\fB\-b, \-\-binary\fP"
+Download binary RPMs
+.IP "\fB\-s, \-\-source\fP"
+Download the source RPMs (SRPMs)
+.IP "\fB\-d, \-\-debug-info\fP"
+Download debug RPMs
+
+.IP "\fB\-r, \-\-resolve\fP"
+When downloading RPMs, resolve dependencies and also download the required packages, if they are not already installed
+.IP "\fB\-a, \-\-resolve\-all\fP"
+When downloading RPMs, resolve dependencies and also download the required packages, even if they are already installed
+.IP "\fB\-x, \-\-exclude\-packages\fP"
+Exclude package(s) by regex
+.IP "\fB\-o, \-\-overwrite\fP"
+If the file already exists, download it again and overwrite the old one
+.IP "\fB\-\-all\-alternatives\fP"
+If a package dependency can be satisfied by several packages, download all of them (by default, only the first one is downloaded)
+.IP "\fB\-\-all\-versions\fP"
+If different versions of a package are present in the repository, process them all
+.PP
+.SH "EXAMPLES"
+.IP "Download RPMs for given packages (pkg1, pkg2, ...) into the directory 'path':"
+\fBurpm-downloader --dest-dir path pkg1 pkg2\fP
+.IP "Download SRPMs for given packages (pkg1, pkg2, ...) into the current directory:"
+\fBurpm-downloader -s pkg1 pkg2\fP
+.IP "Download the package with its whole dependency tree to the specified directory:"
+\fBurpm-downloader -a --dest-dir path package-name\fP
+.IP "To rebuild an existing RPM, download the corresponding SRPM and all the packages missing for the build:"
+\fBurpm-downloader -sr --dest-dir path package.rpm\fP
+.PP
+.SH "EXIT CODES"
+.IP \fB0\fP
+Completed successfully
+.IP \fB1\fP
+Error calling an external command (urpmq, rpm, etc.).
+The command's output will be printed before exit.
+.IP \fB2\fP
+Cannot download SRPM
+.IP \fB3\fP
+Cannot download RPM
+.IP \fB4\fP
+One or more of the specified RPM files do not exist
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/docs/urpm-package-cleanup.1 b/docs/urpm-package-cleanup.1
new file mode 100644
index 0000000..6afa4ff
--- /dev/null
+++ b/docs/urpm-package-cleanup.1
@@ -0,0 +1,100 @@
+.\" package-cleanup
+.TH "urpm-package-cleanup" "1" "21 December 2011" "Denis Silakov" ""
+.SH "NAME"
+urpm-package-cleanup - find and fix rpmdb problems
+.SH "SYNOPSIS"
+\fBurpm-package-cleanup\fP [options]
+.SH "DESCRIPTION"
+.PP
+\fBurpm-package-cleanup\fP is a program for cleaning up locally-installed RPMs.
+.PP
+.SH "GENERAL OPTIONS"
+.IP "\fB\-h, \-\-help\fP"
+Help; display a help message and then quit\&.
+.IP "\fB\-v, \-\-version\fP"
+Report program version and exit.
+.IP "\fB\-\-leaves\fP"
+List leaf nodes in the local RPM database. Leaf nodes are RPMs that
+are not relied upon by any other RPM.
+.IP "\fB\-\-orphans\fP"
+List installed packages which are not available from currently configured
+repositories. This is identical to "urpmq --not-available".
+.IP "\fB\-\-oldkernels\fP"
+Remove old kernel and kernel-devel packages.
+.IP "\fB\-\-problems\fP"
+List dependency problems in the local RPM database.
+.IP "\fB\-\-dupes\fP"
+Scan for duplicates in the local RPM database.
+.PP
+.SH "LEAVES OPTIONS"
+.IP "\fB\-\-all\fP"
+When listing leaf nodes, also list leaf nodes that are
+not libraries.
+.IP "\fB\-\-leaf\-regex\fP"
+A package name that matches this regular expression will be considered a leaf.
+.IP "\fB\-\-exclude\-devel\fP"
+When listing leaf nodes, do not list development packages.
+.IP "\fB\-\-exclude\-bin\fP"
+When listing leaf nodes, do not list packages with files in bin directories.
+.PP
+.SH "OLDKERNELS OPTIONS"
+.IP "\fB\-\-count <number>\fP"
+Number of kernel packages to keep on the system (default 2)
+.IP "\fB\-\-keepdevel\fP"
+Do not remove kernel-devel packages when removing kernels
+.PP
+.SH "DUPLICATE PACKAGE OPTIONS"
+.IP "\fB\-\-cleandupes\fP"
+Scan for duplicates in the local RPM database and clean out the
+older versions.
+.IP "\fB\-\-noscripts\fP"
+Do not run rpm scriptlets when cleaning duplicates
+.PP
+.SH "DEPENDENCY PROBLEMS OPTIONS"
+.IP "\fB\-\-suggests\fP"
+List missing suggestions of installed packages
+
+.SH "EXAMPLES"
+.IP "List all dependency problems:"
+\fBurpm-package-cleanup --problems\fP
+.IP "List all packages that are not in any configured repository:"
+\fBurpm-package-cleanup --orphans\fP
+.IP "Remove old kernels, keeping 3 and leaving old kernel-devel packages installed:"
+\fBurpm-package-cleanup --oldkernels --count=3 --keepdevel\fP
+.PP
+.IP "List all leaf packages with no files in a bin directory whose name begins with either 'perl' or 'python':"
+\fBurpm-package-cleanup --leaves --exclude-bin --leaf-regex="^(perl)|(python)"\fP
+.PP
+.SH "FILES"
+For some actions urpm-package-cleanup invokes urpmi and relies on its
+configuration file:
+.PP
+.nf
+/etc/urpmi/urpmi.cfg
+.fi
+
+.PP
+.SH "EXIT CODES"
+.IP \fB0\fP
+Completed successfully
+.IP \fB1\fP
+Script execution error (wrong option, insufficient permissions, etc.)
+.IP \fB2\fP
+Unsatisfied dependencies detected
+.IP \fB3\fP
+Unsatisfied soft dependencies detected
+.IP \fB100\fP
+Illegal option value
+
+.PP
+.SH "SEE ALSO"
+.nf
+.I urpmi.cfg (1)
+.fi
+
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/docs/urpm-repoclosure.1 b/docs/urpm-repoclosure.1
new file mode 100644
index 0000000..d144a5d
--- /dev/null
+++ b/docs/urpm-repoclosure.1
@@ -0,0 +1,77 @@
+.\" urpm-repoclosure
+.TH "urpm-repoclosure" "1" "21 February 2012" "Andrey Ponomarenko" ""
+.SH "NAME"
+urpm-repoclosure - check closure of a set of RPM packages
+.SH "SYNOPSIS"
+\fBurpm-repoclosure\fP [options]
+.SH "DESCRIPTION"
+.PP
+\fBurpm-repoclosure\fP is a tool for checking the closure of a set of RPM packages
+.PP
+.SH "GENERAL OPTIONS"
+.IP "\fB\-h, \-help\fP"
+Print this help.
+
+.IP "\fB\-v, \-version\fP"
+Print version information.
+
+.IP "\fB\-hdlist <path>\fP"
+Path or URL of the HDlist (synthesis) to check.
+
+.IP "\fB\-d, \-dir <dir>\fP"
+The directory with RPM packages to check.
+
+.IP "\fB\-l, \-list <file>\fP"
+The list of packages to check.
+
+.IP "\fB\-add, \-update <dir>\fP"
+The directory with RPM packages that should
+be added to the repository or updated.
+
+.IP "\fB\-file\-deps <file>\fP"
+Read file\-deps to ignore some unresolved
+dependencies.
+
+.IP "\fB\-s, \-static\fP"
+Check statically whether all required dependencies are
+satisfied by provided dependencies in the set of
+RPM packages.
+
+.IP "\fB\-dynamic\fP"
+Install the set of RPM packages to a local chroot
+and check whether extra packages were installed.
+
+.IP "\fB\-r, \-check\-release\fP"
+Check installation media (DVD).
+
+.IP "\fB\-sign, \-check\-signature\fP"
+Validate package signatures.
+
+.IP "\fB\-noclean\fP"
+Do not clean the urpmi cache.
+
+.IP "\fB\-root <dir>\fP"
+Where to install packages. Default: /tmp/...
+.PP
+
+.SH "EXIT CODES"
+.IP "0 \- Success. The tool has run without any errors and has not discovered any issues."
+.IP "non\-zero \- Failure, or the tool has run with errors. In particular:"
+.IP "1 \- Failed to run the tool"
+.IP "2 \- Discovered dependency problems"
+
+.SH "EXAMPLES"
+.IP "Run a static test using an hdlist:"
+\fBurpm-repoclosure --hdlist=hdlist.txt\fP

+\fBurpm-repoclosure --hdlist=http://mirror.yandex.ru/mandriva/.../synthesis.hdlist.cz\fP
+.IP "Check closure of a local set of RPMs:"
+\fBurpm-repoclosure --dir=rpms/ --static\fP
+.IP "Check a set of RPMs specified in list.txt:"
+\fBurpm-repoclosure --list=list.txt --dynamic\fP
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/docs/urpm-repodiff.1 b/docs/urpm-repodiff.1
new file mode 100644
index 0000000..3b3b02b
--- /dev/null
+++ b/docs/urpm-repodiff.1
@@ -0,0 +1,49 @@
+.\" urpm-repodiff
+.TH "urpm-repodiff" "1" "21 December 2011" "Vladimir Testov" "Mandriva Package Management"
+.SH "NAME"
+urpm-repodiff - diff for urpmi repositories
+.SH "SYNOPSIS"
+\fBurpm-repodiff\fP [options] --old old_repo_baseurl [old_repo_baseurl ...] --new new_repo_baseurl [new_repo_baseurl ...]
+.SH "DESCRIPTION"
+.PP
+\fBurpm-repodiff\fP is a program which lists the differences between two sets of
+repositories.
+.PP
+.SH "GENERAL OPTIONS"
+.IP "\fB\-\-old, -o\fP"
+"Old" repository, or a list of "old" repositories if several are present.
+.IP "\fB\-\-new, -n\fP"
+"New" repository, or a list of "new" repositories if several are present.
+.IP "\fB\-\-quiet, -q\fP"
+Quiet mode: hide service messages.
+.PP
+.SH "USUAL OUTPUT OPTIONS"
+.IP "\fB\-\-size, -s\fP"
+Show differences in package sizes.
+.IP "\fB\-\-simple\fP"
+Simple output format.
+.IP "\fB\-\-changelog, -s\fP"
+Show changelog difference.
+.PP
+.SH "HTML OUTPUT OPTION"
+.IP "\fB\-\-html\fP"
+Output the difference as an HTML page. When this option is used, the \fB--size, -s\fP, \fB--simple\fP and \fB--changelog\fP options are ignored.
+If the \fB--output, -o\fP option is not present, the page is written to the file 'repodiff.html' in the current directory.
+.PP
+.SH "OUTPUT OPTION"
+.IP "\fB\-\-output, -o OUTPUT_FILE\fP"
+Redirect standard output to OUTPUT_FILE.
+.SH "EXAMPLES"
+.IP "Compare packages in two local repositories:"
+\fBurpm-repodiff --old /tmp/repo-old --new /tmp/repo-new\fP
+.IP "Compare packages in two remote repositories, and two local ones:"
+\fBurpm-repodiff --old http://example.com/repo1-old --old /tmp/repo-old --new http://example.com/repo1-new --new /tmp/repo-new\fP
+.IP "Compare packages, using the simple report format (no changelog difference), but report differences in package size:"
+\fBurpm-repodiff --old /tmp/repo-old --new /tmp/repo-new --size --simple\fP
+.PP
+
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/docs/urpm-repograph.1 b/docs/urpm-repograph.1
new file mode 100644
index 0000000..c2a0976
--- /dev/null
+++ b/docs/urpm-repograph.1
@@ -0,0 +1,106 @@
+.\" urpm-repograph
+.TH "urpm-repograph" "1" "21 December 2011" "Vladimir Testov" "Mandriva Package Management"
+.SH "NAME"
+urpm-repograph - build a dependency graph of a repository
+.SH "SYNOPSIS"
+\fBurpm-repograph\fP [options] REPOSITORY
+.SH "DESCRIPTION"
+.PP
+\fBurpm-repograph\fP is a tool for generating a dependency graph of REPOSITORY packages.
+The output is in the DOT graph language. Along the way it can check for
+missing dependencies, track cross-repository dependencies, and search for and display dependency cycles
+(A needs B, B needs C, C needs A) as well as alternatives ("word" is provided by
+A, B and C). With the \fB--whatrequires\fP and \fB--requires-recursive\fP options the tool can
+also select only the part of the graph which is provided (in the recursive sense) by PKG or
+requires (also in the recursive sense) PKG. Note that warnings about repository mismatches
+are not shown in that case.
+.PP
+.SH "GENERAL OPTIONS"
+.IP "\fBREPOSITORY\fP"
+The only required argument. A URL (starting with "http://" or "ftp://")
+or a PATH (absolute or relative, optionally starting with "file://")
+to the repository - that is, the URL or path which contains the packages and the "media_info"
+directory, the only object of interest for this tool. (You can also download the files
+"synthesis.hdlist.cz" and "files.xml.lzma" separately to a folder such as
+"./A/media_info" and run the tool with "./A": "urpm-repograph ./A";
+"files.xml.lzma" is needed only if the \fB--file, -f\fP option is present.)
+.IP "\fB\-\-cross, -c CROSS_REPO [CROSS_REPO ...]\fP"
+Check \fBCROSS_REPO(s)\fP for cross-repository dependencies. Note that dependencies inside \fBCROSS_REPO(s)\fP
+(PKG1 from CROSS_REPO(s) needs PKG2 from CROSS_REPO(s)) will not be shown, while dependencies inside \fBREPOSITORY\fP will be.
+.IP "\fB\-\-quiet, -q\fP"
+Hide service messages (all kinds of status messages).
+Note that you cannot use \fB--quiet, -q\fP together with \fB--nograph, -n\fP unless \fB--verbose, -v\fP is also given.
+.IP "\fB\-\-verbose, -v\fP"
+Show extended information: more detailed status output, plus warnings
+about missing dependencies, self-dependencies and cross-repository dependencies.
+The same restriction on combining \fB--quiet, -q\fP with \fB--nograph, -n\fP applies.
+.IP "\fB\-\-requires, -r\fP"
+Also describes \fB--suggests, -s\fP. These two options declare for which types of dependencies
+the graph should be built and which dependencies should be checked and processed.
+\fB--requires, -r\fP - required dependencies, as in an RPM spec file. \fB--suggests, -s\fP - suggested dependencies, as in an RPM spec file.
+If neither option is present, the tool works as if \fB--requires, -r\fP were present.
+.IP "\fB\-\-suggests, -s\fP"
+See the \fB--requires, -r\fP description.
+.IP "\fB\-\-file, -f\fP"
+Process file dependencies. If not present, the tool skips both checking and processing
+of dependencies on files. If present, "files.xml.lzma" must be available.
+.IP "\fB\-\-unprovided, -u\fP"
+Show unprovided dependencies. Unprovided phrases in the requires (and/or suggests) sections of synthesis.hdlist will be shown in the final graph.
+Do not use with the \fB--broken, -b\fP option; an error will be shown and the run terminated. \fB--broken, -b\fP already does the same as \fB--unprovided, -u\fP,
+so there is no sense in using these two options together.
+.PP
+.SH "PACKAGE SPECIFIC OPTIONS"
+Only one option in this group can be present. PKG is either a package name (e.g. urpm-tools)
+or a full package name (with version, release etc). Note that if an option from this group is
+present, PKG will be checked first - if there is no package named PKG in \fBREPOSITORY\fP and
+(if the \fB--cross, -c\fP option is present) there is no package named PKG in \fBCROSS_REPO(s)\fP
+(or if no cross-repository dependencies to or from PKG are actually present in \fBCROSS_REPO(s)\fP),
+then the sub-graph will not be selected, a warning will be shown and the tool will stop.
+Also note that repository mismatch warnings will not be shown (even if the \fB--verbose, -v\fP option is present).
+If the \fB--verbose, -v\fP option is present, the list of packages, and some types of warnings, will be written to STDOUT.
+.IP "\fB\-\-requires-recursive PKG\fP"
+Search for packages which are required by package PKG
+(in the recursive sense: for example, if PKG needs PKG2 and PKG2 needs PKG3,
+then PKG3 will also be checked and processed, and so on).
+.IP "\fB\-\-whatrequires PKG\fP"
+Search for packages which require package PKG
+(in the recursive sense: for example, if PKG is needed by PKG2 and PKG2 is needed by PKG3,
+then PKG3 will also be checked and processed, and so on).
+.PP
+.SH "ALTERNATIVE TASK OPTIONS"
+Only one option from this group can be present. Note that the \fB--requires-recursive\fP and \fB--whatrequires\fP passes are performed first (if present).
+So, for example, you can select the subgraph connected with a specific package and then select the subgraph of broken packages from the first subgraph.
+If the \fB--loops, -l\fP, \fB--alternatives, -a\fP or \fB--broken, -b\fP option is present, a different graph will be shown and additional algorithms will be run.
+.IP "\fB\-\-loops, -l\fP"
+Search for all simple loops of cyclic dependencies.
+.IP "\fB\-\-alternatives, -a\fP"
+Search for alternative packages providing the same feature.
+.IP "\fB\-\-broken, -b\fP"
+Search for broken packages and those which depend on them.
+.IP "\fB\-\-different, -d\fP"
+Output each loop or each alternative to a different file. \fBOUTPUT_FILE\fP is treated as a folder name for the new files in that case.
+Ignored if both the \fB--loops, -l\fP and \fB--alternatives, -a\fP options are absent. Also ignored if the \fB--output, -o\fP option is not present.
+.PP
+.SH "OUTPUT OPTIONS"
+Only one option in this group can be present. If none is specified, the graph is written to STDOUT.
+.IP "\fB\-\-output, -o OUTPUT_FILE\fP"
+Output the graph to the specified file OUTPUT_FILE.
+OUTPUT_FILE is treated as a directory name if the \fB--different, -d\fP option is present.
+.IP "\fB\-\-nograph, -n\fP"
+Do not output the graph.
+Note that you cannot use \fB--nograph, -n\fP together with \fB--quiet, -q\fP unless \fB--verbose, -v\fP is also given.
+.PP
+.SH "EXAMPLES"
+.IP "Analyze a local repository and output the graph to the file './full-graph.dot', showing service messages:"
+\fBurpm-repograph /tmp/repo -v -o ./full-graph.dot\fP
+.IP "Analyze an external repository, hide service messages, show warnings and save them into 'warnings.txt':"
+\fBurpm-repograph http://example.com/repo -qvn > warnings.txt\fP
+.IP "Analyze two external repositories - 'http://example.com/main/release' and the additional 'http://example.com/contrib/release'. Select only packages that require 'example-pkg' (in the recursive sense). Search for loops in this group of packages and output every loop to a different file in the directory '/tmp/tmp-forever':"
+\fBurpm-repograph http://example.com/main/release -c http://example.com/contrib/release --whatrequires example-pkg -qad -o /tmp/tmp-forever/\fP
+.PP
+
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/docs/urpm-repomanage.1 b/docs/urpm-repomanage.1
new file mode 100644
index 0000000..5667885
--- /dev/null
+++ b/docs/urpm-repomanage.1
@@ -0,0 +1,56 @@
+.\" urpm-repomanage
+.TH "urpm-repomanage" "1" "21 December 2011" "Denis Silakov" "Mandriva Package Management"
+.SH "NAME"
+urpm-repomanage - report the newest and oldest packages in a given set
+.SH "SYNOPSIS"
+\fBurpm-repomanage\fP [-h] [-o | -n] [-r] [-s] [-k KEEP] [-c] [-q | -V] path
+.SH "DESCRIPTION"
+.PP
+\fBurpm-repomanage\fP is a program that scans a directory of RPM packages and reports the newest or oldest packages.
+.PP
+.SH "ARGUMENTS"
+.IP "\fBpath\fP"
+Path to a directory with RPM packages. The tool traverses the directory recursively
+and analyzes all RPM packages found
+.PP
+.SH "OPTIONS"
+.IP "\fB\-\-help, -h\fP"
+show a help message and exit
+.IP "\fB\-\-old, -o\fP"
+print the older packages
+.IP "\fB\-\-new, -n\fP"
+print the newest packages (this is the default behavior)
+.IP "\fB\-\-remove-old, -r\fP"
+remove older packages
+.IP "\fB\-\-space, -s\fP"
+space-separated output instead of newline-separated
+.IP "\fB\-\-keep KEEP, -k KEEP\fP"
+number of newest packages to keep; defaults to 1
+.IP "\fB\-\-nocheck, -c\fP"
+do not check package payload signatures/digests
+.IP "\fB\-\-quiet, -q\fP"
+be completely quiet
+.IP "\fB\-\-verbose, -V\fP"
+be verbose - say which packages are considered old
+and why (this info is dumped to STDERR)
+
+.SH "EXIT CODES"
+.IP "0 \- Success. The tool has run without any errors and no old packages were found."
+.IP "1 \- No packages were found"
+.IP "2 \- Illegal option or missing argument"
+.IP "3 \- The tool has run successfully and detected old packages"
+
+.SH "EXAMPLES"
+.IP "Scan a local directory of packages and, for every package name, print only the file with the latest version:"
+\fBurpm-repomanage /tmp/repo\fP
+.IP "Scan a local directory of packages, detect the two latest versions of every package and print the older versions. For every old package, print the names of newer packages:"
+\fBurpm-repomanage --old -V -k 2 /tmp/repo\fP
+.IP "Remove older packages in a local directory without printing anything to the terminal:"
+\fBurpm-repomanage --remove-old -q /tmp/repo\fP
+.PP
+
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/docs/urpm-reposync.1 b/docs/urpm-reposync.1
new file mode 100644
index 0000000..b08213d
--- /dev/null
+++ b/docs/urpm-reposync.1
@@ -0,0 +1,69 @@
+.\" urpm-reposync
+.TH "urpm-reposync" "1" "21 December 2011" "Anton Kirilenko" ""
+.SH "NAME"
+urpm-reposync - synchronize the packages on your computer with the repository
+.SH "SYNOPSIS"
+\fBurpm-reposync\fP [options]
+.SH "DESCRIPTION"
+.PP
+\fBurpm-reposync\fP is used to synchronize a set of packages on the local computer with the remote repository.
+.PP
+
+.SH "OPTIONS"
+.IP "\fB\-h, \-\-help\fP"
+Help; display a help message and then quit.
+.IP "\fB\-v, \-\-verbose\fP"
+Verbose (print additional info)
+.IP "\fB\-q, \-\-quiet\fP"
+Quiet operation. Senseless without --auto
+.IP "\fB\-\-include\-media, \-\-media\fP"
+Use only selected URPM media
+.IP "\fB\-\-exclude\-media\fP"
+Do not use selected URPM media
+.IP "\fB\-a, \-\-auto\fP"
+Do not ask questions, just do it!
+.IP "\fB\-p, \-\-printonly\fP"
+Only print the list of actions to be done and do nothing more!
+.IP "\fB\-d, \-\-download\fP"
+Only download the rpm files, but install or remove nothing.
+.IP "\fB\-n, \-\-noremove\fP"
+Do not remove packages at all. If an installed package prevents another package from being updated, do not update it.
+.IP "\fB\-c, \-\-check\fP"
+Download packages and check whether they can be installed on your system, but do not install them.
+.IP "\fB\-k, \-\-nokernel\fP"
+Do nothing with kernels.
+.IP "\fB\-\-runselftests\fP"
+Run self-tests and exit.
+.IP "\fB\-\-detailed\fP"
+Show detailed information about the packages that are going to be removed or installed (and why it has to be done)
+
+.SH "EXIT CODES"
+.IP \fB0\fP
+Completed successfully
+.IP \fB1\fP
+Error calling an external command (urpmq, rpm, etc.). The command's output will be printed before exit
+.IP \fB2\fP
+Incorrect command-line option combination. For example, if you try to use --quiet without --auto
+.IP \fB3\fP
+Dependencies were resolved incorrectly. Please contact the tool developer and provide the full program output.
+.IP \fB4\fP
+Inconsistent repository. Please contact the distribution maintainers and show them the output.
+.IP \fB5\fP
+Error while downloading an rpm file.
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/locale/ru/LC_MESSAGES/urpm-tools.po b/locale/ru/LC_MESSAGES/urpm-tools.po
new file mode 100644
index 0000000..046ea84
--- /dev/null
+++ b/locale/ru/LC_MESSAGES/urpm-tools.po
@@ -0,0 +1,1422 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2012
+# This file is distributed under the same license as the urpm-tools package.
+# Anton Kirilenko , 2012.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: 1\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2012-08-21 16:34+0400\n"
+"PO-Revision-Date: 2012-08-21 16:35+0300\n"
+"Last-Translator: Anton Kirilenko \n"
+"Language-Team: RUSSIAN\n"
+"Language: ru\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. if not fatal_fails, do nothing. Caller have to deal with that himself
+#. rpm return code is not 0
+#: urpm-reposync.py:64
+#: urpm-downloader.py:156
+#: urpm-downloader.py:546
+msgid "Error while calling command"
+msgstr "Ошибка при выполнении команды"
+
+#: urpm-reposync.py:66
+#: urpm-downloader.py:158
+msgid "Error message: \n"
+msgstr "Сообщение об ошибке: \n"
+
+#: urpm-reposync.py:74
+msgid "reposync is used to synchronize a set of packages on the local computer with the remote repository."
+msgstr "Инструмент reposync используется для синхронизации установленных на компьютере пакетов с удаленным репозиторием."
+
+#: urpm-reposync.py:76
+#: urpm-downloader.py:104
+msgid "Use only selected URPM media"
+msgstr "Использовать только указанные источники"
+
+#: urpm-reposync.py:77
+#: urpm-downloader.py:105
+msgid "Do not use selected URPM media"
+msgstr "Не использовать указанные источники"
+
+#. arg_parser.add_argument('-x', '--exclude-packages', action='store',nargs = '+', help="Exclude package(s) by regex")
+#: urpm-reposync.py:79
+#: urpm-downloader.py:102
+msgid "Verbose (print additional info)"
+msgstr "Выводить при исполнении отладочную информацию"
+
+#: urpm-reposync.py:80
+msgid "Quiet operation. Senseless without --auto."
+msgstr "Ничего не выводить на экран. Не используется без --auto."
+
+#: urpm-reposync.py:81
+msgid "Do not ask questions, just do it!"
+msgstr "Выполнять все действия без вопросов"
+
+#: urpm-reposync.py:82
+msgid "Only print the list of actions to be done and do nothing more!"
+msgstr "Только вывести список планируемых действий и выйти."
+
+#: urpm-reposync.py:83
+msgid "Only download the rpm files, but install or remove nothing."
+msgstr "Только скачать пакеты, но ничего не устанавливать и не удалять."
+
+#. arg_parser.add_argument('-n', '--noremove', action='store_true', help=_("Do not remove packages at all. If some installed package prevent another package from beeing updated - do not update it."))
+#: urpm-reposync.py:85
+msgid "Remove all the packages which do not present in repository. By default, only some of them would be removed."
+msgstr "Удалять все пакеты, которых нет в репозитории. По умолчанию инструмент пытается сохранить их, если возможно."
+
+#: urpm-reposync.py:86
+msgid "Download packages and check wether they can be installed to your system, but do not install them."
+msgstr "Скачать пакеты и проверить, могут ли они быть установлены на текущую систему. Пакеты не будут установлены."
+
+#: urpm-reposync.py:87
+msgid "Do nothing with kernels."
+msgstr "Ничего не делать с ядрами."
+
+#: urpm-reposync.py:88
+msgid "Run self-tests end exit."
+msgstr "Запустить самопроверку."
+
+#: urpm-reposync.py:89
+msgid "Show detailed information about packages are going to be removed or installed (why does it have to be done)"
+msgstr "Показывать детальную информацию о пакетах, которые будут удалены и установлены (будут объяснены решения по каждому пакету)."
+
+#: urpm-reposync.py:93
+msgid "It's senseless to use --quiet without --auto!"
+msgstr "Использование --quiet без --auto лишено смысла!"
+
+#: urpm-reposync.py:305
+msgid "Loading the list of installed packages..."
+msgstr "Загрузка списка установленных пакетов..."
+
+#: urpm-reposync.py:319
+msgid "Duplicating "
+msgstr "Дублирующийся пакет "
+
+#: urpm-reposync.py:320
+msgid "Already found: "
+msgstr "Уже найдено: "
+
+#. print synthesis_list
+#: urpm-reposync.py:396
+msgid "Processing medium "
+msgstr "Обработка источника "
+
+#: urpm-reposync.py:414
+#, python-format
+msgid "Could not read synthesis file. (File %s not found)"
+msgstr "Файл %s не найден. Невозможно обработать synthesis файл."
+
+#: urpm-reposync.py:484
+msgid "File can not be processed! Url: "
+msgstr "Не удалось обработать файл! Url: "
+
+#: urpm-reposync.py:579
+#, python-format
+msgid "Removing %s"
+msgstr "Удаление %s"
+
+#: urpm-reposync.py:586
+msgid "urpm-reposync: error in package %s. Data: %(data)s"
+msgstr "urpm-reposync: ошибка при работе с пакетом %s. Данные: %(data)s"
+
+#: urpm-reposync.py:683
+#, python-format
+msgid "\tRequires %s, which will not be installed."
+msgstr "\tТребует пакет %s, который не будет установлен."
+
+#: urpm-reposync.py:689
+#, python-format
+msgid "\t%s conflicts with it"
+msgstr "\t%s конфликтует с этим пакетом"
+
+#: urpm-reposync.py:694
+#, python-format
+msgid "\tIt conflicts with %s"
+msgstr "\tКонфликтует с %s"
+
+#: urpm-reposync.py:768
+msgid "Some packages can not be installed dew to unresolved dependencies: "
+msgstr "Некоторые пакеты не могут быть установлены из-за неразрешенных зависимостей:"
+
+#: urpm-reposync.py:771
+msgid "Contact repository maintaiers and send them this information, please."
+msgstr "Пожалуйста, отправьте разработчикам дистрибутива эту информацию."
+
+#: urpm-reposync.py:777
+msgid "Downloading files..."
+msgstr "Скачивание файлов..."
+
+#: urpm-reposync.py:807
+msgid "Generating transaction..."
+msgstr "Создание транзакции..."
+
+#: urpm-reposync.py:825
+msgid "Checking dependencies..."
+msgstr "Проверка зависимостей..."
+
+#: urpm-reposync.py:830
+msgid "requires"
+msgstr "требует"
+
+#: urpm-reposync.py:832
+msgid "conflicts with"
+msgstr "конфликтует с"
+
+#: urpm-reposync.py:848
+#, python-format
+msgid "Package %(name)s-%(ver)s-%(rel)s %(t)s %(namereq)s%(verreq)s"
+msgstr "Пакет %(name)s-%(ver)s-%(rel)s %(t)s %(namereq)s%(verreq)s"
+
+#: urpm-reposync.py:854
+msgid "There are some unresolved dependencies: "
+msgstr "Найдены неразрешенные зависимости: "
+
+#: urpm-reposync.py:857
+msgid "Packages can not be installed. Please, contact urpm-tools developers and provide this output."
+msgstr "Пакеты не могут быть установлены. Пожалуйста, отправьте разработчику весь вывод программы."
+
+#: urpm-reposync.py:859
+msgid "No errors found in transaction"
+msgstr "Ошибок не найдено"
+
+#: urpm-reposync.py:864
+msgid "Running transaction..."
+msgstr "Запуск транзакции..."
+
+#: urpm-reposync.py:905
+msgid "WARNING: Some libraries are going to be removed because there are only the packages with the other architecture in the repository. Maybe you missed media with the correct architecture?"
+msgstr "ПРЕДУПРЕЖДЕНИЕ: Некоторые библиотеки будут удалены, потому что в репозитории присутствуют только эти библиотеки с другой архитектурой. Может быть, нужно добавить источники с правильными архитектурами?"
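+
+# Note: the install target in the top-level Makefile ships a compiled
+# catalog (urpm-tools.mo), not this .po source. A sketch of producing it,
+# assuming GNU gettext's msgfmt is available:
+#
+#   msgfmt -o locale/ru/LC_MESSAGES/urpm-tools.mo \
+#          locale/ru/LC_MESSAGES/urpm-tools.po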
+
+#: urpm-reposync.py:946
+#: urpm-reposync.py:981
+#: urpm-reposync.py:1003
+#: urpm-reposync.py:1016
+msgid "Package Name"
+msgstr "Имя пакета"
+
+#: urpm-reposync.py:946
+#: urpm-reposync.py:1003
+#: urpm-reposync.py:1016
+msgid "Current Version"
+msgstr "Текущая версия"
+
+#: urpm-reposync.py:946
+msgid "New Version"
+msgstr "Новая версия"
+
+#: urpm-reposync.py:946
+#: urpm-reposync.py:981
+#: urpm-reposync.py:1003
+#: urpm-reposync.py:1016
+msgid "Arch"
+msgstr "Архитектура"
+
+#: urpm-reposync.py:948
+msgid "medium"
+msgstr "источник"
+
+#: urpm-reposync.py:970
+msgid "The following packages are going to be upgraded:"
+msgstr "Следующие пакеты будут обновлены:"
+
+#: urpm-reposync.py:975
+msgid "The following packages are going to be downgraded:"
+msgstr "Версии следующих пакетов будут понижены:"
+
+#: urpm-reposync.py:980
+msgid "Additional packages are going to be installed:"
+msgstr "Дополнительные пакеты будут установлены:"
+
+#: urpm-reposync.py:981
+msgid "Version"
+msgstr "Версия"
+
+#: urpm-reposync.py:997
+#, python-format
+msgid "\tRequired by %s"
+msgstr "\tТребуется для %s"
+
+#: urpm-reposync.py:1002
+msgid "The following packages are going to be removed:"
+msgstr "Следующие пакеты будут удалены:"
+
+#: urpm-reposync.py:1015
+msgid "Packages which are not present in the repositories, but do not have to be removed (they will be kept):"
+msgstr "Пакеты, которые отсутствуют в репозитории, но могут быть сохранены:"
+
+#: urpm-reposync.py:1022
+#, python-format
+msgid "%d packages are going to be downloaded and installed."
+msgstr "Пакетов будет скачано и установлено: %d."
+
+#: urpm-reposync.py:1023
+#, python-format
+msgid "%d packages are going to be removed."
+msgstr "Пакетов будет удалено: %d."
+
+#: urpm-reposync.py:1024
+#, python-format
+msgid "%s will be downloaded."
+msgstr "Данных будет скачано: %s."
+
+#: urpm-reposync.py:1080
+#, python-format
+msgid "\tForced to be removed due to \"%s\" policy."
+msgstr "\tДолжен быть удален из-за правила \"%s\"."
+
+#: urpm-reposync.py:1108
+msgid "Nothing to do"
+msgstr "В системе не требуются изменения"
+
+#: urpm-reposync.py:1121
+msgid "Do you want to proceed? (y/n): "
+msgstr "Хотите продолжить? (д/н): "
+
+#: urpm-reposync.py:1126
+msgid "y"
+msgstr "д"
+
+#: urpm-reposync.py:1126
+msgid "yes"
+msgstr "да"
+
+#: urpm-reposync.py:1128
+msgid "n"
+msgstr "н"
+
+#: urpm-reposync.py:1128
+msgid "no"
+msgstr "нет"
+
+#: urpm-repograph.py:86
+msgid "Tool for generating a dependency graph for REPOSITORY packages."
+msgstr "Инструмент для создания графа зависимостей для пакетов из репозитория."
+
+#: urpm-repograph.py:90
+msgid "Search for cross-repository references in CROSS_REPO(s) repositories."
+msgstr "Искать зависимости между репозиториями в репозиториях CROSS_REPO."
+
+#: urpm-repograph.py:93
+msgid "Hide service messages. (About progress status etc.)"
+msgstr "Не показывать служебные сообщения. (О прогрессе и т. д.)"
+
+#: urpm-repograph.py:95
+msgid "Show warnings. (About unprovided packages etc.)"
+msgstr "Показывать предупреждения. (О зависимостях, не предоставляемых ни одним пакетом из репозитория, и т. д.)"
+
+#: urpm-repograph.py:98
+msgid "Process \"requires\" package dependencies. Used by default."
+msgstr "Обрабатывать пакетные зависимости \"requires\". Используется по умолчанию."
+
+#: urpm-repograph.py:100
+msgid "Process \"suggests\" package dependencies. If used without --requires then only suggests dependencies are processed."
+msgstr "Обрабатывать пакетные зависимости \"suggests\". Если используется без --requires, то будут обрабатываться только мягкие зависимости."
+
+#: urpm-repograph.py:103
+msgid "Process file dependencies."
+msgstr "Обрабатывать файловые зависимости."
+
+#: urpm-repograph.py:105
+msgid "Show unprovided dependencies."
+msgstr "Показать зависимости, не предоставленные ни одним пакетом из репозитория."
+
+#: urpm-repograph.py:109
+msgid "Search for packages which are required by package PKG (PKG is a file name or package name)"
+msgstr "Искать пакеты, которые нужны пакету PKG. (PKG - это имя пакета или файла)"
+
+#: urpm-repograph.py:111
+msgid "Search for packages which require package PKG (PKG is a file name or package name)"
+msgstr "Искать пакеты, которым нужен пакет PKG. (PKG - это имя пакета или файла)"
+
+#: urpm-repograph.py:115
+msgid "Search for all simple loops of package dependencies."
+msgstr "Поиск всех простых циклов в пакетных зависимостях."
+
+#: urpm-repograph.py:117
+msgid "Search for alternative packages providing the same feature."
+msgstr "Поиск альтернативных пакетов, предоставляющих одну и ту же зависимость."
+
+#: urpm-repograph.py:119
+msgid "Search for all broken packages and anything between them."
+msgstr "Искать все пакеты с нарушенными зависимостями и цепочки пакетов между ними."
+
+#: urpm-repograph.py:121
+msgid "Output each loop or each alternative in a different file. Ignored if the --loops or --alternatives options are not present. OUTPUT_FILE (if present) is treated as a folder name for the new files in that case."
+msgstr "Выводить каждый цикл или каждую альтернативу в отдельный файл. Игнорируется, если не указано ни --loops, ни --alternatives. OUTPUT_FILE (если указан) в этом случае рассматривается как имя директории для новых файлов."
+
+#: urpm-repograph.py:127
+msgid "Change graph output to \"OUTPUT_FILE\". STDOUT by default."
+msgstr "Перенаправить вывод графа в файл \"OUTPUT_FILE\". По умолчанию используется STDOUT."
+
+#: urpm-repograph.py:129
+msgid "Do not output the graph. The tool will not start working if --quiet and --nograph are present and --verbose is not. (If there is nothing to output, then nothing has to be done.)"
+msgstr "Не выводить граф. Инструмент не будет ничего делать, если включены --quiet и --nograph, а --verbose нет. (Если ничего не надо выводить, то и не надо ничего делать.)"
+
+#: urpm-repograph.py:157
+#: urpm-repodiff.py:125
+#, python-format
+msgid "Error: URL to repository \"%s\" is incorrect"
+msgstr "Ошибка: Неверный URL репозитория \"%s\""
+
+#: urpm-repograph.py:179
+#: urpm-repodiff.py:147
+#, python-format
+msgid "Error: directory %s does not exist"
+msgstr "Ошибка: директория %s не существует"
+
+#: urpm-repograph.py:189
+#: urpm-repodiff.py:157
+#, python-format
+msgid "Error: \"%s\" is not a correct URL, path or name of a repository"
+msgstr "Ошибка: \"%s\" не является корректным URL, путем или именем репозитория"
+
+#: urpm-repograph.py:216
+#, python-format
+msgid "Error: directory %s already exists"
+msgstr "Ошибка: директория %s уже существует"
+
+#: urpm-repograph.py:222
+#: urpm-repograph.py:237
+#: urpm-repodiff.py:183
+#, python-format
+msgid "Error: File %s already exists"
+msgstr "Ошибка: Файл %s уже существует"
+
+#: urpm-repograph.py:229
+#, python-format
+msgid "Error: directory %s was not created"
+msgstr "Ошибка: директория %s не была создана"
+
+#: urpm-repograph.py:246
+#: urpm-repodiff.py:192
+#, python-format
+msgid "Error: File %s cannot be created"
+msgstr "Ошибка: Не удалось создать файл %s"
+
+#: urpm-repograph.py:250
+#: urpm-repodiff.py:196
+#, python-format
+msgid "Error: Path %s does not exist."
+msgstr "Ошибка: Путь %s не существует."
+
+#: urpm-repograph.py:262
+#: urpm-repodiff.py:218
+#, python-format
+msgid "getting file %s from "
+msgstr "получение файла %s из "
+
+#: urpm-repograph.py:267
+#: urpm-repodiff.py:223
+#, python-format
+msgid "Error: file %s was not copied"
+msgstr "Ошибка: файл %s не был скопирован"
+
+#: urpm-repograph.py:275
+#: urpm-repodiff.py:231
+#, python-format
+msgid "Error: file %(from)s was not downloaded to %(to)s"
+msgstr "Ошибка: файл %(from)s не был скачан в %(to)s"
+
+#: urpm-repograph.py:288
+#: urpm-repodiff.py:272
+msgid "Error: file not found: "
+msgstr "Ошибка: файл не найден: "
+
+#: urpm-repograph.py:293
+#: urpm-repodiff.py:277
+#, python-format
+msgid "Error: cannot rename file %(from)s to %(to)s"
+msgstr "Ошибка: не удалось переименовать файл %(from)s в %(to)s"
+
+#: urpm-repograph.py:297
+#: urpm-repograph.py:313
+#: urpm-repograph.py:543
+#: urpm-repodiff.py:281
+#, python-format
+msgid "Error: file %s is missing."
+msgstr "Ошибка: файл %s отсутствует."
+
+#: urpm-repograph.py:301
+#: urpm-repodiff.py:285
+#, python-format
+msgid "file %(from)s was renamed to %(to)s"
+msgstr "файл %(from)s был переименован в %(to)s"
+
+#: urpm-repograph.py:311
+#: urpm-repograph.py:541
+#: urpm-repodiff.py:294
+#: urpm-repodiff.py:297
+msgid "unpacking file "
+msgstr "распаковка файла "
+
+#: urpm-repograph.py:371
+#: urpm-repodiff.py:410
+msgid "REPODIFF-Warning: strange : "
+msgstr "REPODIFF-Предупреждение: необычное поле : "
+
+#: urpm-repograph.py:406
+#: urpm-repodiff.py:351
+#, python-format
+msgid "Error: Synthesis file %s was not found."
+msgstr "Ошибка: Synthesis файл %s не найден."
+
+#: urpm-repograph.py:409
+msgid "Parsing synthesis."
+msgstr "Обработка synthesis файла."
+
+#: urpm-repograph.py:435
+#, python-format
+msgid "Warning: Unexpected sign %(sign)s in 'provides' section of %(of)s"
+msgstr "Предупреждение: неожиданный знак %(sign)s в секции 'provides' пакета %(of)s"
+
+#: urpm-repograph.py:451
+#: urpm-repodiff.py:380
+msgid "Error: Failed to open synthesis file "
+msgstr "Ошибка: Не удалось открыть synthesis файл "
+
+#: urpm-repograph.py:555
+msgid "Reading fileslist"
+msgstr "Чтение файла со списком"
+
+#: urpm-repograph.py:557
+msgid "Error: Can't find fileslist "
+msgstr "Ошибка: Не удалось найти файл со списком "
+
+#: urpm-repograph.py:561
+msgid "Error: Can't read fileslist "
+msgstr "Ошибка: Не удалось прочитать файл со списком "
+
+#: urpm-repograph.py:565
+msgid "Error: Wrong fileslist."
+msgstr "Ошибка: Неправильный файл со списком."
+
+#: urpm-repograph.py:578
+msgid "Error: Corrupted fileslist"
+msgstr "Ошибка: Поврежденный файл со списком"
+
+#: urpm-repograph.py:608
+msgid "Warning: cross-repository dependency: "
+msgstr "Предупреждение: пакет из одного репозитория зависит от пакета из другого: "
+
+#: urpm-repograph.py:612
+#: urpm-repograph.py:662
+msgid "Warning: package has self-dependencies: "
+msgstr "Предупреждение: пакет зависит от себя: "
+
+#: urpm-repograph.py:658
+#, python-format
+msgid ""
+"Warning: cross-repository dependency:\n"
+" package %(pkg)s depends on\n"
+" <- %(from)s located in another repository"
+msgstr ""
+"Предупреждение: зависимость между репозиториями:\n"
+" пакет %(pkg)s зависит от\n"
+" <- %(from)s, расположенного в другом репозитории"
+
+#: urpm-repograph.py:691
+#, python-format
+msgid "Warning: needed version <%(ver)s> %(rel)s required by package is absent"
+msgstr "Предупреждение: отсутствует версия <%(ver)s> %(rel)s, требуемая пакетом"
+
+#: urpm-repograph.py:708
+#, python-format
+msgid "Warning: Package %(pkg)s unprovided by %(by)s"
+msgstr "Предупреждение: Файл %(by)s требуется пакету %(pkg)s, но не предоставляется ни одним пакетом"
+
+#: urpm-repograph.py:740
+msgid "Finding dependencies."
+msgstr "Поиск зависимостей."
+
+#: urpm-repograph.py:749
+#, python-format
+msgid ""
+"Warning: can't find <%(ask)s> required by package\n"
+" <%(pkg)s>"
+msgstr ""
+"Предупреждение: не удалось найти <%(ask)s>, требуемый пакетом\n"
+" <%(pkg)s>"
+
+#: urpm-repograph.py:812
+msgid "Total cross-referenced packages: "
+msgstr "Всего пакетов с межрепозиторными зависимостями: "
+
+#: urpm-repograph.py:816
+msgid "Total unprovided packages: "
+msgstr "Всего пакетов с ничем не предоставленными зависимостями: "
+
+#: urpm-repograph.py:833
+msgid "Calculating colors."
+msgstr "Вычисление цветов."
+
+#: urpm-repograph.py:1112
+msgid "Non-cycle nodes removed: "
+msgstr "Нецикличных узлов удалено: "
+
+#: urpm-repograph.py:1113
+msgid "Cyclic packages: "
+msgstr "Зацикленных пакетов осталось: "
+
+#: urpm-repograph.py:1130
+#, python-format
+msgid "Worktime: %s seconds"
+msgstr "Время работы: %s секунд"
+
+#: urpm-repograph.py:1136
+msgid "Searching loops."
+msgstr "Поиск циклов."
+
+#: urpm-repograph.py:1140
+#: urpm-repograph.py:1188
+msgid "End of search."
+msgstr "Конец поиска."
+
+#: urpm-repograph.py:1141
+#, python-format
+msgid "Loops search: %s seconds"
+msgstr "Поиск циклов: %s секунд"
+
+#: urpm-repograph.py:1145
+#, python-format
+msgid "Total: %s loops."
+msgstr "Всего: %s циклов."
+
+#: urpm-repograph.py:1151
+msgid "Loop "
+msgstr "Цикл "
+
+#: urpm-repograph.py:1168
+msgid "Searching alternatives."
+msgstr "Поиск альтернатив."
+
+#: urpm-repograph.py:1180
+#, python-format
+msgid "Total: %d alternatives."
+msgstr "Всего: %d альтернатив."
+
+#: urpm-repograph.py:1182
+msgid "Alternative "
+msgstr "Альтернатива "
+
+#: urpm-repograph.py:1182
+msgid " is provided by:"
+msgstr " предоставляется:"
+
+#: urpm-repograph.py:1260
+msgid "Searching for broken packages."
+msgstr "Поиск пакетов с нарушенными зависимостями."
+
+#: urpm-repograph.py:1266
+msgid "Searching for packages REQUIRED by "
+msgstr "Поиск пакетов, требуемых "
+
+#: urpm-repograph.py:1268
+msgid "Searching for packages that REQUIRE "
+msgstr "Поиск пакетов, требующих "
+
+#: urpm-repograph.py:1276
+#, python-format
+msgid "Level %d dependency."
+msgstr "Зависимость уровня %d."
+
+#: urpm-repograph.py:1355
+msgid "Remaking structures."
+msgstr "Пересоздание структур."
+
+#: urpm-repograph.py:1367
+msgid "Error: can't find package name or filename \""
+msgstr "Ошибка: Не удалось найти имя пакета или файла \""
+
+#: urpm-repograph.py:1401
+msgid "Do not use -q/--quiet and -n/--nograph together without -v/--verbose."
+msgstr "Не используйте -q/--quiet совместно с -n/--nograph без -v/--verbose."
+
+#: urpm-repograph.py:1402
+msgid "That way there is no information to output anywhere. Nothing will be done."
+msgstr "В этом случае нет информации, которую можно вывести. Ничего не будет сделано."
+
+#: urpm-repograph.py:1405
+msgid "Do not use the -u/--unprovided and -b/--broken options together."
+msgstr "Не используйте -u/--unprovided и -b/--broken вместе."
+
+#: urpm-repograph.py:1406
+msgid "-b does everything that -u does, and a little more."
+msgstr "-b делает все то же, что и -u, и немного больше."
+
+#: urpm-downloader.py:91
+msgid "A tool for downloading RPMs and SRPMs from URPM-based Linux repositories"
+msgstr "Инструмент, позволяющий скачивать RPM и SRPM пакеты из URPM репозиториев"
+
+#: urpm-downloader.py:92
+msgid "If none of the options -b, -s, -d is turned on, it will be treated as -b"
+msgstr "Если ни одна из опций -b, -s или -d не указана, то по умолчанию включается -b"
+
+#: urpm-downloader.py:93
+msgid "Package name(s) to download. It can contain not only package names, but (S)RPM files too. In this case the package name extracted from the file will be used"
+msgstr "Имена пакетов для скачивания. Можно также использовать имена существующих (S)RPM файлов, в этом случае информация об имени пакета будет извлечена из них."
+
+#: urpm-downloader.py:94
+msgid "Instead of downloading files, list the URLs that would be processed"
+msgstr "Выводить URL файлов, но не скачивать их (в случае использования совместно с -a или -r src.rpm файл все равно будет скачан, так как без этого невозможно разрешить сборочные зависимости)"
+
+#: urpm-downloader.py:95
+msgid "When downloading RPMs, resolve dependencies and also download the required packages, if they are not already installed"
+msgstr "При скачивании пакета разрешать зависимости и скачивать все необходимые пакеты, но только если они не установлены в системе."
+
+#: urpm-downloader.py:96
+msgid "When downloading RPMs, resolve dependencies and also download the required packages, even if they are already installed"
+msgstr "При скачивании пакета разрешать зависимости и скачивать все необходимые пакеты, даже если они уже установлены в системе."
+
+#: urpm-downloader.py:97
+msgid "Download binary RPMs"
+msgstr "Скачивать бинарные пакеты (RPM)"
+
+#: urpm-downloader.py:98
+msgid "Download the source RPMs (SRPMs)"
+msgstr "Скачать пакеты с исходными кодами (SRPM)"
+
+#: urpm-downloader.py:99
+msgid "Download debug RPMs"
+msgstr "Скачать пакеты с отладочной информацией"
+
+#: urpm-downloader.py:100
+msgid "Download debug RPMs and install"
+msgstr "Скачать пакеты с отладочной информацией и установить"
+
+#: urpm-downloader.py:103
+msgid "Quiet operation."
+msgstr "Ничего не печатать в консоль"
+
+#: urpm-downloader.py:106
+msgid "Exclude package(s) by regex"
+msgstr "Исключить пакеты по регулярному выражению"
+
+#: urpm-downloader.py:107
+msgid "Try to continue when an error occurs"
+msgstr "Пытаться игнорировать ошибки"
+
+#: urpm-downloader.py:108
+msgid "If the file already exists, download it again and overwrite the old one"
+msgstr "Если файл уже существует, скачать его заново и заменить."
+
+#: urpm-downloader.py:109
+msgid "If a package dependency can be satisfied by several packages, download all of them (by default, only the first one is downloaded)"
+msgstr "Скачивать все пакеты, которые могут удовлетворить зависимости для данного пакета (по умолчанию скачивается лишь один)."
+
+#: urpm-downloader.py:110
+msgid "If different versions of a package are present in the repository, process them all"
+msgstr "Если в репозитории присутствует несколько версий пакета, обработать их все."
+
+#. arg_parser.add_argument('--self-test', action='store_true', help="Test urpm-downloader and exit")
+#: urpm-downloader.py:113
+msgid "Specify a destination directory for the download"
+msgstr "Директория, в которую будут помещены скачанные файлы"
+
+#: urpm-downloader.py:130
+msgid "Use of --verbose with --quiet is senseless. Turning verbose mode off."
+msgstr "Использование --verbose совместно с --quiet лишено смысла. Опция --verbose будет проигнорирована."
+
+#: urpm-downloader.py:134
+msgid "Note that resolving SRPM dependencies is not possible until the SRPM is downloaded. So, it will be done despite --urls"
+msgstr "Помните, что разрешение сборочных зависимостей SRPM невозможно без скачивания этого файла, поэтому SRPM файл все равно будет скачан несмотря на --urls"
+
+#: urpm-downloader.py:375
+msgid "* Downloaded: "
+msgstr "* Скачано: "
+
+#: urpm-downloader.py:377
+msgid "* File exists, skipping: "
+msgstr "* Файл существует, пропускаю: "
+
+#: urpm-downloader.py:476
+msgid "Cannot download SRPM for package"
+msgstr "Не удалось скачать SRPM файл для пакета"
+
+#: urpm-downloader.py:499
+#: urpm-downloader.py:532
+msgid "Cannot download RPM"
+msgstr "Не удалось скачать RPM файл"
+
+#: urpm-downloader.py:504
+msgid "Resolving debug-info packages..."
+msgstr "Поиск пакетов с отладочной информацией..."
+
+#. urpmq output. RU: Нет пакета с названием
+#: urpm-downloader.py:509
+msgid "No package named "
+msgstr "Нет пакета с названием "
+
+#: urpm-downloader.py:533
+msgid "Maybe you need to update the urpmi database (urpmi.update -a)?"
+msgstr "Может быть, нужно обновить базу urpmi (urpmi.update -a)?"
+
+#: urpm-downloader.py:542
+msgid "Installing "
+msgstr "Установка "
+
+#. return code is not 0
+#: urpm-downloader.py:553
+#, python-format
+msgid "Debug package for '%s' not found"
+msgstr "Для пакета '%s' не найдено пакета с отладочной информацией."
+
+#: urpm-downloader.py:602
+msgid "Parameters that end with '.rpm' seem to be local files, but the following files do not exist: "
+msgstr "Параметры, заканчивающиеся на '.rpm', расцениваются как файлы, но следующие файлы не существуют: "
+
+#: urpm-downloader.py:627
+msgid "Searching src.rpm file(s) in repository..."
+msgstr "Поиск подходящих src.rpm файлов в репозитории..."
+
+#: urpm-downloader.py:629
+msgid "Downloading src.rpm file(s)..."
+msgstr "Скачивание src.rpm файлов..."
+
+#: urpm-downloader.py:659
+msgid "Resolving build dependencies..."
+msgstr "Разрешение сборочных зависимостей..."
+
+#: urpm-downloader.py:661
+msgid "Resolving dependencies..."
+msgstr "Разрешение зависимостей..."
+
+#: urpm-downloader.py:663
+#, python-format
+msgid "Resolved %d packages"
+msgstr "Найдено пакетов: %d"
+
+#: urpm-downloader.py:665
+msgid "Nothing to download"
+msgstr "Нечего скачивать"
+
+#: urpm-repomanage.py:56
+#, python-format
+msgid "Error accessing directory %(path)s, %(e)s"
+msgstr "Ошибка доступа к директории %(path)s: %(e)s"
+
+#: urpm-repomanage.py:86
+msgid "manage a directory of rpm packages and report newest or oldest packages"
+msgstr "Обработать директорию с rpm пакетами и сообщить о наиболее новых и старых версиях"
+
+#: urpm-repomanage.py:92
+msgid "path to directory with rpm packages"
+msgstr "путь к директории с rpm пакетами"
+
+#: urpm-repomanage.py:95
+msgid "print the older packages"
+msgstr "напечатать более старые пакеты"
+
+#: urpm-repomanage.py:97
+msgid "print the newest packages (this is the default behavior)"
+msgstr "напечатать наиболее новые пакеты (поведение по умолчанию)"
+
+#: urpm-repomanage.py:99
+msgid "remove older packages"
+msgstr "удалить более старые пакеты"
+
+#: urpm-repomanage.py:101
+msgid "space separated output, not newline"
+msgstr "вывод разделяется пробелами, а не переводами строки"
+
+#: urpm-repomanage.py:103
+msgid "number of newest packages to keep - defaults to 1"
+msgstr "количество наиболее новых пакетов, которые надо оставить - по умолчанию 1"
+
+#: urpm-repomanage.py:105
+msgid "do not check package payload signatures/digests"
+msgstr "не проверять встроенные подписи пакетов"
+
+#: urpm-repomanage.py:108
+msgid "be completely quiet"
+msgstr "ничего не печатать"
+
+#: urpm-repomanage.py:110
+msgid "be verbose - say which packages are decided to be old and why (this info is dumped to STDERR)"
+msgstr "показывать дополнительную информацию - какие пакеты признаны старыми и почему (информация выводится в STDERR)"
+
+#: urpm-repomanage.py:131
+msgid "No files to process"
+msgstr "Нет файлов для обработки"
+
+#: urpm-repomanage.py:144
+#, python-format
+msgid "Error opening pkg %(pkg)s: %(err)s"
+msgstr "Ошибка открытия пакета %(pkg)s: %(err)s"
+
+#: urpm-repomanage.py:195
+#: urpm-repomanage.py:221
+msgid "Dropped "
+msgstr "Убран "
+
+#: urpm-repomanage.py:196
+#: urpm-repomanage.py:222
+msgid " superseded by: "
+msgstr " заменен на: "
+
+#: urpm-repodiff.py:83
+msgid "Tool for comparing sets of repositories."
+msgstr "Инструмент для сравнения наборов репозиториев."
+
+#: urpm-repodiff.py:85
+msgid "URL or PATH to old repositories"
+msgstr "URL или пути к старым репозиториям"
+
+#: urpm-repodiff.py:87
+msgid "URL or PATH to new repositories"
+msgstr "URL или пути к новым репозиториям"
+
+#: urpm-repodiff.py:89
+msgid "Show differences in package sizes."
+msgstr "Показывать различия в размерах пакетов."
+
+#: urpm-repodiff.py:91
+msgid "Simple output format."
+msgstr "Упрощенный формат вывода."
+
+#: urpm-repodiff.py:93
+msgid "Hide service messages."
+msgstr "Не показывать служебные сообщения."
+
+#: urpm-repodiff.py:95
+msgid "Show changelog difference."
+msgstr "Показывать разницу списков изменений."
+
+#: urpm-repodiff.py:97
+#, python-format
+msgid "Output in HTML format, if --output is not present \"%s\" will be created in the current directory. --size, --simple and --changelog options are ignored."
+msgstr "Вывод в формате HTML. Если --output не указан, то файл \"%s\" будет создан в текущей директории. Опции --size, --simple и --changelog будут игнорироваться."
+
+#: urpm-repodiff.py:101
+msgid "Change standard output to \"OUTPUT_FILE\"."
+msgstr "Перенаправить вывод в \"OUTPUT_FILE\"."
+
+#: urpm-repodiff.py:174
+#, python-format
+msgid "Error: Cannot open %s for writing."
+msgstr "Ошибка: Не удалось открыть %s для записи."
+
+#: urpm-repodiff.py:354
+msgid "Parsing synthesis"
+msgstr "Чтение synthesis файла"
+
+#: urpm-repodiff.py:389
+msgid "REPODIFF-Warning: strange format of or : "
+msgstr "REPODIFF-Предупреждение: необычный формат или : "
+
+#: urpm-repodiff.py:527
+msgid "New package: "
+msgstr "Новый пакет: "
+
+#: urpm-repodiff.py:542
+msgid "Generating obsoleted list."
+msgstr "Создание списка устаревших пакетов."
+
+#: urpm-repodiff.py:601
+msgid "Removed package: "
+msgstr "Удален пакет: "
+
+#: urpm-repodiff.py:609
+msgid " Obsoleted by "
+msgstr " Устарел из-за добавления "
+
+#: urpm-repodiff.py:630
+msgid "Reading changelog"
+msgstr "Чтение списка изменений"
+
+#: urpm-repodiff.py:632
+msgid "Error: Can't find changelog "
+msgstr "Ошибка: Не удалось найти список изменений "
+
+#: urpm-repodiff.py:636
+msgid "Error: Can't read changelog "
+msgstr "Ошибка: Не удалось прочитать список изменений "
+
+#: urpm-repodiff.py:640
+msgid "Error: Wrong changelog."
+msgstr "Ошибка: Неправильный список изменений."
+
+#: urpm-repodiff.py:662
+msgid "Error: Corrupted changelog"
+msgstr "Ошибка: Поврежденный список изменений"
+
+#: urpm-repodiff.py:756
+msgid "Generating changes list."
+msgstr "Создание списка изменений."
+
+#: urpm-repodiff.py:770
+#: urpm-repodiff.py:773
+#, python-format
+msgid "REPODIFF-Warning: Package %s was not described in changelogs.xml"
+msgstr "REPODIFF-Предупреждение: Пакет %s не описан в changelogs.xml"
+
+#: urpm-repodiff.py:771
+msgid "REPODIFF-Warning: Changelogs of a package are absent in the \"new\" repository."
+msgstr "REPODIFF-Предупреждение: В репозитории \"новый\" отсутствует список изменений пакета."
+
+#: urpm-repodiff.py:774
+msgid "REPODIFF-Warning: Changelogs of a package are absent."
+msgstr "REPODIFF-Предупреждение: У пакета отсутствует список изменений."
+
+#: urpm-repodiff.py:800
+#, python-format
+msgid "Package %s has no changelog info\n"
+msgstr "Пакет %s не имеет списка изменений\n"
+
+#: urpm-repodiff.py:818
+msgid ""
+"\n"
+"\n"
+"Updated packages:\n"
+"\n"
+msgstr ""
+"\n"
+"\n"
+"Обновленные пакеты:\n"
+"\n"
+
+#: urpm-repodiff.py:825
+msgid " ***DOWNGRADED***\n"
+msgstr " ***УСТАНОВЛЕНА ПРЕДЫДУЩАЯ ВЕРСИЯ***\n"
+
+#: urpm-repodiff.py:834
+#, python-format
+msgid ""
+"Size Change: %d bytes\n"
+"\n"
+msgstr ""
+"Размер изменен: %d байт\n"
+"\n"
+
+#: urpm-repodiff.py:844
+msgid " Total added packages: "
+msgstr " Всего добавлено пакетов: "
+
+#: urpm-repodiff.py:847
+msgid " Total removed packages: "
+msgstr " Всего удалено пакетов: "
+
+#: urpm-repodiff.py:856
+msgid " Total updated packages: "
+msgstr " Всего обновлено пакетов: "
+
+#: urpm-repodiff.py:858
+msgid " Total downgraded packages: "
+msgstr " Всего пакетов с пониженной версией: "
+
+#: urpm-repodiff.py:1316
+msgid "Creating HTML file."
+msgstr "Создание HTML файла."
+
+#: urpm-package-cleanup.py:58
+msgid "Find problems in the system's rpmdb and correct them"
+msgstr "Найти проблемы в локальной базе RPM и исправить их"
+
+#: urpm-package-cleanup.py:62
+msgid "Query format to use for output."
+msgstr "Формат вывода."
+
+#: urpm-package-cleanup.py:65
+msgid "Use non-interactive mode"
+msgstr "Работать в неинтерактивном режиме"
+
+#: urpm-package-cleanup.py:68
+msgid "Orphans Options"
+msgstr "Осиротевшие пакеты"
+
+#: urpm-package-cleanup.py:71
+msgid "List installed packages which are not available from currently configured repositories"
+msgstr "Перечислить пакеты, недоступные в настроенных на текущий момент репозиториях"
+
+#: urpm-package-cleanup.py:75
+msgid "Use only update media. This means that urpmq will search and resolve dependencies only in media marked as containing updates (e.g. which have been created with \"urpmi.addmedia --update\")."
+msgstr "Использовать только источники обновлений. Это означает, что urpmq будет искать и разрешать зависимости, только используя источники, помеченные как источники обновлений (например, которые были добавлены при помощи \"urpmi.addmedia --update\")."
+
+#: urpm-package-cleanup.py:80
+msgid "Select specific media to be used, instead of defaulting to all available media (or all update media if --update is used). No rpm will be found in other media."
+msgstr "Выбрать особые источники вместо того, чтобы использовать все доступные по умолчанию источники (или все источники обновлений, если указан флаг --update). В других источниках пакеты искаться не будут."
+
+#: urpm-package-cleanup.py:85
+msgid "Do not use the specified media."
+msgstr "Не использовать указанные источники."
+
+#: urpm-package-cleanup.py:87
+msgid "Dependency Problems Options"
+msgstr "Проблемы с зависимостями"
+
+#: urpm-package-cleanup.py:90
+msgid "List dependency problems in the local RPM database"
+msgstr "Перечислить проблемы с зависимостями в локальной базе RPM"
+
+#: urpm-package-cleanup.py:93
+msgid "List missing suggestions of installed packages"
+msgstr "Перечислить недостающие мягкие зависимости установленных пакетов"
+
+#: urpm-package-cleanup.py:96
+msgid "Duplicate Package Options"
+msgstr "Дублирующиеся пакеты"
+
+#: urpm-package-cleanup.py:99
+msgid "Scan for duplicates in your rpmdb"
+msgstr "Найти дубликаты в локальной базе RPM"
+
+#: urpm-package-cleanup.py:102
+msgid "Scan for duplicates in your rpmdb and remove older "
+msgstr "Найти дубликаты в локальной базе RPM и удалить более старые"
+
+#: urpm-package-cleanup.py:105
+msgid "disable rpm scriptlets from running when cleaning duplicates"
+msgstr "отключить скриптлеты rpm при очистке дубликатов"
+
+#: urpm-package-cleanup.py:107
+msgid "Leaf Node Options"
+msgstr "Листовые узлы"
+
+#: urpm-package-cleanup.py:110
+msgid "List leaf nodes in the local RPM database"
+msgstr "Перечислить листовые узлы в локальной базе RPM"
+
+#: urpm-package-cleanup.py:113
+msgid "list all leaf-node packages that do not match leaf-regex"
+msgstr "перечислить все пакеты-листовые узлы, имя которых не подходит под регулярное выражение"
+
+#: urpm-package-cleanup.py:117
+msgid "A package name that matches this regular expression (case insensitively) is a leaf"
+msgstr "Считать листовым узлом пакет, имя которого подходит под регулярное выражение (регистронезависимо)."
+
+#: urpm-package-cleanup.py:121
+msgid "do not list development packages as leaf nodes"
+msgstr "не считать devel пакеты листовыми узлами"
+
+#: urpm-package-cleanup.py:124
+msgid "do not list packages with files in bin dirs as leaf nodes"
+msgstr "не считать пакеты, имеющие файлы в bin директориях, листовыми узлами"
+
+#: urpm-package-cleanup.py:127
+msgid "Old Kernel Options"
+msgstr "Старые ядра"
+
+#: urpm-package-cleanup.py:130
+msgid "Remove old kernel and kernel-devel packages"
+msgstr "Удалить старые ядра и их devel пакеты"
+
+#: urpm-package-cleanup.py:133
+msgid "Number of kernel packages to keep on the system (default 2)"
+msgstr "Количество пакетов с ядрами, которые надо сохранить в системе (по умолчанию 2)"
+
+#: urpm-package-cleanup.py:137
+msgid "Do not remove kernel-devel packages when removing kernels"
+msgstr "Не удалять kernel-devel пакеты при удалении ядер"
+
+#: urpm-package-cleanup.py:306
+#, python-format
+msgid "Warning: neither single nor multi lib arch: %s "
+msgstr "Предупреждение: некорректная архитектура: %s "
+
+#: urpm-package-cleanup.py:417
+#, python-format
+msgid "Not removing kernel %(kver)s-%(krel)s because it is the running kernel"
+msgstr "Ядро %(kver)s-%(krel)s не будет удалено, потому что это запущенное ядро"
+
+#: urpm-package-cleanup.py:447
+#, python-format
+msgid "Package %(qf)s %(prob)s"
+msgstr "Пакет %(qf)s %(prob)s"
+
+#: urpm-package-cleanup.py:450
+msgid "Missing suggests:"
+msgstr "Недостающие мягкие зависимости:"
+
+#: urpm-package-cleanup.py:458
+msgid "No Problems Found"
+msgstr "Проблем не найдено"
+
+#: urpm-package-cleanup.py:473
+msgid "Error: Cannot remove kernels as a user, must be root"
+msgstr "Ошибка: Невозможно удалить ядро, нужны права root."
+
+#: urpm-package-cleanup.py:476
+msgid "Error: should keep at least 1 kernel!"
+msgstr "Ошибка: нужно оставить хотя бы одно ядро!"
+
+#: urpm-package-cleanup.py:529
+msgid "Error: Cannot remove packages as a user, must be root"
+msgstr "Ошибка: невозможно удалить пакет, нужны права root."
+
+#~ msgid "Running transaction..."
+#~ msgstr "Запуск транзакции..."
+
+#~ msgid "Downloading packages..."
+#~ msgstr "Скачивание пакетов..."
+
+#~ msgid "Could not download packages. Urpm-download output: "
+#~ msgstr "Не удалось скачать пакеты. Вывод urpm-downloader: "
+
+#~ msgid "Output in HTML format, if --output is not present HTML will be created in current directory. --size and --simple options are ignored."
+#~ msgstr "Вывод в формате HTML. Если --output не указан, то HTML файл будет создан в текущей директории. Опции --size и --simple будут игнорироваться."
+
+#~ msgid "input is not correct url, path or name of repository"
+#~ msgstr "Введенная строка не является корректным URL, путем или именем репозитория"
+
+#~ msgid "getting file %s from"
+#~ msgstr "получение файла %s из"
+
+#~ msgid "Error: file %s was not downloaded"
+#~ msgstr "Ошибка: файл %s не был скачан"
+
+#~ msgid "file %(from)s was renamed to %(to)s"
+#~ msgstr "файл %(from)s был переименован в %(to)s"
+
+#~ msgid "Error: cannot rename file %(from)s to %(to)s"
+#~ msgstr "Ошибка: не удалось переименовать файл %(from)s в %(to)s"
+
+#~ msgid "Output in HTML format, if --output is not present"
+#~ msgstr "Вывод в формате HTML, если --output не "
+
+#~ msgid "usage: "
+#~ msgstr "использование: "
+
+#~ msgid ""
+#~ "URPM Repos Closure Checker [_1] for Mandriva Linux\n"
+#~ "A tool for checking closure of a set of RPM packages\n"
+#~ "Copyright (C) 2012 ROSA Laboratory\n"
+#~ "License: GNU GPL\n"
+#~ "\n"
+#~ "Usage: [_2] [options]\n"
+#~ "Example: [_2] --hdlist=hdlist.txt\n"
+#~ "\n"
+#~ "More info: [_2] --help\n"
+#~ msgstr ""
+#~ "URPM Repos Closure Checker [_1] для Mandriva Linux\n"
+#~ "Инструмент для определения замкнутости набора RPM пакетов\n"
+#~ "Copyright (C) 2012 Лаборатория РОСА\n"
+#~ "Лицензия: GNU GPL\n"
+#~ "\n"
+#~ "Использование: [_2] [options]\n"
+#~ "Пример: [_2] --hdlist=hdlist.txt\n"
+#~ "\n"
+#~ "Больше информации: [_2] --help\n"
+
+#~ msgid ""
+#~ "\n"
+#~ "NAME:\n"
+#~ " URPM Repos Closure Checker 1.0 for Mandriva Linux\n"
+#~ " A tool for checking closure of a set of RPM packages\n"
+#~ "\n"
+#~ "USAGE:\n"
+#~ " [_1] --hdlist=hdlist.txt\n"
+#~ " [_1] --hdlist=http://mirror.yandex.ru/mandriva/.../synthesis.hdlist.cz\n"
+#~ " [_1] --dir=rpms/ --static --file-deps=file-deps.txt\n"
+#~ " [_1] --list=list.txt --dynamic\n"
+#~ "\n"
+#~ "OPTIONS:\n"
+#~ " -h|-help\n"
+#~ " Print this help.\n"
+#~ "\n"
+#~ " -v|-version\n"
+#~ " Print version information.\n"
+#~ "\n"
+#~ " -hdlist <path>\n"
+#~ " Path or URL of HDlist (synthesis) to check.\n"
+#~ "\n"
+#~ " -d|-dir <path>\n"
+#~ " The directory with RPM packages to check.\n"
+#~ "\n"
+#~ " -l|-list <path>\n"
+#~ " The list of packages to check.\n"
+#~ "\n"
+#~ " -add|-update <path>\n"
+#~ " The directory with RPM packages that should\n"
+#~ " be added to the repository or updated.\n"
+#~ "\n"
+#~ " -file-deps <path>\n"
+#~ " Read file-deps to ignore some unresolved\n"
+#~ " dependencies.\n"
+#~ "\n"
+#~ " -s|-static\n"
+#~ " Check statically if all required dependencies are\n"
+#~ " satisfied by provided dependencies in the set of\n"
+#~ " RPM packages.\n"
+#~ "\n"
+#~ " -dynamic\n"
+#~ " Install a set of RPM packages to the local chroot\n"
+#~ " and check if extra packages were installed.\n"
+#~ "\n"
+#~ " -r|-check-release\n"
+#~ " Check installation media (DVD).\n"
+#~ "\n"
+#~ " -sign|-check-signature\n"
+#~ " Validate package signatures.\n"
+#~ "\n"
+#~ " -noclean\n"
+#~ " Do not clean urpmi cache.\n"
+#~ "\n"
+#~ " -root <path>\n"
+#~ " Where to install packages.\n"
+#~ " Default:\n"
+#~ " /tmp/...\n"
+#~ "\n"
+#~ "EXIT CODES:\n"
+#~ " 0 - Success. The tool has run without any errors\n"
+#~ " non-zero - Failed or the tool has run with errors. In particular:\n"
+#~ " 1 - Failed to run the tool\n"
+#~ " 2 - Discovered dependency problems\n"
+#~ " \n"
+#~ "\n"
+#~ msgstr ""
+#~ "\n"
+#~ "ИМЯ:\n"
+#~ " URPM Repos Closure Checker 1.0 для Mandriva Linux\n"
+#~ " Инструмент для определения замкнутости набора RPM пакетов\n"
+#~ "\n"
+#~ "ИСПОЛЬЗОВАНИЕ:\n"
+#~ " [_1] --hdlist=hdlist.txt\n"
+#~ " [_1] --hdlist=http://mirror.yandex.ru/mandriva/.../synthesis.hdlist.cz\n"
+#~ " [_1] --dir=rpms/ --static --file-deps=file-deps.txt\n"
+#~ " [_1] --list=list.txt --dynamic\n"
+#~ "\n"
+#~ "ОПЦИИ:\n"
+#~ " -h|-help\n"
+#~ " Показать это сообщение.\n"
+#~ "\n"
+#~ " -v|-version\n"
+#~ " Показать информацию о версии программы.\n"
+#~ "\n"
+#~ " -hdlist <путь>\n"
+#~ " Путь к HDlist (synthesis), который надо проверить.\n"
+#~ "\n"
+#~ " -d|-dir <путь>\n"
+#~ " Директория с RPM пакетами, которую надо проверить.\n"
+#~ "\n"
+#~ " -l|-list <путь>\n"
+#~ " Список пакетов, который надо проверить.\n"
+#~ "\n"
+#~ " -add|-update <путь>\n"
+#~ " Директория с RPM пакетами, которые\n"
+#~ " надо добавить в репозиторий или обновить.\n"
+#~ "\n"
+#~ " -file-deps <путь>\n"
+#~ " Использовать файл file-deps для игнорирования\n"
+#~ " некоторых зависимостей.\n"
+#~ "\n"
+#~ " -s|-static\n"
+#~ " Статический анализ зависимостей.\n"
+#~ "\n"
+#~ " -dynamic\n"
+#~ " Динамический анализ зависимостей (через установку пакетов).\n"
+#~ "\n"
+#~ " -r|-check-release\n"
+#~ " Проверить установочный диск (CD/DVD).\n"
+#~ "\n"
+#~ " -sign|-check-signature\n"
+#~ " Проверить сигнатуры пакетов.\n"
+#~ "\n"
+#~ " -noclean\n"
+#~ " Не очищать кэш инструментария URPM.\n"
+#~ "\n"
+#~ " -root <путь>\n"
+#~ " Куда устанавливать пакеты.\n"
+#~ " Путь по умолчанию:\n"
+#~ " /tmp/...\n"
+#~ "\n"
+#~ "КОДЫ ОШИБОК:\n"
+#~ " 0 - Успех. Набор пакетов замкнут. 
Ошибок не произошло.\n" +#~ " 1 - Ошибки во время выполнения программы.\n" +#~ " 2 - Набор пакетов не замкнут.\n" +#~ " \n" +#~ "\n" + +#~ msgid "can't open file '[_1]': [_2]\n" +#~ msgstr "не удалось открыть файл '[_1]': [_2]\n" + +#~ msgid "ERROR: you should be root\n" +#~ msgstr "ОШИБКА: требуются права администратора\n" + +#~ msgid "ERROR: cannot access '[_1]'\n" +#~ msgstr "ОШИБКА: не удалось найти '[_1]'\n" + +#~ msgid "ERROR: the list of packages is empty\n" +#~ msgstr "ОШИБКА: список пакетов пуст\n" + +#~ msgid "ERROR: file '[_1]' is not RPM package\n" +#~ msgstr "ОШИБКА: файл '[_1]' не является RPM пакетом\n" + +#~ msgid "ERROR: --dir or --list option should be specified\n" +#~ msgstr "ОШИБКА: одна из следующих опций должна быть предоставлена: --dir или --list\n" + +#~ msgid "" +#~ "Extra Packages:\n" +#~ "\n" +#~ msgstr "" +#~ "Дополнительные Пакеты:\n" +#~ "\n" + +#~ msgid " (required by: [_1])" +#~ msgstr " (требуется в: [_1])" + +#~ msgid "" +#~ "Broken Packages:\n" +#~ "\n" +#~ msgstr "" +#~ "Сломанные Пакеты:\n" +#~ "\n" + +#~ msgid "Report has been generated to:" +#~ msgstr "Отчет создан:" + +#~ msgid "Checking RPMs ...\n" +#~ msgstr "Проверка RPM пакетов ...\n" + +#~ msgid "Checking [_1]\n" +#~ msgstr "Проверка [_1]\n" + +#~ msgid " FAILED: invalid signature\n" +#~ msgstr " ОШИБКА: некорректная сигнатура\n" + +#~ msgid "" +#~ "Broken Signature:\n" +#~ "\n" +#~ msgstr "" +#~ "Некорректные сигнатуры:\n" +#~ "\n" + +#~ msgid "ERROR: --hdlist, --dir or --list option should be specified\n" +#~ msgstr "ОШИБКА: одна из следующих опций должна быть предоставлена: --hdlist, --dir or --list\n" + +#~ msgid "Downloading HDlist ...\n" +#~ msgstr "Загрузка HDlist-файла ...\n" + +#~ msgid "ERROR: cannot extract '[_1]'\n" +#~ msgstr "ОШИБКА: не удалось распаковать '[_1]'\n" + +#~ msgid "ERROR: unknown format of hdlist\n" +#~ msgstr "ОШИБКА: неизвестный формат HDlist-файла\n" + +#~ msgid "Checking HDlist ...\n" +#~ msgstr "Проверка HDlist-файла ...\n" + +#~ msgid "Unresolved \"Required\" Dependencies ([_1]):" +#~ msgstr "Сломанные \"Requires\" Зависимости ([_1]):" + +#~ msgid "Unresolved \"Suggested\" Dependencies ([_1]):" +#~ msgstr "Сломанные \"Suggests\" Зависимости ([_1]):" + +#~ msgid "Broken Packages ([_1]):" +#~ msgstr "Сломанные Пакеты ([_1]):" + +#~ msgid "" +#~ "URPM Repos Closure Checker [_1] for Mandriva Linux\n" +#~ "Copyright (C) 2012 ROSA Laboratory\n" +#~ "License: GPL \n" +#~ "This program is free software: you can redistribute it and/or modify it.\n" +#~ "\n" +#~ "Written by Andrey Ponomarenko.\n" +#~ msgstr "" +#~ "URPM Repos Closure Checker [_1] для Mandriva Linux\n" +#~ "Copyright (C) 2012 Лаборатория РОСА\n" +#~ "Лицензия: GPL \n" diff --git a/localizer.py b/localizer.py new file mode 100755 index 0000000..36130a3 --- /dev/null +++ b/localizer.py @@ -0,0 +1,62 @@ +#!/usr/bin/python2.7 +# -*- coding: UTF-8 -*- + +import os, sys + +quiet = False +if '--list' in sys.argv: + quiet = True + +def qprint(text): + if quiet: + sys.stderr.write(text + '\n') + sys.stderr.flush() + return + print text + +def dumb(cmd): + if quiet: + return cmd + ' 1>&2' + else: + return cmd + +walkres = os.walk('.') +fls = [] +pos = [] + +for path, dirs, files in walkres: + for file in files: + p = os.path.join(path, file) + if p.endswith(".py"): + fls.append(p) + if p.endswith(".pl"): + fls.append(p) + if p.endswith(".po"): + pos.append(p) + +if not fls: + qprint("No python modules found!") + exit(1) + + +FN = 'urpm-tools.pot' + +qprint("Generating " + FN) +cmd = "xgettext -d urpm-tools -o " + FN 
+ ' -c --no-wrap ' + ' '.join(fls) +os.system(dumb(cmd)) + +LIST_OUT = [] +for po in pos: + qprint("Updating " + po) + LIST_OUT.append(po.split('/')[2]) + + cmd = "msgmerge --no-wrap -U " + po + ' ' + FN + os.system(dumb(cmd)) + mo = po[:-2] + 'mo' + qprint ("Compiling " + po) + cmd = "msgfmt -o " + mo + ' ' + po + os.system(dumb(cmd)) + +if quiet: + print ' '.join(LIST_OUT) + \ No newline at end of file diff --git a/rpm5utils/COPYING b/rpm5utils/COPYING new file mode 100644 index 0000000..e77696a --- /dev/null +++ b/rpm5utils/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 675 Mass Ave, Cambridge, MA 02139, USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. 
This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. 
Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) 19yy + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) 19yy name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. 
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/rpm5utils/Makefile b/rpm5utils/Makefile
new file mode 100644
index 0000000..e3f87f8
--- /dev/null
+++ b/rpm5utils/Makefile
@@ -0,0 +1,27 @@
+PYTHON=python
+PACKAGE = $(shell basename `pwd`)
+PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')
+PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix')
+PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER)
+SITEDIR = $(PYLIBDIR)/site-packages
+PKGDIR = $(SITEDIR)/$(PACKAGE)
+
+all:
+	echo "Nothing to do"
+
+clean:
+	rm -f *.pyc *.pyo *~
+
+install:
+	mkdir -p $(DESTDIR)/$(PKGDIR)
+
+	# copy the whole package directory and set permissions for files and folders
+	cp -rf . $(DESTDIR)/$(PKGDIR)
+	# don't copy these files
+	rm -f $(DESTDIR)/$(PKGDIR)/Makefile
+	rm -f $(DESTDIR)/$(PKGDIR)/COPYING
+	find $(DESTDIR)/$(PKGDIR) -type f | xargs chmod 644
+	find $(DESTDIR)/$(PKGDIR) -type d | xargs chmod 775
+
+	# compile python sources
+	$(PYTHON) -m compileall $(DESTDIR)/$(PKGDIR)
\ No newline at end of file
diff --git a/rpm5utils/__init__.py b/rpm5utils/__init__.py
new file mode 100644
index 0000000..ae44f70
--- /dev/null
+++ b/rpm5utils/__init__.py
@@ -0,0 +1,10 @@
+
+import rpm5utils.urpmgraphs
+from rpm5utils.urpmgraphs import *
+
+class Rpm5UtilsError(Exception):
+
+    """ Exception thrown for anything rpm5utils related.
""" + + def __init__(self, args=None): + Exception.__init__(self, args) diff --git a/rpm5utils/arch.py b/rpm5utils/arch.py new file mode 100644 index 0000000..02ca7a4 --- /dev/null +++ b/rpm5utils/arch.py @@ -0,0 +1,423 @@ + +import os + +# dict mapping arch -> ( multicompat, best personality, biarch personality ) +multilibArches = { "x86_64": ( "athlon", "x86_64", "athlon" ), + "sparc64v": ( "sparcv9v", "sparcv9v", "sparc64v" ), + "sparc64": ( "sparcv9", "sparcv9", "sparc64" ), + "ppc64": ( "ppc", "ppc", "ppc64" ), + "s390x": ( "s390", "s390x", "s390" ), + } + +arches = { + # ia32 + "athlon": "i686", + "i686": "i586", + "geode": "i586", + "i586": "i486", + "i486": "i386", + "i386": "noarch", + + # amd64 + "x86_64": "athlon", + "amd64": "x86_64", + "ia32e": "x86_64", + + # ppc + "ppc64pseries": "ppc64", + "ppc64iseries": "ppc64", + "ppc64": "ppc", + "ppc": "noarch", + + # s390{,x} + "s390x": "s390", + "s390": "noarch", + + # sparc + "sparc64v": "sparcv9v", + "sparc64": "sparcv9", + "sparcv9v": "sparcv9", + "sparcv9": "sparcv8", + "sparcv8": "sparc", + "sparc": "noarch", + + # alpha + "alphaev7": "alphaev68", + "alphaev68": "alphaev67", + "alphaev67": "alphaev6", + "alphaev6": "alphapca56", + "alphapca56": "alphaev56", + "alphaev56": "alphaev5", + "alphaev5": "alphaev45", + "alphaev45": "alphaev4", + "alphaev4": "alpha", + "alpha": "noarch", + + # arm + "armv7l": "armv6l", + "armv6l": "armv5tejl", + "armv5tejl": "armv5tel", + "armv5tel": "noarch", + + # super-h + "sh4a": "sh4", + "sh4": "noarch", + "sh3": "noarch", + + #itanium + "ia64": "noarch", + } + +def legitMultiArchesInSameLib(arch=None): + # this is completely crackrock - if anyone has a better way I + # am all ears + + arch = getBestArch(arch) + if isMultiLibArch(arch): + arch = getBaseArch(myarch=arch) + + results = [arch] + + if arch == 'x86_64' or arch.startswith('sparcv9'): + for (k, v) in arches.items(): + if v == arch: + results.append(k) + return results + + +def canCoinstall(arch1, arch2): + """Take two arches and return True if it is possible that they can be + installed together with the same nevr. Ex: arch1=i386 and arch2=i686 then + it will return False. arch1=i386 and arch2=x86_64 will return True. + It does not determine whether or not the arches make any sense. 
Just whether
+    they could possibly install w/o conflict"""
+
+    # if both are multilib arches then we can't coinstall (x86_64, ia32e)
+    # if both are not multilib arches then we can't coinstall (i386, i686)
+
+    if 'noarch' in [arch1, arch2]: # noarch can never coinstall
+        return False
+
+    if isMultiLibArch(arch=arch1) == isMultiLibArch(arch=arch2):
+        return False
+    # this section keeps arch1=x86_64 arch2=ppc from returning True
+    if arch1 in getArchList(arch2) or arch2 in getArchList(arch1):
+        return True
+    return False
+
+# this computes the difference between myarch and targetarch
+def archDifference(myarch, targetarch):
+    if myarch == targetarch:
+        return 1
+    if myarch in arches:
+        ret = archDifference(arches[myarch], targetarch)
+        if ret != 0:
+            return ret + 1
+        return 0
+    return 0
+
+def score(arch):
+    return archDifference(canonArch, arch)
+
+def isMultiLibArch(arch=None):
+    """returns true if arch is a multilib arch, false if not"""
+    if arch is None:
+        arch = canonArch
+
+    if arch not in arches: # or we could check if it is noarch
+        return 0
+
+    if arch in multilibArches:
+        return 1
+
+    if arches[arch] in multilibArches:
+        return 1
+
+    return 0
+
+def getBestArchFromList(archlist, myarch=None):
+    """
+    return the best arch from the list for myarch; if myarch is not given,
+    return the best arch from the list for the canonArch.
+    """
+
+    if len(archlist) == 0:
+        return None
+
+    if myarch is None:
+        myarch = canonArch
+
+    bestarch = getBestArch(myarch)
+    if bestarch != myarch:
+        bestarchchoice = getBestArchFromList(archlist, bestarch)
+        if bestarchchoice is not None and bestarchchoice != "noarch":
+            return bestarchchoice
+
+    thisarch = archlist[0]
+    for arch in archlist[1:]:
+        val1 = archDifference(myarch, thisarch)
+        val2 = archDifference(myarch, arch)
+        if val1 == 0 and val2 == 0:
+            continue
+        if val1 < val2:
+            if val1 == 0:
+                thisarch = arch
+        if val2 < val1:
+            if val2 != 0:
+                thisarch = arch
+        if val1 == val2:
+            pass
+
+    # thisarch should now be our bestarch
+    # one final check to make sure we're not returning a bad arch
+    val = archDifference(myarch, thisarch)
+    if val == 0:
+        return None
+
+    return thisarch
+
+
+def getArchList(thisarch=None):
+    # this returns a list of archs that are compatible with arch given
+    if not thisarch:
+        thisarch = canonArch
+
+    archlist = [thisarch]
+    while thisarch in arches:
+        thisarch = arches[thisarch]
+        archlist.append(thisarch)
+
+    # hack hack hack
+    # sparc64v is also sparc64 compat
+    if archlist[0] == "sparc64v":
+        archlist.insert(1,"sparc64")
+
+    # if we're a weirdo arch - add noarch on there.
+    if len(archlist) == 1 and archlist[0] == thisarch:
+        archlist.append('noarch')
+    return archlist
+
+def _try_read_cpuinfo():
+    """ Try to read /proc/cpuinfo; if we can't, ignore errors (i.e. /proc not
+        mounted).
""" + try: + lines = open("/proc/cpuinfo", "r").readlines() + return lines + except: + return [] + +def getCanonX86Arch(arch): + # + if arch == "i586": + for line in _try_read_cpuinfo(): + if line.startswith("model name") and line.find("Geode(TM)") != -1: + return "geode" + return arch + # only athlon vs i686 isn't handled with uname currently + if arch != "i686": + return arch + + # if we're i686 and AuthenticAMD, then we should be an athlon + for line in _try_read_cpuinfo(): + if line.startswith("vendor") and line.find("AuthenticAMD") != -1: + return "athlon" + # i686 doesn't guarantee cmov, but we depend on it + elif line.startswith("flags") and line.find("cmov") == -1: + return "i586" + + return arch + +def getCanonPPCArch(arch): + # FIXME: should I do better handling for mac, etc? + if arch != "ppc64": + return arch + + machine = None + for line in _try_read_cpuinfo(): + if line.find("machine") != -1: + machine = line.split(':')[1] + break + if machine is None: + return arch + + if machine.find("CHRP IBM") != -1: + return "ppc64pseries" + if machine.find("iSeries") != -1: + return "ppc64iseries" + return arch + +def getCanonSPARCArch(arch): + # Deal with sun4v, sun4u, sun4m cases + SPARCtype = None + for line in _try_read_cpuinfo(): + if line.startswith("type"): + SPARCtype = line.split(':')[1] + break + if SPARCtype is None: + return arch + + if SPARCtype.find("sun4v") != -1: + if arch.startswith("sparc64"): + return "sparc64v" + else: + return "sparcv9v" + if SPARCtype.find("sun4u") != -1: + if arch.startswith("sparc64"): + return "sparc64" + else: + return "sparcv9" + if SPARCtype.find("sun4m") != -1: + return "sparcv8" + return arch + +def getCanonX86_64Arch(arch): + if arch != "x86_64": + return arch + + vendor = None + for line in _try_read_cpuinfo(): + if line.startswith("vendor_id"): + vendor = line.split(':')[1] + break + if vendor is None: + return arch + + if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1: + return "amd64" + if vendor.find("GenuineIntel") != -1: + return "ia32e" + return arch + +def getCanonArch(skipRpmPlatform = 0): + if not skipRpmPlatform and os.access("/etc/rpm/platform", os.R_OK): + try: + f = open("/etc/rpm/platform", "r") + line = f.readline() + f.close() + (arch, vendor, opersys) = line.split("-", 2) + return arch + except: + pass + + arch = os.uname()[4] + + if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"): + return getCanonX86Arch(arch) + + if arch.startswith("ppc"): + return getCanonPPCArch(arch) + if arch.startswith("sparc"): + return getCanonSPARCArch(arch) + if arch == "x86_64": + return getCanonX86_64Arch(arch) + + return arch + +canonArch = getCanonArch() + +# this gets you the "compat" arch of a biarch pair +def getMultiArchInfo(arch = canonArch): + if arch in multilibArches: + return multilibArches[arch] + if arch in arches and arches[arch] != "noarch": + return getMultiArchInfo(arch = arches[arch]) + return None + +# get the best usual userspace arch for the arch we're on. this is +# our arch unless we're on an arch that uses the secondary as its +# userspace (eg ppc64, sparc64) +def getBestArch(myarch=None): + if myarch: + arch = myarch + else: + arch = canonArch + + if arch.startswith("sparc64"): + arch = multilibArches[arch][1] + + if arch.startswith("ppc64"): + arch = 'ppc' + + return arch + +def getBaseArch(myarch=None): + """returns 'base' arch for myarch, if specified, or canonArch if not. 
+ base arch is the arch before noarch in the arches dict if myarch is not + a key in the multilibArches.""" + + if not myarch: + myarch = canonArch + + if myarch not in arches: # this is dumb, but + return myarch + + if myarch.startswith("sparc64"): + return "sparc" + elif myarch.startswith("ppc64"): + return "ppc" + elif myarch.startswith("arm"): + return "arm" + + if isMultiLibArch(arch=myarch): + if myarch in multilibArches: + return myarch + else: + return arches[myarch] + + if myarch in arches: + basearch = myarch + value = arches[basearch] + while value != 'noarch': + basearch = value + value = arches[basearch] + + return basearch + + +class ArchStorage(object): + """class for keeping track of what arch we have set and doing various + permutations based on it""" + def __init__(self): + self.canonarch = None + self.basearch = None + self.bestarch = None + self.compatarches = [] + self.archlist = [] + self.multilib = False + self.setup_arch() + + def setup_arch(self, arch=None, archlist_includes_compat_arch=True): + if arch: + self.canonarch = arch + else: + self.canonarch = getCanonArch() + + self.basearch = getBaseArch(myarch=self.canonarch) + self.archlist = getArchList(thisarch=self.canonarch) + + if not archlist_includes_compat_arch: # - do we bother including i686 and below on x86_64 + limit_archlist = [] + for a in self.archlist: + if isMultiLibArch(a) or a == 'noarch': + limit_archlist.append(a) + self.archlist = limit_archlist + + self.bestarch = getBestArch(myarch=self.canonarch) + self.compatarches = getMultiArchInfo(arch=self.canonarch) + self.multilib = isMultiLibArch(arch=self.canonarch) + self.legit_multi_arches = legitMultiArchesInSameLib(arch = self.canonarch) + + def get_best_arch_from_list(self, archlist, fromarch=None): + if not fromarch: + fromarch = self.canonarch + return getBestArchFromList(archlist, myarch=fromarch) + + def score(self, arch): + return archDifference(self.canonarch, arch) + + def get_arch_list(self, arch): + if not arch: + return self.archlist + return getArchList(thisarch=arch) diff --git a/rpm5utils/miscutils.py b/rpm5utils/miscutils.py new file mode 100644 index 0000000..165ec75 --- /dev/null +++ b/rpm5utils/miscutils.py @@ -0,0 +1,455 @@ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+# Copyright 2003 Duke University
+
+import rpm
+import types
+import gzip
+import os
+import sys
+import locale
+import signal
+
+import rpm5utils.transaction
+
+def rpmOutToStr(arg):
+    if type(arg) != types.StringType:
+        # and arg is not None:
+        arg = str(arg)
+
+    return arg
+
+
+def compareEVR((e1, v1, r1), (e2, v2, r2)):
+    # return 1: a is newer than b
+    # 0: a and b are the same version
+    # -1: b is newer than a
+    if e1 is None:
+        e1 = '0'
+    else:
+        e1 = str(e1)
+    if v1 is None:
+        v1 = '0'
+    else:
+        v1 = str(v1)
+    if r1 is None:
+        r1 = '0'
+    else:
+        r1 = str(r1)
+
+    if e2 is None:
+        e2 = '0'
+    else:
+        e2 = str(e2)
+    if v2 is None:
+        v2 = '0'
+    else:
+        v2 = str(v2)
+    if r2 is None:
+        r2 = '0'
+    else:
+        r2 = str(r2)
+    #~ print '%s, %s, %s vs %s, %s, %s' % (e1, v1, r1, e2, v2, r2)
+    rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
+    #~ print '%s, %s, %s vs %s, %s, %s = %s' % (e1, v1, r1, e2, v2, r2, rc)
+    return rc
+
+def compareDEVR((d1, e1, v1, r1), (d2, e2, v2, r2)):
+    # return 1: a is newer than b
+    # 0: a and b are the same version
+    # -1: b is newer than a
+    if d1 is None:
+        d1 = '0'
+    if d2 is None:
+        d2 = '0'
+
+    if d1 > d2:
+        return 1
+    if d1 < d2:
+        return -1
+
+    rc = compareEVR((e1, v1, r1), (e2, v2, r2))
+    return rc
+
+def compareVerOnly(v1, v2):
+    """compare version strings only using rpm vercmp"""
+    return compareEVR(('', v1, ''), ('', v2, ''))
+
+def checkSig(ts, package):
+    """Takes a transaction set and a package; checks its sigs.
+    return 0 if they are all fine
+    return 1 if the gpg key can't be found
+    return 2 if the header is in someway damaged
+    return 3 if the key is not trusted
+    return 4 if the pkg is not gpg or pgp signed"""
+
+    value = 0
+    currentflags = ts.setVSFlags(0)
+    fdno = os.open(package, os.O_RDONLY)
+    try:
+        hdr = ts.hdrFromFdno(fdno)
+    except rpm.error, e:
+        # match both spellings: the misspelled form is checked too because
+        # some rpm builds emit it verbatim
+        if str(e) == "public key not availaiable":
+            value = 1
+        if str(e) == "public key not available":
+            value = 1
+        if str(e) == "public key not trusted":
+            value = 3
+        if str(e) == "error reading package header":
+            value = 2
+    else:
+        error, siginfo = getSigInfo(hdr)
+        if error == 101:
+            os.close(fdno)
+            del hdr
+            value = 4
+        else:
+            del hdr
+
+    try:
+        os.close(fdno)
+    except OSError, e: # if we're not opened, don't scream about it
+        pass
+
+    ts.setVSFlags(currentflags) # put things back like they were before
+    return value
+
+def getSigInfo(hdr):
+    """checks the signature from an hdr; hands back signature information
+    and/or an error code"""
+
+    locale.setlocale(locale.LC_ALL, 'C')
+    string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
+    siginfo = hdr.sprintf(string)
+    if siginfo != '(none)':
+        error = 0
+        sigtype, sigdate, sigid = siginfo.split(',')
+    else:
+        error = 101
+        sigtype = 'MD5'
+        sigdate = 'None'
+        sigid = 'None'
+
+    infotuple = (sigtype, sigdate, sigid)
+    return error, infotuple
+
+def pkgTupleFromHeader(hdr):
+    """return a pkgtuple (n, a, e, v, r) from a hdr object, converts
+    None epoch to 0, as well."""
+
+    name = hdr['name']
+
+    # RPMTAG_SOURCEPACKAGE: RPMTAG_SOURCERPM is not necessarily there for
+    # e.g.
gpg-pubkeys imported with older rpm versions + # http://lists.baseurl.org/pipermail/yum/2009-January/022275.html + if hdr[rpm.RPMTAG_SOURCERPM] or hdr[rpm.RPMTAG_SOURCEPACKAGE] != 1: + arch = hdr['arch'] + else: + arch = 'src' + + ver = hdr['version'] + rel = hdr['release'] + epoch = hdr['epoch'] + if epoch is None: + epoch = '0' + pkgtuple = (name, arch, epoch, ver, rel) + return pkgtuple + +def pkgDistTupleFromHeader(hdr): + """the same as above, but appends DistEpoch to the tuple""" + + (n,a,e,v,r) = pkgTupleFromHeader(hdr) + d = hdr['distepoch'] + if d is None: + d = '0' + + pkgtuple = (n,a,e,v,r,d) + return pkgtuple + +def rangeCheck(reqtuple, pkgtuple): + """returns true if the package epoch-ver-rel satisfy the range + requested in the reqtuple: + ex: foo >= 2.1-1""" + # we only ever get here if we have a versioned prco + # nameonly shouldn't ever raise it + #(reqn, reqf, (reqe, reqv, reqr)) = reqtuple + (n, a, e, v, r) = pkgtuple + return rangeCompare(reqtuple, (n, rpm.RPMSENSE_EQUAL, (e, v, r))) + +def rangeCompare(reqtuple, provtuple): + """returns true if provtuple satisfies reqtuple""" + (reqn, reqf, (reqe, reqv, reqr)) = reqtuple + (n, f, (e, v, r)) = provtuple + if reqn != n: + return 0 + + # unversioned satisfies everything + if not f or not reqf: + return 1 + + # and you thought we were done having fun + # if the requested release is left out then we have + # to remove release from the package prco to make sure the match + # is a success - ie: if the request is EQ foo 1:3.0.0 and we have + # foo 1:3.0.0-15 then we have to drop the 15 so we can match + if reqr is None: + r = None + if reqe is None: + e = None + if reqv is None: # just for the record if ver is None then we're going to segfault + v = None + + # if we just require foo-version, then foo-version-* will match + if r is None: + reqr = None + + rc = compareEVR((e, v, r), (reqe, reqv, reqr)) + + # does not match unless + if rc >= 1: + if (reqf & rpm.RPMSENSE_GREATER) or (reqf & rpm.RPMSENSE_EQUAL): + return 1 + + if rc == 0: + if (reqf & rpm.RPMSENSE_EQUAL): + return 1 + + if rc <= -1: + if (reqf & rpm.RPMSENSE_LESS) or (reqf & rpm.RPMSENSE_EQUAL): + return 1 + + return 0 + + +########### +# Title: Remove duplicates from a sequence +# Submitter: Tim Peters +# From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560 + +def unique(s): + """Return a list of the elements in s, but without duplicates. + + For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3], + unique("abcabc") some permutation of ["a", "b", "c"], and + unique(([1, 2], [2, 3], [1, 2])) some permutation of + [[2, 3], [1, 2]]. + + For best speed, all sequence elements should be hashable. Then + unique() will usually work in linear time. + + If not possible, the sequence elements should enjoy a total + ordering, and if list(s).sort() doesn't raise TypeError it's + assumed that they do enjoy a total ordering. Then unique() will + usually work in O(N*log2(N)) time. + + If that's not possible either, the sequence elements must support + equality-testing. Then unique() will usually work in quadratic + time. + """ + + n = len(s) + if n == 0: + return [] + + # Try using a dict first, as that's the fastest and will usually + # work. If it doesn't work, it will usually fail quickly, so it + # usually doesn't cost much to *try* it. It requires that all the + # sequence elements be hashable, and support equality comparison. 
+    u = {}
+    try:
+        for x in s:
+            u[x] = 1
+    except TypeError:
+        del u  # move on to the next method
+    else:
+        return u.keys()
+
+    # We can't hash all the elements.  Second fastest is to sort,
+    # which brings the equal elements together; then duplicates are
+    # easy to weed out in a single pass.
+    # NOTE:  Python's list.sort() was designed to be efficient in the
+    # presence of many duplicate elements.  This isn't true of all
+    # sort functions in all languages or libraries, so this approach
+    # is more effective in Python than it may be elsewhere.
+    try:
+        t = list(s)
+        t.sort()
+    except TypeError:
+        del t  # move on to the next method
+    else:
+        assert n > 0
+        last = t[0]
+        lasti = i = 1
+        while i < n:
+            if t[i] != last:
+                t[lasti] = last = t[i]
+                lasti += 1
+            i += 1
+        return t[:lasti]
+
+    # Brute force is all that's left.
+    u = []
+    for x in s:
+        if x not in u:
+            u.append(x)
+    return u
+
+
+def splitFilename(filename):
+    """
+    Pass in a standard style rpm fullname
+
+    Return a name, version, release, epoch, arch, e.g.::
+        foo-1.0-1.i386.rpm returns foo, 1.0, 1, '', i386
+        1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
+    """
+
+    if filename[-4:] == '.rpm':
+        filename = filename[:-4]
+
+    archIndex = filename.rfind('.')
+    arch = filename[archIndex+1:]
+
+    relIndex = filename[:archIndex].rfind('-')
+    rel = filename[relIndex+1:archIndex]
+
+    verIndex = filename[:relIndex].rfind('-')
+    ver = filename[verIndex+1:relIndex]
+
+    epochIndex = filename.find(':')
+    if epochIndex == -1:
+        epoch = ''
+    else:
+        epoch = filename[:epochIndex]
+
+    name = filename[epochIndex + 1:verIndex]
+    return name, ver, rel, epoch, arch
+
+
+def rpm2cpio(fdno, out=sys.stdout, bufsize=2048):
+    """Performs roughly the equivalent of rpm2cpio(8).
+    Reads the package from fdno, and dumps the cpio payload to out,
+    using bufsize as the buffer size."""
+    ts = rpm5utils.transaction.initReadOnlyTransaction()
+    hdr = ts.hdrFromFdno(fdno)
+    del ts
+
+    compr = hdr[rpm.RPMTAG_PAYLOADCOMPRESSOR] or 'gzip'
+    #XXX FIXME
+    #if compr == 'bzip2':
+        # TODO: someone implement me!
+    #el
+    if compr != 'gzip':
+        raise rpm5utils.Rpm5UtilsError, \
+            'Unsupported payload compressor: "%s"' % compr
+    f = gzip.GzipFile(None, 'rb', None, os.fdopen(fdno, 'rb', bufsize))
+    while 1:
+        tmp = f.read(bufsize)
+        if tmp == "": break
+        out.write(tmp)
+    f.close()
+
+def formatRequire(name, version, flags):
+    '''
+    Return a human readable requirement string (ex. foobar >= 2.0)
+    @param name: requirement name (ex. foobar)
+    @param version: requirement version (ex. 2.0)
+    @param flags: binary flags ( 0010 = less than, 0100 = greater than, 1000 = equal )
+    '''
+    s = name
+
+    if flags and (type(flags) == type(0) or type(flags) == type(0L)): # Flag must be set and an int (or a long, now)
+        if flags & (rpm.RPMSENSE_LESS | rpm.RPMSENSE_GREATER |
+                    rpm.RPMSENSE_EQUAL):
+            s = s + " "
+            if flags & rpm.RPMSENSE_LESS:
+                s = s + "<"
+            if flags & rpm.RPMSENSE_GREATER:
+                s = s + ">"
+            if flags & rpm.RPMSENSE_EQUAL:
+                s = s + "="
+            if version:
+                s = "%s %s" %(s, version)
+    return s
+
+
+def flagToString(flags):
+    flags = flags & 0xf
+
+    if flags == 0: return None
+    elif flags == 2: return 'LT'
+    elif flags == 4: return 'GT'
+    elif flags == 8: return 'EQ'
+    elif flags == 10: return 'LE'
+    elif flags == 12: return 'GE'
+
+    return flags
+
+def stringToVersion(verstring):
+    if verstring in [None, '']:
+        return (None, None, None)
+    i = verstring.find(':')
+    if i != -1:
+        try:
+            epoch = str(long(verstring[:i]))
+        except ValueError:
+            # look, garbage in the epoch field, how fun, kill it
+            epoch = '0' # this is our fallback, deal
+    else:
+        epoch = '0'
+    j = verstring.find('-')
+    if j != -1:
+        if verstring[i + 1:j] == '':
+            version = None
+        else:
+            version = verstring[i + 1:j]
+        release = verstring[j + 1:]
+    else:
+        if verstring[i + 1:] == '':
+            version = None
+        else:
+            version = verstring[i + 1:]
+        release = None
+    return (epoch, version, release)
+
+def hdrFromPackage(ts, package):
+    """hand back the rpm header or raise an Error if the pkg is fubar"""
+    try:
+        fdno = os.open(package, os.O_RDONLY)
+    except OSError, e:
+        raise rpm5utils.Rpm5UtilsError, 'Unable to open file'
+
+    # XXX: We should start a readonly ts here, so we don't get the options
+    # from the other one (sig checking, etc)
+    try:
+        hdr = ts.hdrFromFdno(fdno)
+    except rpm.error, e:
+        os.close(fdno)
+        raise rpm5utils.Rpm5UtilsError, "RPM Error opening Package"
+    if type(hdr) != rpm.hdr:
+        os.close(fdno)
+        raise rpm5utils.Rpm5UtilsError, "RPM Error opening Package (type)"
+
+    os.close(fdno)
+    return hdr
+
+def checkSignals():
+    if hasattr(rpm, "checkSignals") and hasattr(rpm, 'signalsCaught'):
+        if rpm.signalsCaught([signal.SIGINT,
+                              signal.SIGTERM,
+                              signal.SIGPIPE,
+                              signal.SIGQUIT,
+                              signal.SIGHUP]):
+            sys.exit(1)
+
diff --git a/rpm5utils/tests/updates-test.py b/rpm5utils/tests/updates-test.py
new file mode 100644
index 0000000..44ab5a2
--- /dev/null
+++ b/rpm5utils/tests/updates-test.py
@@ -0,0 +1,63 @@
+
+import rpm5utils.updates
+import rpm5utils.arch
+
+instlist = [('foo', 'i386', '0', '1', '1'),
+            ('do', 'i386', '0', '2', '3'),
+            ('glibc', 'i386', '0', '1', '1'),
+            ('bar', 'noarch', '0', '2', '1'),
+            ('baz', 'i686', '0', '2', '3'),
+            ('baz', 'x86_64', '0','1','4'),
+            ('foo', 'i686', '0', '1', '1'),
+            ('cyrus-sasl','sparcv9', '0', '1', '1')]
+
+availlist = [('foo', 'i686', '0', '1', '3'),
+             ('do', 'noarch', '0', '3', '3'),
+             ('do', 'noarch', '0', '4', '3'),
+             ('foo', 'i386', '0', '1', '3'),
+             ('foo', 'i686', '0', '1', '2'),
+             ('glibc', 'i686', '0', '1', '2'),
+             ('glibc', 'i386', '0', '1', '2'),
+             ('bar', 'noarch', '0', '2', '2'),
+             ('baz', 'noarch', '0', '2', '4'),
+             ('baz', 'i686', '0', '2', '4'),
+             ('baz', 'x86_64', '0', '1', '5'),
+             ('baz', 'ppc', '0', '1', '5'),
+             ('cyrus-sasl','sparcv9', '0', '1', '2'),
+             ('cyrus-sasl','sparc64', '0', '1', '2'),]
+
+obslist = {('quux', 'noarch', '0', '1', '3'): [('bar', None, (None, None, None))],
+
+           ('quuxish', 'noarch', '0', '1', '3'):[('foo', 'GE', ('0', '1', None))],
+          }
+
+
+up = rpm5utils.updates.Updates(instlist, availlist)
+up.debug=1
+up.exactarch=1
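+# exactarch=1 is the "don't change archs" default set in Updates.__init__;
+# names in Updates.exactarchlist are additionally pinned to per-arch version
+# comparisons.  Uncommenting the myarch line below re-runs the same sample
+# data under a sparc64 personality instead of the build host's canonical arch.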
+#up.myarch = 'sparc64'
+up._is_multilib = rpm5utils.arch.isMultiLibArch(up.myarch)
+up._archlist = rpm5utils.arch.getArchList(up.myarch)
+print up._archlist
+up._multilib_compat_arches = rpm5utils.arch.getMultiArchInfo(up.myarch)
+up.doUpdates()
+up.condenseUpdates()
+
+for tup in up.updatesdict.keys():
+    (old_n, old_a, old_e, old_v, old_r) = tup
+    for (n, a, e, v, r) in up.updatesdict[tup]:
+        print '%s.%s %s:%s-%s updated by %s.%s %s:%s-%s' % (old_n,
+            old_a, old_e, old_v, old_r, n, a, e, v, r)
+
+up.rawobsoletes = obslist
+up.doObsoletes()
+for tup in up.obsoletes.keys():
+    (old_n, old_a, old_e, old_v, old_r) = tup
+    for (n, a, e, v, r) in up.obsoletes[tup]:
+        print '%s.%s %s:%s-%s obsoletes %s.%s %s:%s-%s' % (old_n,
+            old_a, old_e, old_v, old_r, n, a, e, v, r)
+
+
+
+
+
diff --git a/rpm5utils/transaction.py b/rpm5utils/transaction.py
new file mode 100644
index 0000000..83393e1
--- /dev/null
+++ b/rpm5utils/transaction.py
@@ -0,0 +1,192 @@
+#
+# Client code for Update Agent
+# Copyright (c) 1999-2002 Red Hat, Inc.  Distributed under GPL.
+#
+# Adrian Likins
+# Some Edits by Seth Vidal
+#
+# a couple of classes wrapping up transactions so that we
+# can share transactions instead of creating new ones all over
+#
+
+import rpm
+from rpm5utils import miscutils
+
+read_ts = None
+ts = None
+
+# wrapper/proxy class for rpm.Transaction so we can
+# instrument it, etc easily
+class TransactionWrapper:
+    def __init__(self, root='/'):
+        self.ts = rpm.TransactionSet(root)
+        self._methods = ['check',
+                         'order',
+                         'addErase',
+                         'addInstall',
+                         'run',
+                         'pgpImportPubkey',
+                         'pgpPrtPkts',
+                         'problems',
+                         'setFlags',
+                         'setVSFlags',
+                         'setProbFilter',
+                         'hdrFromFdno',
+                         'next',
+                         'clean']
+        self.tsflags = []
+        self.open = True
+
+    def __del__(self):
+        # Automatically close the rpm transaction when the reference is lost
+        self.close()
+
+    def close(self):
+        if self.open:
+            self.ts.closeDB()
+            self.ts = None
+            self.open = False
+
+    def dbMatch(self, *args, **kwds):
+        if 'patterns' in kwds:
+            patterns = kwds.pop('patterns')
+        else:
+            patterns = []
+
+        mi = self.ts.dbMatch(*args, **kwds)
+        for (tag, tp, pat) in patterns:
+            mi.pattern(tag, tp, pat)
+        return mi
+
+    def __getattr__(self, attr):
+        if attr in self._methods:
+            return self.getMethod(attr)
+        else:
+            raise AttributeError, attr
+
+    def __iter__(self):
+        return self.ts
+
+    def getMethod(self, method):
+        # in theory, we can override this with
+        # profile/etc info
+        return getattr(self.ts, method)
+
+    # push/pop methods so we don't lose the previous
+    # set value, and we can potentially debug a bit
+    # more easily
+    def pushVSFlags(self, flags):
+        self.tsflags.append(flags)
+        self.ts.setVSFlags(self.tsflags[-1])
+
+    def popVSFlags(self):
+        del self.tsflags[-1]
+        self.ts.setVSFlags(self.tsflags[-1])
+
+    def addTsFlag(self, flag):
+        curflags = self.ts.setFlags(0)
+        self.ts.setFlags(curflags | flag)
+
+    def getTsFlags(self):
+        curflags = self.ts.setFlags(0)
+        self.ts.setFlags(curflags)
+        return curflags
+
+    def isTsFlagSet(self, flag):
+        val = self.getTsFlags()
+        return bool(flag & val)
+
+    def setScriptFd(self, fd):
+        self.ts.scriptFd = fd.fileno()
+
+#    def addProblemFilter(self, filt):
+#        curfilter = self.ts.setProbFilter(0)
+#        self.ts.setProbFilter(curfilter | filt)
+
+    def test(self, cb, conf={}):
+        """tests the ts we've set up, takes a callback function and a conf dict
+        for flags and what not"""
+
+        origflags = self.getTsFlags()
+        self.addTsFlag(rpm.RPMTRANS_FLAG_TEST)
+        # FIXME GARBAGE - remove once this is reimplemented elsewhere
+        # KEEPING FOR API COMPLIANCE ONLY
+        if conf.get('diskspacecheck') == 0:
+            self.ts.setProbFilter(rpm.RPMPROB_FILTER_DISKSPACE)
+        tserrors = self.ts.run(cb.callback, '')
+        self.ts.setFlags(origflags)
+
+        reserrors = []
+        if tserrors:
+            for (descr, (etype, mount, need)) in tserrors:
+                reserrors.append(descr)
+
+        return reserrors
+
+
+    def returnLeafNodes(self, headers=False):
+        """returns a list of rpm headers for packages that are not required
+        by any other package on the system.
+        If headers is True then it will return a list of (header, index)
+        tuples instead
+        """
+
+        req = {}
+        orphan = []
+
+        mi = self.dbMatch()
+        if mi is None: # this is REALLY unlikely but let's just say it for the moment
+            return orphan
+
+        # prebuild the req dict
+        for h in mi:
+            if h['name'] == 'gpg-pubkey':
+                continue
+            if not h[rpm.RPMTAG_REQUIRENAME]:
+                continue
+            tup = miscutils.pkgTupleFromHeader(h)
+            for r in h[rpm.RPMTAG_REQUIRENAME]:
+                if r not in req:
+                    req[r] = set()
+                req[r].add(tup)
+
+
+        mi = self.dbMatch()
+        if mi is None:
+            return orphan
+
+        def _return_all_provides(hdr):
+            """ Return all the provides, via yield. """
+            # These are done one by one, so that we get lazy loading
+            for prov in hdr[rpm.RPMTAG_PROVIDES]:
+                yield prov
+            for prov in hdr[rpm.RPMTAG_FILENAMES]:
+                yield prov
+
+        for h in mi:
+            if h['name'] == 'gpg-pubkey':
+                continue
+            preq = 0
+            tup = miscutils.pkgTupleFromHeader(h)
+            for p in _return_all_provides(h):
+                if p in req:
+                    # Don't count a package that provides its require
+                    s = req[p]
+                    if len(s) > 1 or tup not in s:
+                        preq = preq + 1
+                        break
+
+            if preq == 0:
+                if headers:
+                    orphan.append((h, mi.instance()))
+                else:
+                    orphan.append(h)
+                    #~ orphan.append(tup)
+
+        return orphan
+
+
+def initReadOnlyTransaction(root='/'):
+    read_ts = TransactionWrapper(root=root)
+    read_ts.pushVSFlags((rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS))
+    return read_ts
+
diff --git a/rpm5utils/updates.py b/rpm5utils/updates.py
new file mode 100644
index 0000000..4ef2849
--- /dev/null
+++ b/rpm5utils/updates.py
@@ -0,0 +1,723 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Copyright 2004 Duke University
+
+import rpm5utils
+import rpm5utils.miscutils
+import rpm5utils.arch
+
+def _vertup_cmp(tup1, tup2):
+    return rpm5utils.miscutils.compareEVR(tup1, tup2)
+
+class Updates:
+    """
+    This class computes and keeps track of updates and obsoletes.
+    initialize, add installed packages, add available packages (both as
+    unique lists of (name, arch, epoch, ver, rel) tuples), add an optional
+    dict of obsoleting packages with obsoletes and what they obsolete, i.e.::
+        foo, i386, 0, 1.1, 1: bar >= 1.1.
+ """ + + def __init__(self, instlist, availlist): + + self.installed = instlist # list of installed pkgs (n, a, e, v, r) + self.available = availlist # list of available pkgs (n, a, e, v, r) + + self.rawobsoletes = {} # dict of obsoleting package->[what it obsoletes] + self._obsoletes_by_name = None + self.obsoleted_dict = {} # obsoleted pkgtup -> [ obsoleting pkgtups ] + self.obsoleting_dict = {} # obsoleting pkgtup -> [ obsoleted pkgtups ] + + self.exactarch = 1 # don't change archs by default + self.exactarchlist = set(['kernel', 'kernel-smp', 'glibc', + 'kernel-hugemem', + 'kernel-enterprise', 'kernel-bigmem', + 'kernel-BOOT']) + + self.myarch = rpm5utils.arch.canonArch # set this if you want to + # test on some other arch + # otherwise leave it alone + self._is_multilib = rpm5utils.arch.isMultiLibArch(self.myarch) + + self._archlist = rpm5utils.arch.getArchList(self.myarch) + + self._multilib_compat_arches = rpm5utils.arch.getMultiArchInfo(self.myarch) + + # make some dicts from installed and available + self.installdict = self.makeNADict(self.installed, 1) + self.availdict = self.makeNADict(self.available, 0, # Done in doUpdate + filter=self.installdict) + + # holder for our updates dict + self.updatesdict = {} + self.updating_dict = {} + #debug, ignore me + self.debug = 0 + self.obsoletes = {} + + def _delFromDict(self, dict_, keys, value): + for key in keys: + if key not in dict_: + continue + dict_[key] = filter(value.__ne__, dict_[key]) + if not dict_[key]: + del dict_[key] + + def _delFromNADict(self, dict_, pkgtup): + (n, a, e, v, r) = pkgtup + for aa in (a, None): + if (n, aa) in dict_: + dict_[(n, aa)] = filter((e,v,r).__ne__, dict_[(n, aa)]) + if not dict_[(n, aa)]: + del dict_[(n, aa)] + + def delPackage(self, pkgtup): + """remove available pkgtup that is no longer available""" + if pkgtup not in self.available: + return + self.available.remove(pkgtup) + self._delFromNADict(self.availdict, pkgtup) + + self._delFromDict(self.updating_dict, self.updatesdict.get(pkgtup, []), pkgtup) + self._delFromDict(self.updatesdict, self.updating_dict.get(pkgtup, []), pkgtup) + + if pkgtup in self.rawobsoletes: + if self._obsoletes_by_name: + for name, flag, version in self.rawobsoletes[pkgtup]: + self._delFromDict(self._obsoletes_by_name, [name], (flag, version, pkgtup)) + del self.rawobsoletes[pkgtup] + + self._delFromDict(self.obsoleted_dict, self.obsoleting_dict.get(pkgtup, []), pkgtup) + self._delFromDict(self.obsoleting_dict, self.obsoleted_dict.get(pkgtup, []), pkgtup) + + def debugprint(self, msg): + if self.debug: + print msg + + def makeNADict(self, pkglist, Nonelists, filter=None): + """return lists of (e,v,r) tuples as value of a dict keyed on (n, a) + optionally will return a (n, None) entry with all the a for that + n in tuples of (a,e,v,r)""" + + returndict = {} + for (n, a, e, v, r) in pkglist: + if filter and (n, None) not in filter: + continue + if (n, a) not in returndict: + returndict[(n, a)] = [] + if (e,v,r) in returndict[(n, a)]: + continue + returndict[(n, a)].append((e,v,r)) + + if Nonelists: + if (n, None) not in returndict: + returndict[(n, None)] = [] + if (a,e,v,r) in returndict[(n, None)]: + continue + returndict[(n, None)].append((a, e, v, r)) + + return returndict + + + def returnNewest(self, evrlist): + """takes a list of (e, v, r) tuples and returns the newest one""" + if len(evrlist)==0: + raise rpm5utils.Rpm5UtilsError, "Zero Length List in returnNewest call" + + if len(evrlist)==1: + return evrlist[0] + + (new_e, new_v, new_r) = evrlist[0] # we'll call 
the first ones 'newest'
+
+        for (e, v, r) in evrlist[1:]:
+            rc = rpm5utils.miscutils.compareEVR((e, v, r), (new_e, new_v, new_r))
+            if rc > 0:
+                new_e = e
+                new_v = v
+                new_r = r
+        return (new_e, new_v, new_r)
+
+
+    def returnHighestVerFromAllArchsByName(self, name, archlist, pkglist):
+        """returns a list of package tuples in a list (n, a, e, v, r)
+        takes a package name, a list of archs, and a list of pkgs in
+        (n, a, e, v, r) form."""
+        returnlist = []
+        high_vertup = None
+        for pkgtup in pkglist:
+            (n, a, e, v, r) = pkgtup
+            # FIXME: returnlist used to _possibly_ contain things not in
+            #        archlist ... was that desired?
+            if name == n and a in archlist:
+                vertup = (e, v, r)
+                if (high_vertup is None or
+                    (_vertup_cmp(high_vertup, vertup) < 0)):
+                    high_vertup = vertup
+                    returnlist = []
+                if vertup == high_vertup:
+                    returnlist.append(pkgtup)
+
+        return returnlist
+
+    def condenseUpdates(self):
+        """remove any accidental duplicates in updates"""
+
+        for tup in self.updatesdict:
+            if len(self.updatesdict[tup]) > 1:
+                mylist = self.updatesdict[tup]
+                self.updatesdict[tup] = rpm5utils.miscutils.unique(mylist)
+
+
+    def checkForObsolete(self, pkglist, newest=1):
+        """accept a list of packages to check to see if anything obsoletes them
+        return an obsoleted_dict in the format of makeObsoletedDict"""
+        if self._obsoletes_by_name is None:
+            self._obsoletes_by_name = {}
+            for pkgtup, obsoletes in self.rawobsoletes.iteritems():
+                for name, flag, version in obsoletes:
+                    self._obsoletes_by_name.setdefault(name, []).append(
+                        (flag, version, pkgtup) )
+
+        obsdict = {} # obsoleting package -> [obsoleted package]
+
+        for pkgtup in pkglist:
+            name = pkgtup[0]
+            for obs_flag, obs_version, obsoleting in self._obsoletes_by_name.get(name, []):
+                if obs_flag in [None, 0] and name == obsoleting[0]: continue
+                if rpm5utils.miscutils.rangeCheck( (name, obs_flag, obs_version), pkgtup):
+                    obsdict.setdefault(obsoleting, []).append(pkgtup)
+
+        if not obsdict:
+            return {}
+
+        obslist = obsdict.keys()
+        if newest:
+            obslist = self._reduceListNewestByNameArch(obslist)
+
+        returndict = {}
+        for new in obslist:
+            for old in obsdict[new]:
+                if old not in returndict:
+                    returndict[old] = []
+                returndict[old].append(new)
+
+        return returndict
+
+    def doObsoletes(self):
+        """figures out what things available obsolete things installed, returns
+        them in a dict attribute of the class."""
+
+        obsdict = {} # obsoleting package -> [obsoleted package]
+        # this needs to keep arch in mind
+        # if foo.i386 obsoletes bar
+        # it needs to obsolete bar.i386 preferentially, not bar.x86_64
+        # if there is only one bar and only one foo then obsolete it, but try to
+        # match the arch.
+ + # look through all the obsoleting packages look for multiple archs per name + # if you find it look for the packages they obsolete + # + obs_arches = {} + for (n, a, e, v, r) in self.rawobsoletes: + if n not in obs_arches: + obs_arches[n] = [] + obs_arches[n].append(a) + + for pkgtup in self.rawobsoletes: + (name, arch, epoch, ver, rel) = pkgtup + for (obs_n, flag, (obs_e, obs_v, obs_r)) in self.rawobsoletes[(pkgtup)]: + if (obs_n, None) in self.installdict: + for (rpm_a, rpm_e, rpm_v, rpm_r) in self.installdict[(obs_n, None)]: + if flag in [None, 0] or \ + rpm5utils.miscutils.rangeCheck((obs_n, flag, (obs_e, obs_v, obs_r)), + (obs_n, rpm_a, rpm_e, rpm_v, rpm_r)): + # make sure the obsoleting pkg is not already installed + willInstall = 1 + if (name, None) in self.installdict: + for (ins_a, ins_e, ins_v, ins_r) in self.installdict[(name, None)]: + pkgver = (epoch, ver, rel) + installedver = (ins_e, ins_v, ins_r) + if self.returnNewest((pkgver, installedver)) == installedver: + willInstall = 0 + break + if rpm_a != arch and rpm_a in obs_arches[name]: + willInstall = 0 + if willInstall: + if pkgtup not in obsdict: + obsdict[pkgtup] = [] + obsdict[pkgtup].append((obs_n, rpm_a, rpm_e, rpm_v, rpm_r)) + self.obsoletes = obsdict + self.makeObsoletedDict() + + def makeObsoletedDict(self): + """creates a dict of obsoleted packages -> [obsoleting package], this + is to make it easier to look up what package obsoletes what item in + the rpmdb""" + self.obsoleted_dict = {} + for new in self.obsoletes: + for old in self.obsoletes[new]: + if old not in self.obsoleted_dict: + self.obsoleted_dict[old] = [] + self.obsoleted_dict[old].append(new) + self.obsoleting_dict = {} + for obsoleted, obsoletings in self.obsoleted_dict.iteritems(): + for obsoleting in obsoletings: + self.obsoleting_dict.setdefault(obsoleting, []).append(obsoleted) + + def doUpdates(self): + """check for key lists as populated then commit acts of evil to + determine what is updated and/or obsoleted, populate self.updatesdict + """ + + + # best bet is to chew through the pkgs and throw out the new ones early + # then deal with the ones where there are a single pkg installed and a + # single pkg available + # then deal with the multiples + + # we should take the whole list as a 'newlist' and remove those entries + # which are clearly: + # 1. updates + # 2. identical to the ones in ourdb + # 3. 
not in our archdict at all + + simpleupdate = [] + complexupdate = [] + + updatedict = {} # (old n, a, e, v, r) : [(new n, a, e, v, r)] + # make the new ones a list b/c while we _shouldn't_ + # have multiple updaters, we might and well, it needs + # to be solved one way or the other + newpkgs = self.availdict + + archlist = self._archlist + for (n, a) in newpkgs.keys(): + if a not in archlist: + # high log here + del newpkgs[(n, a)] + continue + + # remove the older stuff - if we're doing an update we only want the + # newest evrs + for (n, a) in newpkgs: + (new_e,new_v,new_r) = self.returnNewest(newpkgs[(n, a)]) + for (e, v, r) in newpkgs[(n, a)][:]: + if (new_e, new_v, new_r) != (e, v, r): + newpkgs[(n, a)].remove((e, v, r)) + + for (n, a) in newpkgs: + # simple ones - look for exact matches or older stuff + if (n, a) in self.installdict: + for (rpm_e, rpm_v, rpm_r) in self.installdict[(n, a)]: + try: + (e, v, r) = self.returnNewest(newpkgs[(n,a)]) + except rpm5utils.Rpm5UtilsError: + continue + else: + rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r)) + if rc <= 0: + try: + newpkgs[(n, a)].remove((e, v, r)) + except ValueError: + pass + + # Now we add the (n, None) entries back... + for na in newpkgs.keys(): + all_arches = map(lambda x: (na[1], x[0], x[1], x[2]), newpkgs[na]) + newpkgs.setdefault((na[0], None), []).extend(all_arches) + + # get rid of all the empty dict entries: + for nakey in newpkgs.keys(): + if len(newpkgs[nakey]) == 0: + del newpkgs[nakey] + + + # ok at this point our newpkgs list should be thinned, we should have only + # the newest e,v,r's and only archs we can actually use + for (n, a) in newpkgs: + if a is None: # the None archs are only for lookups + continue + + if (n, None) in self.installdict: + installarchs = [] + availarchs = [] + for (a, e, v ,r) in newpkgs[(n, None)]: + availarchs.append(a) + for (a, e, v, r) in self.installdict[(n, None)]: + installarchs.append(a) + + if len(availarchs) > 1 or len(installarchs) > 1: + self.debugprint('putting %s in complex update' % n) + complexupdate.append(n) + else: + #log(4, 'putting %s in simple update list' % name) + self.debugprint('putting %s in simple update' % n) + simpleupdate.append((n, a)) + + # we have our lists to work with now + + # simple cases + for (n, a) in simpleupdate: + # try to be as precise as possible + if n in self.exactarchlist: + if (n, a) in self.installdict: + (rpm_e, rpm_v, rpm_r) = self.returnNewest(self.installdict[(n, a)]) + if (n, a) in newpkgs: + (e, v, r) = self.returnNewest(newpkgs[(n, a)]) + rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r)) + if rc > 0: + # this is definitely an update - put it in the dict + if (n, a, rpm_e, rpm_v, rpm_r) not in updatedict: + updatedict[(n, a, rpm_e, rpm_v, rpm_r)] = [] + updatedict[(n, a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r)) + + else: + # we could only have 1 arch in our rpmdb and 1 arch of pkg + # available - so we shouldn't have to worry about the lists, here + # we just need to find the arch of the installed pkg so we can + # check it's (e, v, r) + (rpm_a, rpm_e, rpm_v, rpm_r) = self.installdict[(n, None)][0] + if (n, None) in newpkgs: + for (a, e, v, r) in newpkgs[(n, None)]: + rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r)) + if rc > 0: + # this is definitely an update - put it in the dict + if (n, rpm_a, rpm_e, rpm_v, rpm_r) not in updatedict: + updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)] = [] + updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r)) + + + # 
complex cases + + # we're multilib/biarch + # we need to check the name.arch in two different trees + # one for the multiarch itself and one for the compat arch + # ie: x86_64 and athlon(i686-i386) - we don't want to descend + # x86_64->i686 + # however, we do want to descend x86_64->noarch, sadly. + + archlists = [] + if self._is_multilib: + if self.myarch in rpm5utils.arch.multilibArches: + biarches = [self.myarch] + else: + biarches = [self.myarch, rpm5utils.arch.arches[self.myarch]] + biarches.append('noarch') + + multicompat = self._multilib_compat_arches[0] + multiarchlist = rpm5utils.arch.getArchList(multicompat) + archlists = [ set(biarches), set(multiarchlist) ] + # archlists = [ biarches, multiarchlist ] + else: + archlists = [ set(archlist) ] + # archlists = [ archlist ] + + for n in complexupdate: + for thisarchlist in archlists: + # we need to get the highest version and the archs that have it + # of the installed pkgs + tmplist = [] + for (a, e, v, r) in self.installdict[(n, None)]: + tmplist.append((n, a, e, v, r)) + + highestinstalledpkgs = self.returnHighestVerFromAllArchsByName(n, + thisarchlist, tmplist) + hipdict = self.makeNADict(highestinstalledpkgs, 0) + + + if n in self.exactarchlist: + tmplist = [] + for (a, e, v, r) in newpkgs[(n, None)]: + tmplist.append((n, a, e, v, r)) + highestavailablepkgs = self.returnHighestVerFromAllArchsByName(n, + thisarchlist, tmplist) + + hapdict = self.makeNADict(highestavailablepkgs, 0) + + for (n, a) in hipdict: + if (n, a) in hapdict: + self.debugprint('processing %s.%s' % (n, a)) + # we've got a match - get our versions and compare + (rpm_e, rpm_v, rpm_r) = hipdict[(n, a)][0] # only ever going to be first one + (e, v, r) = hapdict[(n, a)][0] # there can be only one + rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r)) + if rc > 0: + # this is definitely an update - put it in the dict + if (n, a, rpm_e, rpm_v, rpm_r) not in updatedict: + updatedict[(n, a, rpm_e, rpm_v, rpm_r)] = [] + updatedict[(n, a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r)) + else: + self.debugprint('processing %s' % n) + # this is where we have to have an arch contest if there + # is more than one arch updating with the highest ver + instarchs = [] + for (n,a) in hipdict: + instarchs.append(a) + + rpm_a = rpm5utils.arch.getBestArchFromList(instarchs, myarch=self.myarch) + if rpm_a is None: + continue + + tmplist = [] + for (a, e, v, r) in newpkgs[(n, None)]: + tmplist.append((n, a, e, v, r)) + highestavailablepkgs = self.returnHighestVerFromAllArchsByName(n, + thisarchlist, tmplist) + + hapdict = self.makeNADict(highestavailablepkgs, 0) + availarchs = [] + for (n,a) in hapdict: + availarchs.append(a) + a = rpm5utils.arch.getBestArchFromList(availarchs, myarch=self.myarch) + if a is None: + continue + + (rpm_e, rpm_v, rpm_r) = hipdict[(n, rpm_a)][0] # there can be just one + (e, v, r) = hapdict[(n, a)][0] # just one, I'm sure, I swear! 
+ rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r)) + if rc > 0: + # this is definitely an update - put it in the dict + if (n, rpm_a, rpm_e, rpm_v, rpm_r) not in updatedict: + updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)] = [] + updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r)) + + self.updatesdict = updatedict + self.makeUpdatingDict() + + def makeUpdatingDict(self): + """creates a dict of available packages -> [installed package], this + is to make it easier to look up what package will be updating what + in the rpmdb""" + self.updating_dict = {} + for old in self.updatesdict: + for new in self.updatesdict[old]: + if new not in self.updating_dict: + self.updating_dict[new] = [] + self.updating_dict[new].append(old) + + def reduceListByNameArch(self, pkglist, name=None, arch=None): + """returns a set of pkg naevr tuples reduced based on name or arch""" + returnlist = [] + + if name or arch: + for (n, a, e, v, r) in pkglist: + if name: + if name == n: + returnlist.append((n, a, e, v, r)) + continue + if arch: + if arch == a: + returnlist.append((n, a, e, v, r)) + continue + else: + returnlist = pkglist + + return returnlist + + + def getUpdatesTuples(self, name=None, arch=None): + """returns updates for packages in a list of tuples of: + (updating naevr, installed naevr)""" + returnlist = [] + for oldtup in self.updatesdict: + for newtup in self.updatesdict[oldtup]: + returnlist.append((newtup, oldtup)) + + # self.reduceListByNameArch() for double tuples + tmplist = [] + if name: + for ((n, a, e, v, r), oldtup) in returnlist: + if name != n: + tmplist.append(((n, a, e, v, r), oldtup)) + if arch: + for ((n, a, e, v, r), oldtup) in returnlist: + if arch != a: + tmplist.append(((n, a, e, v, r), oldtup)) + + for item in tmplist: + try: + returnlist.remove(item) + except ValueError: + pass + + return returnlist + + def getUpdatesList(self, name=None, arch=None): + """returns updating packages in a list of (naevr) tuples""" + returnlist = [] + + for oldtup in self.updatesdict: + for newtup in self.updatesdict[oldtup]: + returnlist.append(newtup) + + returnlist = self.reduceListByNameArch(returnlist, name, arch) + + return returnlist + + # NOTE: This returns obsoleters and obsoletees, but narrows based on + # _obsoletees_ (unlike getObsoletesList). Look at getObsoletersTuples + def getObsoletesTuples(self, newest=0, name=None, arch=None): + """returns obsoletes for packages in a list of tuples of: + (obsoleting naevr, installed naevr). You can specify name and/or + arch of the installed package to narrow the results. + You can also specify newest=1 to get the set of newest pkgs (name, arch) + sorted, that obsolete something""" + + tmplist = [] + obslist = self.obsoletes.keys() + if newest: + obslist = self._reduceListNewestByNameArch(obslist) + + for obstup in obslist: + for rpmtup in self.obsoletes[obstup]: + tmplist.append((obstup, rpmtup)) + + # self.reduceListByNameArch() for double tuples + returnlist = [] + if name or arch: + for (obstup, (n, a, e, v, r)) in tmplist: + if name: + if name == n: + returnlist.append((obstup, (n, a, e, v, r))) + continue + if arch: + if arch == a: + returnlist.append((obstup, (n, a, e, v, r))) + continue + else: + returnlist = tmplist + + return returnlist + + # NOTE: This returns obsoleters and obsoletees, but narrows based on + # _obsoleters_ (like getObsoletesList). 
+    def getObsoletersTuples(self, newest=0, name=None, arch=None):
+        """returns obsoletes for packages in a list of tuples of:
+        (obsoleting naevr, installed naevr). You can specify name and/or
+        arch of the obsoleting package to narrow the results.
+        You can also specify newest=1 to get the set of newest pkgs (name, arch)
+        sorted, that obsolete something"""
+
+        tmplist = []
+        obslist = self.obsoletes.keys()
+        if newest:
+            obslist = self._reduceListNewestByNameArch(obslist)
+
+        for obstup in obslist:
+            for rpmtup in self.obsoletes[obstup]:
+                tmplist.append((obstup, rpmtup))
+
+        # self.reduceListByNameArch() for double tuples
+        returnlist = []
+        if name or arch:
+            for ((n, a, e, v, r), insttup) in tmplist:
+                if name:
+                    if name == n:
+                        returnlist.append(((n, a, e, v, r), insttup))
+                        continue
+                if arch:
+                    if arch == a:
+                        returnlist.append(((n, a, e, v, r), insttup))
+                        continue
+        else:
+            returnlist = tmplist
+
+        return returnlist
+
+    # NOTE: This returns _obsoleters_, and narrows based on that (unlike
+    # getObsoletesTuples, but like getObsoletersTuples)
+    def getObsoletesList(self, newest=0, name=None, arch=None):
+        """returns obsoleting packages in a list of naevr tuples of just the
+        packages that obsolete something that is installed. You can specify
+        name and/or arch of the obsoleting packages to narrow the results.
+        You can also specify newest=1 to get the set of newest pkgs (name, arch)
+        sorted, that obsolete something"""
+
+        tmplist = self.obsoletes.keys()
+        if newest:
+            tmplist = self._reduceListNewestByNameArch(tmplist)
+
+        returnlist = self.reduceListByNameArch(tmplist, name, arch)
+
+        return returnlist
+
+    def getObsoletedList(self, newest=0, name=None):
+        """returns a list of pkgtuples obsoleting the package in name"""
+        returnlist = []
+        for new in self.obsoletes:
+            for obstup in self.obsoletes[new]:
+                (n, a, e, v, r) = obstup
+                if n == name:
+                    returnlist.append(new)
+                    continue
+        return returnlist
+
+
+
+    def getOthersList(self, name=None, arch=None):
+        """returns a list of naevr tuples for the packages that are neither
+        installed nor an update - this may include something that obsoletes
+        an installed package"""
+        updates = {}
+        inst = {}
+        tmplist = []
+
+        for pkgtup in self.getUpdatesList():
+            updates[pkgtup] = 1
+
+        for pkgtup in self.installed:
+            inst[pkgtup] = 1
+
+        for pkgtup in self.available:
+            if pkgtup not in updates and pkgtup not in inst:
+                tmplist.append(pkgtup)
+
+        returnlist = self.reduceListByNameArch(tmplist, name, arch)
+
+        return returnlist
+
+
+
+    def _reduceListNewestByNameArch(self, tuplelist):
+        """return list of newest packages based on name, arch matching
+        this means (in name.arch form): foo.i386 and foo.noarch are not
+        compared to each other for highest version; only foo.i386 and
+        foo.i386 will be compared"""
+        highdict = {}
+        done = False
+        for pkgtup in tuplelist:
+            (n, a, e, v, r) = pkgtup
+            if (n, a) not in highdict:
+                highdict[(n, a)] = pkgtup
+            else:
+                pkgtup2 = highdict[(n, a)]
+                done = True
+                (n2, a2, e2, v2, r2) = pkgtup2
+                rc = rpm5utils.miscutils.compareEVR((e,v,r), (e2, v2, r2))
+                if rc > 0:
+                    highdict[(n, a)] = pkgtup
+
+        if not done:
+            return tuplelist
+
+        return highdict.values()
+
+
+#    def getProblems(self):
+#        """return list of problems:
+#           - Packages that are both obsoleted and updated.
+#           - Packages that have multiple obsoletes.
+#           - Packages that _still_ have multiple updates
+#        """
+
+
diff --git a/rpm5utils/urpmgraphs/__init__.py b/rpm5utils/urpmgraphs/__init__.py
new file mode 100644
index 0000000..095491a
--- /dev/null
+++ b/rpm5utils/urpmgraphs/__init__.py
@@ -0,0 +1,66 @@
+"""
+NetworkX
+========
+
+   NetworkX (NX) is a Python package for the creation, manipulation, and
+   study of the structure, dynamics, and functions of complex networks.
+
+   https://networkx.lanl.gov/
+
+Using
+-----
+
+   Just write in Python
+
+   >>> import networkx as nx
+   >>> G=nx.Graph()
+   >>> G.add_edge(1,2)
+   >>> G.add_node("spam")
+   >>> print(G.nodes())
+   [1, 2, 'spam']
+   >>> print(G.edges())
+   [(1, 2)]
+"""
+#    Copyright (C) 2004-2010 by
+#    Aric Hagberg
+#    Dan Schult
+#    Pieter Swart
+#    All rights reserved.
+#    BSD license.
+#
+# Add platform dependent shared library path to sys.path
+#
+
+from __future__ import absolute_import
+
+import sys
+if sys.version_info[:2] < (2, 6):
+    m = "Python version 2.6 or later is required for NetworkX (%d.%d detected)."
+    raise ImportError(m % sys.version_info[:2])
+del sys
+
+# Release data
+
+# these packages work with Python >= 2.6
+from rpm5utils.urpmgraphs.exception import *
+import rpm5utils.urpmgraphs.classes
+from rpm5utils.urpmgraphs.classes import *
+import rpm5utils.urpmgraphs.convert
+from rpm5utils.urpmgraphs.convert import *
+#import urpmgraphs.relabel
+#from urpmgraphs.relabel import *
+#import urpmgraphs.generators
+#from urpmgraphs.generators import *
+#from urpmgraphs.readwrite import *
+#import urpmgraphs.readwrite
+#Need to test with SciPy, when available
+import rpm5utils.urpmgraphs.algorithms
+from rpm5utils.urpmgraphs.algorithms import *
+#import urpmgraphs.linalg
+#from urpmgraphs.linalg import *
+#from urpmgraphs.tests.test import run as test
+#import urpmgraphs.utils
+
+#import urpmgraphs.drawing
+#from urpmgraphs.drawing import *
+
diff --git a/rpm5utils/urpmgraphs/algorithms/__init__.py b/rpm5utils/urpmgraphs/algorithms/__init__.py
new file mode 100644
index 0000000..9e6d007
--- /dev/null
+++ b/rpm5utils/urpmgraphs/algorithms/__init__.py
@@ -0,0 +1,2 @@
+from rpm5utils.urpmgraphs.algorithms.components import *
+from rpm5utils.urpmgraphs.algorithms.cycles import *
diff --git a/rpm5utils/urpmgraphs/algorithms/components/__init__.py b/rpm5utils/urpmgraphs/algorithms/components/__init__.py
new file mode 100644
index 0000000..ae16a0c
--- /dev/null
+++ b/rpm5utils/urpmgraphs/algorithms/components/__init__.py
@@ -0,0 +1,2 @@
+#from urpmgraphs.algorithms.components.connected import *
+from rpm5utils.urpmgraphs.algorithms.components.strongly_connected import *
diff --git a/rpm5utils/urpmgraphs/algorithms/components/strongly_connected.py b/rpm5utils/urpmgraphs/algorithms/components/strongly_connected.py
new file mode 100644
index 0000000..c9db4b6
--- /dev/null
+++ b/rpm5utils/urpmgraphs/algorithms/components/strongly_connected.py
@@ -0,0 +1,321 @@
+# -*- coding: utf-8 -*-
+"""
+Strongly connected components.
+"""
+__authors__ = "\n".join(['Eben Kenah',
+                         'Aric Hagberg (hagberg@lanl.gov)',
+                         'Christopher Ellison'])
+#    Copyright (C) 2004-2010 by
+#    Aric Hagberg
+#    Dan Schult
+#    Pieter Swart
+#    All rights reserved.
+#    BSD license.
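+# A small usage sketch (added for illustration; the in-component node order
+# depends on traversal order, so treat the exact output as indicative only):
+#
+#   import rpm5utils as nx
+#   G = nx.DiGraph()
+#   G.add_edges_from([('a', 'b'), ('b', 'a'), ('b', 'c')])
+#   strongly_connected_components(G)  # two components, ['a', 'b'] and ['c'],
+#                                     # returned largest-first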
+ +__all__ = ['number_strongly_connected_components', + 'strongly_connected_components', + 'strongly_connected_component_subgraphs', + 'is_strongly_connected', + 'strongly_connected_components_recursive', + 'kosaraju_strongly_connected_components', + 'condensation', + ] + +import rpm5utils as nx + +def strongly_connected_components(G): + """Return nodes in strongly connected components of graph. + + Parameters + ---------- + G : NetworkX Graph + An directed graph. + + Returns + ------- + comp : list of lists + A list of nodes for each component of G. + The list is ordered from largest connected component to smallest. + + See Also + -------- + connected_components + + Notes + ----- + Uses Tarjan's algorithm with Nuutila's modifications. + Nonrecursive version of algorithm. + + References + ---------- + .. [1] Depth-first search and linear graph algorithms, R. Tarjan + SIAM Journal of Computing 1(2):146-160, (1972). + + .. [2] On finding the strongly connected components in a directed graph. + E. Nuutila and E. Soisalon-Soinen + Information Processing Letters 49(1): 9-14, (1994).. + """ + preorder={} + lowlink={} + scc_found={} + scc_queue = [] + scc_list=[] + i=0 # Preorder counter + for source in G: + if source not in scc_found: + queue=[source] + while queue: + v=queue[-1] + if v not in preorder: + i=i+1 + preorder[v]=i + done=1 + v_nbrs=G[v] + for w in v_nbrs: + if w not in preorder: + queue.append(w) + done=0 + break + if done==1: + lowlink[v]=preorder[v] + for w in v_nbrs: + if w not in scc_found: + if preorder[w]>preorder[v]: + lowlink[v]=min([lowlink[v],lowlink[w]]) + else: + lowlink[v]=min([lowlink[v],preorder[w]]) + queue.pop() + if lowlink[v]==preorder[v]: + scc_found[v]=True + scc=[v] + while scc_queue and preorder[scc_queue[-1]]>preorder[v]: + k=scc_queue.pop() + scc_found[k]=True + scc.append(k) + scc_list.append(scc) + else: + scc_queue.append(v) + scc_list.sort(key=len,reverse=True) + return scc_list + + +def kosaraju_strongly_connected_components(G,source=None): + """Return nodes in strongly connected components of graph. + + Parameters + ---------- + G : NetworkX Graph + An directed graph. + + Returns + ------- + comp : list of lists + A list of nodes for each component of G. + The list is ordered from largest connected component to smallest. + + See Also + -------- + connected_components + + Notes + ----- + Uses Kosaraju's algorithm. + """ + components=[] + G=G.reverse(copy=False) + post=list(nx.dfs_postorder_nodes(G,source=source)) + G=G.reverse(copy=False) + seen={} + while post: + r=post.pop() + if r in seen: + continue + c=nx.dfs_preorder_nodes(G,r) + new=[v for v in c if v not in seen] + seen.update([(u,True) for u in new]) + components.append(new) + components.sort(key=len,reverse=True) + return components + + +def strongly_connected_components_recursive(G): + """Return nodes in strongly connected components of graph. + + Recursive version of algorithm. + + Parameters + ---------- + G : NetworkX Graph + An directed graph. + + Returns + ------- + comp : list of lists + A list of nodes for each component of G. + The list is ordered from largest connected component to smallest. + + See Also + -------- + connected_components + + Notes + ----- + Uses Tarjan's algorithm with Nuutila's modifications. + + References + ---------- + .. [1] Depth-first search and linear graph algorithms, R. Tarjan + SIAM Journal of Computing 1(2):146-160, (1972). + + .. [2] On finding the strongly connected components in a directed graph. + E. Nuutila and E. 
Soisalon-Soinen + Information Processing Letters 49(1): 9-14, (1994).. + """ + def visit(v,cnt): + root[v]=cnt + visited[v]=cnt + cnt+=1 + stack.append(v) + for w in G[v]: + if w not in visited: visit(w,cnt) + if w not in component: + root[v]=min(root[v],root[w]) + if root[v]==visited[v]: + component[v]=root[v] + tmpc=[v] # hold nodes in this component + while stack[-1]!=v: + w=stack.pop() + component[w]=root[v] + tmpc.append(w) + stack.remove(v) + scc.append(tmpc) # add to scc list + scc=[] + visited={} + component={} + root={} + cnt=0 + stack=[] + for source in G: + if source not in visited: + visit(source,cnt) + + scc.sort(key=len,reverse=True) + return scc + + +def strongly_connected_component_subgraphs(G): + """Return strongly connected components as subgraphs. + + Parameters + ---------- + G : NetworkX Graph + A graph. + + Returns + ------- + glist : list + A list of graphs, one for each strongly connected component of G. + + See Also + -------- + connected_component_subgraphs + + Notes + ----- + The list is ordered from largest strongly connected component to smallest. + """ + cc=strongly_connected_components(G) + graph_list=[] + for c in cc: + graph_list.append(G.subgraph(c)) + return graph_list + + +def number_strongly_connected_components(G): + """Return number of strongly connected components in graph. + + Parameters + ---------- + G : NetworkX graph + A directed graph. + + Returns + ------- + n : integer + Number of strongly connected components + + See Also + -------- + connected_components + + Notes + ----- + For directed graphs only. + """ + return len(strongly_connected_components(G)) + + +def is_strongly_connected(G): + """Test directed graph for strong connectivity. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + connected : bool + True if the graph is strongly connected, False otherwise. + + See Also + -------- + strongly_connected_components + + Notes + ----- + For directed graphs only. + """ + if not G.is_directed(): + raise nx.NetworkXError("""Not allowed for undirected graph G. + See is_connected() for connectivity test.""") + + if len(G)==0: + raise nx.NetworkXPointlessConcept( + """Connectivity is undefined for the null graph.""") + + return len(strongly_connected_components(G)[0])==len(G) + + +def condensation(G): + """Returns the condensation of G. + + The condensation of G is the graph with each of the strongly connected + components contracted into a single node. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + cG : NetworkX DiGraph + The condensation of G. + + Notes + ----- + After contracting all strongly connected components to a single node, + the resulting graph is a directed acyclic graph. + + """ + scc = strongly_connected_components(G) + mapping = dict([(n,tuple(sorted(c))) for c in scc for n in c]) + cG = nx.DiGraph() + for u in mapping: + cG.add_node(mapping[u]) + for _,v,d in G.edges_iter(u, data=True): + if v not in mapping[u]: + cG.add_edge(mapping[u], mapping[v]) + return cG + diff --git a/rpm5utils/urpmgraphs/algorithms/cycles.py b/rpm5utils/urpmgraphs/algorithms/cycles.py new file mode 100644 index 0000000..1abc168 --- /dev/null +++ b/rpm5utils/urpmgraphs/algorithms/cycles.py @@ -0,0 +1,122 @@ +""" +======================== +Cycle finding algorithms +======================== + +""" +# Copyright (C) 2010 by +# Aric Hagberg +# Dan Schult +# Pieter Swart +# All rights reserved. +# BSD license. 
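The condensation() function defined above contracts each strongly connected component into a single node (a tuple of the component's sorted members), so the result is a directed acyclic graph. A short sketch of the expected behaviour, under the same import assumption as the earlier example:

    import rpm5utils.urpmgraphs as nx

    G = nx.DiGraph()
    G.add_edge(1, 2)
    G.add_edge(2, 1)           # {1, 2} collapse into the node (1, 2)
    G.add_edge(2, 3)
    cG = nx.condensation(G)
    print(sorted(cG.nodes()))  # [(1, 2), (3,)]
    print(cG.edges())          # [((1, 2), (3,))]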
+import rpm5utils as nx +from collections import defaultdict + +__all__ = ['simple_cycles'] + +__author__ = "\n".join(['Jon Olav Vik ', + 'Aric Hagberg ']) + + +def simple_cycles(G): + """Find simple cycles (elementary circuits) of a directed graph. + + An simple cycle, or elementary circuit, is a closed path where no + node appears twice, except that the first and last node are the same. + Two elementary circuits are distinct if they are not cyclic permutations + of each other. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Returns + ------- + A list of circuits, where each circuit is a list of nodes, with the first + and last node being the same. + + Example: + >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]) + >>> nx.simple_cycles(G) + [[0, 0], [0, 1, 2, 0], [0, 2, 0], [1, 2, 1], [2, 2]] + + See Also + -------- + cycle_basis (for undirected graphs) + + Notes + ----- + The implementation follows pp. 79-80 in [1]_. + + The time complexity is O((n+e)(c+1)) for n nodes, e edges and c + elementary circuits. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + http://dx.doi.org/10.1137/0204007 + + See Also + -------- + cycle_basis + """ + # Jon Olav Vik, 2010-08-09 + def _unblock(thisnode): + """Recursively unblock and remove nodes from B[thisnode].""" + if blocked[thisnode]: + blocked[thisnode] = False + while B[thisnode]: + _unblock(B[thisnode].pop()) + + def circuit(thisnode, startnode, component): + closed = False # set to True if elementary path is closed + path.append(thisnode) + blocked[thisnode] = True + for nextnode in component[thisnode]: # direct successors of thisnode + if nextnode == startnode: + result.append(path + [startnode]) + closed = True + elif not blocked[nextnode]: + if circuit(nextnode, startnode, component): + closed = True + if closed: + _unblock(thisnode) + else: + for nextnode in component[thisnode]: + if thisnode not in B[nextnode]: # TODO: use set for speedup? + B[nextnode].append(thisnode) + path.pop() # remove thisnode from path + return closed + + if not G.is_directed(): + raise nx.NetworkXError(\ + "simple_cycles() not implemented for undirected graphs.") + path = [] # stack of nodes in current path + blocked = defaultdict(bool) # vertex: blocked from search? + B = defaultdict(list) # graph portions that yield no elementary circuit + result = [] # list to accumulate the circuits found + # Johnson's algorithm requires some ordering of the nodes. + # They might not be sortable so we assign an arbitrary ordering. 
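+    # The loop below fixes each start node s in turn, restricts the graph to
+    # the nodes at or after s in this ordering, takes the strongly connected
+    # component containing the least remaining node, and enumerates every
+    # elementary circuit through that node via circuit(). The blocked/B
+    # bookkeeping is Johnson's pruning, which gives the O((n+e)(c+1)) bound
+    # quoted in the docstring.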
+ ordering=dict(zip(G,range(len(G)))) + for s in ordering: + # Build the subgraph induced by s and following nodes in the ordering + subgraph = G.subgraph(node for node in G + if ordering[node] >= ordering[s]) + # Find the strongly connected component in the subgraph + # that contains the least node according to the ordering + strongcomp = nx.strongly_connected_components(subgraph) + mincomp=min(strongcomp, + key=lambda nodes: min(ordering[n] for n in nodes)) + component = G.subgraph(mincomp) + if component: + # smallest node in the component according to the ordering + startnode = min(component,key=ordering.__getitem__) + for node in component: + blocked[node] = False + B[node][:] = [] + dummy=circuit(startnode, startnode, component) + + return result diff --git a/rpm5utils/urpmgraphs/classes/__init__.py b/rpm5utils/urpmgraphs/classes/__init__.py new file mode 100644 index 0000000..f43dc33 --- /dev/null +++ b/rpm5utils/urpmgraphs/classes/__init__.py @@ -0,0 +1,3 @@ +from rpm5utils.urpmgraphs.classes.graph import Graph +from rpm5utils.urpmgraphs.classes.digraph import DiGraph +from rpm5utils.urpmgraphs.classes.function import * diff --git a/rpm5utils/urpmgraphs/classes/digraph.py b/rpm5utils/urpmgraphs/classes/digraph.py new file mode 100644 index 0000000..a50c756 --- /dev/null +++ b/rpm5utils/urpmgraphs/classes/digraph.py @@ -0,0 +1,996 @@ +"""Base class for directed graphs.""" +# Copyright (C) 2004-2011 by +# Aric Hagberg +# Dan Schult +# Pieter Swart +# All rights reserved. +# BSD license. +from copy import deepcopy +import rpm5utils as nx +from rpm5utils.urpmgraphs.classes.graph import Graph +from rpm5utils.urpmgraphs.exception import NetworkXError +#import urpmgraphs.convert as convert +__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)', + 'Pieter Swart (swart@lanl.gov)', + 'Dan Schult(dschult@colgate.edu)']) + +class DiGraph(Graph): + """ + Base class for directed graphs. + + A DiGraph stores nodes and edges with optional data, or attributes. + + DiGraphs hold directed edges. Self loops are allowed but multiple + (parallel) edges are not. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + data : input graph + Data to initialize graph. If data=None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a NumPy matrix + or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + MultiGraph + MultiDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.DiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2,3]) + >>> G.add_nodes_from(range(100,110)) + >>> H=nx.Graph() + >>> H.add_path([0,1,2,3,4,5,6,7,8,9]) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. 
+ + Add one edge, + + >>> G.add_edge(1, 2) + + a list of edges, + + >>> G.add_edges_from([(1,2),(1,3)]) + + or a collection of edges, + + >>> G.add_edges_from(H.edges()) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. There are no errors when adding + nodes or edges that already exist. + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.DiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.node + + >>> G.add_node(1, time='5pm') + >>> G.add_nodes_from([3], time='2pm') + >>> G.node[1] + {'time': '5pm'} + >>> G.node[1]['room'] = 714 + >>> G.nodes(data=True) + [(1, {'room': 714, 'time': '5pm'}), (3, {'time': '2pm'})] + + Warning: adding a node to G.node does not add it to the graph. + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edge. + + >>> G.add_edge(1, 2, weight=4.7 ) + >>> G.add_edges_from([(3,4),(4,5)], color='red') + >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})]) + >>> G[1][2]['weight'] = 4.7 + >>> G.edge[1][2]['weight'] = 4 + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n<3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + >>> G[1] # adjacency dict keyed by neighbor to edge attributes + ... # Note: you should not change this dict manually! + {2: {'color': 'blue', 'weight': 4}} + + The fastest way to traverse all edges of a graph is via + adjacency_iter(), but the edges() method is often more convenient. + + >>> for n,nbrsdict in G.adjacency_iter(): + ... for nbr,eattr in nbrsdict.items(): + ... if 'weight' in eattr: + ... (n,nbr,eattr['weight']) + (1, 2, 4) + (2, 3, 8) + >>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ] + [(1, 2, 4), (2, 3, 8)] + + **Reporting:** + + Simple graph information is obtained using methods. + Iterator versions of many reporting methods exist for efficiency. + Methods exist for reporting nodes(), edges(), neighbors() and degree() + as well as the number of nodes and edges. + + For details on these and other miscellaneous methods, see below. + """ + def __init__(self, data=None, **attr): + """Initialize a graph with edges, name, graph attributes. + + Parameters + ---------- + data : input graph + Data to initialize graph. If data=None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a NumPy matrix + or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. + name : string, optional (default='') + An optional name for the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. 
+ + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name='my graph') + >>> e = [(1,2),(2,3),(3,4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G=nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + self.graph = {} # dictionary for graph attributes + self.node = {} # dictionary for node attributes + # We store two adjacency lists: + # the predecessors of node n are stored in the dict self.pred + # the successors of node n are stored in the dict self.succ=self.adj + self.adj = {} # empty adjacency dictionary + self.pred = {} # predecessor + self.succ = self.adj # successor + + # attempt to load graph with data + if data is not None: + convert.to_networkx_graph(data,create_using=self) + # load graph attributes (must be after convert) + self.graph.update(attr) + self.edge=self.adj + + + def add_node(self, n, attr_dict=None, **attr): + """Add a single node n and update node attributes. + + Parameters + ---------- + n : node + A node can be any hashable Python object except None. + attr_dict : dictionary, optional (default= no attributes) + Dictionary of node attributes. Key/value pairs will + update existing data associated with the node. + attr : keyword arguments, optional + Set or change attributes using key=value. + + See Also + -------- + add_nodes_from + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_node(1) + >>> G.add_node('Hello') + >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) + >>> G.add_node(K3) + >>> G.number_of_nodes() + 3 + + Use keywords set/change node attributes: + + >>> G.add_node(1,size=10) + >>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649)) + + Notes + ----- + A hashable object is one that can be used as a key in a Python + dictionary. This includes strings, numbers, tuples of strings + and numbers, etc. + + On many platforms hashable items also include mutables such as + NetworkX Graphs, though one should be careful that the hash + doesn't change on mutables. + """ + # set up attribute dict + if attr_dict is None: + attr_dict=attr + else: + try: + attr_dict.update(attr) + except AttributeError: + raise NetworkXError(\ + "The attr_dict argument must be a dictionary.") + if n not in self.succ: + self.succ[n] = {} + self.pred[n] = {} + self.node[n] = attr_dict + else: # update attr even if node already exists + self.node[n].update(attr_dict) + + + def add_nodes_from(self, nodes, **attr): + """Add multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). + OR + A container of (node, attribute dict) tuples. + Node attributes are updated using the attribute dict. + attr : keyword arguments, optional (default= no attributes) + Update attributes for all nodes in nodes. + Node attributes specified in nodes as a tuple + take precedence over attributes specified generally. + + See Also + -------- + add_node + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_nodes_from('Hello') + >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) + >>> G.add_nodes_from(K3) + >>> sorted(G.nodes(),key=str) + [0, 1, 2, 'H', 'e', 'l', 'o'] + + Use keywords to update specific node attributes for every node. + + >>> G.add_nodes_from([1,2], size=10) + >>> G.add_nodes_from([3,4], weight=0.4) + + Use (node, attrdict) tuples to update attributes for specific + nodes. 
+ + >>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})]) + >>> G.node[1]['size'] + 11 + >>> H = nx.Graph() + >>> H.add_nodes_from(G.nodes(data=True)) + >>> H.node[1]['size'] + 11 + + """ + for n in nodes: + try: + newnode=n not in self.succ + except TypeError: + nn,ndict = n + if nn not in self.succ: + self.succ[nn] = {} + self.pred[nn] = {} + newdict = attr.copy() + newdict.update(ndict) + self.node[nn] = newdict + else: + olddict = self.node[nn] + olddict.update(attr) + olddict.update(ndict) + continue + if newnode: + self.succ[n] = {} + self.pred[n] = {} + self.node[n] = attr.copy() + else: + self.node[n].update(attr) + + + def add_edge(self, u, v, attr_dict=None, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by providing + a dictionary with key/value pairs. See examples below. + + Parameters + ---------- + u,v : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + attr_dict : dictionary, optional (default= no attributes) + Dictionary of edge attributes. Key/value pairs will + update existing data associated with the edge. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + Adding an edge that already exists updates the edge data. + + NetworkX algorithms designed for weighted graphs use as + the edge weight a numerical value assigned to the keyword + 'weight'. + + Examples + -------- + The following all add the edge e=(1,2) to graph G: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = (1,2) + >>> G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container + + Associate data to edges using keywords: + + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + """ + # set up attribute dict + if attr_dict is None: + attr_dict=attr + else: + try: + attr_dict.update(attr) + except AttributeError: + raise NetworkXError(\ + "The attr_dict argument must be a dictionary.") + # add nodes + if u not in self.succ: + self.succ[u]={} + self.pred[u]={} + self.node[u] = {} + if v not in self.succ: + self.succ[v]={} + self.pred[v]={} + self.node[v] = {} + # add the edge + datadict=self.adj[u].get(v,{}) + datadict.update(attr_dict) + self.succ[u][v]=datadict + self.pred[v][u]=datadict + + + def has_successor(self, u, v): + """Return True if node u has successor v. + + This is true if graph has the edge u->v. + """ + return (u in self.succ and v in self.succ[u]) + + def has_predecessor(self, u, v): + """Return True if node u has predecessor v. + + This is true if graph has the edge u<-v. + """ + return (u in self.pred and v in self.pred[u]) + + def successors_iter(self,n): + """Return an iterator over successor nodes of n. + + neighbors_iter() and successors_iter() are the same. 
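+
+        Examples
+        --------
+        >>> G = nx.DiGraph()   # ``nx`` bound as in this module's other doctests
+        >>> G.add_edge(1, 2)
+        >>> G.add_edge(1, 3)
+        >>> sorted(G.successors_iter(1))
+        [2, 3]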
+ """ + try: + return iter(self.succ[n].keys()) + except KeyError: + raise NetworkXError("The node %s is not in the digraph."%(n,)) + + def predecessors_iter(self,n): + """Return an iterator over predecessor nodes of n.""" + try: + return iter(self.pred[n].keys()) + except KeyError: + raise NetworkXError("The node %s is not in the digraph."%(n,)) + + def successors(self, n): + """Return a list of successor nodes of n. + + neighbors() and successors() are the same function. + """ + return list(self.successors_iter(n)) + + def predecessors(self, n): + """Return a list of predecessor nodes of n.""" + return list(self.predecessors_iter(n)) + + + # digraph definitions + neighbors = successors + neighbors_iter = successors_iter + + def edges_iter(self, nbunch=None, data=False): + """Return an iterator over the edges. + + Edges are returned as tuples with optional data + in the order (node, neighbor, data). + + Parameters + ---------- + nbunch : iterable container, optional (default= all nodes) + A container of nodes. The container will be iterated + through once. + data : bool, optional (default=False) + If True, return edge attribute dict in 3-tuple (u,v,data). + + Returns + ------- + edge_iter : iterator + An iterator of (u,v) or (u,v,d) tuples of edges. + + See Also + -------- + edges : return a list of edges + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> [e for e in G.edges_iter()] + [(0, 1), (1, 2), (2, 3)] + >>> list(G.edges_iter(data=True)) # default data is {} (empty dict) + [(0, 1, {}), (1, 2, {}), (2, 3, {})] + >>> list(G.edges_iter([0,2])) + [(0, 1), (2, 3)] + >>> list(G.edges_iter(0)) + [(0, 1)] + + """ + if nbunch is None: + nodes_nbrs=iter(self.adj.items()) + else: + nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) + if data: + for n,nbrs in nodes_nbrs: + for nbr,data in nbrs.items(): + yield (n,nbr,data) + else: + for n,nbrs in nodes_nbrs: + for nbr in nbrs: + yield (n,nbr) + + # alias out_edges to edges + out_edges_iter=edges_iter + out_edges=Graph.edges + + def in_edges_iter(self, nbunch=None, data=False): + """Return an iterator over the incoming edges. + + Parameters + ---------- + nbunch : iterable container, optional (default= all nodes) + A container of nodes. The container will be iterated + through once. + data : bool, optional (default=False) + If True, return edge attribute dict in 3-tuple (u,v,data). + + Returns + ------- + in_edge_iter : iterator + An iterator of (u,v) or (u,v,d) tuples of incoming edges. + + See Also + -------- + edges_iter : return an iterator of edges + """ + if nbunch is None: + nodes_nbrs=iter(self.pred.items()) + else: + nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch)) + if data: + for n,nbrs in nodes_nbrs: + for nbr,data in nbrs.items(): + yield (nbr,n,data) + else: + for n,nbrs in nodes_nbrs: + for nbr in nbrs: + yield (nbr,n) + + def in_edges(self, nbunch=None, data=False): + """Return a list of the incoming edges. + + See Also + -------- + edges : return a list of edges + """ + return list(self.in_edges_iter(nbunch, data)) + + def degree_iter(self, nbunch=None, weighted=False): + """Return an iterator for (node, degree). + + The node degree is the number of edges adjacent to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. 
+ weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd_iter : an iterator + The iterator returns two-tuples of (node, degree). + + See Also + -------- + degree, in_degree, out_degree, in_degree_iter, out_degree_iter + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph + >>> G.add_path([0,1,2,3]) + >>> list(G.degree_iter(0)) # node 0 with degree 1 + [(0, 1)] + >>> list(G.degree_iter([0,1])) + [(0, 1), (1, 2)] + + """ + if nbunch is None: + nodes_nbrs=zip(iter(self.succ.items()),iter(self.pred.items())) + else: + nodes_nbrs=zip( + ((n,self.succ[n]) for n in self.nbunch_iter(nbunch)), + ((n,self.pred[n]) for n in self.nbunch_iter(nbunch))) + + if weighted: + # edge weighted graph - degree is sum of edge weights + for (n,succ),(n2,pred) in nodes_nbrs: + yield (n, + sum((succ[nbr].get('weight',1) for nbr in succ))+ + sum((pred[nbr].get('weight',1) for nbr in pred))) + else: + for (n,succ),(n2,pred) in nodes_nbrs: + yield (n,len(succ)+len(pred)) + + + def in_degree_iter(self, nbunch=None, weighted=False): + """Return an iterator for (node, in-degree). + + The node in-degree is the number of edges pointing in to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd_iter : an iterator + The iterator returns two-tuples of (node, in-degree). + + See Also + -------- + degree, in_degree, out_degree, out_degree_iter + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_path([0,1,2,3]) + >>> list(G.in_degree_iter(0)) # node 0 with degree 0 + [(0, 0)] + >>> list(G.in_degree_iter([0,1])) + [(0, 0), (1, 1)] + + """ + if nbunch is None: + nodes_nbrs=iter(self.pred.items()) + else: + nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch)) + + if weighted: + # edge weighted graph - degree is sum of edge weights + for n,nbrs in nodes_nbrs: + yield (n, sum(data.get('weight',1) for data in nbrs.values())) + else: + for n,nbrs in nodes_nbrs: + yield (n,len(nbrs)) + + + def out_degree_iter(self, nbunch=None, weighted=False): + """Return an iterator for (node, out-degree). + + The node out-degree is the number of edges pointing out of the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd_iter : an iterator + The iterator returns two-tuples of (node, out-degree). + + See Also + -------- + degree, in_degree, out_degree, in_degree_iter + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_path([0,1,2,3]) + >>> list(G.out_degree_iter(0)) # node 0 with degree 1 + [(0, 1)] + >>> list(G.out_degree_iter([0,1])) + [(0, 1), (1, 1)] + + """ + if nbunch is None: + nodes_nbrs=iter(self.succ.items()) + else: + nodes_nbrs=((n,self.succ[n]) for n in self.nbunch_iter(nbunch)) + + if weighted: + # edge weighted graph - degree is sum of edge weights + for n,nbrs in nodes_nbrs: + yield (n, sum(data.get('weight',1) for data in nbrs.values())) + else: + for n,nbrs in nodes_nbrs: + yield (n,len(nbrs)) + + + def in_degree(self, nbunch=None, weighted=False): + """Return the in-degree of a node or nodes. 
+ + The node in-degree is the number of edges pointing in to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd : dictionary, or number + A dictionary with nodes as keys and in-degree as values or + a number if a single node is specified. + + See Also + -------- + degree, out_degree, in_degree_iter + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph + >>> G.add_path([0,1,2,3]) + >>> G.in_degree(0) + 0 + >>> G.in_degree([0,1]) + {0: 0, 1: 1} + >>> list(G.in_degree([0,1]).values()) + [0, 1] + """ + if nbunch in self: # return a single node + return next(self.in_degree_iter(nbunch,weighted=weighted))[1] + else: # return a dict + return dict(self.in_degree_iter(nbunch,weighted=weighted)) + + def out_degree(self, nbunch=None, weighted=False): + """Return the out-degree of a node or nodes. + + The node out-degree is the number of edges pointing out of the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd : dictionary, or number + A dictionary with nodes as keys and out-degree as values or + a number if a single node is specified. + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph + >>> G.add_path([0,1,2,3]) + >>> G.out_degree(0) + 1 + >>> G.out_degree([0,1]) + {0: 1, 1: 1} + >>> list(G.out_degree([0,1]).values()) + [1, 1] + + + """ + if nbunch in self: # return a single node + return next(self.out_degree_iter(nbunch,weighted=weighted))[1] + else: # return a dict + return dict(self.out_degree_iter(nbunch,weighted=weighted)) + + def clear(self): + """Remove all nodes and edges from the graph. + + This also removes the name, and all graph, node, and edge attributes. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.clear() + >>> G.nodes() + [] + >>> G.edges() + [] + + """ + self.succ.clear() + self.pred.clear() + self.node.clear() + self.graph.clear() + + + def is_multigraph(self): + """Return True if graph is a multigraph, False otherwise.""" + return False + + + def is_directed(self): + """Return True if graph is directed, False otherwise.""" + return True + + def to_directed(self): + """Return a directed copy of the graph. + + Returns + ------- + G : DiGraph + A deepcopy of the graph. + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=DiGraph(G) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, http://docs.python.org/library/copy.html. + + Examples + -------- + >>> G = nx.Graph() # or MultiGraph, etc + >>> G.add_path([0,1]) + >>> H = G.to_directed() + >>> H.edges() + [(0, 1), (1, 0)] + + If already directed, return a (deep) copy + + >>> G = nx.DiGraph() # or MultiDiGraph, etc + >>> G.add_path([0,1]) + >>> H = G.to_directed() + >>> H.edges() + [(0, 1)] + """ + return deepcopy(self) + + def to_undirected(self, reciprocal=False): + """Return an undirected representation of the digraph. 
+ + Parameters + ---------- + reciprocal : bool (optional) + If True only keep edges that appear in both directions + in the original digraph. + + Returns + ------- + G : Graph + An undirected graph with the same name and nodes and + with edge (u,v,data) if either (u,v,data) or (v,u,data) + is in the digraph. If both edges exist in digraph and + their edge data is different, only one edge is created + with an arbitrary choice of which edge data to use. + You must check and correct for this manually if desired. + + Notes + ----- + If edges in both directions (u,v) and (v,u) exist in the + graph, attributes for the new undirected edge will be a combination of + the attributes of the directed edges. The edge data is updated + in the (arbitrary) order that the edges are encountered. For + more customized control of the edge attributes use add_edge(). + + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar G=DiGraph(D) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, http://docs.python.org/library/copy.html. + """ + H=Graph() + H.name=self.name + H.add_nodes_from(self) + if reciprocal is True: + H.add_edges_from( (u,v,deepcopy(d)) + for u,nbrs in self.adjacency_iter() + for v,d in nbrs.items() + if v in self.pred[u]) + else: + H.add_edges_from( (u,v,deepcopy(d)) + for u,nbrs in self.adjacency_iter() + for v,d in nbrs.items() ) + H.graph=deepcopy(self.graph) + H.node=deepcopy(self.node) + return H + + + def reverse(self, copy=True): + """Return the reverse of the graph. + + The reverse is a graph with the same nodes and edges + but with the directions of the edges reversed. + + Parameters + ---------- + copy : bool optional (default=True) + If True, return a new DiGraph holding the reversed edges. + If False, reverse the reverse graph is created using + the original graph (this changes the original graph). + """ + if copy: + H = self.__class__(name="Reverse of (%s)"%self.name) + H.pred=self.succ.copy() + H.adj=self.pred.copy() + H.succ=H.adj + H.graph=self.graph.copy() + H.node=self.node.copy() + else: + self.pred,self.succ=self.succ,self.pred + self.adj=self.succ + H=self + return H + + + def subgraph(self, nbunch): + """Return the subgraph induced on nodes in nbunch. + + The induced subgraph of the graph contains the nodes in nbunch + and the edges between those nodes. + + Parameters + ---------- + nbunch : list, iterable + A container of nodes which will be iterated through once. + + Returns + ------- + G : Graph + A subgraph of the graph with the same edge attributes. + + Notes + ----- + The graph, edge or node attributes just point to the original graph. + So changes to the node or edge structure will not be reflected in + the original graph while changes to the attributes will. 
+ + To create a subgraph with its own copy of the edge/node attributes use: + nx.Graph(G.subgraph(nbunch)) + + If edge attributes are containers, a deep copy can be obtained using: + G.subgraph(nbunch).copy() + + For an inplace reduction of a graph to a subgraph you can remove nodes: + G.remove_nodes_from([ n in G if n not in set(nbunch)]) + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> H = G.subgraph([0,1,2]) + >>> H.edges() + [(0, 1), (1, 2)] + """ + bunch = self.nbunch_iter(nbunch) + # create new graph and copy subgraph into it + H = self.__class__() + # namespace shortcuts for speed + H_succ=H.succ + H_pred=H.pred + self_succ=self.succ + # add nodes + for n in bunch: + H_succ[n]={} + H_pred[n]={} + # add edges + for u in H_succ: + Hnbrs=H_succ[u] + for v,datadict in self_succ[u].items(): + if v in H_succ: + # add both representations of edge: u-v and v-u + Hnbrs[v]=datadict + H_pred[v][u]=datadict + # copy node and attribute dictionaries + for n in H: + H.node[n]=self.node[n] + H.graph=self.graph + return H diff --git a/rpm5utils/urpmgraphs/classes/function.py b/rpm5utils/urpmgraphs/classes/function.py new file mode 100644 index 0000000..296653a --- /dev/null +++ b/rpm5utils/urpmgraphs/classes/function.py @@ -0,0 +1,375 @@ +""" +Functional interface to graph methods and assorted utilities. + +""" +__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)', + 'Pieter Swart (swart@lanl.gov)', + 'Dan Schult(dschult@colgate.edu)']) +# Copyright (C) 2004-2010 by +# Aric Hagberg +# Dan Schult +# Pieter Swart +# All rights reserved. +# BSD license. +# +import rpm5utils as nx + +# functional style helpers + + +__all__ = ['nodes', 'edges', 'degree', 'degree_histogram', 'neighbors', + 'number_of_nodes', 'number_of_edges', 'density', + 'nodes_iter', 'edges_iter', 'is_directed','info', + 'freeze','is_frozen','subgraph','create_empty_copy', + 'set_node_attributes','get_node_attributes', + 'set_edge_attributes','get_edge_attributes'] + +def nodes(G): + """Return a copy of the graph nodes in a list.""" + return G.nodes() + +def nodes_iter(G): + """Return an iterator over the graph nodes.""" + return G.nodes_iter() + +def edges(G,nbunch=None): + """Return list of edges adjacent to nodes in nbunch. + + Return all edges if nbunch is unspecified or nbunch=None. + + For digraphs, edges=out_edges + """ + return G.edges(nbunch) + +def edges_iter(G,nbunch=None): + """Return iterator over edges adjacent to nodes in nbunch. + + Return all edges if nbunch is unspecified or nbunch=None. + + For digraphs, edges=out_edges + """ + return G.edges_iter(nbunch) + +def degree(G,nbunch=None,weighted=False): + """Return degree of single node or of nbunch of nodes. + If nbunch is ommitted, then return degrees of *all* nodes. + """ + return G.degree(nbunch,weighted=weighted) + +def neighbors(G,n): + """Return a list of nodes connected to node n. """ + return G.neighbors(n) + +def number_of_nodes(G): + """Return the number of nodes in the graph.""" + return G.number_of_nodes() + +def number_of_edges(G): + """Return the number of edges in the graph. """ + return G.number_of_edges() + +def density(G): + r"""Return the density of a graph. + + The density for undirected graphs is + + .. math:: + + d = \frac{2m}{n(n-1)}, + + and for directed graphs is + + .. math:: + + d = \frac{m}{n(n-1)}, + + where `n` is the number of nodes and `m` is the number of edges in `G`. 
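+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> G.add_path([1, 2, 3, 4])   # path graph: n=4 nodes, m=3 edges
+    >>> nx.density(G)              # 2*3 / (4*3)
+    0.5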
+ + Notes + ----- + The density is 0 for an graph without edges and 1.0 for a complete graph. + + The density of multigraphs can be higher than 1. + + """ + n=number_of_nodes(G) + m=number_of_edges(G) + if m==0: # includes cases n==0 and n==1 + d=0.0 + else: + if G.is_directed(): + d=m/float(n*(n-1)) + else: + d= m*2.0/float(n*(n-1)) + return d + +def degree_histogram(G): + """Return a list of the frequency of each degree value. + + Parameters + ---------- + G : Networkx graph + A graph + + Returns + ------- + hist : list + A list of frequencies of degrees. + The degree values are the index in the list. + + Notes + ----- + Note: the bins are width one, hence len(list) can be large + (Order(number_of_edges)) + """ + degseq=list(G.degree().values()) + dmax=max(degseq)+1 + freq= [ 0 for d in range(dmax) ] + for d in degseq: + freq[d] += 1 + return freq + +def is_directed(G): + """ Return True if graph is directed.""" + return G.is_directed() + + +def freeze(G): + """Modify graph to prevent addition of nodes or edges. + + Parameters + ----------- + G : graph + A NetworkX graph + + Examples + -------- + >>> G=nx.Graph() + >>> G.add_path([0,1,2,3]) + >>> G=nx.freeze(G) + >>> try: + ... G.add_edge(4,5) + ... except nx.NetworkXError as e: + ... print(str(e)) + Frozen graph can't be modified + + Notes + ----- + This does not prevent modification of edge data. + + To "unfreeze" a graph you must make a copy. + + See Also + -------- + is_frozen + + """ + def frozen(*args): + raise nx.NetworkXError("Frozen graph can't be modified") + G.add_node=frozen + G.add_nodes_from=frozen + G.remove_node=frozen + G.remove_nodes_from=frozen + G.add_edge=frozen + G.add_edges_from=frozen + G.remove_edge=frozen + G.remove_edges_from=frozen + G.clear=frozen + G.frozen=True + return G + +def is_frozen(G): + """Return True if graph is frozen. + + Parameters + ----------- + G : graph + A NetworkX graph + + See Also + -------- + freeze + """ + try: + return G.frozen + except AttributeError: + return False + +def subgraph(G, nbunch): + """Return the subgraph induced on nodes in nbunch. + + Parameters + ---------- + G : graph + A NetworkX graph + + nbunch : list, iterable + A container of nodes that will be iterated through once (thus + it should be an iterator or be iterable). Each element of the + container should be a valid node type: any hashable type except + None. If nbunch is None, return all edges data in the graph. + Nodes in nbunch that are not in the graph will be (quietly) + ignored. + + Notes + ----- + subgraph(G) calls G.subgraph() + + """ + return G.subgraph(nbunch) + +def create_empty_copy(G,with_nodes=True): + """Return a copy of the graph G with all of the edges removed. + + Parameters + ---------- + G : graph + A NetworkX graph + + with_nodes : bool (default=True) + Include nodes. + + Notes + ----- + Graph, node, and edge data is not propagated to the new graph. + """ + H=G.__class__() + if with_nodes: + H.add_nodes_from(G) + return H + + +def info(G, n=None): + """Print short summary of information for the graph G or the node n. 
+ + Parameters + ---------- + G : Networkx graph + A graph + n : node (any hashable) + A node in the graph G + """ + info='' # append this all to a string + if n is None: + info+="Name: %s\n"%G.name + type_name = [type(G).__name__] + info+="Type: %s\n"%",".join(type_name) + info+="Number of nodes: %d\n"%G.number_of_nodes() + info+="Number of edges: %d\n"%G.number_of_edges() + nnodes=G.number_of_nodes() + if len(G) > 0: + if G.is_directed(): + info+="Average in degree: %8.4f\n"%\ + (sum(G.in_degree().values())/float(nnodes)) + info+="Average out degree: %8.4f"%\ + (sum(G.out_degree().values())/float(nnodes)) + else: + s=sum(G.degree().values()) + info+="Average degree: %8.4f"%\ + (float(s)/float(nnodes)) + + else: + if n not in G: + raise nx.NetworkXError("node %s not in graph"%(n,)) + info+="Node % s has the following properties:\n"%n + info+="Degree: %d\n"%G.degree(n) + info+="Neighbors: " + info+=' '.join(str(nbr) for nbr in G.neighbors(n)) + return info + +def set_node_attributes(G,name,attributes): + """Set node attributes from dictionary of nodes and values + + Parameters + ---------- + G : NetworkX Graph + + name : string + Attribute name + + attributes: dict + Dictionary of attributes keyed by node. + + Examples + -------- + >>> G=nx.path_graph(3) + >>> bb=nx.betweenness_centrality(G) + >>> nx.set_node_attributes(G,'betweenness',bb) + >>> G.node[1]['betweenness'] + 1.0 + """ + for node,value in attributes.items(): + G.node[node][name]=value + +def get_node_attributes(G,name): + """Get node attributes from graph + + Parameters + ---------- + G : NetworkX Graph + + name : string + Attribute name + + Returns + ------- + Dictionary of attributes keyed by node. + + Examples + -------- + >>> G=nx.Graph() + >>> G.add_nodes_from([1,2,3],color='red') + >>> color=nx.get_node_attributes(G,'color') + >>> color[1] + 'red' + """ + return dict( (n,d[name]) for n,d in G.node.items() if name in d) + + +def set_edge_attributes(G,name,attributes): + """Set edge attributes from dictionary of edge tuples and values + + Parameters + ---------- + G : NetworkX Graph + + name : string + Attribute name + + attributes: dict + Dictionary of attributes keyed by edge (tuple). + + Examples + -------- + >>> G=nx.path_graph(3) + >>> bb=nx.edge_betweenness_centrality(G) + >>> nx.set_edge_attributes(G,'betweenness',bb) + >>> G[1][2]['betweenness'] + 4.0 + """ + for (u,v),value in attributes.items(): + G[u][v][name]=value + +def get_edge_attributes(G,name): + """Get edge attributes from graph + + Parameters + ---------- + G : NetworkX Graph + + name : string + Attribute name + + Returns + ------- + Dictionary of attributes keyed by node. + + Examples + -------- + >>> G=nx.Graph() + >>> G.add_path([1,2,3],color='red') + >>> color=nx.get_edge_attributes(G,'color') + >>> color[(1,2)] + 'red' + """ + return dict( ((u,v),d[name]) for u,v,d in G.edges(data=True) if name in d) diff --git a/rpm5utils/urpmgraphs/classes/graph.py b/rpm5utils/urpmgraphs/classes/graph.py new file mode 100644 index 0000000..2e0a2d8 --- /dev/null +++ b/rpm5utils/urpmgraphs/classes/graph.py @@ -0,0 +1,1804 @@ +"""Base class for undirected graphs. + +The Graph class allows any hashable object as a node +and can associate key/value attribute pairs with each undirected edge. + +Self-loops are allowed but multiple edges are not (see MultiGraph). + +For directed graphs see DiGraph and MultiDiGraph. +""" +# Copyright (C) 2004-2011 by +# Aric Hagberg +# Dan Schult +# Pieter Swart +# All rights reserved. +# BSD license. 
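The functional helpers defined above (freeze, the attribute getters/setters, density and friends) operate on any of the vendored graph classes. A brief sketch, again assuming only that rpm5utils.urpmgraphs is importable:

    import rpm5utils.urpmgraphs as nx

    G = nx.Graph()
    G.add_edge(1, 2)
    G.add_edge(2, 3)
    nx.set_node_attributes(G, 'label', {1: 'a', 2: 'b', 3: 'c'})
    print(nx.get_node_attributes(G, 'label')[2])   # 'b'
    G = nx.freeze(G)   # further add_edge/add_node calls now raise NetworkXError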
+from copy import deepcopy +import rpm5utils as nx +#from urpmgraphs.exception import NetworkXError +#import urpmgraphs.convert as convert + +__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)', + 'Pieter Swart (swart@lanl.gov)', + 'Dan Schult(dschult@colgate.edu)']) + +class Graph(object): + """ + Base class for undirected graphs. + + A Graph stores nodes and edges with optional data, or attributes. + + Graphs hold undirected edges. Self loops are allowed but multiple + (parallel) edges are not. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + data : input graph + Data to initialize graph. If data=None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a NumPy matrix + or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + DiGraph + MultiGraph + MultiDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.Graph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2,3]) + >>> G.add_nodes_from(range(100,110)) + >>> H=nx.Graph() + >>> H.add_path([0,1,2,3,4,5,6,7,8,9]) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> G.add_edge(1, 2) + + a list of edges, + + >>> G.add_edges_from([(1,2),(1,3)]) + + or a collection of edges, + + >>> G.add_edges_from(H.edges()) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. There are no errors when adding + nodes or edges that already exist. + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.Graph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.node + + >>> G.add_node(1, time='5pm') + >>> G.add_nodes_from([3], time='2pm') + >>> G.node[1] + {'time': '5pm'} + >>> G.node[1]['room'] = 714 + >>> G.nodes(data=True) + [(1, {'room': 714, 'time': '5pm'}), (3, {'time': '2pm'})] + + Warning: adding a node to G.node does not add it to the graph. + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edge. + + >>> G.add_edge(1, 2, weight=4.7 ) + >>> G.add_edges_from([(3,4),(4,5)], color='red') + >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})]) + >>> G[1][2]['weight'] = 4.7 + >>> G.edge[1][2]['weight'] = 4 + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. 
+ + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n<3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + >>> G[1] # adjacency dict keyed by neighbor to edge attributes + ... # Note: you should not change this dict manually! + {2: {'color': 'blue', 'weight': 4}} + + The fastest way to traverse all edges of a graph is via + adjacency_iter(), but the edges() method is often more convenient. + + >>> for n,nbrsdict in G.adjacency_iter(): + ... for nbr,eattr in nbrsdict.items(): + ... if 'weight' in eattr: + ... (n,nbr,eattr['weight']) + (1, 2, 4) + (2, 1, 4) + (2, 3, 8) + (3, 2, 8) + >>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ] + [(1, 2, 4), (2, 3, 8)] + + **Reporting:** + + Simple graph information is obtained using methods. + Iterator versions of many reporting methods exist for efficiency. + Methods exist for reporting nodes(), edges(), neighbors() and degree() + as well as the number of nodes and edges. + + For details on these and other miscellaneous methods, see below. + """ + def __init__(self, data=None, **attr): + """Initialize a graph with edges, name, graph attributes. + + Parameters + ---------- + data : input graph + Data to initialize graph. If data=None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a NumPy matrix + or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. + name : string, optional (default='') + An optional name for the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name='my graph') + >>> e = [(1,2),(2,3),(3,4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G=nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + self.graph = {} # dictionary for graph attributes + self.node = {} # empty node dict (created before convert) + self.adj = {} # empty adjacency dict + # attempt to load graph with data + if data is not None: + convert.to_networkx_graph(data,create_using=self) + # load graph attributes (must be after convert) + self.graph.update(attr) + self.edge = self.adj + + @property + def name(self): + return self.graph.get('name','') + @name.setter + def name(self, s): + self.graph['name']=s + + def __str__(self): + """Return the graph name. + + Returns + ------- + name : string + The name of the graph. + + Examples + -------- + >>> G = nx.Graph(name='foo') + >>> str(G) + 'foo' + """ + return self.name + + def __iter__(self): + """Iterate over the nodes. Use the expression 'for n in G'. + + Returns + ------- + niter : iterator + An iterator over all nodes in the graph. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + """ + return iter(self.adj.keys()) + + def __contains__(self,n): + """Return True if n is a node, False otherwise. Use the expression + 'n in G'. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> 1 in G + True + """ + try: + return n in self.adj + except TypeError: + return False + + def __len__(self): + """Return the number of nodes. Use the expression 'len(G)'. 
+ + Returns + ------- + nnodes : int + The number of nodes in the graph. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> len(G) + 4 + + """ + return len(self.adj) + + def __getitem__(self, n): + """Return a dict of neighbors of node n. Use the expression 'G[n]'. + + Parameters + ---------- + n : node + A node in the graph. + + Returns + ------- + adj_dict : dictionary + The adjacency dictionary for nodes connected to n. + + Notes + ----- + G[n] is similar to G.neighbors(n) but the internal data dictionary + is returned instead of a list. + + Assigning G[n] will corrupt the internal graph data structure. + Use G[n] for reading data only. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G[0] + {1: {}} + """ + return self.adj[n] + + + def add_node(self, n, attr_dict=None, **attr): + """Add a single node n and update node attributes. + + Parameters + ---------- + n : node + A node can be any hashable Python object except None. + attr_dict : dictionary, optional (default= no attributes) + Dictionary of node attributes. Key/value pairs will + update existing data associated with the node. + attr : keyword arguments, optional + Set or change attributes using key=value. + + See Also + -------- + add_nodes_from + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_node(1) + >>> G.add_node('Hello') + >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) + >>> G.add_node(K3) + >>> G.number_of_nodes() + 3 + + Use keywords set/change node attributes: + + >>> G.add_node(1,size=10) + >>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649)) + + Notes + ----- + A hashable object is one that can be used as a key in a Python + dictionary. This includes strings, numbers, tuples of strings + and numbers, etc. + + On many platforms hashable items also include mutables such as + NetworkX Graphs, though one should be careful that the hash + doesn't change on mutables. + """ + # set up attribute dict + if attr_dict is None: + attr_dict=attr + else: + try: + attr_dict.update(attr) + except AttributeError: + raise NetworkXError(\ + "The attr_dict argument must be a dictionary.") + if n not in self.adj: + self.adj[n] = {} + self.node[n] = attr_dict + else: # update attr even if node already exists + self.node[n].update(attr_dict) + + + def add_nodes_from(self, nodes, **attr): + """Add multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). + OR + A container of (node, attribute dict) tuples. + Node attributes are updated using the attribute dict. + attr : keyword arguments, optional (default= no attributes) + Update attributes for all nodes in nodes. + Node attributes specified in nodes as a tuple + take precedence over attributes specified generally. + + See Also + -------- + add_node + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_nodes_from('Hello') + >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) + >>> G.add_nodes_from(K3) + >>> sorted(G.nodes(),key=str) + [0, 1, 2, 'H', 'e', 'l', 'o'] + + Use keywords to update specific node attributes for every node. + + >>> G.add_nodes_from([1,2], size=10) + >>> G.add_nodes_from([3,4], weight=0.4) + + Use (node, attrdict) tuples to update attributes for specific + nodes. 
+ + >>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})]) + >>> G.node[1]['size'] + 11 + >>> H = nx.Graph() + >>> H.add_nodes_from(G.nodes(data=True)) + >>> H.node[1]['size'] + 11 + + """ + for n in nodes: + try: + newnode=n not in self.adj + except TypeError: + nn,ndict = n + if nn not in self.adj: + self.adj[nn] = {} + newdict = attr.copy() + newdict.update(ndict) + self.node[nn] = newdict + else: + olddict = self.node[nn] + olddict.update(attr) + olddict.update(ndict) + continue + if newnode: + self.adj[n] = {} + self.node[n] = attr.copy() + else: + self.node[n].update(attr) + + def remove_node(self,n): + """Remove node n. + + Removes the node n and all adjacent edges. + Attempting to remove a non-existent node will raise an exception. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------- + NetworkXError + If n is not in the graph. + + See Also + -------- + remove_nodes_from + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2]) + >>> G.edges() + [(0, 1), (1, 2)] + >>> G.remove_node(1) + >>> G.edges() + [] + + """ + adj = self.adj + try: + nbrs = list(adj[n].keys()) # keys handles self-loops (allow mutation later) + del self.node[n] + except KeyError: # NetworkXError if n not in self + raise NetworkXError("The node %s is not in the graph."%(n,)) + for u in nbrs: + del adj[u][n] # remove all edges n-u in graph + del adj[n] # now remove node + + + def remove_nodes_from(self, nodes): + """Remove multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). If a node + in the container is not in the graph it is silently + ignored. + + See Also + -------- + remove_node + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2]) + >>> e = G.nodes() + >>> e + [0, 1, 2] + >>> G.remove_nodes_from(e) + >>> G.nodes() + [] + + """ + adj = self.adj + for n in nodes: + try: + del self.node[n] + for u in list(adj[n].keys()): # keys() handles self-loops + del adj[u][n] #(allows mutation of dict in loop) + del adj[n] + except KeyError: + pass + + + def nodes_iter(self, data=False): + """Return an iterator over the nodes. + + Parameters + ---------- + data : boolean, optional (default=False) + If False the iterator returns nodes. If True + return a two-tuple of node and node data dictionary + + Returns + ------- + niter : iterator + An iterator over nodes. If data=True the iterator gives + two-tuples containing (node, node data, dictionary) + + Notes + ----- + If the node data is not required it is simpler and equivalent + to use the expression 'for n in G'. + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2]) + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2]) + + >>> [d for n,d in G.nodes_iter(data=True)] + [{}, {}, {}] + """ + if data: + return iter(self.node.items()) + return iter(self.adj.keys()) + + def nodes(self, data=False): + """Return a list of the nodes in the graph. + + Parameters + ---------- + data : boolean, optional (default=False) + If False return a list of nodes. If True return a + two-tuple of node and node data dictionary + + Returns + ------- + nlist : list + A list of nodes. If data=True a list of two-tuples containing + (node, node data dictionary). 
+ + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2]) + >>> G.nodes() + [0, 1, 2] + >>> G.add_node(1, time='5pm') + >>> G.nodes(data=True) + [(0, {}), (1, {'time': '5pm'}), (2, {})] + """ + return list(self.nodes_iter(data=data)) + + def number_of_nodes(self): + """Return the number of nodes in the graph. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + order, __len__ which are identical + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2]) + >>> len(G) + 3 + """ + return len(self.adj) + + def order(self): + """Return the number of nodes in the graph. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + number_of_nodes, __len__ which are identical + + """ + return len(self.adj) + + def has_node(self, n): + """Return True if the graph contains the node n. + + Parameters + ---------- + n : node + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2]) + >>> G.has_node(0) + True + + It is more readable and simpler to use + + >>> 0 in G + True + + """ + try: + return n in self.adj + except TypeError: + return False + + def add_edge(self, u, v, attr_dict=None, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by providing + a dictionary with key/value pairs. See examples below. + + Parameters + ---------- + u,v : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + attr_dict : dictionary, optional (default= no attributes) + Dictionary of edge attributes. Key/value pairs will + update existing data associated with the edge. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + Adding an edge that already exists updates the edge data. + + NetworkX algorithms designed for weighted graphs use as + the edge weight a numerical value assigned to the keyword + 'weight'. + + Examples + -------- + The following all add the edge e=(1,2) to graph G: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = (1,2) + >>> G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container + + Associate data to edges using keywords: + + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + """ + # set up attribute dictionary + if attr_dict is None: + attr_dict=attr + else: + try: + attr_dict.update(attr) + except AttributeError: + raise NetworkXError(\ + "The attr_dict argument must be a dictionary.") + # add nodes + if u not in self.adj: + self.adj[u] = {} + self.node[u] = {} + if v not in self.adj: + self.adj[v] = {} + self.node[v] = {} + # add the edge + datadict=self.adj[u].get(v,{}) + datadict.update(attr_dict) + self.adj[u][v] = datadict + self.adj[v][u] = datadict + + + def add_edges_from(self, ebunch, attr_dict=None, **attr): + """Add all the edges in ebunch. + + Parameters + ---------- + ebunch : container of edges + Each edge given in the container will be added to the + graph. 
The edges must be given as as 2-tuples (u,v) or + 3-tuples (u,v,d) where d is a dictionary containing edge + data. + attr_dict : dictionary, optional (default= no attributes) + Dictionary of edge attributes. Key/value pairs will + update existing data associated with each edge. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + + See Also + -------- + add_edge : add a single edge + add_weighted_edges_from : convenient way to add weighted edges + + Notes + ----- + Adding the same edge twice has no effect but any edge data + will be updated when each duplicate edge is added. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples + >>> e = zip(range(0,3),range(1,4)) + >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 + + Associate data to edges + + >>> G.add_edges_from([(1,2),(2,3)], weight=3) + >>> G.add_edges_from([(3,4),(1,4)], label='WN2898') + """ + # set up attribute dict + if attr_dict is None: + attr_dict=attr + else: + try: + attr_dict.update(attr) + except AttributeError: + raise NetworkXError(\ + "The attr_dict argument must be a dictionary.") + # process ebunch + for e in ebunch: + ne=len(e) + if ne==3: + u,v,dd = e + elif ne==2: + u,v = e + dd = {} + else: + raise NetworkXError(\ + "Edge tuple %s must be a 2-tuple or 3-tuple."%(e,)) + if u not in self.adj: + self.adj[u] = {} + self.node[u] = {} + if v not in self.adj: + self.adj[v] = {} + self.node[v] = {} + datadict=self.adj[u].get(v,{}) + datadict.update(attr_dict) + datadict.update(dd) + self.adj[u][v] = datadict + self.adj[v][u] = datadict + + + def add_weighted_edges_from(self, ebunch, **attr): + """Add all the edges in ebunch as weighted edges with specified + weights. + + Parameters + ---------- + ebunch : container of edges + Each edge given in the list or container will be added + to the graph. The edges must be given as 3-tuples (u,v,w) + where w is a number. + attr : keyword arguments, optional (default= no attributes) + Edge attributes to add/update for all edges. + + See Also + -------- + add_edge : add a single edge + add_edges_from : add multiple edges + + Notes + ----- + Adding the same edge twice has no effect but any edge data + will be updated when each duplicate edge is added. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_weighted_edges_from([(0,1,3.0),(1,2,7.5)]) + """ + self.add_edges_from(((u,v,{'weight':d}) for u,v,d in ebunch),**attr) + + def remove_edge(self, u, v): + """Remove the edge between u and v. + + Parameters + ---------- + u,v: nodes + Remove the edge between nodes u and v. + + Raises + ------ + NetworkXError + If there is not an edge between u and v. + + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.remove_edge(0,1) + >>> e = (1,2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + >>> e = (2,3,{'weight':7}) # an edge with attribute data + >>> G.remove_edge(*e[:2]) # select first part of edge tuple + """ + try: + del self.adj[u][v] + if u != v: # self-loop needs only one entry removed + del self.adj[v][u] + except KeyError: + raise NetworkXError("The edge %s-%s is not in the graph"%(u,v)) + + + + def remove_edges_from(self, ebunch): + """Remove all edges specified in ebunch. 
+ + Parameters + ---------- + ebunch: list or container of edge tuples + Each edge given in the list or container will be removed + from the graph. The edges can be: + + - 2-tuples (u,v) edge between u and v. + - 3-tuples (u,v,k) where k is ignored. + + See Also + -------- + remove_edge : remove a single edge + + Notes + ----- + Will fail silently if an edge in ebunch is not in the graph. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> ebunch=[(1,2),(2,3)] + >>> G.remove_edges_from(ebunch) + """ + for e in ebunch: + u,v = e[:2] # ignore edge data if present + if u in self.adj and v in self.adj[u]: + del self.adj[u][v] + if u != v: # self loop needs only one entry removed + del self.adj[v][u] + + + def has_edge(self, u, v): + """Return True if the edge (u,v) is in the graph. + + Parameters + ---------- + u,v : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + + Returns + ------- + edge_ind : bool + True if edge is in the graph, False otherwise. + + Examples + -------- + Can be called either using two nodes u,v or edge tuple (u,v) + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.has_edge(0,1) # using two nodes + True + >>> e = (0,1) + >>> G.has_edge(*e) # e is a 2-tuple (u,v) + True + >>> e = (0,1,{'weight':7}) + >>> G.has_edge(*e[:2]) # e is a 3-tuple (u,v,data_dictionary) + True + + The following syntax are all equivalent: + + >>> G.has_edge(0,1) + True + >>> 1 in G[0] # though this gives KeyError if 0 not in G + True + + """ + try: + return v in self.adj[u] + except KeyError: + return False + + + def neighbors(self, n): + """Return a list of the nodes connected to the node n. + + Parameters + ---------- + n : node + A node in the graph + + Returns + ------- + nlist : list + A list of nodes that are adjacent to n. + + Raises + ------ + NetworkXError + If the node n is not in the graph. + + Notes + ----- + It is usually more convenient (and faster) to access the + adjacency dictionary as G[n]: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge('a','b',weight=7) + >>> G['a'] + {'b': {'weight': 7}} + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.neighbors(0) + [1] + + """ + try: + return list(self.adj[n].keys()) + except KeyError: + raise NetworkXError("The node %s is not in the graph."%(n,)) + + def neighbors_iter(self, n): + """Return an iterator over all neighbors of node n. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> [n for n in G.neighbors_iter(0)] + [1] + + Notes + ----- + It is faster to use the idiom "in G[0]", e.g. + + >>> G = nx.path_graph(4) + >>> [n for n in G[0]] + [1] + """ + try: + return iter(self.adj[n].keys()) + except KeyError: + raise NetworkXError("The node %s is not in the graph."%(n,)) + + def edges(self, nbunch=None, data=False): + """Return a list of edges. + + Edges are returned as tuples with optional data + in the order (node, neighbor, data). + + Parameters + ---------- + nbunch : iterable container, optional (default= all nodes) + A container of nodes. The container will be iterated + through once. + data : bool, optional (default=False) + Return two tuples (u,v) (False) or three-tuples (u,v,data) (True). 
+ + Returns + -------- + edge_list: list of edge tuples + Edges that are adjacent to any node in nbunch, or a list + of all edges if nbunch is not specified. + + See Also + -------- + edges_iter : return an iterator over the edges + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.edges() + [(0, 1), (1, 2), (2, 3)] + >>> G.edges(data=True) # default edge data is {} (empty dictionary) + [(0, 1, {}), (1, 2, {}), (2, 3, {})] + >>> G.edges([0,3]) + [(0, 1), (3, 2)] + >>> G.edges(0) + [(0, 1)] + + """ + return list(self.edges_iter(nbunch, data)) + + def edges_iter(self, nbunch=None, data=False): + """Return an iterator over the edges. + + Edges are returned as tuples with optional data + in the order (node, neighbor, data). + + Parameters + ---------- + nbunch : iterable container, optional (default= all nodes) + A container of nodes. The container will be iterated + through once. + data : bool, optional (default=False) + If True, return edge attribute dict in 3-tuple (u,v,data). + + Returns + ------- + edge_iter : iterator + An iterator of (u,v) or (u,v,d) tuples of edges. + + See Also + -------- + edges : return a list of edges + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + + Examples + -------- + >>> G = nx.Graph() # or MultiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> [e for e in G.edges_iter()] + [(0, 1), (1, 2), (2, 3)] + >>> list(G.edges_iter(data=True)) # default data is {} (empty dict) + [(0, 1, {}), (1, 2, {}), (2, 3, {})] + >>> list(G.edges_iter([0,3])) + [(0, 1), (3, 2)] + >>> list(G.edges_iter(0)) + [(0, 1)] + + """ + seen={} # helper dict to keep track of multiply stored edges + if nbunch is None: + nodes_nbrs = iter(self.adj.items()) + else: + nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) + if data: + for n,nbrs in nodes_nbrs: + for nbr,data in nbrs.items(): + if nbr not in seen: + yield (n,nbr,data) + seen[n]=1 + else: + for n,nbrs in nodes_nbrs: + for nbr in nbrs: + if nbr not in seen: + yield (n,nbr) + seen[n] = 1 + del seen + + + def get_edge_data(self, u, v, default=None): + """Return the attribute dictionary associated with edge (u,v). + + Parameters + ---------- + u,v : nodes + default: any Python object (default=None) + Value to return if the edge (u,v) is not found. + + Returns + ------- + edge_dict : dictionary + The edge attribute dictionary. + + Notes + ----- + It is faster to use G[u][v]. + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G[0][1] + {} + + Warning: Assigning G[u][v] corrupts the graph data structure. + But it is safe to assign attributes to that dictionary, + + >>> G[0][1]['weight'] = 7 + >>> G[0][1]['weight'] + 7 + >>> G[1][0]['weight'] + 7 + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.get_edge_data(0,1) # default edge data is {} + {} + >>> e = (0,1) + >>> G.get_edge_data(*e) # tuple form + {} + >>> G.get_edge_data('a','b',default=0) # edge not in graph, return 0 + 0 + """ + try: + return self.adj[u][v] + except KeyError: + return default + + def adjacency_list(self): + """Return an adjacency list representation of the graph. + + The output adjacency list is in the order of G.nodes(). + For directed graphs, only outgoing adjacencies are included. 
+ + Returns + ------- + adj_list : lists of lists + The adjacency structure of the graph as a list of lists. + + See Also + -------- + adjacency_iter + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.adjacency_list() # in order given by G.nodes() + [[1], [0, 2], [1, 3], [2]] + + """ + return list(map(list,iter(self.adj.values()))) + + def adjacency_iter(self): + """Return an iterator of (node, adjacency dict) tuples for all nodes. + + This is the fastest way to look at every edge. + For directed graphs, only outgoing adjacencies are included. + + Returns + ------- + adj_iter : iterator + An iterator of (node, adjacency dictionary) for all nodes in + the graph. + + See Also + -------- + adjacency_list + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> [(n,nbrdict) for n,nbrdict in G.adjacency_iter()] + [(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})] + + """ + return iter(self.adj.items()) + + def degree(self, nbunch=None, weighted=False): + """Return the degree of a node or nodes. + + The node degree is the number of edges adjacent to that node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd : dictionary, or number + A dictionary with nodes as keys and degree as values or + a number if a single node is specified. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.degree(0) + 1 + >>> G.degree([0,1]) + {0: 1, 1: 2} + >>> list(G.degree([0,1]).values()) + [1, 2] + + """ + if nbunch in self: # return a single node + return next(self.degree_iter(nbunch,weighted=weighted))[1] + else: # return a dict + return dict(self.degree_iter(nbunch,weighted=weighted)) + + def degree_iter(self, nbunch=None, weighted=False): + """Return an iterator for (node, degree). + + The node degree is the number of edges adjacent to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd_iter : an iterator + The iterator returns two-tuples of (node, degree). + + See Also + -------- + degree + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> list(G.degree_iter(0)) # node 0 with degree 1 + [(0, 1)] + >>> list(G.degree_iter([0,1])) + [(0, 1), (1, 2)] + + """ + if nbunch is None: + nodes_nbrs = iter(self.adj.items()) + else: + nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) + + if weighted: + # edge weighted graph - degree is sum of nbr edge weights + for n,nbrs in nodes_nbrs: + yield (n, sum((nbrs[nbr].get('weight',1) for nbr in nbrs)) + + (n in nbrs and nbrs[n].get('weight',1))) + else: + for n,nbrs in nodes_nbrs: + yield (n,len(nbrs)+(n in nbrs)) # return tuple (n,degree) + + + def clear(self): + """Remove all nodes and edges from the graph. + + This also removes the name, and all graph, node, and edge attributes. 
+ + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.clear() + >>> G.nodes() + [] + >>> G.edges() + [] + + """ + self.name = '' + self.adj.clear() + self.node.clear() + self.graph.clear() + + def copy(self): + """Return a copy of the graph. + + Returns + ------- + G : Graph + A copy of the graph. + + See Also + -------- + to_directed: return a directed copy of the graph. + + Notes + ----- + This makes a complete copy of the graph including all of the + node or edge attributes. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> H = G.copy() + + """ + return deepcopy(self) + + def is_multigraph(self): + """Return True if graph is a multigraph, False otherwise.""" + return False + + + def is_directed(self): + """Return True if graph is directed, False otherwise.""" + return False + + def to_directed(self): + """Return a directed representation of the graph. + + Returns + ------- + G : DiGraph + A directed graph with the same name, same nodes, and with + each edge (u,v,data) replaced by two directed edges + (u,v,data) and (v,u,data). + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=DiGraph(G) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, http://docs.python.org/library/copy.html. + + Examples + -------- + >>> G = nx.Graph() # or MultiGraph, etc + >>> G.add_path([0,1]) + >>> H = G.to_directed() + >>> H.edges() + [(0, 1), (1, 0)] + + If already directed, return a (deep) copy + + >>> G = nx.DiGraph() # or MultiDiGraph, etc + >>> G.add_path([0,1]) + >>> H = G.to_directed() + >>> H.edges() + [(0, 1)] + """ + from urpmgraphs import DiGraph + G=DiGraph() + G.name=self.name + G.add_nodes_from(self) + G.add_edges_from( ((u,v,deepcopy(data)) + for u,nbrs in self.adjacency_iter() + for v,data in nbrs.items()) ) + G.graph=deepcopy(self.graph) + G.node=deepcopy(self.node) + return G + + def to_undirected(self): + """Return an undirected copy of the graph. + + Returns + ------- + G : Graph/MultiGraph + A deepcopy of the graph. + + See Also + -------- + copy, add_edge, add_edges_from + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar G=DiGraph(D) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, http://docs.python.org/library/copy.html. + + Examples + -------- + >>> G = nx.Graph() # or MultiGraph, etc + >>> G.add_path([0,1]) + >>> H = G.to_directed() + >>> H.edges() + [(0, 1), (1, 0)] + >>> G2 = H.to_undirected() + >>> G2.edges() + [(0, 1)] + """ + return deepcopy(self) + + def subgraph(self, nbunch): + """Return the subgraph induced on nodes in nbunch. + + The induced subgraph of the graph contains the nodes in nbunch + and the edges between those nodes. + + Parameters + ---------- + nbunch : list, iterable + A container of nodes which will be iterated through once. + + Returns + ------- + G : Graph + A subgraph of the graph with the same edge attributes. + + Notes + ----- + The graph, edge or node attributes just point to the original graph. 
+        So changes to the node or edge structure will not be reflected in
+        the original graph while changes to the attributes will.
+
+        To create a subgraph with its own copy of the edge/node attributes use:
+        nx.Graph(G.subgraph(nbunch))
+
+        If edge attributes are containers, a deep copy can be obtained using:
+        G.subgraph(nbunch).copy()
+
+        For an in-place reduction of a graph to a subgraph you can remove nodes:
+        G.remove_nodes_from([n for n in G if n not in set(nbunch)])
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2,3])
+        >>> H = G.subgraph([0,1,2])
+        >>> H.edges()
+        [(0, 1), (1, 2)]
+        """
+        bunch = self.nbunch_iter(nbunch)
+        # create new graph and copy subgraph into it
+        H = self.__class__()
+        # namespace shortcuts for speed
+        H_adj=H.adj
+        self_adj=self.adj
+        # add nodes and edges (undirected method)
+        for n in bunch:
+            Hnbrs={}
+            H_adj[n]=Hnbrs
+            for nbr,d in self_adj[n].items():
+                if nbr in H_adj:
+                    # add both representations of edge: n-nbr and nbr-n
+                    Hnbrs[nbr]=d
+                    H_adj[nbr][n]=d
+        # copy node and attribute dictionaries
+        for n in H:
+            H.node[n]=self.node[n]
+        H.graph=self.graph
+        return H
+
+
+    def nodes_with_selfloops(self):
+        """Return a list of nodes with self loops.
+
+        A node with a self loop has an edge with both ends adjacent
+        to that node.
+
+        Returns
+        -------
+        nodelist : list
+            A list of nodes with self loops.
+
+        See Also
+        --------
+        selfloop_edges, number_of_selfloops
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_edge(1,1)
+        >>> G.add_edge(1,2)
+        >>> G.nodes_with_selfloops()
+        [1]
+        """
+        return [ n for n,nbrs in self.adj.items() if n in nbrs ]
+
+    def selfloop_edges(self, data=False):
+        """Return a list of selfloop edges.
+
+        A selfloop edge has the same node at both ends.
+
+        Parameters
+        ----------
+        data : bool, optional (default=False)
+            Return selfloop edges as two tuples (u,v) (data=False)
+            or three-tuples (u,v,data) (data=True)
+
+        Returns
+        -------
+        edgelist : list of edge tuples
+            A list of all selfloop edges.
+
+        See Also
+        --------
+        nodes_with_selfloops, number_of_selfloops
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_edge(1,1)
+        >>> G.add_edge(1,2)
+        >>> G.selfloop_edges()
+        [(1, 1)]
+        >>> G.selfloop_edges(data=True)
+        [(1, 1, {})]
+        """
+        if data:
+            return [ (n,n,nbrs[n])
+                     for n,nbrs in self.adj.items() if n in nbrs ]
+        else:
+            return [ (n,n)
+                     for n,nbrs in self.adj.items() if n in nbrs ]
+
+
+    def number_of_selfloops(self):
+        """Return the number of selfloop edges.
+
+        A selfloop edge has the same node at both ends.
+
+        Returns
+        -------
+        nloops : int
+            The number of selfloops.
+
+        See Also
+        --------
+        nodes_with_selfloops, selfloop_edges
+
+        Examples
+        --------
+        >>> G=nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_edge(1,1)
+        >>> G.add_edge(1,2)
+        >>> G.number_of_selfloops()
+        1
+        """
+        return len(self.selfloop_edges())
+
+
+    def size(self, weighted=False):
+        """Return the number of edges.
+
+        Parameters
+        ----------
+        weighted : boolean, optional (default=False)
+            If True return the sum of the edge weights.
+
+        Returns
+        -------
+        nedges : int
+            The number of edges in the graph.
+ + See Also + -------- + number_of_edges + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.size() + 3 + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge('a','b',weight=2) + >>> G.add_edge('b','c',weight=4) + >>> G.size() + 2 + >>> G.size(weighted=True) + 6.0 + """ + s=sum(self.degree(weighted=weighted).values())/2 + if weighted: + return float(s) + else: + return int(s) + + def number_of_edges(self, u=None, v=None): + """Return the number of edges between two nodes. + + Parameters + ---------- + u,v : nodes, optional (default=all edges) + If u and v are specified, return the number of edges between + u and v. Otherwise return the total number of all edges. + + Returns + ------- + nedges : int + The number of edges in the graph. If nodes u and v are specified + return the number of edges between those nodes. + + See Also + -------- + size + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.number_of_edges() + 3 + >>> G.number_of_edges(0,1) + 1 + >>> e = (0,1) + >>> G.number_of_edges(*e) + 1 + """ + if u is None: return int(self.size()) + if v in self.adj[u]: + return 1 + else: + return 0 + + + def add_star(self, nodes, **attr): + """Add a star. + + The first node in nodes is the middle of the star. It is connected + to all other nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in star. + + See Also + -------- + add_path, add_cycle + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_star([0,1,2,3]) + >>> G.add_star([10,11,12],weight=2) + + """ + nlist = list(nodes) + v=nlist[0] + edges=((v,n) for n in nlist[1:]) + self.add_edges_from(edges, **attr) + + def add_path(self, nodes, **attr): + """Add a path. + + Parameters + ---------- + nodes : iterable container + A container of nodes. A path will be constructed from + the nodes (in order) and added to the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in path. + + See Also + -------- + add_star, add_cycle + + Examples + -------- + >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.add_path([10,11,12],weight=7) + + """ + nlist = list(nodes) + edges=list(zip(nlist[:-1],nlist[1:])) + self.add_edges_from(edges, **attr) + + def add_cycle(self, nodes, **attr): + """Add a cycle. + + Parameters + ---------- + nodes: iterable container + A container of nodes. A cycle will be constructed from + the nodes (in order) and added to the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in cycle. + + See Also + -------- + add_path, add_star + + Examples + -------- + >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_cycle([0,1,2,3]) + >>> G.add_cycle([10,11,12],weight=7) + + """ + nlist = list(nodes) + edges=list(zip(nlist,nlist[1:]+[nlist[0]])) + self.add_edges_from(edges, **attr) + + + def nbunch_iter(self, nbunch=None): + """Return an iterator of nodes contained in nbunch that are + also in the graph. + + The nodes in nbunch are checked for membership in the graph + and if not are silently ignored. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. 
The container will be iterated
+            through once.
+
+        Returns
+        -------
+        niter : iterator
+            An iterator over nodes in nbunch that are also in the graph.
+            If nbunch is None, iterate over all nodes in the graph.
+
+        Raises
+        ------
+        NetworkXError
+            If nbunch is not a node or sequence of nodes.
+            If a node in nbunch is not hashable.
+
+        See Also
+        --------
+        Graph.__iter__
+
+        Notes
+        -----
+        When nbunch is an iterator, the returned iterator yields values
+        directly from nbunch, becoming exhausted when nbunch is exhausted.
+
+        To test whether nbunch is a single node, one can use
+        "if nbunch in self:", even after processing with this routine.
+
+        If nbunch is not a node or a (possibly empty) sequence/iterator
+        or None, a NetworkXError is raised.  Also, if any object in
+        nbunch is not hashable, a NetworkXError is raised.
+        """
+        if nbunch is None:   # include all nodes via iterator
+            bunch=iter(self.adj.keys())
+        elif nbunch in self: # if nbunch is a single node
+            bunch=iter([nbunch])
+        else:                # if nbunch is a sequence of nodes
+            def bunch_iter(nlist,adj):
+                try:
+                    for n in nlist:
+                        if n in adj:
+                            yield n
+                except TypeError as e:
+                    message=e.args[0]
+#                    sys.stdout.write(message)
+                    # capture error for non-sequence/iterator nbunch.
+                    if 'iter' in message:
+                        raise NetworkXError(\
+                            "nbunch is not a node or a sequence of nodes.")
+                    # capture error for unhashable node.
+                    elif 'hashable' in message:
+                        raise NetworkXError(\
+                            "Node %s in the sequence nbunch is not a valid node."%n)
+                    else:
+                        raise
+            bunch=bunch_iter(nbunch,self.adj)
+        return bunch
diff --git a/rpm5utils/urpmgraphs/convert.py b/rpm5utils/urpmgraphs/convert.py
new file mode 100644
index 0000000..571b47a
--- /dev/null
+++ b/rpm5utils/urpmgraphs/convert.py
@@ -0,0 +1,708 @@
+"""
+This module provides functions to convert
+NetworkX graphs to and from other formats.
+
+The preferred way of converting data to a NetworkX graph
+is through the graph constructor.  The constructor calls
+the to_networkx_graph() function which attempts to guess the
+input type and convert it automatically.
+
+Examples
+--------
+
+Create a 10 node random graph from a numpy matrix
+
+>>> import numpy
+>>> a=numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
+>>> D=nx.DiGraph(a)
+
+or equivalently
+
+>>> D=nx.to_networkx_graph(a,create_using=nx.DiGraph())
+
+Create a graph with a single edge from a dictionary of dictionaries
+
+>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
+>>> G=nx.Graph(d)
+
+
+See Also
+--------
+nx_pygraphviz, nx_pydot
+
+"""
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+                            'Pieter Swart (swart@lanl.gov)',
+                            'Dan Schult(dschult@colgate.edu)'])
+#    Copyright (C) 2006-2011 by
+#    Aric Hagberg
+#    Dan Schult
+#    Pieter Swart
+#    All rights reserved.
+#    BSD license.
+
+import warnings
+import rpm5utils as nx
+
+__all__ = ['to_networkx_graph',
+           'from_dict_of_dicts', 'to_dict_of_dicts',
+           'from_dict_of_lists', 'to_dict_of_lists',
+           'from_edgelist', 'to_edgelist',
+           'from_numpy_matrix', 'to_numpy_matrix',
+           'to_numpy_recarray'
+           ]
+
+def _prep_create_using(create_using):
+    """Return a graph object ready to be populated.
+
+    If create_using is None return the default (just networkx.Graph()).
+    If create_using.clear() works, assume it returns a graph object.
+    Otherwise raise an exception because create_using is not a networkx graph.
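+
+    A minimal sketch of the expected behavior (illustrative only)::
+
+        H = _prep_create_using(None)          # a fresh, empty nx.Graph()
+        H = _prep_create_using(nx.DiGraph())  # the given DiGraph, emptied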
+ + """ + if create_using is None: + G=nx.Graph() + else: + G=create_using + try: + G.clear() + except: + raise TypeError("Input graph is not a networkx graph type") + return G + +def to_networkx_graph(data,create_using=None,multigraph_input=False): + """Make a NetworkX graph from a known data structure. + + The preferred way to call this is automatically + from the class constructor + + >>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1) + >>> G=nx.Graph(d) + + instead of the equivalent + + >>> G=nx.from_dict_of_dicts(d) + + Parameters + ---------- + data : a object to be converted + Current known types are: + any NetworkX graph + dict-of-dicts + dist-of-lists + list of edges + numpy matrix + numpy ndarray + scipy sparse matrix + pygraphviz agraph + + create_using : NetworkX graph + Use specified graph for result. Otherwise a new graph is created. + + multigraph_input : bool (default False) + If True and data is a dict_of_dicts, + try to create a multigraph assuming dict_of_dict_of_lists. + If data and create_using are both multigraphs then create + a multigraph from a multigraph. + + """ + # NX graph + if hasattr(data,"adj"): + try: + result= from_dict_of_dicts(data.adj,\ + create_using=create_using,\ + multigraph_input=data.is_multigraph()) + if hasattr(data,'graph') and isinstance(data.graph,dict): + result.graph=data.graph.copy() + if hasattr(data,'node') and isinstance(data.node,dict): + result.node=dict( (n,dd.copy()) for n,dd in data.node.items() ) + return result + except: + raise nx.NetworkXError("Input is not a correct NetworkX graph.") + + # pygraphviz agraph + if hasattr(data,"is_strict"): + try: + return nx.from_agraph(data,create_using=create_using) + except: + raise nx.NetworkXError("Input is not a correct pygraphviz graph.") + + # dict of dicts/lists + if isinstance(data,dict): + try: + return from_dict_of_dicts(data,create_using=create_using,\ + multigraph_input=multigraph_input) + except: + try: + return from_dict_of_lists(data,create_using=create_using) + except: + raise TypeError("Input is not known type.") + + # list or generator of edges + if (isinstance(data,list) + or hasattr(data,'next') + or hasattr(data, '__next__')): + try: + return from_edgelist(data,create_using=create_using) + except: + raise nx.NetworkXError("Input is not a valid edge list") + + # numpy matrix or ndarray + try: + import numpy + if isinstance(data,numpy.matrix) or \ + isinstance(data,numpy.ndarray): + try: + return from_numpy_matrix(data,create_using=create_using) + except: + raise nx.NetworkXError(\ + "Input is not a correct numpy matrix or array.") + except ImportError: + warnings.warn('numpy not found, skipping conversion test.', + ImportWarning) + + # scipy sparse matrix - any format + try: + import scipy + if hasattr(data,"format"): + try: + return from_scipy_sparse_matrix(data,create_using=create_using) + except: + raise nx.NetworkXError(\ + "Input is not a correct scipy sparse matrix type.") + except ImportError: + warnings.warn('scipy not found, skipping conversion test.', + ImportWarning) + + + raise nx.NetworkXError(\ + "Input is not a known data type for conversion.") + + return + + +def convert_to_undirected(G): + """Return a new undirected representation of the graph G. + + """ + return G.to_undirected() + + +def convert_to_directed(G): + """Return a new directed representation of the graph G. + + """ + return G.to_directed() + + +def to_dict_of_lists(G,nodelist=None): + """Return adjacency representation of graph as a dictionary of lists. 
+ + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + Notes + ----- + Completely ignores edge data for MultiGraph and MultiDiGraph. + + """ + if nodelist is None: + nodelist=G + + d = {} + for n in nodelist: + d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist] + return d + +def from_dict_of_lists(d,create_using=None): + """Return a graph from a dictionary of lists. + + Parameters + ---------- + d : dictionary of lists + A dictionary of lists adjacency representation. + + create_using : NetworkX graph + Use specified graph for result. Otherwise a new graph is created. + + Examples + -------- + >>> dol= {0:[1]} # single edge (0,1) + >>> G=nx.from_dict_of_lists(dol) + + or + >>> G=nx.Graph(dol) # use Graph constructor + + """ + G=_prep_create_using(create_using) + G.add_nodes_from(d) + if G.is_multigraph() and not G.is_directed(): + # a dict_of_lists can't show multiedges. BUT for undirected graphs, + # each edge shows up twice in the dict_of_lists. + # So we need to treat this case separately. + seen={} + for node,nbrlist in d.items(): + for nbr in nbrlist: + if nbr not in seen: + G.add_edge(node,nbr) + seen[node]=1 # don't allow reverse edge to show up + else: + G.add_edges_from( ((node,nbr) for node,nbrlist in d.items() + for nbr in nbrlist) ) + return G + + +def to_dict_of_dicts(G,nodelist=None,edge_data=None): + """Return adjacency representation of graph as a dictionary of dictionaries. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + edge_data : list, optional + If provided, the value of the dictionary will be + set to edge_data for all edges. This is useful to make + an adjacency matrix type representation with 1 as the edge data. + If edgedata is None, the edgedata in G is used to fill the values. + If G is a multigraph, the edgedata is a dict for each pair (u,v). + + """ + dod={} + if nodelist is None: + if edge_data is None: + for u,nbrdict in G.adjacency_iter(): + dod[u]=nbrdict.copy() + else: # edge_data is not None + for u,nbrdict in G.adjacency_iter(): + dod[u]=dod.fromkeys(nbrdict, edge_data) + else: # nodelist is not None + if edge_data is None: + for u in nodelist: + dod[u]={} + for v,data in ((v,data) for v,data in G[u].items() if v in nodelist): + dod[u][v]=data + else: # nodelist and edge_data are not None + for u in nodelist: + dod[u]={} + for v in ( v for v in G[u] if v in nodelist): + dod[u][v]=edge_data + return dod + +def from_dict_of_dicts(d,create_using=None,multigraph_input=False): + """Return a graph from a dictionary of dictionaries. + + Parameters + ---------- + d : dictionary of dictionaries + A dictionary of dictionaries adjacency representation. + + create_using : NetworkX graph + Use specified graph for result. Otherwise a new graph is created. + + multigraph_input : bool (default False) + When True, the values of the inner dict are assumed + to be containers of edge data for multiple edges. + Otherwise this routine assumes the edge data are singletons. + + Examples + -------- + >>> dod= {0: {1:{'weight':1}}} # single edge (0,1) + >>> G=nx.from_dict_of_dicts(dod) + + or + >>> G=nx.Graph(dod) # use Graph constructor + + """ + G=_prep_create_using(create_using) + G.add_nodes_from(d) + # is dict a MultiGraph or MultiDiGraph? 
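+    # Illustrative input shapes (assumed, for orientation only):
+    #   multigraph_input=True  -> d = {u: {v: {key: data_dict}}},
+    #                             e.g. {0: {1: {0: {'weight': 1}, 1: {}}}}
+    #   multigraph_input=False -> d = {u: {v: data_dict}},
+    #                             e.g. {0: {1: {'weight': 1}}}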
+ if multigraph_input: + # make a copy of the list of edge data (but not the edge data) + if G.is_directed(): + if G.is_multigraph(): + G.add_edges_from( (u,v,key,data) + for u,nbrs in d.items() + for v,datadict in nbrs.items() + for key,data in datadict.items() + ) + else: + G.add_edges_from( (u,v,data) + for u,nbrs in d.items() + for v,datadict in nbrs.items() + for key,data in datadict.items() + ) + else: # Undirected + if G.is_multigraph(): + seen=set() # don't add both directions of undirected graph + for u,nbrs in d.items(): + for v,datadict in nbrs.items(): + if (u,v) not in seen: + G.add_edges_from( (u,v,key,data) + for key,data in datadict.items() + ) + seen.add((v,u)) + else: + seen=set() # don't add both directions of undirected graph + for u,nbrs in d.items(): + for v,datadict in nbrs.items(): + if (u,v) not in seen: + G.add_edges_from( (u,v,data) + for key,data in datadict.items() ) + seen.add((v,u)) + + else: # not a multigraph to multigraph transfer + if G.is_multigraph() and not G.is_directed(): + # d can have both representations u-v, v-u in dict. Only add one. + # We don't need this check for digraphs since we add both directions, + # or for Graph() since it is done implicitly (parallel edges not allowed) + seen=set() + for u,nbrs in d.items(): + for v,data in nbrs.items(): + if (u,v) not in seen: + G.add_edge(u,v,attr_dict=data) + seen.add((v,u)) + else: + G.add_edges_from( ( (u,v,data) + for u,nbrs in d.items() + for v,data in nbrs.items()) ) + return G + +def to_edgelist(G,nodelist=None): + """Return a list of edges in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + """ + if nodelist is None: + return G.edges(data=True) + else: + return G.edges(nodelist,data=True) + +def from_edgelist(edgelist,create_using=None): + """Return a graph from a list of edges. + + Parameters + ---------- + edgelist : list or iterator + Edge tuples + + create_using : NetworkX graph + Use specified graph for result. Otherwise a new graph is created. + + Examples + -------- + >>> edgelist= [(0,1)] # single edge (0,1) + >>> G=nx.from_edgelist(edgelist) + + or + >>> G=nx.Graph(edgelist) # use Graph constructor + + """ + G=_prep_create_using(create_using) + G.add_edges_from(edgelist) + return G + +def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, + multigraph_weight=sum, weight='weight'): + """Return the graph adjacency matrix as a NumPy matrix. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy matrix. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data type, optional + A valid single NumPy data type used to initialize the array. + This must be a simple type such as int or numpy.float64 and + not a compound data type (see to_numpy_recarray) + If None, then the NumPy default is used. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + multigraph_weight : {sum, min, max}, optional + An operator that determines how weights in multigraphs are handled. + The default is to sum the weights of the multiple edges. + + weight: string, optional + Edge data key corresponding to the edge weight. + + Returns + ------- + M : NumPy matrix + Graph adjacency matrix. 
+ + See Also + -------- + to_numpy_recarray, from_numpy_matrix + + Notes + ----- + The matrix entries are assigned with weight edge attribute. When + an edge does not have the weight attribute, the value of the entry is 1. + For multiple edges, the values of the entries are the sums of the edge + attributes for each edge. + + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0,1,weight=2) + >>> G.add_edge(1,0) + >>> G.add_edge(2,2,weight=3) + >>> G.add_edge(2,2) + >>> nx.to_numpy_matrix(G, nodelist=[0,1,2]) + matrix([[ 0., 2., 0.], + [ 1., 0., 0.], + [ 0., 0., 4.]]) + + """ + try: + import numpy as np + except ImportError: + raise ImportError(\ + "to_numpy_matrix() requires numpy: http://scipy.org/ ") + + if nodelist is None: + nodelist = G.nodes() + + nodeset = set(nodelist) + if len(nodelist) != len(nodeset): + msg = "Ambiguous ordering: `nodelist` contained duplicates." + raise nx.NetworkXError(msg) + + nlen=len(nodelist) + undirected = not G.is_directed() + index=dict(zip(nodelist,range(nlen))) + + if G.is_multigraph(): + # Handle MultiGraphs and MultiDiGraphs + # array of nan' to start with, any leftover nans will be converted to 0 + # nans are used so we can use sum, min, max for multigraphs + M = np.zeros((nlen,nlen), dtype=dtype, order=order)+np.nan + # use numpy nan-aware operations + operator={sum:np.nansum, min:np.nanmin, max:np.nanmax} + try: + op=operator[multigraph_weight] + except: + raise ValueError('multigraph_weight must be sum, min, or max') + + for u,v,attrs in G.edges_iter(data=True): + if (u in nodeset) and (v in nodeset): + i,j = index[u],index[v] + e_weight = attrs.get(weight, 1) + M[i,j] = op([e_weight,M[i,j]]) + if undirected: + M[j,i] = M[i,j] + # convert any nans to zeros + M = np.asmatrix(np.nan_to_num(M)) + else: + # Graph or DiGraph, this is much faster than above + M = np.zeros((nlen,nlen), dtype=dtype, order=order) + for u,nbrdict in G.adjacency_iter(): + for v,d in nbrdict.items(): + try: + M[index[u],index[v]]=d.get(weight,1) + except KeyError: + pass + M = np.asmatrix(M) + return M + + +def from_numpy_matrix(A,create_using=None): + """Return a graph from numpy matrix. + + The numpy matrix is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + A : numpy matrix + An adjacency matrix representation of a graph + + create_using : NetworkX graph + Use specified graph for result. The default is Graph() + + Notes + ----- + If the numpy matrix has a single data type for each matrix entry it + will be converted to an appropriate Python data type. + + If the numpy matrix has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. 
+ + See Also + -------- + to_numpy_matrix, to_numpy_recarray + + Examples + -------- + Simple integer weights on edges: + + >>> import numpy + >>> A=numpy.matrix([[1,1],[2,1]]) + >>> G=nx.from_numpy_matrix(A) + + User defined compound data type on edges: + + >>> import numpy + >>> dt=[('weight',float),('cost',int)] + >>> A=numpy.matrix([[(1.0,2)]],dtype=dt) + >>> G=nx.from_numpy_matrix(A) + >>> G.edges(data=True) + [(0, 0, {'cost': 2, 'weight': 1.0})] + """ + kind_to_python_type={'f':float, + 'i':int, + 'u':int, + 'b':bool, + 'c':complex, + 'S':str, + 'V':'void'} + + try: # Python 3.x + blurb = chr(1245) # just to trigger the exception + kind_to_python_type['U']=str + except ValueError: # Python 2.6+ + kind_to_python_type['U']=unicode + + # This should never fail if you have created a numpy matrix with numpy... + try: + import numpy as np + except ImportError: + raise ImportError(\ + "from_numpy_matrix() requires numpy: http://scipy.org/ ") + + G=_prep_create_using(create_using) + n,m=A.shape + if n!=m: + raise nx.NetworkXError("Adjacency matrix is not square.", + "nx,ny=%s"%(A.shape,)) + dt=A.dtype + try: + python_type=kind_to_python_type[dt.kind] + except: + raise TypeError("Unknown numpy data type: %s"%dt) + + # make sure we get isolated nodes + G.add_nodes_from(range(n)) + # get a list of edges + x,y=np.asarray(A).nonzero() + + # handle numpy constructed data type + if python_type is 'void': + fields=sorted([(offset,dtype,name) for name,(dtype,offset) in + A.dtype.fields.items()]) + for (u,v) in zip(x,y): + attr={} + for (offset,dtype,name),val in zip(fields,A[u,v]): + attr[name]=kind_to_python_type[dtype.kind](val) + G.add_edge(u,v,attr) + else: # basic data type + G.add_edges_from( ((u,v,{'weight':python_type(A[u,v])}) + for (u,v) in zip(x,y)) ) + return G + + +def to_numpy_recarray(G,nodelist=None, + dtype=[('weight',float)], + order=None): + """Return the graph adjacency matrix as a NumPy recarray. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy matrix. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data-type, optional + A valid NumPy named dtype used to initialize the NumPy recarray. + The data type names are assumed to be keys in the graph edge attribute + dictionary. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + Returns + ------- + M : NumPy recarray + The graph with specified edge data as a Numpy recarray + + Notes + ----- + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edge(1,2,weight=7.0,cost=5) + >>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)]) + >>> print(A.weight) + [[ 0. 7.] + [ 7. 0.]] + >>> print(A.cost) + [[0 5] + [5 0]] + """ + try: + import numpy as np + except ImportError: + raise ImportError(\ + "to_numpy_matrix() requires numpy: http://scipy.org/ ") + + if G.is_multigraph(): + raise nx.NetworkXError("Not implemented for multigraphs.") + + if nodelist is None: + nodelist = G.nodes() + + nodeset = set(nodelist) + if len(nodelist) != len(nodeset): + msg = "Ambiguous ordering: `nodelist` contained duplicates." 
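+        # (duplicate nodes would make the node -> row/column index mapping
+        # built just below ambiguous)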
+ raise nx.NetworkXError(msg) + + nlen=len(nodelist) + undirected = not G.is_directed() + index=dict(zip(nodelist,range(nlen))) + M = np.zeros((nlen,nlen), dtype=dtype, order=order) + + names=M.dtype.names + for u,v,attrs in G.edges_iter(data=True): + if (u in nodeset) and (v in nodeset): + i,j = index[u],index[v] + values=tuple([attrs[n] for n in names]) + M[i,j] = values + if undirected: + M[j,i] = M[i,j] + + return M.view(np.recarray) diff --git a/rpm5utils/urpmgraphs/exception.py b/rpm5utils/urpmgraphs/exception.py new file mode 100644 index 0000000..c2dd580 --- /dev/null +++ b/rpm5utils/urpmgraphs/exception.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +""" +********** +Exceptions +********** + +Base exceptions and errors for NetworkX. + +""" +__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult(dschult@colgate.edu)\nLoïc Séguin-C. """ +# Copyright (C) 2004-2008 by +# Aric Hagberg +# Dan Schult +# Pieter Swart +# All rights reserved. +# BSD license. +# + +# Exception handling + +# the root of all Exceptions +class NetworkXException(Exception): + """Base class for exceptions in NetworkX.""" + +class NetworkXError(NetworkXException): + """Exception for a serious error in NetworkX""" + +class NetworkXPointlessConcept(NetworkXException): + """Harary, F. and Read, R. "Is the Null Graph a Pointless Concept?" +In Graphs and Combinatorics Conference, George Washington University. +New York: Springer-Verlag, 1973. +""" + +class NetworkXAlgorithmError(NetworkXException): + """Exception for unexpected termination of algorithms.""" + +class NetworkXUnfeasible(NetworkXAlgorithmError): + """Exception raised by algorithms trying to solve a problem + instance that has no feasible solution.""" + +class NetworkXNoPath(NetworkXUnfeasible): + """Exception for algorithms that should return a path when running + on graphs where such a path does not exist.""" + +class NetworkXUnbounded(NetworkXAlgorithmError): + """Exception raised by algorithms trying to solve a maximization + or a minimization problem instance that is unbounded.""" + + diff --git a/urpm-downloader.py b/urpm-downloader.py new file mode 100755 index 0000000..b0bf573 --- /dev/null +++ b/urpm-downloader.py @@ -0,0 +1,675 @@ +#!/usr/bin/python2.7 +# -*- coding: UTF-8 -*- +''' +" urpm-downloader for URPM-based linux +" A tool for downloading RPMs from URPM-based linux repositories. +" +" Copyright (C) 2011 ROSA Laboratory. +" Written by Anton Kirilenko +" +" PLATFORMS +" ========= +" Linux +" +" REQUIREMENTS +" ============ +" - python 2.7 +" - python-rpm 5.3 +" - urpmi 6.68 +" +" This program is free software: you can redistribute it and/or modify +" it under the terms of the GNU General Public License or the GNU Lesser +" General Public License as published by the Free Software Foundation, +" either version 2 of the Licenses, or (at your option) any later version. +" +" This program is distributed in the hope that it will be useful, +" but WITHOUT ANY WARRANTY; without even the implied warranty of +" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +" GNU General Public License for more details. +" +" You should have received a copy of the GNU General Public License +" and the GNU Lesser General Public License along with this program. +" If not, see . 
+'''
+
+
+import argparse
+import sys
+import subprocess
+import os
+import re
+from urllib import urlretrieve
+import rpm
+from urllib2 import urlopen, HTTPError, URLError
+import shutil
+
+import ConfigParser
+cp = ConfigParser.RawConfigParser()
+
+import gettext
+#gettext.install('urpm-tools', 'locale', unicode=True, names=['gettext'])
+gettext.install('urpm-tools')
+
+#t = gettext.translation('urpm-tools', 'locale', fallback=True)
+#_ = t.ugettext
+
+def vprint(text):
+    '''Print the message only if verbose mode is on'''
+    if(command_line_arguments.verbose):
+        print(text)
+
+def qprint(text):
+    '''Print the message only if quiet mode is off'''
+    if(not command_line_arguments.quiet):
+        print(text)
+
+
+def eprint(text, fatal=False, code=1):
+    '''Print the message to stderr. Exit if fatal'''
+    print >> sys.stderr, text
+    if (fatal):
+        exit(code)
+
+
+def url_exists(url):
+    '''Return True if the given url or local path exists. Otherwise, return False.'''
+    if(url.startswith("file://") or url.startswith("/")):
+        # strip the "file://" scheme so os.path.isfile() receives a plain path
+        path = url[len("file://"):] if url.startswith("file://") else url
+        return os.path.isfile(path)
+
+    # try to open the URL
+    try:
+        r = urlopen(url)
+        return True
+    except (HTTPError,URLError):
+        return False
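+
+# Illustrative use of url_exists(); the paths and URLs below are placeholders:
+#   url_exists('/bin/sh')                              -> True
+#   url_exists('file:///etc/urpmi/urpmi.cfg')          -> True on urpmi systems
+#   url_exists('http://repo.example.com/missing.rpm')  -> False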
+def parse_command_line():
+    ''' Parse the command line, adjust some flags and warn in some cases'''
+    global command_line_arguments
+    arg_parser = argparse.ArgumentParser(description=_('A tool for downloading RPMs and SRPMs from URPM-based linux repositories'),
+            epilog=_("If none of the options -b, -s, -d is turned on, it will be treated as -b"))
+    arg_parser.add_argument('packages', action='store', nargs='+', help=_("Package name(s) to download. It can contain not only package names, but (S)RPM files too; in this case the package name extracted from the file will be used"))
+    arg_parser.add_argument('-u', '--urls', action='store_true', help=_("Instead of downloading files, list the URLs that would be processed"))
+    arg_parser.add_argument('-r', '--resolve', action='store_true', help=_("When downloading RPMs, resolve dependencies and also download the required packages, if they are not already installed"))
+    arg_parser.add_argument('-a', '--resolve-all', action='store_true', help=_("When downloading RPMs, resolve dependencies and also download the required packages, even if they are already installed"))
+    arg_parser.add_argument('-b', '--binary', action='store_true', help=_("Download binary RPMs"))
+    arg_parser.add_argument('-s', '--source', action='store_true', help=_("Download the source RPMs (SRPMs)"))
+    arg_parser.add_argument('-d', '--debug-info', action='store_true', help=_("Download debug RPMs"))
+    arg_parser.add_argument('-D', '--debug-info-install', action='store_true', help=_("Download debug RPMs and install"))
+    arg_parser.add_argument('--version', action='version', version=VERSION)
+    arg_parser.add_argument('-v', '--verbose', action='store_true', help=_("Verbose (print additional info)"))
+    arg_parser.add_argument('-q', '--quiet', action='store_true', help=_("Quiet operation."))
+    arg_parser.add_argument('--include-media', '--media', action='append', nargs='+', help=_("Use only selected URPM media"))
+    arg_parser.add_argument('--exclude-media', action='append', nargs='+', help=_("Do not use selected URPM media"))
+    arg_parser.add_argument('-x', '--exclude-packages', action='store', nargs='+', help=_("Exclude package(s) by regex"))
+    arg_parser.add_argument('-i', '--ignore-errors', action='store_true', help=_("Try to continue when an error occurs"))
+    arg_parser.add_argument('-o', '--overwrite', action='store_true', help=_("If the file already exists, download it again and overwrite the old one"))
+    arg_parser.add_argument('--all-alternatives', action='store_true', help=_("If a package dependency can be satisfied by several packages, download all of them (by default, only the first one is downloaded)"))
+    arg_parser.add_argument('--all-versions', action='store_true', help=_("If different versions of a package are present in the repository, process them all"))
+    #arg_parser.add_argument('--self-test', action='store_true', help="Test urpm-downloader and exit")
+
+    arg_parser.add_argument('--dest-dir', action='store', help=_("Specify a destination directory for the download"))
+
+    command_line_arguments = arg_parser.parse_args(sys.argv[1:])
+
+    if(command_line_arguments.debug_info_install):
+        command_line_arguments.debug_info = True
+
+    if(not command_line_arguments.debug_info and not command_line_arguments.source):
+        command_line_arguments.binary = True
+
+    if(command_line_arguments.resolve_all):
+        command_line_arguments.resolve = True
+
+    if(command_line_arguments.exclude_packages is None):
+        command_line_arguments.exclude_packages = []
+
+    if(command_line_arguments.verbose and command_line_arguments.quiet):
+        eprint(_("Use of --verbose with --quiet is senseless. Turning verbose mode off."))
+        command_line_arguments.verbose = False
+
+    if(command_line_arguments.resolve and command_line_arguments.source and command_line_arguments.urls):
+        eprint(_("Note that resolving of SRPM dependencies is not possible until the SRPM is downloaded, so it will be done despite --urls"))
+
+    if(command_line_arguments.dest_dir is not None):
+        if(not os.path.exists(command_line_arguments.dest_dir) or not os.path.isdir(command_line_arguments.dest_dir)):
+            os.mkdir(command_line_arguments.dest_dir)
+    else:
+        command_line_arguments.dest_dir = os.getcwd()
+
+def get_command_output(command, fatal_fails=True):
+    '''Execute a command using subprocess.Popen and return its stdout output string. If
+    the return code is not 0, print an error message and exit'''
+    vprint("Executing command: " + str(command))
+    res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    output = list(res.communicate())
+    vprint('Output: ' + str(output))
+    if sys.stdout.encoding:
+        if output[0]:
+            output[0] = output[0].decode(sys.stdout.encoding).encode("UTF-8")
+        if output[1]:
+            output[1] = output[1].decode(sys.stdout.encoding).encode("UTF-8")
+
+    if(res.returncode != 0 and fatal_fails): # if not fatal_fails, do nothing; the caller has to deal with it
+        eprint(_("Error while calling command") + " '" + " ".join(command) + "'")
+        if(output[1] != None or output[0] != None):
+            eprint(_("Error message: \n") + ((output[0].strip() + "\n") if output[0] != None else "")
+                   + (output[1].strip() if output[1] != None else ""))
+        exit(1)
+    return [output[0], output[1], res.returncode]
+
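+# Illustrative (commented-out) use of the wrapper above; 'urpmq --list-media'
+# is only an example command, any argv-style list works:
+#out, err, code = get_command_output(['urpmq', '--list-media'], fatal_fails=False)
+#if code == 0:
+#    print out.strip()
+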
+
+def parse_packages(pkgs_list, toresolve):
+    ''' Takes a list of package names, some of which are alternatives (like 'pkg1|pkg2'),
+    and returns a list of package names without '|' '''
+    output = []
+    for pkg in pkgs_list:
+        pkgs = pkg.split("|")
+        if(len(pkgs)>1):
+            vprint("Alternatives found: " + str(pkgs))
+        if(command_line_arguments.all_alternatives): # download all the alternatives
+            for p in pkgs:
+                output.append(p)
+        else: # download only the first package (the first in alphabetical order)
+            #check if one of the packages is already in the 'toresolve' list
+            already_presents = False
+            for p in pkgs:
+                if(p in toresolve or p in output):
+                    already_presents = True
+                    break
+            #if not - add the first package
+            if(not already_presents):
+                output.append(sorted(pkgs)[0])
+                if(len(pkgs)>1):
+                    vprint("Selected: " + sorted(pkgs)[0])
+    return output
+
+
+def get_installed_packages():
+    '''Fill 'installed_packages' with installed package data, shaped like
+    {pkg_name: [[version1, release1], [version2, release2], ...], ...} '''
+    global installed_packages, installed_loaded
+    if(installed_loaded):
+        return
+    installed_loaded = True
+    installed_packages = {}
+
+    ts = rpm.TransactionSet()
+    mi = ts.dbMatch()
+    for h in mi:
+        if(h['name'] not in installed_packages):
+            installed_packages[h['name']] = []
+        installed_packages[h['name']].append( [h['version'], h['release']] )
+    vprint("The list of installed packages loaded")
+
+def check_what_to_skip(package_names):
+    ''' Get the list of package names and return a list of packages from it that don't need to be downloaded '''
+
+    def should_be_excluded(pkg):
+        for line in command_line_arguments.exclude_packages:
+            if(re.search(line, pkg) is not None):
+                return True
+        return False
+
+    vprint("Checking for packages to skip...")
+    pkgs = package_names[:]
+    to_skip = []
+    # remove packages that have to be excluded due to command line arguments
+    for pkg in pkgs[:]:
+        if(should_be_excluded(pkg)):
+            pkgs.remove(pkg)
+            to_skip.append(pkg)
+
+    if(command_line_arguments.resolve_all):
+        return to_skip
+
+    # Skip packages that are already installed and have the same version
+    get_installed_packages()
+
+    #remove from the to_skip candidates all the packages which are not installed
+    for pkg in pkgs[:]:
+        if(pkg not in installed_packages):
+            pkgs.remove(pkg)
+
+    vprint("Retrieving possible downloading package versions...")
+    res = get_command_output(cmd + ['--sources'] + pkgs)
+    urls = res[0].strip().split('\n')
+    vprint("A list of urls retrieved: " + str(urls))
+    to_download = {}
+    rpms = {}
+    for url in urls: # collect data
+        res = get_package_fields(url)
+        if(res[0] not in rpms):
+            rpms[res[0]] = []
+        rpms[res[0]].append(res[1:4])
+
+
+    if(not command_line_arguments.all_versions):
+        vprint("Removing urls of the older versions...")
+        for pkg in rpms.keys()[:]: # filter
+            L = rpms[pkg]
+            while(len(L) > 1):
+                if(rpm.evrCompare(L[0][0], L[1][0]) == 1):
+                    del L[1]
+                else:
+                    del L[0]
+
+    # regroup data: to_download[pkg_name] = [ver-rel1, ver-rel2, ...]
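+    # rpms maps each package name to [version-release, suffix, path] triples
+    # (see get_package_fields below); only the version-release strings are
+    # kept here, so they can be compared with the installed versions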
+ for pkg in rpms: + if(pkg not in to_download): + to_download[pkg] = [] + for item in rpms[pkg]: + to_download[pkg].append(item[0]) # item[0] == version + + vprint("Checking what to skip...") + + for pkg in pkgs: + installed_versions = ['-'.join(i) for i in installed_packages[pkg]] + #print pkg, str(installed_versions) + for ver in to_download[pkg][:]: + if (ver in installed_versions): + to_download[pkg].remove(ver) + if(len(to_download[pkg]) == 0): + to_download.pop(pkg) + to_skip.append(pkg) + vprint("Skipping " + pkg) + return to_skip + + +def resolve_packages(package_names): + '''Returns a list of packages recursively resoled from given list''' + global installed_packages + + resolved_packages = [] + def _resolve_packages(pkg_names): + toresolve = [] + pkgs = parse_packages(pkg_names, toresolve) + to_skip = check_what_to_skip(pkgs) + for pkg in pkgs[:]: + if(pkg in resolved_packages or (pkg in to_skip and (pkg not in package_names or resolve_source))): + # don't resolve its dependencies. + pkgs.remove(pkg) + else: + resolved_packages.append(pkg) + toresolve.append(pkg) + + if (len(toresolve) == 0): + return + vprint ("Resolving " + str(toresolve)) + names = get_command_output(['urpmq', "--requires-recursive"] + toresolve)[0].strip().split("\n") + _resolve_packages(names) + + _resolve_packages(package_names) + return resolved_packages + +def get_srpm_names(pkgs): + '''get a list of srpms names for every given package name. Returns a dictionary {pakage_name_1:[srpm_name_1, srpm_name_2,...], ...}''' + srpms = {} + cmd_tmp = cmd[:] + ['--sourcerpm'] + pkgs + names = get_command_output(cmd_tmp)[0] + + for line in names.split("\n"): + line = line.strip() + if(line == ''): + continue + n = line.split(":")[0].strip() + v = ":".join((line.split(":")[1:])).strip() + if(n not in srpms): + srpms[n] = [] + srpms[n].append(v) + return srpms + + +def get_srpm_url(url): + if(url.startswith("file://") or url.startswith("/")): + return url + tmp = url.split("/") + tmp[-4] = "SRPMS" + del tmp[-3] + return "/".join(tmp) + + +def list_srpm_urls(): + global cmd, srpm_urls_loaded + try: + srpm_urls_loaded + return srpm_urls + except: + srpm_urls_loaded = True + vprint("Loading list of SRPM URLs...") + re_slash = re.compile("/") + lines = get_command_output(cmd + ["--list-url"])[0].strip().split("\n") + media = get_command_output(cmd + ["--list-media", 'active'])[0].strip().split("\n") + + srpm_urls = [] + for line in lines: + parts = line.split(" ") + medium = ' '.join(parts[:-1]) + if medium not in media: + continue + url = parts[-1] + if(url.endswith("/")): + url = url[:-1] + if(re_slash.search(url) is not None): + srpm_urls.append(get_srpm_url(url)) + + return srpm_urls + +def try_download(url): + ''' Try to download file and return True if success, else return False ''' + path = os.path.join(command_line_arguments.dest_dir, os.path.basename(url)) + vprint("Trying to download file " + url) + try: + if(not os.path.exists(path) or command_line_arguments.overwrite): + #(path, msg) = urlretrieve(url, path) + if(url.startswith('/')): # local file + shutil.copyfile(url, path) + else: + fd = urlopen(url) + file = open(path, 'w') + file.write(fd.read()) + file.close() + fd.close() + qprint (_("* Downloaded: ") + url) + else: + qprint (_("* File exists, skipping: ") + url) + return None + except IOError, e: + return e + +def get_package_fields(rpmname): + ''' Return [name, version, suffix, path(prefix)] for given rpm file or package name ''' + suffix = "" + path = os.path.dirname(rpmname) + if(path): + path += 
"/" + + filename = False + rpmname = os.path.basename(rpmname) + if(rpmname.endswith(".rpm")): + suffix = ".rpm" + rpmname = rpmname[:-4] + filename = True + + if(rpmname.endswith(".src")): + suffix = ".src" + suffix + rpmname = rpmname[:-4] + name = rpmname.split("-")[:-2] + version = rpmname.split("-")[-2:] + else: + re_version = re.compile("(\.)?((alpha)|(cvs)|(svn)|(r))?\d+((mdv)|(mdk)|(mnb))") + if(filename): + parts = rpmname.split('.') + suffix = "." + parts[-1] + suffix + rpmname = '.'.join(parts[:-1]) # remove the architecture part + sections = rpmname.split("-") + if(re_version.search(sections[-1]) == None): + name = sections[:-3] + version = sections[-3:-1] + suffix = "-" + sections[-1] + suffix + else: + name = sections[:-2] + version = sections[-2:] + return ["-".join(name), "-".join(version), suffix, path] + + +#url = 'ftp://ftp.sunet.se/pub/Linux/distributions/mandrakelinux/official/2011/x86_64/media/contrib/release/lib64oil0.3_0-0.3.17-2mdv2011.0.x86_64.rpm' +#url = 'ftp://ftp.sunet.se/pub/Linux/distributions/mandrakelinux/official/2011/x86_64/media/contrib/release/liboil-tools-0.3.17-2mdv2011.0.x86_64.rpm' +#res = get_package_fields(url) +#print res +#exit() + + +def filter_versions(rpm_list): + ''' When different versions of one package given, remove older version and returns only the newest one for every package. ''' + if(command_line_arguments.all_versions): + return rpm_list + + rpms = {} + vprint("Filtering input: " + str(rpm_list)) + for srpm in rpm_list: # collect data + res = get_package_fields(srpm) + if(res[0] not in rpms): + rpms[res[0]] = [] + rpms[res[0]].append(res[1:4]) + + for pkg in rpms.keys()[:]: # filter + L = rpms[pkg] + while(len(L)> 1): + if(rpm.evrCompare(L[0][0], L[1][0]) == 1): + del L[1] + else: + del L[0] + + output = [] + for pkg in rpms: # assembling package names + output.append ( rpms[pkg][0][2] + pkg + "-" + rpms[pkg][0][0] + rpms[pkg][0][1]) + vprint ("Filtering output: " + str(output)) + return output + +def download_srpm(package, srpms): + '''download the srpm with a given name. Try to find it in the repository. Returns a list of downloaded file names''' + vprint("downloading srpm(s) for package " + package) + + srpm_urls = list_srpm_urls() + downloaded = [] + for srpm in filter_versions(srpms[package]): + count = 0 + for srpm_url in srpm_urls: + url = srpm_url + "/" + srpm + if(command_line_arguments.urls): # a correct url have to be printed! 
+ if(not url_exists(url)): + continue + qprint(url) + if(not command_line_arguments.resolve): + count += 1 + break + + if(try_download(url) == None): + count += 1 + downloaded.append(os.path.join(command_line_arguments.dest_dir, os.path.basename(url))) + break + + if(count == 0): + eprint(_("Can not download SRPM for package") + srpm) + if(not command_line_arguments.ignore_errors): + exit(2) + + return downloaded + + +def download_rpm(pkgs_to_download): + global resolve_source, downloaded_debug_pkgs + vprint("downloading packages " + ", ".join (pkgs_to_download)) + cmd_bin = cmd[:] + ['--sources'] + pkgs_to_download + urls = get_command_output(cmd_bin)[0].strip().split("\n") + + urls = filter_versions(urls) + + if(command_line_arguments.binary or resolve_source): + for url in urls: + if(command_line_arguments.urls): + qprint(url) + continue + + res = try_download(url) + if(res != None): + eprint(_("Can not download RPM") + "%s\n(%s)" % (url, res) ) + if(not command_line_arguments.ignore_errors): + exit(3) + if(command_line_arguments.debug_info): + pkgs_to_download_debug = [p+"-debug" for p in pkgs_to_download[:]] + qprint(_("Resolving debug-info packages...")) + cmd_debug = ['urpmq', '--media', 'debug', '--sources'] + pkgs_to_download_debug + res = get_command_output(cmd_debug, fatal_fails=False) + + # urpmq output. RU: Нет пакета с названием + text = _("No package named ") + vprint("Removing missed debug packages from query...") + removed = [] + if(res[2] != 0): # return code is not 0 + + for line in res[1].split("\n"): + if line.startswith(text): + pkg = line[len(text):] + pkgs_to_download_debug.remove(pkg) + removed.append(pkg) + + vprint("Removed %d packages" % len(removed)) + vprint(removed) + + cmd_debug = ['urpmq', '--media', 'debug', '--sources'] + pkgs_to_download_debug + urls = get_command_output(cmd_debug)[0].strip().split("\n") + urls = filter_versions(urls) + for url in urls: + if(command_line_arguments.urls): + qprint(url) + continue + res = try_download(url) + if(res != None): + eprint(_("Can not download RPM") + "%s:\n(%s)\n" % (os.path.basename(url), res) + + _("Maybe you need to update urpmi database (urpmi.update -a)?") ) + if(not command_line_arguments.ignore_errors): + exit(2) + else: + path = os.path.join(command_line_arguments.dest_dir, os.path.basename(url)) + downloaded_debug_pkgs.append(path) + + if(command_line_arguments.debug_info_install): + for pkg in downloaded_debug_pkgs: + qprint(_('Installing ') + os.path.basename(str(pkg)) + "...") + command = ['rpm', '-i', pkg] + res = get_command_output(command,fatal_fails=False) + if(res[2] != 0): # rpm return code is not 0 + qprint(_('Error while calling command') + ' "' + ' '.join(command) + '":\n' + res[1].strip()) + + +def filter_debug_rpm_urls(input_urls): + command = ['urpmq', '--media', 'debug', '--sources', pkg_name + "-debug"] + res = get_command_output(command, fatal_fails=False) + if(res[2] != 0): # return code is not 0 + qprint(_("Debug package for '%s' not found") % pkg_name) + return [] + names = res[0].strip().split("\n") + if(command_line_arguments.all_versions): + return names + + get_installed_packages() + #print names + #print installed_packages[pkg_name] + urls = [] + for n in names: + res = get_package_fields(os.path.basename(n)) + version = "-".join(res[1].split("-")[0:2] ) + if(pkg_name not in installed_packages): + break + for inst_pkg in installed_packages[pkg_name]: + if(version == inst_pkg[0] + "-" + inst_pkg[1]): + urls.append(n) + break + return urls + + +def Main(): + global cmd, 
resolve_source + resolve_source = False # variable that makes download_rpm to download resolved build-deps + cmd = ['urpmq'] + if(command_line_arguments.include_media != None): + media = '' + for i in command_line_arguments.include_media: + media = ",".join([media]+i) + cmd = cmd + ['--media', media[1:]] + + if(command_line_arguments.exclude_media != None): + media = '' + for i in command_line_arguments.exclude_media: + media = ",".join([media]+i) + cmd = cmd + ['--excludemedia', media[1:]] + + missing_files = [] + for pkg in command_line_arguments.packages[:]: + if(pkg.endswith(".rpm")): + if(not os.path.exists(pkg) or not os.path.isfile(pkg)): + missing_files.append(pkg) + continue + name = get_rpm_tag_from_file("name", pkg) + command_line_arguments.packages.remove(pkg) + command_line_arguments.packages.append(name) + + if(missing_files): + eprint(_("Parameters that end with '.rpm' seem to be local files, but the folowing files do not exist: ") + ", ".join(missing_files)) + if(not command_line_arguments.ignore_errors): + exit(4) + + if(command_line_arguments.source): + download(command_line_arguments.packages, True) + + if(command_line_arguments.binary or (not command_line_arguments.source and command_line_arguments.debug_info)): + download(command_line_arguments.packages, False) + + +def get_rpm_tag_from_file(tag, file): + rpm_ts = rpm.TransactionSet() + fd = os.open(file, os.O_RDONLY) + rpm_hdr = rpm_ts.hdrFromFdno(fd) + os.close(fd) + return rpm_hdr.sprintf("%{" + tag + "}").strip() + + +def download(packages, src): + global resolve_source + pkgs_to_download = packages + + if(src): + if(command_line_arguments.urls): + qprint(_("Searching src.rpm file(s) in repository...")) + else: + qprint(_("Downloading src.rpm file(s)...")) + srpms = get_srpm_names(packages) + #for pkg in packages[:]: + #if (pkg not in srpms: + #eprint("Package " + pkg + " not fond!") + #if(not command_line_arguments.ignore_errors): + # exit(1) + #else: + # eprint ("Package is dequeued.") + #packages.remove(pkg) + + srpms_list= [] + for package in packages: + srpms_list = srpms_list + download_srpm(package, srpms) + + if(len(srpms_list) == 0): + return + + if(command_line_arguments.resolve): + resolve_source = True + pkgs = [] + lines = get_command_output(cmd + ['--requires-recursive'] + srpms_list)[0].strip().split("\n") + pkgs = parse_packages(lines, []) + download(pkgs, False) + resolve_source = False + + else: + pkgs_to_download = packages + if(command_line_arguments.resolve): + if(resolve_source): + qprint(_("Resolving build dependencies...")) + else: + qprint(_("Resolving dependencies...")) + pkgs_to_download = resolve_packages(packages) + qprint (_("Resolved %d packages") % len(pkgs_to_download)) + if(len(pkgs_to_download) == 0): + qprint(_("Nothing to download")) + return + download_rpm(pkgs_to_download) + + +downloaded_debug_pkgs = [] +installed_loaded=False +VERSION = "urpm-downloader 2.2.4" +if __name__ == '__main__': + parse_command_line() + Main() diff --git a/urpm-package-cleanup.py b/urpm-package-cleanup.py new file mode 100755 index 0000000..7a1fc5d --- /dev/null +++ b/urpm-package-cleanup.py @@ -0,0 +1,556 @@ +#!/usr/bin/python +''' +" Package cleanup utility for distributions using urpm +" Based on package-cleanup from yum-utils +" +" Copyright (C) 2011 ROSA Laboratory. 
+" Written by Denis Silakov +" +" This program is free software: you can redistribute it and/or modify +" it under the terms of the GNU General Public License or the GNU Lesser +" General Public License as published by the Free Software Foundation, +" either version 2 of the Licenses, or (at your option) any later version. +" +" This program is distributed in the hope that it will be useful, +" but WITHOUT ANY WARRANTY; without even the implied warranty of +" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +" GNU General Public License for more details. +" +" You should have received a copy of the GNU General Public License +" and the GNU Lesser General Public License along with this program. +" If not, see . +''' + +import sys + +import logging +import os +import re +import subprocess +import string +import urpmmisc +import types + +from rpm5utils import miscutils, arch, transaction +import argparse +import rpm + +import gettext +gettext.install('urpm-tools') + +def exactlyOne(l): + return len(filter(None, l)) == 1 + + +class PackageCleanup(): + NAME = 'urpm-package-cleanup' + VERSION = '0.1' + USAGE = """ + urpm-package-cleanup: helps find problems in the rpmdb of system and correct them + + usage: urpm-package-cleanup --problems or --leaves or --orphans or --oldkernels + """ + def __init__(self): + self.addCmdOptions() + self.main() + + def addCmdOptions(self): + self.ArgParser = argparse.ArgumentParser(description=_('Find problems in the rpmdb of system and correct them')) + self.ArgParser.add_argument("--qf", "--queryformat", dest="qf", + action="store", + default='%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}', + help=_("Query format to use for output.")) + self.ArgParser.add_argument("--auto", default=False, + dest="auto",action="store_true", + help=_('Use non-interactive mode')) + self.ArgParser.add_argument("--version", action='version', version=self.VERSION) + + probgrp = self.ArgParser.add_argument_group(_('Orphans Options')) + probgrp.add_argument("--orphans", default=False, + dest="orphans",action="store_true", + help=_('List installed packages which are not available from'\ + ' currently configured repositories')) + probgrp.add_argument("--update", default=False, + dest="update",action="store_true", + help=_('Use only update media. This means that urpmq will search'\ + ' and resolve dependencies only in media marked as containing updates'\ + ' (e.g. which have been created with "urpmi.addmedia --update").')) + + probgrp.add_argument("--media", metavar='media', nargs='+', + help=_('Select specific media to be used, instead of defaulting to all available '\ + 'media (or all update media if --update is used). 
No rpm will be found in ' + 'other media.')) + + probgrp.add_argument("--excludemedia", metavar='media', nargs='+', + help=_('Do not use the specified media.')) + + probgrp = self.ArgParser.add_argument_group(_('Dependency Problems Options')) + probgrp.add_argument("--problems", default=False, + dest="problems", action="store_true", + help=_('List dependency problems in the local RPM database')) + probgrp.add_argument("--suggests", default=False, + dest="suggests", action="store_true", + help=_('List missing suggestions of installed packages')) + + + dupegrp = self.ArgParser.add_argument_group(_('Duplicate Package Options')) + dupegrp.add_argument("--dupes", default=False, + dest="dupes", action="store_true", + help=_('Scan for duplicates in your rpmdb')) + dupegrp.add_argument("--cleandupes", default=False, + dest="cleandupes", action="store_true", + help=_('Scan for duplicates in your rpmdb and remove older ')) + dupegrp.add_argument("--noscripts", default=False, + dest="noscripts", action="store_true", + help=_("disable rpm scriptlets from running when cleaning duplicates")) + + leafgrp = self.ArgParser.add_argument_group(_('Leaf Node Options')) + leafgrp.add_argument("--leaves", default=False, dest="leaves", + action="store_true", + help=_('List leaf nodes in the local RPM database')) + leafgrp.add_argument("--all", default=False, dest="all_nodes", + action="store_true", + help=_('list all packages leaf nodes that do not match'\ + ' leaf-regex')) + leafgrp.add_argument("--leaf-regex", + default="(^(compat-)?lib(?!reoffice).+|.*libs?[\d-]*|.*-data$)", + help=_('A package name that matches this regular expression' \ + ' (case insensitively) is a leaf')) + leafgrp.add_argument("--exclude-devel", default=False, + action="store_true", + help=_('do not list development packages as leaf nodes')) + leafgrp.add_argument("--exclude-bin", default=False, + action="store_true", + help=_('do not list packages with files in a bin dirs as '\ + 'leaf nodes')) + + kernelgrp = self.ArgParser.add_argument_group(_('Old Kernel Options')) + kernelgrp.add_argument("--oldkernels", default=False, + dest="kernels",action="store_true", + help=_("Remove old kernel and kernel-devel packages")) + kernelgrp.add_argument("--count",default=2,dest="kernelcount", + action="store", + help=_('Number of kernel packages to keep on the '\ + 'system (default 2)')) + kernelgrp.add_argument("--keepdevel", default=False, dest="keepdevel", + action="store_true", + help=_('Do not remove kernel-devel packages when ' + 'removing kernels')) + + def _removePkg(self, pkg): + """remove given package""" + # No smart behavior yet, simply call urpme for the package + pkgName = pkg['name'] + "-" + pkg['version'] + if pkg['release']: + pkgName += '-' + pkg['release'] + eraseOpts = string.join(self.tsflags, " ") + if eraseOpts: + subprocess.call(['urpme', pkgName, eraseOpts]) + else: + subprocess.call(['urpme', pkgName]) + + + @staticmethod + def _genDeptup(name, flags, version): + """ Given random stuff, generate a usable dep tuple. """ + + if flags == 0: + flags = None + + if type(version) is types.StringType: + (r_e, r_v, r_r) = miscutils.stringToVersion(version) + # would this ever be a ListType? 
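+        # e.g. stringToVersion('1:2.0-3') should give the (epoch, version,
+        # release) triple ('1', '2.0', '3'); a tuple/list argument is assumed
+        # to already be such a triple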
+ elif type(version) in (types.TupleType, types.ListType): + (r_e, r_v, r_r) = version + else: + # FIXME: This isn't always type(version) is types.NoneType: + # ...not sure what it is though, come back to this + r_e = r_v = r_r = None + + deptup = (name, urpmmisc.share_data(flags), + (urpmmisc.share_data(r_e), urpmmisc.share_data(r_v), + urpmmisc.share_data(r_r))) + return urpmmisc.share_data(deptup) + + def _getProvides(self, req, flags, ver): + """searches the rpmdb for what provides the arguments + returns a list of pkg objects of providing packages, possibly empty""" + + ts = rpm.TransactionSet() + mi = ts.dbMatch('provides', req) + + deptup = self._genDeptup(req, flags, ver) + if deptup in self._get_pro_cache: + return self._get_pro_cache[deptup] + r_v = deptup[2][1] + + result = { } + + for po in mi: + prov_idx = 0 + for prov in po['provides']: + if prov != req: + prov_idx += 1 + continue + + prov_ver = po['provideversion'][prov_idx] + prov_flags = po['provideflags'][prov_idx] + prov_idx += 1 + + if req[0] == '/' and r_v is None: + result[po] = [(req, None, (None, None, None))] + continue + + if deptup[2][1] is None and deptup[2][2] is None and deptup[2][0] is None: + result[po] = [(req, None, (None, None, None))] + else: + provtup = (req, prov_flags, (po['epoch'], po['version'], po['release'])) + matched = miscutils.rangeCompare(deptup, provtup) + if not matched: + print "NOT MATCHED " + str(deptup) + " VS " + str(provtup) + + if matched: + result[po] = [(req, None, (None, None, None))] + + self._get_pro_cache[deptup] = result + + # Check if we have dependency on file not listed + # directly in PROVIDES + if not result and req[0] == '/' and r_v is None: + mi = ts.dbMatch('filepaths', req) + for po in mi: + result[po] = [(req, None, (None, None, None))] + + return result + + def _find_missing_deps(self, pkgs): + """find any missing dependencies for any installed package in pkgs""" + + providers = {} # To speed depsolving, don't recheck deps that have + # already been checked + problems = [] + missing_suggests = [] + + for po in pkgs: + req_idx = 0; + for req in po['requires']: + ver = po['requireversion'][req_idx] + flags = po['requireflags'][req_idx] + req_idx += 1 + + if req.startswith('rpmlib'): continue # ignore rpmlib deps + if (req,flags,ver) not in providers: + resolve_sack = self._getProvides(req,flags,ver) + else: + resolve_sack = providers[(req,flags,ver)] + + if len(resolve_sack) < 1: + #~ flags = yum.depsolve.flags.get(flags, flags) + missing = miscutils.formatRequire(req,ver,flags) + # RPMSENSE_MISSINGOK == (1 << 19) + if req in po['suggests'] or flags & (1 << 19): + missing_suggests.append((po, "suggests %s" % missing)) + else: + problems.append((po, "requires %s" % missing)) + + else: + # Store the resolve_sack so that we can re-use it if another + # package has the same requirement + providers[(req,flags,ver)] = resolve_sack + + return [problems, missing_suggests] + + def _find_installed_duplicates(self, ignore_kernel=True): + """find installed duplicate packages returns a dict of + pkgname = [[dupe1, dupe2], [dupe3, dupe4]] """ + + multipkgs = {} + singlepkgs = {} + results = {} + + ts = rpm.TransactionSet() + mi = ts.dbMatch() + + for pkg in mi: + # just skip kernels and everyone is happier + if ignore_kernel: + if 'kernel' in pkg['provides_names']: + continue + if pkg['name'].startswith('kernel'): + continue + + # public keys from different repos may have different versions + if pkg['name'].startswith('gpg-pubkey'): + continue + + name = pkg['name'] + if name in 
multipkgs or name in singlepkgs: + continue + + pkgs = ts.dbMatch( 'name', name ) + + for po in pkgs: + if name not in multipkgs: + multipkgs[name] = [] + if name not in singlepkgs: + singlepkgs[name] = [] + + if arch.isMultiLibArch(arch=po['arch']): + multipkgs[name].append(po) + elif po['arch'] == 'noarch': + multipkgs[name].append(po) + singlepkgs[name].append(po) + elif not arch.isMultiLibArch(arch=po['arch']): + singlepkgs[name].append(po) + else: + print _("Warning: neither single nor multi lib arch: %s ") % po['arch'] + + for (name, pkglist) in multipkgs.items() + singlepkgs.items(): + if len(pkglist) <= 1: + continue + + if name not in results: + results[name] = [] + if pkglist not in results[name]: + results[name].append(pkglist) + + return results + + def _remove_old_dupes(self): + """add older duplicate pkgs to be removed in the transaction""" + dupedict = self._find_installed_duplicates() + + removedupes = [] + for (name,dupelists) in dupedict.items(): + for dupelist in dupelists: + dupelist.sort() + for lowpo in dupelist[0:-1]: + removedupes.append(lowpo) + + # No smart behavior yet, simply call urpme for every package + for po in removedupes: + self._removePkg(po) + + def _should_show_leaf(self, po, leaf_regex, exclude_devel, exclude_bin): + """ + Determine if the given pkg should be displayed as a leaf or not. + + Return True if the pkg should be shown, False if not. + """ + + if po['name'] == 'gpg-pubkey': + return False + name = po['name'] + if exclude_devel and name.endswith('devel'): + return False + if exclude_bin: + for file_name in po['filepaths']: + if file_name.find('bin') != -1: + return False + if leaf_regex.match(name): + return True + return False + + def _get_kernels(self): + """return a list of all installed kernels, sorted newest to oldest""" + + ts = rpm.TransactionSet() + mi = ts.dbMatch('provides','kernel') + kernlist = [] + + for h in mi: + kernlist.append(h) + + kernlist.sort() + kernlist.reverse() + return kernlist + + def _get_old_kernel_devel(self, kernels, removelist): + """ List all kernel devel packages that either belong to kernel versions that + are no longer installed or to kernel version that are in the removelist""" + + devellist = [] + ts = rpm.TransactionSet() + mi = ts.dbMatch('provides','kernel-devel') + + for po in mi: + # For all kernel-devel packages see if there is a matching kernel + # in kernels but not in removelist + keep = False + for kernel in kernels: + if kernel in removelist: + continue + (kname,karch,kepoch,kver,krel) = (kernel['name'],kernel['arch'],kernel['epoch'],kernel['version'],kernel['release']) + (dname,darch,depoch,dver,drel) = (po['name'],po['arch'],po['epoch'],po['version'],po['release']) + if (karch,kepoch,kver,krel) == (darch,depoch,dver,drel): + keep = True + if not keep: + devellist.append(po) + return devellist + + def _remove_old_kernels(self, count, keepdevel): + """Remove old kernels, keep at most count kernels (and always keep the running + kernel""" + + count = int(count) + kernels = self._get_kernels() + runningkernel = os.uname()[2] + # Vanilla kernels dont have a release, only a version + if '-' in runningkernel: + splt = runningkernel.split('-') + if len(splt) == 2: + (kver,krel) = splt + else: # Handle cases where a custom build kernel has an extra '-' in the release + kver=splt[1] + krel="-".join(splt[1:]) + if krel.split('.')[-1] == os.uname()[-1]: + krel = ".".join(krel.split('.')[:-1]) + else: + kver = runningkernel + krel = "" + remove = kernels[count:] + + toremove = [] + # Remove running 
kernel from remove list + for kernel in remove: + if kernel['version'] == kver and krel.startswith(kernel['release']): + print _("Not removing kernel %(kver)s-%(krel)s because it is the running kernel") % {'kver': kver, 'krel': krel} + else: + toremove.append(kernel) + + + # Now extend the list with all kernel-devel pacakges that either + # have no matching kernel installed or belong to a kernel that is to + # be removed + if not keepdevel: + toremove.extend(self._get_old_kernel_devel(kernels, toremove)) + + for po in toremove: + self._removePkg(po) + + + def main(self): + opts = self.ArgParser.parse_args(sys.argv[1:]) + if not exactlyOne([opts.problems, opts.dupes, opts.leaves, opts.kernels, + opts.orphans, opts.cleandupes]): + print self.ArgParser.format_help() + sys.exit(1) + + self.tsflags = [] + + if opts.problems: + ts = rpm.TransactionSet() + mi = ts.dbMatch() + self._get_pro_cache = {} + (issues, missing_suggests) = self._find_missing_deps(mi) + for (pkg, prob) in issues: + print _('Package %(qf)s %(prob)s') % {'qf': pkg.sprintf(opts.qf), 'prob': prob} + + if( opts.suggests ): + print _("Missing suggests:") + for (pkg, prob) in missing_suggests: + print 'Package %s %s' % (pkg.sprintf(opts.qf), prob) + + if issues: + sys.exit(2) + else: + if (not opts.suggests) or (len(missing_suggests) == 0): + print _('No Problems Found') + sys.exit(0) + else: + sys.exit(3) + + if opts.dupes: + dupes = self._find_installed_duplicates() + for name, pkglists in dupes.items(): + for pkglist in pkglists: + for pkg in pkglist: + print '%s' % pkg.sprintf(opts.qf) + sys.exit(0) + + if opts.kernels: + if os.geteuid() != 0: + print _("Error: Cannot remove kernels as a user, must be root") + sys.exit(1) + if int(opts.kernelcount) < 1: + print _("Error: should keep at least 1 kernel!") + sys.exit(100) + if opts.auto: + self.tsflags.append('--auto') + + self._remove_old_kernels(opts.kernelcount, opts.keepdevel) + sys.exit(0) + #~ self.run_with_package_names.add('yum-utils') + #~ if hasattr(self, 'doUtilBuildTransaction'): + #~ errc = self.doUtilBuildTransaction() + #~ if errc: + #~ sys.exit(errc) + #~ else: + #~ try: + #~ self.buildTransaction() + #~ except yum.Errors.YumBaseError, e: + #~ self.logger.critical("Error building transaction: %s" % e) + #~ sys.exit(1) +#~ + #~ if len(self.tsInfo) < 1: + #~ print 'No old kernels to remove' + #~ sys.exit(0) +#~ + #~ sys.exit(self.doUtilTransaction()) + + + if opts.leaves: + self._ts = transaction.TransactionWrapper() + leaves = self._ts.returnLeafNodes() + leaf_reg = re.compile(opts.leaf_regex, re.IGNORECASE) + for po in sorted(leaves): + if opts.all_nodes or \ + self._should_show_leaf(po, leaf_reg, opts.exclude_devel, + opts.exclude_bin): + print po.sprintf(opts.qf) + + sys.exit(0) + + if opts.orphans: + """ Just a wrapper that invokes urpmq """ + aux_opts = "" + if opts.excludemedia: + aux_opts = " --excludemedia " + " ".join(opts.excludemedia) + if opts.media: + aux_opts += " --media " + " ".join(opts.media) + if opts.update: + aux_opts += " --update " + + subprocess.call(["urpmq", "--not-available", aux_opts]) + sys.exit(0) + + if opts.cleandupes: + if os.geteuid() != 0: + print _("Error: Cannot remove packages as a user, must be root") + sys.exit(1) + if opts.noscripts: + self.tsflags.append('--noscripts') + if opts.auto: + self.tsflags.append('--auto') + + self._remove_old_dupes() + #~ self.run_with_package_names.add('yum-utils') + + #~ if hasattr(self, 'doUtilBuildTransaction'): + #~ errc = self.doUtilBuildTransaction() + #~ if errc: + #~ sys.exit(errc) + #~ 
else: + #~ try: + #~ self.buildTransaction() + #~ except yum.Errors.YumBaseError, e: + #~ self.logger.critical("Error building transaction: %s" % e) + #~ sys.exit(1) + + #~ if len(self.tsInfo) < 1: + #~ print 'No duplicates to remove' + #~ sys.exit(0) + +if __name__ == '__main__': +# setup_locale() + util = PackageCleanup() diff --git a/urpm-repoclosure.pl b/urpm-repoclosure.pl new file mode 100755 index 0000000..1689c6d --- /dev/null +++ b/urpm-repoclosure.pl @@ -0,0 +1,1167 @@ +#!/usr/bin/perl +######################################################## +# URPM Repos Closure Checker 1.3.1 for Linux +# A tool for checking closure of a set of RPM packages +# +# Copyright (C) 2012 ROSA Laboratory +# Written by Andrey Ponomarenko +# +# PLATFORMS +# ========= +# Linux (ROSA, Mandriva) +# +# REQUIREMENTS +# ============ +# - urpmi +# - Perl 5 (>=5.8) +# - Wget +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +######################################################## +use Getopt::Long; +Getopt::Long::Configure ("posix_default", "no_ignore_case"); +use Cwd qw(abs_path cwd); +use File::Path qw(mkpath rmtree); +use File::Temp qw(tempdir); +use File::Copy qw(copy move); +use Data::Dumper; +use Locale::gettext; +use strict; + +my $TOOL_VERSION = "1.3.1"; +my $CmdName = get_filename($0); + +my ($Help, $ShowVersion, $RPMlist, $RPMdir, $StaticMode, +$DynamicMode, $CheckRelease, $CheckSignature, $SelectRepos, +$NoClean, $Root, $HDlist, $FileDeps, $ResDir, $AddRPMs); + +textdomain("urpm-tools"); + +sub gettext_(@) +{ + my ($Str, @Params) = @_; + if(not $Str) { + return ""; + } + $Str = gettext($Str); + foreach my $N (1 .. $#Params+1) + { + my $P = $Params[$N-1]; + $Str=~s/\[_$N\]/$P/g; + } + return $Str; +} + +my $ShortUsage = gettext_("URPM Repos Closure Checker [_1] for Mandriva Linux +A tool for checking closure of a set of RPM packages +Copyright (C) 2012 ROSA Laboratory +License: GNU GPL + +Usage: [_2] [options] +Example: [_2] --hdlist=hdlist.txt + +More info: [_2] --help\n", $TOOL_VERSION, $CmdName); + +if($#ARGV==-1) { + print $ShortUsage."\n"; + exit(0); +} + +GetOptions("h|help!" => \$Help, + "v|version!" => \$ShowVersion, + "l|list=s" => \$RPMlist, + "d|dir=s" => \$RPMdir, + "hdlist=s" => \$HDlist, + "add=s" => \$AddRPMs, + "file-deps=s" => \$FileDeps, + "s|static!" => \$StaticMode, + "dynamic!" => \$DynamicMode, + "r|check-release!" => \$CheckRelease, + "sign|check-signature!" => \$CheckSignature, + "media=s" => \$SelectRepos, + "noclean!" => \$NoClean, + "root=s" => \$Root, + "o|res=s" => \$ResDir +) or ERR_MESSAGE(); + +my %EXIT_CODES = ( + "SUCCESS" => 0, + "ERROR" => 1, + "FAILED" => 2 +); + +my $HelpMessage = gettext_(" +NAME: + URPM Repos Closure Checker 1.0 for Mandriva Linux + A tool for checking closure of a set of RPM packages + +USAGE: + [_1] --hdlist=hdlist.txt + [_1] --hdlist=http://mirror.yandex.ru/mandriva/.../synthesis.hdlist.cz + [_1] --dir=rpms/ --static --file-deps=file-deps.txt + [_1] --list=list.txt --dynamic + +OPTIONS: + -h|-help + Print this help. 
+
+  -v|-version
+      Print version information.
+
+  -hdlist
+      Path or URL of HDlist (synthesis) to check.
+
+  -d|-dir
+      The directory with RPM packages to check.
+
+  -l|-list
+      The list of packages to check.
+
+  -add|-update
+      The directory with RPM packages that should
+      be added to the repository or updated.
+
+  -file-deps
+      Read file-deps to ignore some unresolved
+      dependencies.
+
+  -s|-static
+      Check statically if all required dependencies are
+      satisfied by provided dependencies in the set of
+      RPM packages.
+
+  -dynamic
+      Install a set of RPM packages to the local chroot
+      and check if extra packages were installed.
+
+  -r|-check-release
+      Check installation media (DVD).
+
+  -sign|-check-signature
+      Validate package signatures.
+
+  -noclean
+      Do not clean urpmi cache.
+
+  -root
+      Where to install packages.
+      Default:
+          /tmp/...
+
+EXIT CODES:
+    0 - Success. The tool has run without any errors
+    non-zero - Failed or the tool has run with errors. In particular:
+        1 - Failed to run the tool
+        2 - Discovered dependency problems
+
+\n", $CmdName);
+
+sub HELP_MESSAGE() {
+    print $HelpMessage;
+}
+
+sub ERR_MESSAGE()
+{
+    print $ShortUsage;
+    exit(1);
+}
+
+my %Cache;
+my $RPM_CACHE = "/var/cache/urpmi/rpms";
+my $TMP_DIR = tempdir(CLEANUP=>1);
+my %InstalledPackage;
+my %RequiredBy;
+my $TEST_MEDIA = "test_media";
+my %Packages;
+my %BrokenSignature;
+my %InstallFailed;
+my $RESULTS_DIR = "repoclosure_reports";
+
+sub appendFile($$)
+{
+    my ($Path, $Content) = @_;
+    return if(not $Path);
+    if(my $Dir = get_dirname($Path)) {
+        mkpath($Dir);
+    }
+    open(FILE, ">>".$Path) || die gettext_("can't open file \'[_1]\': [_2]\n", $Path, $!);
+    print FILE $Content;
+    close(FILE);
+}
+
+sub writeFile($$)
+{
+    my ($Path, $Content) = @_;
+    return if(not $Path);
+    if(my $Dir = get_dirname($Path)) {
+        mkpath($Dir);
+    }
+    open (FILE, ">".$Path) || die gettext_("can't open file \'[_1]\': [_2]\n", $Path, $!);
+    print FILE $Content;
+    close(FILE);
+}
+
+sub readFile($)
+{
+    my $Path = $_[0];
+    return "" if(not $Path or not -f $Path);
+    open (FILE, $Path);
+    local $/ = undef;
+    my $Content = <FILE>;
+    close(FILE);
+    return $Content;
+}
+
+sub get_filename($)
+{ # much faster than basename() from File::Basename module
+    if($_[0]=~/([^\/\\]+)[\/\\]*\Z/) {
+        return $1;
+    }
+    return "";
+}
+
+sub get_dirname($)
+{ # much faster than dirname() from File::Basename module
+    if($_[0]=~/\A(.*?)[\/\\]+[^\/\\]*[\/\\]*\Z/) {
+        return $1;
+    }
+    return "";
+}
+
+sub searchRPMs($)
+{
+    my $Path = $_[0];
+    if(not $Path or not -d $Path) {
+        return ();
+    }
+    my @RPMs = split("\n", `find $Path -type f -name "*.rpm"`); # -maxdepth 1
+    return sort {lc($a) cmp lc($b)} @RPMs;
+}
+
+sub addMedia($)
+{
+    my $Dir = $_[0];
+    if(not $Dir or not -d $Dir) {
+        return;
+    }
+    my %Media = map {$_=>1} split(/\n+/, `urpmq --list-media`);
+    if($Media{$TEST_MEDIA}) {
+        removeMedia();
+    }
+    $Dir = abs_path($Dir);
+    system("/usr/sbin/urpmi.addmedia $TEST_MEDIA $Dir");
+    system("/usr/sbin/urpmi.update $TEST_MEDIA");
+}
+
+sub removeMedia() {
+    system("/usr/sbin/urpmi.removemedia $TEST_MEDIA");
+}
+
+sub installPackage($)
+{
+    my $Package = $_[0];
+    my $Cmd = "/usr/sbin/urpmi";
+    if($CheckRelease)
+    { # from CD or DVD
+        $Cmd .= " --media=$TEST_MEDIA";
+    }
+    elsif($SelectRepos)
+    {
+        if(-d $SelectRepos) {
+            $Cmd .= " --media=$TEST_MEDIA";
+        }
+        else {
+            $Cmd .= " --media=$SelectRepos";
+        }
+    }
+    # create the root where to install packages
+    if(not -d $TMP_DIR."/root") {
+        mkpath($TMP_DIR."/root");
+    }
+    if(not $CheckRelease) {
+        $Cmd .= " --no-install";
+    }
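+    # install either into the caller-supplied --root or into a throw-away
+    # root under the temporary directory, so the host system stays untouched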
if($Root) { + $Cmd .= " --root=\"$Root\""; + } + else { + $Cmd .= " --root=\"$TMP_DIR/root\""; + } + $Cmd .= " --noclean --auto --force"; + $Cmd .= " $Package"; + print "Running $Cmd\n"; + my $LogPath = $TMP_DIR."/ilog.txt"; + system($Cmd." >$LogPath 2>&1"); + my $Log = readFile($LogPath); + appendFile("$RESULTS_DIR/install-log.txt", $Log); + $Log=~s/The following packages have to be removed (.|\n)*\Z//g; + if($Log=~/ (unsatisfied|conflicts with|missing) ([\w\-\/]*)/i) + { + my ($Reason, $Dep) = ($1, $2); + $InstallFailed{getPName($Package)}=1; + print " FAILED: due to $Reason $Dep\n"; + } + if($CheckRelease) + { # installed + while($Log=~s/(installing\s+)([^\/\s]+\.rpm)(\s|\Z)/$1/) + { + my $RpmName = $2; + print " $RpmName\n"; + } + } + else + { # downloaded + while($Log=~s/(\/)([^\/\s]+\.rpm)(\s|\Z)/$1$3/) + { + my $RpmName = $2; + print " $RpmName\n"; + $RequiredBy{getPName($RPM_CACHE."/".$RpmName)}=getPName($Package); + } + } +} + +sub get_RPMname($) +{ + my $Path = $_[0]; + my $Name = get_filename($Path); + if($Cache{"get_RPMname"}{$Name}) { + return $Cache{"get_RPMname"}{$Name}; + } + if(not $Path or not -f $Path) { + return ""; + } + return ($Cache{"get_RPMname"}{$Name} = `rpm -qp --queryformat \%{name} \"$Path\"`); +} + +sub sepDep($) +{ + my $Dep = $_[0]; + if($Dep=~/\A(.+?)(\s+|\[)(=|==|<=|>=|<|>)\s+(.+?)(\]|\Z)/) + { + my ($N, $O, $V) = ($1, $3, $4); + # canonify version (1:3.2.5-5:2011.0) + return ($N, $O, $V); + } + else { + return ($Dep, "", ""); + } +} + +sub showDep($$$) +{ + my ($N, $O, $V) = @_; + if($O and $V) { + return $N." ".$O." ".$V; + } + else { + return $N + } +} + +sub sepVersion($) +{ + my $V = $_[0]; + if($V=~/\A(.+)(\-[^\-\:]+)(\:[^\:]+|)\Z/) + { # 3.2.5-5:2011.0 + return ($1, $2, $3); + } + return ($V, "", ""); +} + +sub simpleVersion($) +{ # x.y.z-r:n to x.y.z.r.n + my $V = $_[0]; + $V=~s/[\-:]/\./g; # -5:2011.0 + $V=~s/[a-z]+/\./ig; # 10-12mdk + $V=~s/\.\Z//g; + return $V; +} + +sub formatVersions(@) +{ # V1 - provided + # V2 - required + my ($V1, $V2) = @_; + my ($E1, $E2) = (); + if($V1=~s/\A([^\-\:]+)\://) { + $E1 = $1; + } + if($V2=~s/\A([^\-\:]+)\://) { + $E2 = $1; + } + my ($V1_M, $V1_R, $V1_RR) = sepVersion($V1); + my ($V2_M, $V2_R, $V2_RR) = sepVersion($V2); + if(not $V2_RR) { + $V1_RR = ""; + } + if(not $V2_R) { + $V1_R = ""; + } + $V1 = $V1_M.$V1_R.$V1_RR; + $V2 = $V2_M.$V2_R.$V2_RR; + if(defined $E1 and defined $E2) + { + $V1 = $E1.".".$V1; + $V2 = $E2.".".$V2; + } + return (simpleVersion($V1), simpleVersion($V2)); +} + +sub cmpVersions($$) +{ # compare two versions + # 3.2.5-5:2011.0 + # NOTE: perl 5.00503 and 5.12 + my ($V1, $V2) = formatVersions(@_); + return 0 if($V1 eq $V2); + my @V1Parts = split(/\./, $V1); + my @V2Parts = split(/\./, $V2); + for (my $i = 0; $i <= $#V1Parts && $i <= $#V2Parts; $i++) + { + my $N1 = $V1Parts[$i]; + my $N2 = $V2Parts[$i]; + if(defined $N1 and not defined $N2) { + return 1; + } + elsif(not defined $N1 and defined $N2) { + return -1; + } + if(my $R = cmpNums($N1, $N2)) { + return $R; + } + } + return -1 if($#V1Parts < $#V2Parts); + return 1 if($#V1Parts > $#V2Parts); + return 0; +} + +sub cmpNums($$) +{ + my ($N1, $N2) = @_; + # 00503 + # 12 + if($N1 eq $N2) { + return 0; + } + while($N1=~s/\A0([0]*[1-9]+)/$1/) { + $N2.="0"; + } + while($N2=~s/\A0([0]*[1-9]+)/$1/) { + $N1.="0"; + } + return int($N1)<=>int($N2); +} + +sub checkDeps($$$$) +{ + my ($N, $O, $V, $Provides) = @_; + if(not $O or not $V) + { # requires any version + return 1; + } + foreach my $OP (keys(%{$Provides})) + { + if(not $OP) + { # provides 
any version + return 1; + } + foreach my $VP (keys(%{$Provides->{$OP}})) + { + if($O eq "=" or $O eq "==") + { + if(cmpVersions($VP, $V)==0) + { # requires the same version + return 1; + } + } + elsif($O eq "<=") + { + if(cmpVersions($VP, $V)<=0) { + return 1; + } + } + elsif($O eq ">=") + { + if(cmpVersions($VP, $V)>=0) { + return 1; + } + } + elsif($O eq "<") + { + if(cmpVersions($VP, $V)<0) { + return 1; + } + } + elsif($O eq ">") + { + if(cmpVersions($VP, $V)>0) { + return 1; + } + } + } + } + return 0; +} + +sub checkSignature($) +{ + my $Path = $_[0]; + my $Info = `rpm --checksig $Path`; + if($Info!~/ OK(\s|\Z)/) { + $BrokenSignature{getPName($Path)}=1; + return 0; + } + return 1; +} + +sub checkRoot() +{ + if(not -w "/usr") { + print STDERR gettext_("ERROR: you should be root\n"); + exit(1); + } +} + +sub readRPMlist($$) +{ + my ($Path, $Type) = @_; + if(not -f $Path) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $Path); + exit(1); + } + my @RPMs = split(/\s+/, readFile($Path)); + if($#RPMs==-1) { + print STDERR gettext_("ERROR: the list of packages is empty\n"); + exit(1); + } + if($Type eq "RPMs") + { + foreach my $P (@RPMs) + { + if($P!~/\.rpm\Z/) + { + print STDERR gettext_("ERROR: file \'[_1]\' is not RPM package\n", $P); + exit(1); + } + elsif(not -f $P) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $P); + exit(1); + } + } + } + return @RPMs; +} + +sub checkRelease() +{ + checkRoot(); + if(not $RPMdir and not $RPMlist) + { + print STDERR gettext_("ERROR: --dir or --list option should be specified\n"); + exit(1); + } + clearCache(); + my @RPMs = (); + if($RPMlist) + { + @RPMs = readRPMlist($RPMlist, "RPMs"); + $RPMdir = get_dirname($RPMs[0]); + if(not $RPMdir) { + $RPMdir = "."; + } + } + else + { + if(not -d $RPMdir) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $RPMdir); + exit(1); + } + @RPMs = searchRPMs($RPMdir); + } + addMedia($RPMdir); + foreach my $Path (@RPMs) + { # add to cache + if(not -f $RPM_CACHE."/".get_filename($Path)) { + # copy($Path, $RPM_CACHE); + } + } + foreach my $Path (@RPMs) + { + installPackage($Path); + $Packages{get_filename($Path)} = 1; + } + removeMedia(); + checkResult(); +} + +sub dynamicCheck() +{ + checkRoot(); + if(not $RPMdir and not $RPMlist) + { + print STDERR gettext_("ERROR: --dir or --list option should be specified\n"); + exit(1); + } + clearCache(); + my @RPMs = (); + if($RPMdir) + { # --dir option + if(not -d $RPMdir) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $RPMdir); + exit(1); + } + @RPMs = searchRPMs($RPMdir); + foreach my $Path (@RPMs) + { # add to cache + copy($Path, $RPM_CACHE); + } + if(-d $SelectRepos) { + addMedia($SelectRepos); + } + foreach my $Path (@RPMs) + { + installPackage($Path); + $Packages{get_RPMname($Path)} = 1; + $Packages{get_filename($Path)} = 1; + } + if(-d $SelectRepos) { + removeMedia(); + } + } + elsif($RPMlist) + { + @RPMs = readRPMlist($RPMlist, "Names"); + if(-d $SelectRepos) { + addMedia($SelectRepos); + } + foreach my $Name (@RPMs) + { + installPackage($Name); + $Packages{$Name} = 1; + } + if(-d $SelectRepos) { + removeMedia(); + } + } + checkResult(); +} + +sub getPName($) +{ # package ID + my $Path = $_[0]; + if($RPMdir or not -f $Path) + { # input: RPMs + return get_filename($Path); + } + else + { # input: RPM names + return get_RPMname($Path); + } +} + +sub isInstalled($) +{ + my $Name = $_[0]; + if($InstallFailed{$Name}) { + return 0; + } + if(not $CheckRelease) { + if(not $InstalledPackage{$Name}) { + return 0; + } + } + 
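+    # neither recorded as a failed install nor (outside release mode) missing
+    # from the installed set, so the package counts as installed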
return 1; +} + +sub checkResult() +{ + my (%ExtraPackages, %BrokenPackages) = (); + foreach my $Path (searchRPMs($RPM_CACHE)) + { # extra + my $Name = getPName($Path); + $InstalledPackage{$Name} = 1; + if(not $Packages{$Name}) { + $ExtraPackages{$Name} = $Path; + } + } + foreach my $Name (keys(%Packages)) + { # broken + if(not isInstalled($Name)) { + $BrokenPackages{$Name}=1; + } + } + if(my @Names = sort {lc($a) cmp lc($b)} keys(%ExtraPackages)) + { + my $Report = gettext_("Extra Packages:\n\n"); + foreach my $Name (@Names) + { + $Report .= $Name; + if(my $Req = $RequiredBy{$Name}) { + $Report .= gettext_(" (required by: [_1])", $Req); + } + $Report .= "\n"; + } + print $Report; + writeFile("$RESULTS_DIR/extra-packages.txt", $Report); + } + if(my @Names = sort {lc($a) cmp lc($b)} keys(%BrokenPackages)) + { + my $Report = gettext_("Broken Packages:\n\n"); + foreach my $Name (@Names) { + $Report .= "$Name\n"; + } + print $Report; + writeFile("$RESULTS_DIR/broken-packages.txt", $Report); + } + print gettext_("Report has been generated to:"); + print "\n $RESULTS_DIR/extra-packages.txt\n $RESULTS_DIR/broken-packages.txt\n"; + if(keys(%ExtraPackages) or keys(%BrokenPackages)) + { + exit($EXIT_CODES{"FAILED"}); + } + else { + exit($EXIT_CODES{"SUCCESS"}); + } +} + +sub sigCheck() +{ + if(not $RPMdir and not $RPMlist) + { + print STDERR gettext_("ERROR: --dir or --list option should be specified\n"); + exit(1); + } + print gettext_("Checking RPMs ...\n"); + my @RPMs = (); + if($RPMdir) + { + if(not -d $RPMdir) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $RPMdir); + exit(1); + } + @RPMs = searchRPMs($RPMdir); + } + elsif($RPMlist) { + @RPMs = readRPMlist($RPMlist, "RPMs"); + } + foreach my $Path (@RPMs) + { + print gettext_("Checking [_1]\n", get_filename($Path)); + if(not checkSignature($Path)) { + print gettext_(" FAILED: invalid signature\n"); + } + } + if(my @Names = sort {lc($a) cmp lc($b)} keys(%BrokenSignature)) + { + my $Report = gettext_("Broken Signature:\n\n"); + foreach my $Name (@Names) { + $Report .= "$Name\n"; + } + print $Report; + writeFile("$RESULTS_DIR/report.txt", $Report); + } + print gettext_("Report has been generated to:"); + print "\n $RESULTS_DIR/report.txt\n"; + if(keys(%BrokenSignature)) { + exit($EXIT_CODES{"FAILED"}); + } + else { + exit($EXIT_CODES{"SUCCESS"}); + } +} + +sub readLineNum($$) +{ + my ($Path, $Num) = @_; + return "" if(not $Path or not -f $Path); + open (FILE, $Path); + foreach (1 ... 
$Num) {
+        <FILE>;
+    }
+    my $Line = <FILE>;
+    close(FILE);
+    return $Line;
+}
+
+sub cmd_find($$$$)
+{
+    my ($Path, $Type, $Name, $MaxDepth) = @_;
+    return () if(not $Path or not -e $Path);
+    my $Cmd = "find \"$Path\"";
+    if($MaxDepth) {
+        $Cmd .= " -maxdepth $MaxDepth";
+    }
+    if($Type) {
+        $Cmd .= " -type $Type";
+    }
+    if($Name) {
+        if($Name=~/\]/) {
+            $Cmd .= " -regex \"$Name\"";
+        }
+        else {
+            $Cmd .= " -name \"$Name\"";
+        }
+    }
+    return split(/\n/, `$Cmd`);
+}
+
+sub readDeps($$$)
+{
+    my ($Path, $Dep, $RPMdep) = @_;
+    my $Name = get_filename($Path);
+    foreach my $Type ("provides", "suggests", "requires")
+    {
+        foreach my $D (split("\n", `rpm -qp --$Type $Path`))
+        {
+            my ($N, $O, $V) = sepDep($D);
+            $Dep->{$Type}{$N}{$O}{$V}=$Name;
+            $RPMdep->{$Type}{$Name}{$N}=1;
+        }
+    }
+}
+
+sub staticCheck()
+{
+    if(not $RPMdir and not $HDlist and not $RPMlist)
+    {
+        print STDERR gettext_("ERROR: --hdlist, --dir or --list option should be specified\n");
+        exit(1);
+    }
+    my (%Dep, %RPMdep, %AddedRPMs) = ();
+    if($AddRPMs)
+    {
+        if(not -d $AddRPMs)
+        {
+            print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $AddRPMs);
+            exit(1);
+        }
+        if(my @AddedRPMs = searchRPMs($AddRPMs))
+        {
+            foreach my $Path (@AddedRPMs)
+            {
+                readDeps($Path, \%Dep, \%RPMdep);
+                if(my $Name = get_RPMname($Path)) {
+                    $AddedRPMs{$Name}=1;
+                }
+            }
+        }
+    }
+    if($RPMdir or $RPMlist)
+    {
+        print gettext_("Checking RPMs ...\n");
+        my @RPMs = ();
+        if($RPMdir)
+        {
+            if(not -d $RPMdir)
+            {
+                print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $RPMdir);
+                exit(1);
+            }
+            @RPMs = searchRPMs($RPMdir);
+        }
+        elsif($RPMlist) {
+            @RPMs = readRPMlist($RPMlist, "RPMs");
+        }
+        foreach my $Path (@RPMs)
+        {
+            if($AddRPMs)
+            {
+                if(my $Name = get_RPMname($Path))
+                {
+                    if($AddedRPMs{$Name})
+                    { # already added
+                        next;
+                    }
+                }
+            }
+            readDeps($Path, \%Dep, \%RPMdep);
+        }
+    }
+    elsif($HDlist)
+    {
+        my $Content = readFile($HDlist);
+        if($HDlist=~/(http|https|ftp):\/\//)
+        {
+            print gettext_("Downloading HDlist ...\n");
+            my $DownloadTo = $TMP_DIR."/extract/".get_filename($HDlist);
+            $DownloadTo=~s/\.cz/\.gz/g; # cz == gz
+            my $Dir = get_dirname($DownloadTo);
+            mkdir($Dir);
+            system("wget -U '' --no-check-certificate \"$HDlist\" --connect-timeout=5 --tries=1 --output-document=\"$DownloadTo\" >/dev/null 2>&1");
+            if(not -f $DownloadTo
+            or not -s $DownloadTo) {
+                print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $HDlist);
+                exit(1);
+            }
+
+            my %Extract = (
+                "xz"=>"unxz",
+                "lzma"=>"unlzma",
+                "gz"=>"gunzip"
+            );
+            if($DownloadTo=~/\.(gz|xz|lzma)\Z/)
+            {
+                my ($Format, $Cmd) = ($1, $Extract{$1});
+                if($Cmd) {
+                    system("cd $Dir && $Cmd $DownloadTo");
+                }
+                my @Files = cmd_find($Dir, "f", "", "");
+                if(not @Files) {
+                    print STDERR gettext_("ERROR: cannot extract \'[_1]\'\n", $HDlist);
+                    exit(1);
+                }
+                $DownloadTo = $Files[0];
+            }
+            if(my $Line = readLineNum($DownloadTo, 1))
+            {
+                if($Line!~/\A\@\w+\@/) {
+                    print STDERR gettext_("ERROR: unknown format of hdlist\n");
+                    exit(1);
+                }
+            }
+            $Content = readFile($DownloadTo);
+        }
+        else
+        {
+            if(not -f $HDlist)
+            {
+                print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $HDlist);
+                exit(1);
+            }
+            $Content = readFile($HDlist);
+        }
+        print gettext_("Checking HDlist ...\n");
+        my $Name = "";
+        foreach (reverse(split(/\n/, $Content)))
+        {
+            $_=~s/\A\@//g;
+            my @Parts = split("\@", $_);
+            my $Type = shift(@Parts);
+            if($Type eq "info")
+            {
+                $Name = $Parts[0];
+                next;
+            }
+            if($AddRPMs)
+            {
+                if(my $PName = parse_RPMname($Name))
+                {
+                    if($AddedRPMs{$PName})
+                    { # already added
+                        next;
+                    }
+                }
+            }
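+            # the remaining @-separated fields of a record such as
+            # "@requires@libfoo.so.1@bar[>= 1.2-3]" (an illustrative example)
+            # are dependency strings; sepDep() splits each one into a
+            # (name, operator, version) triple below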
if($Type=~/\A(requires|provides|suggests)\Z/) + { + foreach my $D (@Parts) + { + my ($N, $O, $V) = sepDep($D); + $N=~s/\[\*\]//g;# /sbin/ldconfig[*] + $Dep{$Type}{$N}{$O}{$V}=$Name; + $RPMdep{$Type}{$Name}{$D} = 1; + } + } + } + } + my %IgnoreDeps = (); + if($FileDeps) + { + if(not -f $FileDeps) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $FileDeps); + exit(1); + } + %IgnoreDeps = map {$_=>1} split(/\s+/, readFile($FileDeps)); + } + my (%Unresolved, %UnresolvedSuggested, %Broken) = (); + foreach my $N (sort {lc($a) cmp lc($b)} keys(%{$Dep{"requires"}})) + { + foreach my $O (keys(%{$Dep{"requires"}{$N}})) + { + foreach my $V (keys(%{$Dep{"requires"}{$N}{$O}})) + { + if(not defined $Dep{"provides"}{$N} + or not checkDeps($N, $O, $V, $Dep{"provides"}{$N})) + { # unresolved + if($N=~/\A(rpmlib|executable)\(.+\)\Z/) + { # rpmlib(PayloadIsLzma), ... + # executable(rm), ... + next; + } + if($IgnoreDeps{$N}) { + next; + } + my $Name = $Dep{"requires"}{$N}{$O}{$V}; + if($RPMdep{"suggests"}{$Name}{$N}) { + $UnresolvedSuggested{$N}{$O}{$V} = $Name; + } + else { + $Unresolved{$N}{$O}{$V} = $Name; + } + $Broken{$Name}=1; + } + } + } + } + my $Report = ""; + if(my @Ns = sort {lc($a) cmp lc($b)} keys(%Unresolved)) + { + $Report .= "\n".gettext_("Unresolved \"Required\" Dependencies ([_1]):", $#Ns+1)."\n\n"; + foreach my $N (@Ns) + { + foreach my $O (keys(%{$Unresolved{$N}})) + { + foreach my $V (keys(%{$Unresolved{$N}{$O}})) + { + $Report .= showDep($N, $O, $V)." (".gettext_("required by [_1]", $Unresolved{$N}{$O}{$V}).")\n"; + } + } + } + } + if(my @Ns = sort {lc($a) cmp lc($b)} keys(%UnresolvedSuggested)) + { + if($Report) { + $Report .= "\n"; + } + $Report .= "\n".gettext_("Unresolved \"Suggested\" Dependencies ([_1]):", $#Ns+1)."\n\n"; + foreach my $N (@Ns) + { + foreach my $O (keys(%{$UnresolvedSuggested{$N}})) + { + foreach my $V (keys(%{$UnresolvedSuggested{$N}{$O}})) + { + $Report .= showDep($N, $O, $V)." 
(required by ".$UnresolvedSuggested{$N}{$O}{$V}.")\n"; + } + } + } + } + if(my @Ns = sort {lc($a) cmp lc($b)} keys(%Broken)) + { + $Report .= "\n".gettext_("Broken Packages ([_1]):", $#Ns+1)."\n\n"; + foreach my $N (@Ns) { + $Report .= parse_RPMname($N)."\n"; + } + } + if($Report) + { + print $Report."\n"; + writeFile("$RESULTS_DIR/report.txt", $Report); + } + writeFile("$RESULTS_DIR/debug/rpm-provides.txt", Dumper($RPMdep{"provides"})); + writeFile("$RESULTS_DIR/debug/rpm-requires.txt", Dumper($RPMdep{"requires"})); + writeFile("$RESULTS_DIR/debug/rpm-suggests.txt", Dumper($RPMdep{"suggests"})); + print gettext_("Report has been generated to:"); + print "\n $RESULTS_DIR/report.txt\n"; + if(keys(%Unresolved)) { + exit($EXIT_CODES{"FAILED"}); + } + else { + exit($EXIT_CODES{"SUCCESS"}); + } +} + +sub parse_RPMname($) +{ + my $Name = $_[0]; + if($Name=~/\d(mdv|mdk|rosa(\.\w+|))\d+/) + { # plexus-interactivity-1.0-0.1.a5.2.2.5mdv2011.0.i586 + $Name=~s/\-[^\-]+\Z//; + $Name=~s/\-[^\-]+\Z//; + } + else + { # x11-server-source-1.10.3-1-mdv2011.0.i586 + $Name=~s/\-[^\-]+\Z//; + $Name=~s/\-[^\-]+\Z//; + $Name=~s/\-[^\-]+\Z//; + } + return $Name; +} + +sub clearCache() +{ + if(not $NoClean) + { + rmtree($RPM_CACHE); + mkpath($RPM_CACHE); + } +} + +sub scenario() +{ + if($Help) + { + HELP_MESSAGE(); + exit(0); + } + if($ShowVersion) + { + print gettext_("URPM Repos Closure Checker [_1] for Mandriva Linux\nCopyright (C) 2012 ROSA Laboratory\nLicense: GPL \nThis program is free software: you can redistribute it and/or modify it.\n\nWritten by Andrey Ponomarenko.\n", $TOOL_VERSION); + exit(0); + } + if($HDlist) { + $StaticMode = 1; + } + if($Root) + { + if(not -d $Root) { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $Root); + exit(1); + } + } + if($ResDir) { + $RESULTS_DIR = $ResDir; + } + if(-d $RESULTS_DIR) + { + # print "Removing old $RESULTS_DIR directory\n"; + rmtree($RESULTS_DIR); + } + if($CheckSignature) + { + if(not $ResDir) { + $RESULTS_DIR .= "/signature"; + } + sigCheck(); + exit(0); + } + if($StaticMode) + { + if(not $ResDir) { + $RESULTS_DIR .= "/static"; + } + staticCheck(); + } + if($CheckRelease) + { + if(not $ResDir) { + $RESULTS_DIR .= "/release"; + } + checkRelease(); + exit(0); + } + if($DynamicMode) + { + if(not $ResDir) { + $RESULTS_DIR .= "/dynamic"; + } + dynamicCheck(); + } + exit(0); +} + +scenario(); + diff --git a/urpm-repodiff.py b/urpm-repodiff.py new file mode 100755 index 0000000..bd0b836 --- /dev/null +++ b/urpm-repodiff.py @@ -0,0 +1,1379 @@ +#!/usr/bin/python +''' +" Repodiff utility for finding differences between different repositories +" +" The tool downloads, unpacks and parses synthesis.hdlist.cz and +" changelog.xml.lzma to genererate lists of newly added packages, +" removed from new repository packages and updated packages. +" The tool outputs data to standart output or to file. +" It can show if a removed packages is obsoleted by some package +" in new repositories. Also the tool can output data in format of +" HTML table. +" +" REQUIREMENTS +" ============ +" - urpmi +" - python-2.7 +" - lzma +" - gzip +" - libxml2 python library +" - rpm python library +" +" Copyright (C) 2012 ROSA Laboratory. +" Written by Vladimir Testov +" +" This program is free software: you can redistribute it and/or modify +" it under the terms of the GNU General Public License or the GNU Lesser +" General Public License as published by the Free Software Foundation, +" either version 2 of the Licenses, or (at your option) any later version. 
+" +" This program is distributed in the hope that it will be useful, +" but WITHOUT ANY WARRANTY; without even the implied warranty of +" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +" GNU General Public License for more details. +" +" You should have received a copy of the GNU General Public License +" and the GNU Lesser General Public License along with this program. +" If not, see . +''' + +import argparse +import urllib +import tempfile +import os +import subprocess +import re +import libxml2 +import sys +from datetime import date +import rpm +import shutil +import urllib2 +import urpmmisc + +import gettext +gettext.install('urpm-tools') + +old_dir = "old" +new_dir = "new" +htmlname = "repodiff.html" + +synthtags = ["provides", "requires", "obsoletes", "conflicts", "suggests", + "summary", "info"] + +minus_check = re.compile('-') +re_search_unver = re.compile("([^\[\]]+)[\[\]]") +re_search_verrel = re.compile("\[(== |> |< |>= |<= )([\{\}+=0-9a-zA-Z_\.]*:)?([[\{\}+=0-9a-zA-Z_\.]+)(-[[\{\}+=0-9a-zA-Z_\.]+)?([^\[\]]*)\]$") + +synthesis_arch = "synthesis.hdlist.cz" +synthesis_arch_renamed = "synthesis.hdlist.gz" +synthesis_file = "synthesis.hdlist" +changelog_arch = "changelog.xml.lzma" +changelog_file = "changelog.xml" +default_output = "sys.stdout" +timeout = 5 + +def ParseCommandLine(): + """Parse arguments. + + Parse arguments from command line. + Return these arguments. + """ + parser = argparse.ArgumentParser( + description=_("Tool for comparing sets of repositories.")) + parser.add_argument("--old", "-o", action="store", nargs='+', required="True", + metavar="OLD_REPO", help=_("URL or PATH to old repositories")) + parser.add_argument("--new", "-n", action="store", nargs='+', required="True", + metavar="NEW_REPO", help=_("URL or PATH to new repositories")) + parser.add_argument("--size", "-s", action="store_true", + help=_("Show differences in package sizes.")) + parser.add_argument("--simple", action="store_false", + help=_("Simple output format.")) + parser.add_argument("--quiet", "-q", action="store_false", + help=_("Hide service messages.")) + parser.add_argument("--changelog", "-c", action="store_true", + help=_("Show changelog difference.")) + parser.add_argument("--html", action="store_true", + help=_("Output in HTML format, if --output is not present\ + \"%s\" will be created in current directory. \ + --size, --simple and --changelog options are ignored.") % htmlname) + parser.add_argument("--output", "-out", action="store", nargs=1, default='', + metavar="OUTPUT_FILE", help=_("Change standart output to \"OUTPUT_FILE\".")) + return parser.parse_args() + +def exit_proc(arg): + """ + Remove trash. + """ + err_tmpdir = arg.temp_dir + err_output = arg.output + + if err_output != None: + err_output.close() + if os.path.isdir(err_tmpdir): + shutil.rmtree(err_tmpdir) + exit(0) + +def CheckURL(url, arg): + """URL check. + + Check that URL is gettable. + """ + try: + urllib2.urlopen(url, None, timeout) + except: + print _("Error: URL to repository \"%s\" is incorrect") % url + exit_proc(arg) + +def CheckArgs(urlpath, arg): + """Trivial checks. + + Check that url or path is correct. 
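+    Accepted forms (the sample values are illustrative): an http:// or
+    ftp:// URL, a local directory or file:// path, or a urpmi media name
+    that urpmmisc.GetUrlFromRepoName() can resolve, e.g.
+        http://mirror.example.com/2012/
+        /mnt/repo/
+        main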
+ """ + if (urlpath.startswith("http://") or urlpath.startswith("ftp://")): + if not urlpath.endswith('/'): + urlpath = urlpath + '/' + tmp_url = urlpath + "media_info/" + CheckURL(tmp_url, arg) + elif (os.path.isdir(urlpath)) or urlpath.startswith("file://"): + if urlpath.startswith("file://./"): + urlpath = urlpath[7:] + else: + urlpath = urlpath[6:] + if not urlpath.endswith('/'): + urlpath = urlpath + '/' + urlpath = urlpath + "media_info/" + if not os.path.isdir(urlpath): + print _("Error: directory %s does not exist") % urlpath + exit_proc(arg) + else: + (e1,e2,urltmp) = urpmmisc.GetUrlFromRepoName(urlpath) + if (urltmp): + if not urltmp.endswith('/'): + urltmp = urltmp + '/' + urlpath = urltmp + "media_info/" + CheckURL(urlpath, arg) + else: + print _("Error: \"%s\" is not correct url, path or name of repository") % urlpath + exit_proc(arg) + return urlpath + +def CheckOutput(arg): + """Check output file. + + Check if the file can be created and redirect standart output to this file. + """ + file_output = arg.output + ifhtml = arg.html + + if (file_output == default_output): + if(ifhtml): + try: + arg.output = open(htmlname, "w") + except: + print _("Error: Cannot open %s for writing.") % htmlname + exit_proc(arg) + return + else: + arg.output = sys.stdout + return + + if(file_output != ''): + if(os.path.isfile(file_output)): + print _("Error: File %s already exists") % file_output + arg.output = None + exit_proc(arg) + else: + dirname = os.path.dirname(file_output) + if(dirname == '') or (os.path.exists(dirname)): + try: + arg.output = open(file_output, "w") + except IOError: + print _("Error: File %s cannot be created") % file_output + arg.output = None + exit_proc(arg) + else: + print _("Error: Path %s does not exist.") % dirname + arg.output = None + exit_proc(arg) + +def CheckParam(arg): + """Check parameters. + + Ignore some parameters in HTML-case. + """ + if arg.html: + arg.size = 0 + arg.simple = 0 + arg.changelog = 0 + +def GetFile(urlpath, filename, localdir, arg): + """Donwload archive. + """ + ifnotquiet = arg.quiet + + if not os.path.isdir(localdir): + os.makedirs(os.path.realpath(localdir)) + if ifnotquiet: + print (_("getting file %s from ") % filename) + "\n " + urlpath + filename + if os.path.isdir(urlpath): + try: + shutil.copyfile(urlpath + filename, localdir + filename) + except: + print _("Error: file %s was not copied") % filename + exit_proc(arg) + else: + try: + file_from = urllib2.urlopen(urllib2.Request(urlpath + filename), None, timeout) + file_to = open(localdir + filename, "w") + shutil.copyfileobj(file_from, file_to) + except: + print _("Error: file %(from)s was not downloaded to %(to)s") %{"from": urlpath + filename, "to": localdir + filename} + exit_proc(arg) + file_from.close() + file_to.close() + +def GetFiles(arg): + """Get all needed files. 
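+    For every old and new repository this fetches
+    media_info/synthesis.hdlist.cz into the matching temporary
+    directory, plus media_info/changelog.xml.lzma when --changelog
+    is given.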
+ """ + ifchangelog = arg.changelog + file_dir = [] + file_name = [] + file_path = [] + for i in range(len(arg.old)): + file_name.append(synthesis_arch) + file_dir.append(arg.temp_old[i]) + file_path.append(arg.old[i] + "media_info/") + if ifchangelog: + file_name.append(changelog_arch) + file_dir.append(arg.temp_old[i]) + file_path.append(arg.old[i] + "media_info/") + + for i in range(len(arg.new)): + file_name.append(synthesis_arch) + file_dir.append(arg.temp_new[i]) + file_path.append(arg.new[i] + "media_info/") + if ifchangelog: + file_name.append(changelog_arch) + file_dir.append(arg.temp_new[i]) + file_path.append(arg.new[i] + "media_info/") + + for i in range(len(file_name)): + GetFile(file_path[i], file_name[i], file_dir[i], arg) + +def RenameSynthFile(localdir, arg): + """Rename. + + Rename Synthesis file so zgip can understand format. + """ + ifnotquiet = arg.quiet + + if not os.path.isfile(localdir + synthesis_arch): + print _("Error: file not found: ") + localdir + synthesis_arch + exit_proc(arg) + try: + os.rename(localdir + synthesis_arch, localdir + synthesis_arch_renamed) + except OSError: + print _("Error: cannot rename file %(from)s to %(to)s") % {"from": synthesis_arch, "to": synthesis_arch_renamed} + + exit_proc(arg) + if not os.path.isfile(localdir + synthesis_arch_renamed): + print _("Error: file %s is missing.") % (localdir + synthesis_arch_renamed) + exit_proc(arg) + else: + if ifnotquiet: + print _("file %(from)s was renamed to %(to)s") % {"from": synthesis_arch, "to": synthesis_arch_renamed} + +def UnpackFiles(files_dir, ifchangelog, ifnotquiet): + """Unpack. + + Unpack needed files in selected directory. + """ + if ifchangelog: + if ifnotquiet: + print _("unpacking file ") + changelog_arch + subprocess.call(["lzma", "-df", files_dir + changelog_arch]) + if ifnotquiet: + print _("unpacking file ") + synthesis_arch_renamed + subprocess.call(["gzip", "-df", files_dir + synthesis_arch_renamed]) + +def ParseVersion(names_list): + """Parse version info is present. + + Parse version information from the field. e.g. provided_name[>= 1.2.3-4.5.6] + is parsed to (provided_name, sign, (epoch, version, release)) + """ + new_names_list = [] + for name in names_list: + match = re_search_unver.match(name) + if match: + tmp_entry = match.group(1) + else: + tmp_entry = name + match = re_search_verrel.search(name) + if match: + sign = match.group(1)[:-1] + epoch = match.group(2) + if epoch: + epoch = epoch[:-1] + else: + epoch = '' + version = match.group(3) + release = match.group(4) + if release: + release = release[1:] + else: + release = '' + verrel = (epoch, version, release) + else: + sign = '' + verrel = ('','','') + new_names_list.append((tmp_entry, sign, verrel)) + return new_names_list + +def ParseSynthesis(synthfile, pkgdict, arg): + """Collect info about packages. + + Parse synthesis.hdlist file (or add new entries to pkgdict). 
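+    Illustrative synthesis.hdlist lines (field layout inferred from the
+    parsing below; the values are invented):
+        @provides@x11-server-source[== 1.10.3-1]
+        @summary@Example summary text
+        @info@x11-server-source-1.10.3-1-mdv2011.0.i586@0@12345@Development/X11@mdv@2011.0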
+ + pkgdict is a dictionary with format: + pkgdict[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - is package info + s1 - is package summary + s2[] - is list of obsoleted packages + """ + ifnotquiet = arg.quiet + + if not os.path.isfile(synthfile): + print _("Error: Synthesis file %s was not found.") % synthfile + exit_proc(arg) + if ifnotquiet: + print _("Parsing synthesis") + try: + synth = open(synthfile) + tmp = ['', '', ''] + for synthline in synth: + if synthline.endswith('\n'): + synthline = synthline[:-1] + tmpline = synthline.split('@') + tag = tmpline[1] + if tag == synthtags[2]: + tmp[2] = tmpline[2:] + elif tag == synthtags[5]: + tmp[1] = '@'.join(tmpline[2:]) + elif tag == synthtags[6]: + tmp[0] = tmpline[2:] + disttagepoch = ChkTagEpoch(tmp[0]) + tmp[2] = ParseVersion(tmp[2]) + (name, version, release) = RPMNameFilter(tmp[0][0], disttagepoch) #disttag + distepoch + verrel = (version, release, tmp[0][1]) + if(not name in pkgdict): + pkgdict[name]=(verrel, (tmp[0], tmp[1], tmp[2])) + elif(compare_versions(pkgdict[name][0], verrel) == -1): + pkgdict[name]=(verrel, (tmp[0], tmp[1], tmp[2])) + tmp = ['', '', ''] + synth.close() + except IOError: + print _("Error: Failed to open synthesis file ") + synthfile + exit_proc(arg) + +def ChkDist(disttag, distepoch): + """No minus in tag and epoch. + + Trivial check that tag and epoch hasn't got '-' in their name + """ + if minus_check.search(disttag) or minus_check.search(distepoch): + print _("REPODIFF-Warning: strange format of or : ") +\ + disttag + distepoch + +def ChkTagEpoch(i): + """No minus in tag and epoch. + + Trivial check that tag and epoch hasn't got '-' in their name + """ + if len(i) == 4: + return '-' + elif len(i) == 5: + disttag = i[4] + distepoch = '' + ChkDist(disttag, distepoch) + return disttag + distepoch + elif len(i) == 6: + disttag = i[4] + distepoch = i[5] + ChkDist(disttag, distepoch) + return disttag + distepoch + else: + print _("REPODIFF-Warning: strange : ") + str(i) + +def RPMNameFilter(rpmname, disttagepoch): + """Parse name and verrel. + + Function that parses name, version and release of a package. + """ + string = rpmname.split('-') + lastpart = string.pop() + tmp = lastpart.split('.') + tmp.pop() + lastpart = '.'.join(tmp) + if (lastpart[0].isdigit() or (not lastpart.startswith(disttagepoch))) and\ + (not lastpart.isdigit()): + name = '-'.join(string[:-1]) + ver = string[-1] + rel = lastpart + else: + name = '-'.join(string[:-2]) + ver = string[-2] + rel = string[-1] + return (name, ver, rel) + +def compare_versions(first_entry, second_entry): + """Compare two verrel tuples. + + dict_entry and comp_entry are verrel tuples + verrel = (version, release, epoch). + Return 1 if the first argument is higher. + 0 if they are equivalent. + -1 if the second argument is higher. + """ + (version1, release1, first_epoch) = first_entry + (version2, release2, second_epoch) = second_entry + return(rpm.labelCompare((first_epoch, version1, release1), + (second_epoch, version2, release2))) + +def ParsePackage(arg): + """Processing files, parsing synthesis, getting pkgdict. 
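+    Package file names are split by RPMNameFilter(); for example
+    (illustrative value), "x11-server-source-1.10.3-1-mdv2011.0.i586"
+    with disttagepoch "mdv2011.0" yields the tuple
+    ('x11-server-source', '1.10.3', '1').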
+ + pkgdict is a dictionary with format: + pkgdict[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - is package info + s1 - is package summary + s2[] - is list of obsoleted packages + """ + ifchangelog = arg.changelog + ifnotquiet = arg.quiet + pkgdict_old = {} + for directory in arg.temp_old: + RenameSynthFile(directory, arg) + UnpackFiles(directory, ifchangelog, ifnotquiet) + ParseSynthesis(directory + synthesis_file, pkgdict_old, arg) + pkgdict_new = {} + for directory in arg.temp_new: + RenameSynthFile(directory, arg) + UnpackFiles(directory, ifchangelog, ifnotquiet) + ParseSynthesis(directory + synthesis_file, pkgdict_new, arg) + return pkgdict_old, pkgdict_new + +def CreateDicts(dict_old, dict_new): + """Creating dictionaries. + + Creating dictionaries for new, updated and removed(deleted) packages + from two dictionaries: old and new, for old and new repositories. + + dict_old, dict_new are dictionaries with format: + pkgdict[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - is package info + s1 - is package summary + s2[] - is list of obsoleted packages + + dict_new_packages and dict_del_packages have the same format. + dict_upd_packages has format: + dict_upd_packages[name]=((verrel_old,(so0,so1,so2)), + (verrel_new,(sn0,sn1,sn2)),ifdowngraded) + or + dict_upd_packages[name]=(dict_old[name],dict_new[name],ifdowngraded) + """ + dict_new_packages = {} + dict_del_packages = {} + dict_upd_packages = {} + + for name in dict_new: + if(name in dict_old): #updated or downgraded + compare_result = compare_versions(dict_new[name][0], + dict_old[name][0]) + if(compare_result > 0): #updated + dict_upd_packages[name] = (dict_old[name], dict_new[name], 0) + elif(compare_result < 0): #downgraded ? + dict_upd_packages[name] = (dict_old[name], dict_new[name], 1) + else: #new + dict_new_packages[name] = dict_new[name] + for name in dict_old: + if(not name in dict_new): #removed + dict_del_packages[name] = dict_old[name] + return (dict_new_packages, dict_del_packages, dict_upd_packages) + +def ProcessNewPackages(dict_new_packages, file_output): + """Processing newly added packages. + + dict_new_packages[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - is package info + s1 - is package summary + s2[] - is list of obsoleted packages + """ + sorted_list = sorted(dict_new_packages) + for name in sorted_list: + file_output.write(_("New package: ") + dict_new_packages[name][1][0][0] +\ + "\n " + dict_new_packages[name][1][1] + "\n\n") + +def GenerateDictObsoleted(dict_new, ifnotquiet): + """Generate Dictionary of obsoleted packages. 
+ + pkgdict[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - package info + s1 - package summary + s2[] - list of packages obsoleted by current package + """ + if ifnotquiet: + print _("Generating obsoleted list.") + obsoleted_by = {} + for name in dict_new: + for (obsolete, sign, verrel) in dict_new[name][1][2]: + if(not obsolete in obsoleted_by): + obsoleted_by[obsolete] = [] + obsoleted_by[obsolete].append((dict_new[name][1][0][0], sign, verrel)) + return obsoleted_by + +def compare_verrel(verrel1, sign, verrel2): + if (sign == ''): + return 1 + (e1, v1, r1) = verrel1 + (e2, v2, r2) = verrel2 + # checks + if (v2 == '') or (v1 == ''): + return 1 + if (e1 == '') or (e2 == ''): + e1 = '0' + e2 = '0' + if (r1 == '') or (r2 == ''): + r1 = '0' + r2 = '0' + # compare + compare = rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) + if (sign == "=="): + if (compare == 0): + return 1 + elif (sign == ">"): + if (compare == 1): + return 1 + elif (sign == "<"): + if (compare == -1): + return 1 + elif (sign == ">="): + if (compare > -1): + return 1 + elif (sign == "<="): + if (compare < 1): + return 1 + return 0 + +def ProcessDelPackages(dict_del_packages, dict_obsoleted, file_output): + """Process deleted packages. + + Printing every deleted package. Show if package is obsoleted. + pkgdict[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - is package info + s1 - is package summary + s2[] - is list of obsoleted packages + + dict_obsoleted is dictionary + dict_obsoleted[name]=[obs1, ...] + """ + sorted_list = sorted(dict_del_packages) + for name in sorted_list: + file_output.write(_("Removed package: ") + dict_del_packages[name][1][0][0] + '\n') + if (name in dict_obsoleted): + tmp_list = [] + for (obsolete, sign, verrel) in dict_obsoleted[name]: + if (compare_verrel(dict_del_packages[name][0], sign, verrel)): + tmp_list.append(obsolete) + sorted_obsolete = sorted(tmp_list) + for obs_package_name in sorted_obsolete: + file_output.write(_(" Obsoleted by ") + obs_package_name + '\n') + +def ParseLogfile(dict_log, logfile, dict_upd_packages, mode, arg): + """Parse Changelog. + + mode == 0 - for old changelog: we search only for 1st entry in changelog + mode == 1 - for new changelog: we collect entries from changelog untill + we find remembered entry from changelog + + Parse changelog.xml to compare changes between updated packages. + dict_log - is dictionary with format: + dict_log[name] = + [(verrel, (time,name,text)), (verrel,[(time,name,text),...])] + + dict_upd_packages[name] = [old_pkg[name],new_pkg[name],ifdowngraded] + or dict_upd_packages[name] = + [(verler,(s0,s1,s2)),(verrel,(s0,s1,s2)),ifdowngraded] + """ + ifnotquiet = arg.quiet + + if ifnotquiet: + print _("Reading changelog") + if not os.path.isfile(logfile): + print _("Error: Can't find changelog ") + logfile + exit_proc(arg) + doc = libxml2.parseFile(logfile) + if (not doc): + print _("Error: Can't read changelog ") + logfile + "." 
+ exit_proc(arg) + root = doc.children + if root.name != "media_info": + print _("Error: Wrong changelog.") + doc.freeDoc() + exit_proc(arg) + tag_changelog = root.children + while(tag_changelog): + if(tag_changelog.name != "changelogs"): + tag_changelog = tag_changelog.next + continue + + tag_property = tag_changelog.properties + pkgname = '' + disttag = '' + distepoch = '' + while(tag_property): + if (tag_property.name == "fn"): + pkgname = tag_property.content + elif (tag_property.name == "disttag"): + disttag = tag_property.content + elif (tag_property.name == "distepoch"): + distepoch = tag_property.content + tag_property = tag_property.next + if (pkgname == ''): + print _("Error: Corrupted changelog") + doc.freeDoc() + exit_proc(arg) + disttagepoch = disttag + distepoch + if (disttagepoch == ''): + disttagepoch = '-' + (result_key, version, release) = RPMNameFilter(pkgname, disttagepoch) + verrel = (version, release, "-1") + # skip entry if it wasn't updated + if result_key not in dict_upd_packages: + tag_changelog = tag_changelog.next + continue + ifdowngraded = dict_upd_packages[result_key][2] + # skip entry if it's name is not in dictionary + if(dict_upd_packages[result_key][mode][1][0][0] != pkgname): + tag_changelog = tag_changelog.next + continue + # skip entry if it has been found already with appropriate version + if(result_key in dict_log) and (dict_log[result_key][mode]): + tag_changelog = tag_changelog.next + continue + # if "old" repository do not have changelog of the package + if(mode == 1) and (not result_key in dict_log): + dict_log[result_key] = [] + dict_log[result_key].append([]) + dict_log[result_key].append([]) + dict_log[result_key][0] = (verrel, []) + + log_current = tag_changelog.children + result_changelog = [] + while(log_current): + if(log_current.name != "log"): + log_current = log_current.next + continue + + if(log_current.properties.name == "time"): + entry_time = log_current.properties.content + else: + entry_time = 0 + + if(mode == 1) and (not ifdowngraded) and\ + (entry_time <= dict_log[result_key][0][1][0]): + break + log_child = log_current.children + while(log_child): + if(log_child.name == "log_name"): + entry_name = log_child.content + elif(log_child.name == "log_text"): + entry_text = log_child.content + log_child = log_child.next + result_changelog.append((entry_time, entry_name, entry_text)) + if(mode == ifdowngraded): + break + log_current = log_current.next + if(mode == 0): + dict_log[result_key] = [] + dict_log[result_key].append([]) + dict_log[result_key].append([]) + if not ifdowngraded: + dict_log[result_key][0] = (verrel, result_changelog[0]) + else: + dict_log[result_key][0] = (verrel, result_changelog) + else: + if not ifdowngraded: + dict_log[result_key][1] = (verrel, result_changelog) + else: #special actions for downgraded packages + new_result = [] + time_to_stop = result_changelog[0][0] + tmp_change = dict_log[result_key][0][1] #changelog list + if tmp_change: #changelog is not empty + i = 0 + length = len(tmp_change) + while i < length: + if tmp_change[i][0] <= time_to_stop: + i = i + 1 + break + new_result.append(tmp_change[i]) + i = i + 1 + dict_log[result_key][1] = (verrel, new_result) + tag_changelog = tag_changelog.next + doc.freeDoc() + +def GenerateLogfileDiff(dict_upd_packages, arg): + """Changelog difference list. + + Generate changelog difference list. 
+ dict_upd_packages[name] = [old_pkg[name],new_pkg[name],ifdowngraded] + or dict_upd_packages[name] = [(verler,(s0,s1,s2)),(verrel,(s0,s1,s2)),ifdowngraded] + """ + ifnotquiet = arg.quiet + temp_old = arg.temp_old + temp_new = arg.temp_new + + if ifnotquiet: + print _("Generating changes list.") + dict_logfile_diff = {} + dict_log = {} + + for old_dir in temp_old: + ParseLogfile(dict_log, old_dir + changelog_file, dict_upd_packages, 0, arg) + for new_dir in temp_new: + ParseLogfile(dict_log, new_dir + changelog_file, dict_upd_packages, 1, arg) + + for name in dict_upd_packages: + if(name in dict_log): + if dict_log[name][1]: + entry = dict_log[name][1][1] + else: + print _("REPODIFF-Warning: Package %s was not described in changelogs.xml") % name + entry = [(0, '', _("REPODIFF-Warning: Changelogs of a package are absent in \"new\" repository."))] + else: + print _("REPODIFF-Warning: Package %s was not described in changelogs.xml") % name + entry = [(0, '', _("REPODIFF-Warning: Changelogs of a package are absent."))] + dict_logfile_diff[name] = entry + + return dict_logfile_diff + +def ChangelogPrint(changes_list, file_output): + """Changelog difference. + + Output changes in changelog. + changes_list is list with format: + changes_list = [(time,author,text)] + """ + for entry in changes_list: + file_output.write("* " + str(date.fromtimestamp(float(entry[0]))) +\ + " " + entry[1] + '\n' + entry[2] + '\n\n') + +def PrintLogfileDiff(package_name, dict_logfile_diff, file_output): + """Changelog difference. + + Output changes in changelog. + dict_logfile_diff is dictionary with format: + dict_logfile_diff[name] = [(time,author,text)] + """ + if package_name in dict_logfile_diff: + ChangelogPrint(dict_logfile_diff[package_name], file_output) + else: + file_output.write(_("Package %s has no changelog info\n") % package_name) + +def ProcessUpdPackages(dict_upd_packages, dict_logfile_diff, arg): + """Process updated packages. + + ifsizes - is indicator: should we (1) or should we not (0) print + difference in package sizes. + ifnotsimple - is indicator: should we (0) or shoudl we not (1) print + difference in changelogs. + Process updated packages and output everything needed info. + dict_upd_packages[name] = [old_pkg[name],new_pkg[name],ifdowngraded] + or dict_upd_packages[name] = [(verler,(s0,s1,s2)),(verrel,(s0,s1,s2)),ifdowngraded] + """ + ifnotsimple = arg.simple + file_output = arg.output + ifchangelog = arg.changelog + ifsizes = arg.size + + file_output.write(_("\n\nUpdated packages:\n\n")) + sorted_list = sorted(dict_upd_packages) + for name in sorted_list: + package = dict_upd_packages[name][1][1][0][0] + if ifnotsimple: + file_output.write(package + '\n' + '-'*len(package) + '\n') + if dict_upd_packages[name][2]: + file_output.write(_(" ***DOWNGRADED***\n")) + if ifchangelog: + PrintLogfileDiff(name, dict_logfile_diff, file_output) + else: + old_package = dict_upd_packages[name][0][1][0][0] + file_output.write(name + ": " + old_package + " -> " + package + '\n') + if(ifsizes): + sizediff = int(dict_upd_packages[name][1][1][0][2]) - \ + int(dict_upd_packages[name][0][1][0][2]) + file_output.write(_("Size Change: %d bytes\n\n") % sizediff) + +def PrintSummary(dict_new_packages, dict_del_packages, dict_upd_packages, file_output): + """Output summary. + + Output summary: numbers of new/removew/updated packages at all. 
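+    Example output (the counts are invented):
+        Summary:
+         Total added packages: 12
+         Total removed packages: 3
+         Total updated packages: 140
+         Total downgraded packages: 1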
+ """ + file_output.write("Summary:\n") + length = len(dict_new_packages) + if length: + file_output.write(_(" Total added packages: ") + str(length) + '\n') + length = len(dict_del_packages) + if length: + file_output.write(_(" Total removed packages: ") + str(length) + '\n') + length = 0 + length_d = 0 + for packagename in dict_upd_packages: + if dict_upd_packages[packagename][2] == 0: + length = length + 1 + else: + length_d = length_d + 1 + if length: + file_output.write(_(" Total updated packages: ") + str(length) + '\n') + if length_d: + file_output.write(_(" Total downgraded packages: ") + str(length_d) + '\n') + +def HTML_ParsePackage(arg): + """Parse hdlist. + + HTML-specific ParsePackage(). Calls for ParsePackage + """ + ifchangelog = arg.changelog + ifnotquiet = arg.quiet + + html_old_dict_list = [] + html_new_dict_list = [] + + for directory in arg.temp_old: + tmp_dict = {} + RenameSynthFile(directory, arg) + UnpackFiles(directory, 0, ifnotquiet) + ParseSynthesis(directory + synthesis_file, tmp_dict, arg) + html_old_dict_list.append(tmp_dict) + for directory in arg.temp_new: + tmp_dict = {} + RenameSynthFile(directory, arg) + UnpackFiles(directory, 0, ifnotquiet) + ParseSynthesis(directory + synthesis_file, tmp_dict, arg) + html_new_dict_list.append(tmp_dict) + return html_old_dict_list, html_new_dict_list + +def HTML_UniteOld(list_dict_old): + """Union of dictionaries. + + HTML-specific. + """ + dict_old = list_dict_old[0] + i = 1 + while(i < len(list_dict_old)): + for name in list_dict_old[i]: + if name not in dict_old: + dict_old[name] = list_dict_old[i][name] + elif(compare_versions(dict_old[name][0], list_dict_old[i][name][0]) == -1): + dict_old[name] = list_dict_old[i][name] + i = i + 1 + return dict_old + +def HTML_CreateDicts(dict_old, list_dict_new): + """Create dictionary of packages. + + Dictionary of packages and types of changes. + """ + dict_packages = {} + i = 0 + for dict_new in list_dict_new: + (tmp_new, tmp_del, tmp_upd) = CreateDicts(dict_old, dict_new) + for packagename in tmp_new: + if packagename not in dict_packages: + dict_packages[packagename] = [] + dict_packages[packagename].append((tmp_new[packagename], i, 1)) + for packagename in tmp_del: + if packagename not in dict_packages: + dict_packages[packagename] = [] + dict_packages[packagename].append((tmp_del[packagename], i, 2)) + for packagename in tmp_upd: + if packagename not in dict_packages: + dict_packages[packagename] = [] + if tmp_upd[packagename][2] == 0: + dict_packages[packagename].append((tmp_upd[packagename][1], i, 3)) + elif tmp_upd[packagename][2] == 1: + dict_packages[packagename].append((tmp_upd[packagename][1], i, 4)) + i = i + 1 + return dict_packages + +def CssOutput(): + """Output style. + + Output contents of style tag or to .css file. + """ + csscontent = '\nbody {\nfont-size: 1em;\nmargin: 1em;\ncolor: black;\nbackground-color: white;\n}\n' +\ + 'th {\nborder-bottom-style: double;\n}\n' +\ + 'h1 {\nfont-size: 1.6em;\n}\n' +\ + 'h2 {\nfont-size: 1.4em;\n}\n' +\ + 'ul {\nfont-size: 1.2em;\n}\n' +\ + 'li {\nfont-size: 1em; list-style: disc;\n}\n' +\ + '.even {\nbackground-color: #CCCCCC;\n}\n' +\ + '.odd {\nbackground-color: #FFFFFF;\n}\n' +\ + '.new {\nbackground-color: #C6DEFF;\n}\n' +\ + '.removed {\nbackground-color: #FFC3CE;\n}\n' +\ + '.updated {\nbackground-color: #CCFFCC;\n}\n' +\ + '.downgraded {\nbackground-color: #F4F4AF;\n}\n' +\ + 'p.bold {\n font-weight: bold\n}\n' + return csscontent + +def JavaScriptOutput(): + """Output scripts. 
+ + Output javascript to script tag or to .js file. + """ + javacontent = """ +var tableBody; +var table2sort; +var imgUp; +var imgDown; +var suffix; +var lastSortCol; +var lastSortOrderAsc; +var index; +var rows; + +function TableSorter(table,suf) { + this.table2sort = table; + this.suffix = suf; + this.lastSortCol = -1; + this.lastSortOrderAsc = true; + this.tableBody = this.table2sort.getElementsByTagName("tbody")[0]; + + this.imgUp = document.createTextNode(String.fromCharCode(0x2193)); + this.imgDown = document.createTextNode(String.fromCharCode(0x2191)); +} + +TableSorter.prototype.sort = function (col, type) { + if (this.lastSortCol != -1) { + sortCell = document.getElementById("sortCell" + this.suffix + this.lastSortCol); + if (sortCell != null) { + if (this.lastSortOrderAsc == true) { + sortCell.removeChild(this.imgUp); + } else { + sortCell.removeChild(this.imgDown); + } + } + sortLink = document.getElementById("sortCellLink" + this.suffix + this.lastSortCol); + if(sortLink != null) { + sortLink.title = "Sort Ascending"; + } + }else{ + this.rows = this.tableBody.rows; + } + + if (this.lastSortCol == col) { + this.lastSortOrderAsc = !this.lastSortOrderAsc; + } else { + this.lastSortCol = col; + this.lastSortOrderAsc = true; + } + + var newRows = new Array(); + + var newRowsCount = 0; + for (i = 1; i < this.rows.length; i ++) { + newRows[newRowsCount++] = this.rows[i]; + } + + index = this.lastSortCol; + if (type == 'string') { + newRows.sort(sortFunction_string); + } + else { + newRows.sort(sortFunction_attr); + } + + if (this.lastSortOrderAsc == false) { + newRows.reverse(); + } + + var count = 0; + var newclass; + for (i = 0; i < newRows.length; i++) { + if (count++ % 2 == 0){ + newclass = "odd"; + }else{ + newclass = "even"; + } + newRows[i].className = newclass; + this.table2sort.tBodies[0].appendChild(newRows[i]); + } + + sortCell = document.getElementById("sortCell" + this.suffix + col); + if (sortCell == null) { + } else { + if (this.lastSortOrderAsc == true) { + sortCell.appendChild(this.imgUp); + } else { + sortCell.appendChild(this.imgDown); + } + } + + sortLink = document.getElementById("sortCellLink" + this.suffix + col); + if (sortLink == null) { + } else { + if (this.lastSortOrderAsc == true) { + sortLink.title = "Sort Descending"; + } else { + sortLink.title = "Sort Ascending"; + } + } +} + +function getCellContent(elem) { + if (typeof elem == "string") return elem; + if (typeof elem == "undefined") { return elem }; + if (elem.innerText) return elem.innerText; + var str = ""; + + var cs = elem.childNodes; + var l = cs.length; + for (var i = 0; i < l; i++) { + switch (cs[i].nodeType) { + case 1: // 'ELEMENT_NODE' + str += getCellContent(cs[i]); + break; + case 3: // 'TEXT_NODE' + str += cs[i].nodeValue; + break; + } + } + return str; +} + +function sortFunction_attr(a, b) { + elem1 = a.cells[index] ; + elem2 = b.cells[index] ; + str1 = elem1.className; + str2 = elem2.className; + sub1 = getCellContent(a.cells[0]).toLowerCase(); + sub2 = getCellContent(b.cells[0]).toLowerCase(); + + if (str1 == str2){ + if (sub1 == sub2) return 0; + if (sub1 < sub2) return -1; + return 1; + } + if (str1 < str2) return -1; + return 1; +} + +function sortFunction_string(a, b) { + str1 = getCellContent(a.cells[index]).toLowerCase(); + str2 = getCellContent(b.cells[index]).toLowerCase(); + + if (str1 == str2) return 0; + if (str1 < str2) return -1; + return 1; +} + +var diffTableSorter = null; + +function init_diff(){ + if( document.getElementById("table_diff") ) { + diffTableSorter = new 
TableSorter(document.getElementById("table_diff"), 'diff');
+    }
+}
+
+function sort_diff(col, type) {
+    if( diffTableSorter != null ) {
+        diffTableSorter.sort(col, type);
+    }
+}
+"""
+    return javacontent
+
+def HTML_OutputHead(file_output):
+    """Output beginning of the document.
+
+    Outputs static text.
+    """
+    file_output.write('<!DOCTYPE html>\n' +\
+        '<html>\n' +\
+        '<head>\n' +\
+        '<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n' +\
+        '<title>Differences between Mandriva / Rosa releases</title>\n' +\
+        '<style type="text/css">\n' + CssOutput() + '</style>\n' +\
+        '<script type="text/javascript">\n' + JavaScriptOutput() + '</script>\n' +\
+        '</head>\n' +\
+        '<body onload="init_diff()">\n\n')
+
+def GetRepoInfo(dict_packages, packagename, lenold, lennew, list_dict_old, list_dict_new):
+    """Generate package-specific information.
+
+    Generates class and name to be displayed in the table.
+    """
+    result1 = []
+    result2 = []
+    flag = 0
+    for i in range(lenold):
+        if packagename in list_dict_old[i]:
+            result1.append(list_dict_old[i][packagename][0][0] + '-' +\
+                list_dict_old[i][packagename][0][1])
+        else:
+            result1.append("N/A")
+        result2.append('')
+
+    tmplist = dict_packages[packagename]
+    tmpdict = {}
+    for (entry, reponum, entry_type) in dict_packages[packagename]:
+        tmpdict[reponum] = (entry[0][0] + '-' + entry[0][1], entry_type)
+
+    for i in range(lennew):
+        if(i not in tmpdict):
+            if(packagename not in list_dict_new[i]):
+                result1.append("N/A")
+                result2.append("")
+            else:
+                result1.append(list_dict_new[i][packagename][0][0] + '-' +\
+                    list_dict_new[i][packagename][0][1])
+                result2.append("")
+        else:
+            (name, entry_type) = tmpdict[i]
+            if entry_type == 1:
+                result1.append(name)
+                result2.append('class = "new"')
+            elif entry_type == 2:
+                result1.append("Removed")
+                result2.append('class = "removed"')
+                flag = 1
+            elif entry_type == 3:
+                result1.append(name)
+                result2.append('class = "updated"')
+            elif entry_type == 4:
+                result1.append(name)
+                result2.append('class = "downgraded"')
+
+    return (result1, result2, flag)
+
+def HTML_OutputBody(dict_packages, list_dict_old, list_dict_new, arg):
+    """Output table.
+
+    Outputs table in HTML format.
+    """
+    old = arg.old
+    new = arg.new
+    file_output = arg.output
+
+    file_output.write('<h1>Difference between repositories.</h1>\n' +\
+        '<p class="bold">The use of color coding in tables:</p>\n' +\
+        '<table>\n' +\
+        '<tr>\n' +\
+        '<td class="new">New</td><td class="updated">Updated</td>\n' +\
+        '</tr>\n<tr>\n' +\
+        '<td class="downgraded">Downgraded</td><td class="removed">Removed</td>\n' +\
+        '</tr>\n' +\
+        '</table>\n\n')
+    repo_list = []
+
+    all_list = []
+    all_list.extend(old)
+    all_list.extend(new)
+    lenold = len(old)
+    lennew = len(new)
+    length = lenold + lennew
+
+    reptext = 'repositories' if lenold > 1 else 'repository'
+    tmp_string = '<h2>Old ' + reptext + ':</h2>\n<ul>\n'
+    for i in range(lenold):
+        tmp_string = tmp_string + '<li id="repo' + str(i) + '">Repository ' +\
+            str(i) + ' : ' + old[i] + '</li>\n'
+    tmp_string = tmp_string + '</ul>\n'
+    file_output.write(tmp_string)
+
+    reptext = 'repositories' if lennew > 1 else 'repository'
+    tmp_string = '<h2>New ' + reptext + ':</h2>\n<ul>\n'
+    for k in range(lennew):
+        i = i + 1
+        tmp_string = tmp_string + '<li id="repo' + str(i) + '">Repository ' +\
+            str(i) + ' : ' + new[k] + '</li>\n'
+    tmp_string = tmp_string + '</ul>\n'
+    file_output.write(tmp_string)
+
+    tmp_string = '<p class="bold">Difference between '
+    i = 0
+    while(i < length):
+        if(i < length - 2):
+            delimeter = " , "
+        elif(i == length - 2):
+            delimeter = " and "
+        else:
+            delimeter = ''
+        temp = '<a href="#repo' + str(i) + '">' + \
+            'Repository ' + str(i) + '</a>'
+        if i < lenold:
+            repo_list.append('<th>Repository ' + str(i) + '</th>')
+        else:
+            ii = i + 1
+            repo_list.append('<th>Repository '+str(i)+'</th>')
+        tmp_string = tmp_string + temp + delimeter
+        i = i + 1
+    tmp_string = tmp_string + ".</p>\n"
+    file_output.write(tmp_string)
+
+    tmp_string = '<table id="table_diff">\n<tr>\n' +\
+        '<th>Package name</th>\n'
+    for reponame in repo_list:
+        tmp_string = tmp_string + reponame
+    tmp_string = tmp_string + '</tr>\n'
+
+    strnum = 1
+    resrange = []
+    for i in range(lennew):
+        resrange.append(lenold + i)
+
+    sorted_list = sorted(dict_packages, key=str.lower)
+    for packagename in sorted_list:
+        if strnum % 2:
+            strtype = "odd"
+        else:
+            strtype = "even"
+        tmp_string = tmp_string + '<tr class="' + strtype + '">'
+        tmp_string = tmp_string + '<td>' + packagename + '</td>'
+        (repo_name, repo_class, flag) = GetRepoInfo(dict_packages, packagename,
+            lenold, lennew, list_dict_old, list_dict_new)
+        if flag:
+            if(repo_name[lenold] == "Removed"):
+                res = 0
+                for k in resrange:
+                    if(repo_name[k] != "Removed"):
+                        res = 1
+                if res:
+                    for k in resrange:
+                        if(repo_name[k] == "Removed"):
+                            repo_name[k] = "N/A"
+                            repo_class[k] = ''
+            else:
+                for k in resrange:
+                    if(repo_name[k] == "Removed"):
+                        repo_name[k] = "N/A"
+                        repo_class[k] = ''
+
+        for i in range(length):
+            tmp_string = tmp_string + '<td ' + repo_class[i] + '>' +\
+                repo_name[i] + '</td>'
+        tmp_string = tmp_string + '</tr>\n'
+        strnum = strnum + 1
+    tmp_string = tmp_string + '</table>
\n' + + file_output.write(tmp_string) + +def HTML_OutputTail(file_output): + """Output end of document. + + Outputs static text. + """ + file_output.write(''' + +'''); + file_output.write('\n\n') + +def HTML_Output(dict_packages, list_dict_old, list_dict_new, arg): + """Output HTML file. + + Generates HTML file. + """ + ifnotquiet = arg.quiet + file_output = arg.output + + if ifnotquiet: + print _("Creating HTML file.") + HTML_OutputHead(file_output) + HTML_OutputBody(dict_packages, list_dict_old, list_dict_new, arg) + HTML_OutputTail(file_output) + +def main(args): + arg = ParseCommandLine() + arg.temp_dir = tempfile.mkdtemp() + '/' + head_old = arg.temp_dir + old_dir + head_new = arg.temp_dir + new_dir + arg.temp_old = [] + arg.temp_new = [] + if (arg.output): + tmp_output = arg.output[0] + else: + tmp_output = default_output + arg.output = None; + for i in range(len(arg.old)): + arg.old[i] = CheckArgs(arg.old[i], arg) + arg.temp_old.append(head_old + str(i) + '/') + for i in range(len(arg.new)): + arg.new[i] = CheckArgs(arg.new[i], arg) + arg.temp_new.append(head_new + str(i) + '/') + arg.output = tmp_output + CheckOutput(arg) + CheckParam(arg) + + ifsizes = arg.size + ifnotsimple = arg.simple + output_file = arg.output + ifnotquiet = arg.quiet + ifhtml = arg.html + ifchangelog = arg.changelog + + + GetFiles(arg) + + if not ifhtml: + (dict_old, dict_new) = ParsePackage(arg) + + (dict_new_packages, dict_del_packages, dict_upd_packages) = CreateDicts( + dict_old, dict_new) + + dict_obsoleted = GenerateDictObsoleted(dict_new, ifnotquiet) + if(dict_upd_packages) and (ifnotsimple) and (ifchangelog): + dict_logfile_diff = GenerateLogfileDiff(dict_upd_packages, arg) + if not ifnotsimple or not ifchangelog: + dict_logfile_diff = {} + + ProcessNewPackages(dict_new_packages, arg.output) + ProcessDelPackages(dict_del_packages, dict_obsoleted, arg.output) + if dict_upd_packages: + ProcessUpdPackages(dict_upd_packages, dict_logfile_diff, arg) + PrintSummary(dict_new_packages, dict_del_packages, dict_upd_packages, arg.output) + else: + (list_dict_old, list_dict_new) = HTML_ParsePackage(arg) + dict_old = HTML_UniteOld(list_dict_old) + dict_packages = HTML_CreateDicts(dict_old, list_dict_new) + HTML_Output(dict_packages, list_dict_old, list_dict_new, arg) + + exit_proc(arg) + +if __name__ == "__main__": + main(sys.argv) diff --git a/urpm-repograph.py b/urpm-repograph.py new file mode 100755 index 0000000..bb58427 --- /dev/null +++ b/urpm-repograph.py @@ -0,0 +1,1472 @@ +#!/usr/bin/python +''' +" Repograph utility for outputting graph of packages and their dependencies +" on each other. Also checks for unprovided dependencies. +" +" The tool downloads, unpacks and parses synthesis.hdlist.cz and +" (if necessary) files.xml.lzma to check for unprovided dependencies and +" to output graph of packages and their dependencies in DOT language format. +" The tool outputs data to standart output or to file. +" +" REQUIREMENTS +" ============ +" - urpmi +" - python-2.7 +" - lzma +" - gzip +" - libxml2 python library +" - rpm python library +" - networkx python library +" +" Copyright (C) 2012 ROSA Laboratory. +" Written by Vladimir Testov +" +" This program is free software: you can redistribute it and/or modify +" it under the terms of the GNU General Public License or the GNU Lesser +" General Public License as published by the Free Software Foundation, +" either version 2 of the Licenses, or (at your option) any later version. 
+" +" This program is distributed in the hope that it will be useful, +" but WITHOUT ANY WARRANTY; without even the implied warranty of +" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +" GNU General Public License for more details. +" +" You should have received a copy of the GNU General Public License +" and the GNU Lesser General Public License along with this program. +" If not, see . +''' +import argparse +import shutil +import sys +import os +import urllib2 +import urllib +import tempfile +import subprocess +import re +import time + +import rpm +import libxml2 +import urpmmisc + +import rpm5utils +from rpm5utils.urpmgraphs.classes.digraph import DiGraph +from rpm5utils.urpmgraphs.algorithms.cycles import simple_cycles +import gettext + +gettext.install('urpm-tools') +#import rpm5utils.urpmgraphs +#from rpm5utils.urpmgraphs.algorithms import cycles +#from rpm5utils.urpmgraphs.classes import digraph + + +synthesis_arch = "synthesis.hdlist.cz" +synthesis_arch_renamed = "synthesis.hdlist.gz" +synthesis_file = "synthesis.hdlist" +synthesis_search_field = ["info", "requires", "suggests", "provides"] +fileslist_arch = "files.xml.lzma" +fileslist_file = "files.xml" +tmp_cross_path = "cross" +loopdotfile = "loopgraph" +altdotfile = "altgraph" +default_output = "sys.stdout" +timeout = 5 + +re_search_unver = re.compile("([^\[\]]+)[\[\]]") +re_search_verrel = re.compile("\[(== |> |< |>= |<= )([\{\}+=0-9a-zA-Z_\.]*:)?([[\{\}+=0-9a-zA-Z_\.]+)(-[[\{\}+=0-9a-zA-Z_\.]+)?([^\[\]]*)\]$") + +def ParseCommandLine(): + """Parse arguments. + + Parse arguments from command line. + Return these arguments. + """ + parser = argparse.ArgumentParser( + description=_("Tool for generating dependency graph for REPOSITORY packages.")) + parser.add_argument("repository", action="store", nargs=1, + metavar="REPOSITORY", help="URL or local PATH to repository.") + parser.add_argument("--cross", "-c", action="store", nargs='+', metavar="CROSS_REPO", + help=_("Search for cross-repository references in CROSS_REPO(s) repositories.")) + + parser.add_argument("--quiet", "-q", action="store_false", + help=_("Hide service messages. (About progress status etc.)")) + parser.add_argument("--verbose", "-v", action="store_true", + help=_("Show warnings. (About unprovided packages etc.)")) + + parser.add_argument("--requires", "-r", action="store_true", + help=_("Process \"requires\" package dependencies. Used by default.")) + parser.add_argument("--suggests", "-s", action="store_true", + help=_("Process \"suggests\" package dependencies. 
If used without \ + --requires then only suggests dependencies are processed.")) + parser.add_argument("--file", "-f", action="store_true", + help=_("Process file dependencies.")) + parser.add_argument("--unprovided", "-u", action="store_true", + help=_("Show unprovided dependencies.")) + + pkgrequiresgroup = parser.add_mutually_exclusive_group() + pkgrequiresgroup.add_argument("--requires-recursive", action="store", nargs=1, default=None, + metavar="PKG", help=_("Search for packages, which are required by package PKG (PKG is a file name or package name)")) + pkgrequiresgroup.add_argument("--whatrequires", action="store", nargs=1, default=None, + metavar="PKG", help=_("Search for packages, which requires package PKG (PKG is a file name or package name)")) + + opactgroup = parser.add_mutually_exclusive_group() + opactgroup.add_argument("--loops", "-l", action="store_true", + help=_("Search for all simple loops of package dependecies.")) + opactgroup.add_argument("--alternatives", "-a", action="store_true", + help=_("Search for alternative packages providing the same feature.")) + opactgroup.add_argument("--broken", "-b", action="store_true", + help=_("Search for all broken packages and anything beetween them")) + parser.add_argument("--different", "-d", action="store_true", + help=_("Output each loop or each alternative in different file. \ + Ignored if --loops or --alternatives options are not present. \ + OUTPUT_FILE (if present) is tracted as folder name for new files in that case.")) + + graphgroup = parser.add_mutually_exclusive_group() + graphgroup.add_argument("--output", "-o", action="store", nargs=1, default='', + metavar="OUTPUT_FILE", help=_("Change graph output to \"OUTPUT_FILE\". STDOUT by default.")) + graphgroup.add_argument("--nograph", "-n", action="store_true", + help=_("Do not output graph. Tool will not start working if --quiet, --nograph are present \ + and --verbose is not. (If there is nothing to output - then nothing has to be done.)")) + return parser.parse_args() + +def exit_proc(arg): + """ + Remove trash. + """ + err_tmp_dir = arg.tmp_dir + err_output = arg.output + err_loops = arg.loops + err_alternatives = arg.alternatives + err_different = arg.different + + if (err_output != None) and not ((err_loops or err_alternatives) and (err_different)): + err_output.close() + if os.path.isdir(err_tmp_dir): + shutil.rmtree(err_tmp_dir) + exit(0) + +def CheckURL(url, arg): + """URL check. + + Check that URL is gettable. + """ + try: + urllib2.urlopen(url, None, timeout) + except: + print _("Error: URL to repository \"%s\" is incorrect") % url + exit_proc(arg) + +def CheckURLPATH(urlpath, arg): + """Argument checks. + + Check, that url or path is correct. 
+ """ + if (urlpath.startswith("http://") or urlpath.startswith("ftp://")): + if not urlpath.endswith('/'): + urlpath = urlpath + '/' + urlpath = urlpath + "media_info/" + CheckURL(urlpath, arg) + elif (os.path.isdir(urlpath)) or urlpath.startswith("file://"): + if urlpath.startswith("file://./"): + urlpath = urlpath[7:] + else: + urlpath = urlpath[6:] + if not urlpath.endswith('/'): + urlpath = urlpath + '/' + urlpath = urlpath + "media_info/" + if not os.path.isdir(urlpath): + print _("Error: directory %s does not exist") % urlpath + exit_proc(arg) + else: + (e1,e2,urltmp) = urpmmisc.GetUrlFromRepoName(urlpath) + if (urltmp): + if not urltmp.endswith('/'): + urltmp = urltmp + '/' + urlpath = urltmp + "media_info/" + CheckURL(urlpath, arg) + else: + print _("Error: \"%s\" is not correct url, path or name of repository") % urlpath + exit_proc(arg) + return urlpath + +def CheckOptions(arg): + """Options check. + + Make options understandable for the program. + """ + if (arg.suggests == 0): + arg.requires = 1 + +def CheckOutput(arg): + """Check output file. + + Check if the file can be created and redirect standart output to this file. + """ + file_output = arg.output + ifloops = arg.loops + ifalternatives = arg.alternatives + ifdifferent = arg.different + + if (file_output == "sys.stdout") or (file_output == "stdout"): + arg.output = sys.stdout + return + if((ifloops or ifalternatives) and ifdifferent): # check for dir + if(os.path.isdir(file_output)): + print _("Error: directory %s already exists") % file_output + arg.output = None + exit_proc(arg) + else: + file_output = os.path.realpath(file_output) + if (os.path.isfile(file_output)): + print _("Error: File %s already exists") % file_output + arg.output = None + exit_proc(arg) + + try: + os.makedirs(file_output) + except: + print _("Error: directory %s was not created") % file_output + arg.output = None + exit_proc(arg) + if not file_output.endswith('/'): + file_output = file_output + '/' + arg.output = file_output + else: + if(os.path.isfile(file_output)): + print _("Error: File %s already exists") % file_output + arg.output = None + exit_proc(arg) + else: + dirname = os.path.dirname(file_output) + if(dirname == '') or (os.path.exists(dirname)): + try: + arg.output = open(file_output, "w") + except IOError: + print _("Error: File %s cannot be created") % file_output + arg.output = None + exit_proc(arg) + else: + print _("Error: Path %s does not exist.") % dirname + arg.output = None + exit_proc(arg) + +def GetFile(urlpath, filename, localdir, arg): + """Donwload archive. + """ + ifnotquiet = arg.quiet + + if not os.path.isdir(localdir): + os.makedirs(os.path.realpath(localdir)) + if ifnotquiet: + print (_("getting file %s from ") % filename) + "\n " + urlpath + filename + if os.path.isdir(urlpath): + try: + shutil.copyfile(urlpath + filename, localdir + filename) + except: + print _("Error: file %s was not copied") % filename + exit_proc(arg) + else: + try: + file_from = urllib2.urlopen(urllib2.Request(urlpath + filename), None, timeout) + file_to = open(localdir + filename, "w") + shutil.copyfileobj(file_from, file_to) + except: + print _("Error: file %(from)s was not downloaded to %(to)s") %{"from": urlpath + filenam, "to": localdir + filename} + exit_proc(arg) + file_from.close() + file_to.close() + +def RenameSynthFile(localdir, arg): + """Rename. + + Rename Synthesis file so zgip can understand format. 
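+    Despite its ".cz" suffix, synthesis.hdlist.cz is an ordinary gzip
+    stream (cz == gz), so renaming it to .gz lets gzip accept it.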
+ """ + ifnotquiet = arg.quiet + + if not os.path.isfile(localdir + synthesis_arch): + print _("Error: file not found: ") + localdir + synthesis_arch + exit_proc(arg) + try: + os.rename(localdir + synthesis_arch, localdir + synthesis_arch_renamed) + except OSError: + print _("Error: cannot rename file %(from)s to %(to)s") % {"from": synthesis_arch, "to": synthesis_arch_renamed} + + exit_proc(arg) + if not os.path.isfile(localdir + synthesis_arch_renamed): + print _("Error: file %s is missing.") % (localdir + synthesis_arch_renamed) + exit_proc(arg) + else: + if ifnotquiet: + print _("file %(from)s was renamed to %(to)s") % {"from": synthesis_arch, "to": synthesis_arch_renamed} + +def UnpackSynthFile(localdir, arg): + """Unpack Synthesis file. + + Unpack renamed synthesis file using gzip. + """ + ifnotquiet = arg.quiet + + if ifnotquiet: + print _("unpacking file ") + synthesis_arch_renamed + if not os.path.isfile(localdir + synthesis_arch_renamed): + print _("Error: file %s is missing.") % (localdir + synthesis_arch_renamed) + exit_proc(arg) + subprocess.call(["gzip", "-df", localdir + synthesis_arch_renamed]) + +def PrepareSynthFile(localdir, arg): + """Prepare Synthesis file for parsing. + """ + RenameSynthFile(localdir, arg) + UnpackSynthFile(localdir, arg) + +def ParseVersion(names_list): + """Parse version info if present. + + Parse version information from the field. e.g. provided_name[>= 1.2.3-4.5.6] + is parsed to (provided_name, sign, (epoch, version, release)) + """ + new_names_list = [] + for name in names_list: + match = re_search_unver.match(name) + if match: + tmp_entry = match.group(1) + else: + tmp_entry = name + match = re_search_verrel.search(name) + if match: + sign = match.group(1)[:-1] + epoch = match.group(2) + if epoch: + epoch = epoch[:-1] + else: + epoch = '' + version = match.group(3) + release = match.group(4) + if release: + release = release[1:] + else: + release = '' + verrel = (epoch, version, release) + else: + sign = '' + verrel = ('','','') + new_names_list.append((tmp_entry, sign, verrel)) + return new_names_list + +def TagEpoch(i): + """Return disttagepoch value. + """ + if len(i) == 4: + return '-' + elif len(i) == 5: + disttag = i[4] + distepoch = '' + return disttag + distepoch + elif len(i) == 6: + disttag = i[4] + distepoch = i[5] + return disttag + distepoch + else: + print _("REPODIFF-Warning: strange : ") + str(i) + +def RPMNameFilter(rpmname, disttagepoch): + """Parse name and verrel. + + Function that parses name, version and release of a package. + """ + string = rpmname.split('-') + lastpart = string.pop() + tmp = lastpart.split('.') + tmp.pop() + lastpart = '.'.join(tmp) + if (lastpart[0].isdigit() or (not lastpart.startswith(disttagepoch))) and\ + (not lastpart.isdigit()): + name = '-'.join(string[:-1]) + else: + name = '-'.join(string[:-2]) + return name + +def ParseSynthFile(dict_provides, dict_asks, localdir, arg): + """Collect packages information. + + Parse synthesis.hdlist file. 
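+    Illustrative entries for the two dictionaries described below
+    (package names are invented): if bar-1.0-1mdv2011.0.i586 provides
+    "foo[== 2.0]" and requires "baz", then
+        dict_provides["foo"] contains ("bar-1.0-1mdv2011.0.i586", "==", ('', '2.0', ''))
+        dict_asks["bar-1.0-1mdv2011.0.i586"] contains [('baz', '', ('', '', ''))]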
+ dict_provides[phrase]=[(name, sign, verrel)] contain names of packages providing phrase + dict_asks[pkg_name]=[(name, sign, verrel)] contain everything + that pkg_name package asks + """ + ifnotquiet = arg.quiet + ifrequires = arg.requires + ifsuggests = arg.suggests + ifverbose = arg.verbose + iftagepoch = arg.requires_recursive or arg.whatrequires + ifnothide = not iftagepoch + + if not os.path.isfile(localdir + synthesis_file): + print _("Error: Synthesis file %s was not found.") % (localdir + synthesis_file) + exit_proc(-1) + if ifnotquiet: + print _("Parsing synthesis.") + try: + synth = open(localdir + synthesis_file) + tmp = ['', [], [], []] + for synthline in synth: + if synthline.endswith('\n'): + synthline = synthline[:-1] + tmpline = synthline.split('@') + tag = tmpline[1] + if(tag == synthesis_search_field[1]) and ifrequires: + tmp[1] = tmpline[2:] + elif(tag == synthesis_search_field[2]) and ifsuggests: + tmp[2] = tmpline[2:] + elif tag == synthesis_search_field[3]: + tmp[3] = tmpline[2:] + elif tag == synthesis_search_field[0]: + if (iftagepoch): + tmp[0] = tmpline[2:] + disttagepoch = TagEpoch(tmp[0]) + tmp[0] = tmp[0][0] + else: + tmp[0] = tmpline[2] + + parsed_tmp = ParseVersion(tmp[3]) + for (phrase, sign, verrel) in parsed_tmp: + if ((ifverbose and ifnothide) and (sign != '==') and (sign != '')): + print _("Warning: Unexpected sign %(sign)s in 'provides' section of %(of)s") %\ + {"sign": sign, "of": tmp[0]} + if (not phrase in dict_provides): + dict_provides[phrase] = [(tmp[0], sign, verrel)] + else: + dict_provides[phrase].append((tmp[0], sign, verrel)) + tmp_list = [] + tmp_list.extend(tmp[1]) + tmp_list.extend(tmp[2]) + if (iftagepoch): + dict_asks[tmp[0]] = (ParseVersion(tmp_list), RPMNameFilter(tmp[0], disttagepoch)) + else: + dict_asks[tmp[0]] = [ParseVersion(tmp_list)] + tmp = ['', [], [], []] + synth.close() + except IOError: + print _("Error: Failed to open synthesis file ") + localdir + synthesis_file + exit_proc(-1) + return (dict_provides, dict_asks) + +def compare_verrel(verrel1, sign, verrel2): + """Compare versions. + + Compare versions with attention to sign. + """ + (e1, v1, r1) = verrel1 + (e2, v2, r2) = verrel2 + # checks + if (v2 == '') or (v1 == ''): + return 1 + if (e1 == '') or (e2 == ''): + e1 = '0' + e2 = '0' + if (r1 == '') or (r2 == ''): + r1 = '0' + r2 = '0' + # compare + compare = rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) + if (sign == "=="): + if (compare == 0): + return 1 + elif (sign == ">"): + if (compare == 1): + return 1 + elif (sign == "<"): + if (compare == -1): + return 1 + elif (sign == ">="): + if (compare > -1): + return 1 + elif (sign == "<="): + if (compare < 1): + return 1 + return 0 + +def compare_2signs_verrel(provide_verrel, provide_sign, verrel, sign): + """Compare versions. + + Compare versions with attention to two signs. 
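+    For example, a provide "foo > 1.0" can satisfy a require "foo >= 2.0":
+    both constraints are open upward, so the ranges overlap and 1 is
+    returned without comparing the versions.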
+ """ + (e1, v1, r1) = provide_verrel + (e2, v2, r2) = verrel + if ((sign == '>') or (sign == '>=')) and ((provide_sign == '>') or (provide_sign == '>=')): + return 1 + if ((sign == '<') or (sign == '<=')) and ((provide_sign == '<') or (provide_sign == '<=')): + return 1 + if (v1 == '') or (v2 == ''): + return 1 + if (e1 == '') or (e2 == ''): + e1 = '0' + e2 = '0' + if (r1 == '') or (r2 == ''): + r1 = '0' + r2 = '0' + compare = rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) + if (compare == 0): + return 1 + if ((provide_sign == '<') or (provide_sign == '<=')) and (compare == 1): + return 1 + if ((provide_sign == '>') or (provide_sign == '>=')) and (compare == -1): + return 1 + return 0 + +def print_verrel(verrel): + """Output version info. + + Formatted output of version info. + """ + (e, v, r) = verrel + result = '' + if (e != ''): + result = e + ":" + if (v != ''): + result = result + v + if (r != ''): + result = result + '-' + r + return result + +def unpack_fileslist(localdir, arg): + """Unpack files.xml file. + + Unpack files.xml.lzma using lzma. + """ + ifnotquiet = arg.quiet + + if ifnotquiet: + print _("unpacking file ") + fileslist_arch + if not os.path.isfile(localdir + fileslist_arch): + print _("Error: file %s is missing.") % (localdir + fileslist_arch) + exit_proc(arg) + subprocess.call(["lzma", "-df", localdir + fileslist_arch]) + +def parse_fileslist(filename_check, filename_found, count_depend, dict_depend, localdir, ifcry, arg): + """Parse files.xml. + """ + ifnotquiet = arg.quiet + ifverbose = arg.verbose + ifnothide = (not arg.requires_recursive) and (not arg.whatrequires) + + if ifnotquiet: + print _("Reading fileslist") + if not os.path.isfile(localdir + fileslist_file): + print _("Error: Can't find fileslist ") + localdir + fileslist_file + exit_proc(arg) + doc = libxml2.parseFile(localdir + fileslist_file) + if (not doc): + print _("Error: Can't read fileslist ") + localdir + fileslist_file + exit_proc(arg) + root = doc.children + if root.name != "media_info": + print _("Error: Wrong fileslist.") + doc.freeDoc() + exit_proc(arg) + tag_package = root.children + while(tag_package): + if(tag_package.name != "files"): + tag_package = tag_package.next + continue + + tag_property = tag_package.properties + while(tag_property) and (tag_property.name != "fn"): + tag_property = tag_property.next + if not tag_property: + print _("Error: Corrupted fileslist") + doc.freeDoc() + exit_proc(arg) + name = tag_property.content + files = tag_package.content.split('\n') + for filename in files: + if filename in filename_check: + for packagename in filename_check[filename]: + if (packagename != name): + if (ifcry > 0): + if (filename_check[filename][packagename] == 1): + continue + else: + isdotted = 1 + else: + if (filename_check[filename][packagename] == 1): + isdotted = 1 + else: + isdotted = 0 + if packagename not in dict_depend: + dict_depend[packagename]={} + if name not in dict_depend[packagename]: + dict_depend[packagename][name] = isdotted + if packagename not in count_depend: + count_depend[packagename] = 1 + else: + count_depend[packagename] = count_depend[packagename] + 1 + if filename not in filename_found: + filename_found.append(filename) + if (ifverbose and ifnothide) and (ifcry == None): + print _("Warning: cross-repository dependency: ") + packagename +\ + "\n -> " + name + else: + if (ifverbose and ifnothide): + print _("Warning: package has self-dependecies: ") + packagename +\ + "\n <" + filename + ">" + tag_package = tag_package.next + doc.freeDoc() + #found!!! 
+    # and record the filename in filename_found
+
+def process_fileslist(filename_check, filename_found, count_depend, dict_depend, localdir, ifcry, arg):
+    """Process files.xml.
+
+    Perform the steps needed to process files.xml.
+    """
+    if ifcry == None:
+        path = arg.repository
+    else:
+        path = arg.crossurl[ifcry]
+    if not os.path.isfile(localdir + fileslist_file):
+        GetFile(path, fileslist_arch, localdir, arg)
+        unpack_fileslist(localdir, arg)
+    parse_fileslist(filename_check, filename_found, count_depend, dict_depend, localdir, ifcry, arg)
+
+def remake_count_depend(count_depend):
+    """Rebuild count_depend.
+
+    Rebuild count_depend when the --file option is used.
+    """
+    result = {}
+    for packagename in count_depend:
+        length = count_depend[packagename]
+        if length not in result:
+            result[length] = 1
+        else:
+            result[length] = result[length] + 1
+    return result
+
+def AddDepend(provides, temp_dict, packagename, asked, mode, dict_cross_error, ifshow):
+    """Add a dependency to the temporary dictionary.
+
+    Used in the FillDepend function.
+    """
+    if (provides not in temp_dict) and (provides != packagename):
+        if mode == 0:
+            temp_dict[provides] = 0
+        else:
+            temp_dict[provides] = 1
+            dict_cross_error[packagename] = ""
+            if ifshow:
+                print _("Warning: cross-repository dependency:\n package %(pkg)s depends on\n <- %(from)s located in another repository") %\
+                    {"pkg": packagename, "from": provides}
+    elif provides == packagename:
+        if ifshow:
+            print _("Warning: package has self-dependencies: ") + packagename +\
+                "\n <" + asked + ">"
+
+def FillDepend(dict_tmp_provides, asked, temp_dict, packagename, sign, verrel,
+        dict_error, dict_cross_error, mode, ifshow, ifshowunprovided):
+    """Fill the dependency dictionary.
+
+    Used in the FindDepend function.
+    """
+    found = 0
+    tmp = 0
+    for (provides, provide_sign, provide_verrel) in dict_tmp_provides[asked]:
+        if (sign == '') or (provide_sign == ''):
+            AddDepend(provides, temp_dict, packagename, asked, mode, dict_cross_error, ifshow)
+            tmp = 1
+            found = 1
+        elif provide_sign == '==':
+            if compare_verrel(provide_verrel, sign, verrel):
+                AddDepend(provides, temp_dict, packagename, asked, mode, dict_cross_error, ifshow)
+                tmp = 2
+                found = 1
+        else:
+            if compare_2signs_verrel(provide_verrel, provide_sign, verrel, sign):
+                AddDepend(provides, temp_dict, packagename, asked, mode, dict_cross_error, ifshow)
+                tmp = 3
+                found = 1
+    if found == 0:
+        dict_error[packagename] = ''
+        if ifshow:
+            print _("Warning: needed version is absent <%(ver)s> %(rel)s required by package") %\
+                {"ver": asked, "rel": print_verrel(verrel)} + "\n <%s>" % packagename
+        if ifshowunprovided:
+            if asked not in temp_dict:
+                temp_dict[asked] = 2
+ """ + for filename in filename_check: + if filename not in filename_found: + for packagename in filename_check[filename]: + if (filename_check[filename][packagename] == 1): + continue + if (ifshow): + print _("Warning: Package %(pkg)s unprovided by %(by)s") %{'pkg': packagename, 'by': filename} + if (ifshowunprovided): + if filename not in dict_depend[packagename]: + dict_depend[packagename][filename] = 2 + if packagename not in count_depend: + count_depend[packagename] = 1 + else: + count_depend[packagename] = count_depend[packagename] + 1 + if packagename not in dict_error: + dict_error[packagename] = '' + #if in filename_check but not in filename_found then update dict_error by contents of filename_check + +def FindDepend(dict_provides, dict_asks, dict_cross_provides, dict_cross_asks, arg): + """Find dependencies. + + Find dependencies and tell about unprovided packages. + """ + ifnotquiet = arg.quiet + ifcheckfiles = arg.file + ifcross = arg.cross + ifverbose = arg.verbose + ifnothide = (not arg.requires_recursive) and (not arg.whatrequires) + ifshow = ifverbose and ifnothide + ifshowunprovided = arg.unprovided or arg.broken + + dict_error = {} + dict_cross_error = {} + dict_depend = {} + count_depend = {} + filename_check = {} + filename_found = [] + if (ifnotquiet and ifnothide): + print _("Finding dependencies.") + for packagename in dict_asks: + temp_dict = {} + for (asked, sign, verrel) in dict_asks[packagename][0]: + if asked not in dict_provides: + if asked not in dict_cross_provides: + if not asked.startswith('/'): + dict_error[packagename] = '' + if (ifshow): + print _("Warning: can't find <%(ask)s> required by package\n <%(pkg)s>") %\ + {'ask': asked, 'pkg': packagename} + if (ifshowunprovided): + if asked not in temp_dict: + temp_dict[asked] = 2 + elif ifcheckfiles: + if asked not in filename_check: + filename_check[asked] = {} + filename_check[asked][packagename] = 0 # usual + else: + FillDepend(dict_cross_provides, asked, temp_dict, packagename, + sign, verrel, dict_error, dict_cross_error, 1, ifshow, ifshowunprovided) + else: + FillDepend(dict_provides, asked, temp_dict, packagename, + sign, verrel, dict_error, dict_cross_error, 0, ifshow, ifshowunprovided) + dict_depend[packagename] = temp_dict + if not ifcheckfiles: + length = len(temp_dict) + if length not in count_depend: + count_depend[length] = 1 + else: + count_depend[length] = count_depend[length] + 1 + else: + count_depend[packagename] = len(temp_dict) + + for packagename in dict_cross_asks: # cross-rep dependency + if packagename in dict_depend: + continue + temp_dict = {} + for (asked, sign, verrel) in dict_cross_asks[packagename][0]: + if asked in dict_provides: + FillDepend(dict_provides, asked, temp_dict, packagename, + sign, verrel, dict_error, dict_cross_error, 2, ifshow, ifshowunprovided) + else: + if (asked not in dict_cross_provides) and (asked.startswith('/')) and (ifcheckfiles): + if (asked not in filename_check): + filename_check[asked] = {} + filename_check[asked][packagename] = 1 # from cross-repo + + if packagename not in dict_depend: + dict_depend[packagename] = temp_dict + else: + temp_dict.update(dict_depend[packagename]) + dict_depend[packagename] = temp_dict + if not ifcheckfiles: + length = len(temp_dict) + if length not in count_depend: + count_depend[length] = 1 + else: + count_depend[length] = count_depend[length] + 1 + else: + count_depend[packagename] = len(temp_dict) + + if ifcheckfiles: + process_fileslist(filename_check, filename_found, count_depend, dict_depend, arg.tmp_dir, None, 
+        if ifcross:
+            for i in range(len(ifcross)):
+                process_fileslist(filename_check, filename_found, count_depend, dict_depend, get_temp(i, arg), i, arg)
+        generate_error_dict(filename_check, filename_found, dict_error, dict_depend, count_depend, ifshow, ifshowunprovided)
+        count_depend = remake_count_depend(count_depend)
+    if ifshow:
+        if ifcross:
+            sorted_tmp = sorted(dict_cross_error)
+            print "\n" + _("Total cross-referenced packages: ") + str(len(sorted_tmp))
+            for tmp_ent in sorted_tmp:
+                print tmp_ent
+        sorted_tmp = sorted(dict_error)
+        print "\n" + _("Total unprovided packages: ") + str(len(sorted_tmp))
+        for tmp_ent in sorted_tmp:
+            print tmp_ent
+    return dict_depend, count_depend
+
+def AssignColors(dict_depend, count_depend, arg):
+    """Assign colors.
+
+    Assign colors for the graph output.
+    """
+    ifnotquiet = arg.quiet
+    ifchangecolors = arg.whatrequires
+
+    dict_colors = {}
+    dict_count = {}
+
+    if ifnotquiet:
+        print _("Calculating colors.")
+    sorted_count = sorted(count_depend)
+    length = len(count_depend)
+    normalized_count = {}
+    i = 0
+    for number in sorted_count:
+        normalized_count[number] = float(i) / length
+        dict_count[number] = count_depend[number]
+        i = i + 1
+    for package_name in dict_depend:
+        number = len(dict_depend[package_name])
+        if ifchangecolors:
+            h = float(dict_count[number]) / count_depend[number]
+            s = 0.6 + 0.4 * normalized_count[number]
+        else:
+            h = normalized_count[number]
+            s = 0.6 + (0.4 * dict_count[number]) / count_depend[number]
+        b = 1.0
+        dict_colors[package_name] = (h, s, b)
+        dict_count[number] = dict_count[number] - 1
+    return dict_colors
+
+def OutputGraphHead(file_output):
+    """Output the graph head.
+
+    Static information about the graph.
+    """
+    file_output.write('\n\ndigraph packages {\nsize="20.69,25.52";\nratio="fill";\n' +\
+        'rankdir="TB";\nnode[style="filled"];\nnode[shape="box"];\n\n')
+
+def print_color(color_tuple):
+    """Format a color.
+
+    Format a color tuple for output.
+    """
+    return str(color_tuple[0]) + ' ' + str(color_tuple[1]) + ' ' +\
+        str(color_tuple[2])
+
+def OutputGraphLoopBody(loop, loop_color, file_output):
+    """Output the graph body in the --loops case.
+    """
+    beg = 1
+    for pkg in loop:
+        if beg:
+            beg = 0
+            tmp_string = '"' + pkg + '"'
+        else:
+            tmp_string = tmp_string + ' -> "' + pkg + '"'
+    file_output.write(tmp_string + ' [color="' + str(loop_color) + ' 1.0 1.0"];\n')
+
+def OutputGraphAltBody(phrase, alt, alt_color, file_output):
+    """Output the graph body in the --alternatives case.
+    """
+    tmp_string = '"' + phrase + '" -> {\n'
+    sorted_list = sorted(alt)
+    for packagename in sorted_list:
+        tmp_string = tmp_string + '"' + packagename + '"\n'
+    tmp_string = tmp_string + '} [color="' + str(alt_color) + ' 1.0 1.0"];\n\n'
+    file_output.write(tmp_string)
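+# The writers above and below emit plain Graphviz DOT. As a sketch, a package
+# "foo" depending on "bar" and "baz" comes out roughly as:
+#   "foo" -> {
+#   "bar"
+#   "baz"
+#   } [color="0.5 0.8 1.0"];
+# so the result can be rendered with, e.g., `dot -Tsvg output.dot`.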
+ """ + tmp_string = '"' + packagename + '" -> {\n' + sorted_depend = sorted(some_list) + if (node_type == 1): + arrow_style = ', style="dotted"' + else: + arrow_style = '' + if (node_type == 2): + tmp_string = tmp_string + 'node[shape="ellipse", fillcolor="0.0 1.0 1.0"];\n' + for dependfrom in sorted_depend: + tmp_string = tmp_string + '"' + dependfrom + '"\n' + if (node_type == 0) or (node_type == 1): + tmp_string = tmp_string + '} [color="' +\ + print_color(dict_color[packagename]) +\ + '"' + arrow_style + '];\n\n' + elif (node_type == 2): + tmp_string = tmp_string + '};\n\n' + file_output.write(tmp_string) + + +def OutputGraphTail(file_output): + """Finish the graph. + """ + file_output.write('}\n') + +def OutputGraph(dict_depend, dict_color, arg): + """Output the graph. + """ + file_output = arg.output + if arg.whatrequires: + selected_node = arg.whatrequires[0] + elif arg.requires_recursive: + selected_node = arg.requires_recursive[0] + else: + selected_node = None + OutputGraphHead(file_output) + + if (selected_node): + file_output.write('"' + selected_node + '" [color="0.4 1.0 1.0"];\n') + sorted_list = sorted(dict_depend) + for packagename in sorted_list: + if not dict_depend[packagename]: + continue + usual_list = [] + cross_list = [] + missed_list = [] + for pkg in dict_depend[packagename]: + mode = dict_depend[packagename][pkg] + if (mode == 0): + usual_list.append(pkg) + elif (mode == 1): + cross_list.append(pkg) + elif (mode == 2): + missed_list.append(pkg) + + if (len(usual_list) > 0): + OutputGraphBody(usual_list, dict_color, file_output, packagename, 0) + if (len(cross_list) > 0): + OutputGraphBody(cross_list, dict_color, file_output, packagename, 1) + if (len(missed_list) > 0): + OutputGraphBody(missed_list, None, file_output, packagename, 2) + + OutputGraphTail(file_output) + +def CountPor(number): + tmp = number / 10 + por = 0 + while tmp: + tmp = tmp / 10 + por = por + 1 + return por + +def LeadingZeroes(number, por): + por2 = CountPor(number) + return (por-por2)*'0' + str(number) + +def OutputLoopGraph(loops, colors, arg): + """Output graph(s) of loops. + """ + ifdifferent = arg.different + if arg.whatrequires: + selected_node = arg.whatrequires[0] + elif arg.requires_recursive: + selected_node = arg.requires_recursive[0] + else: + selected_node = None + + output = arg.output + file_output = output + if not ifdifferent: + OutputGraphHead(file_output) + if (selected_node): + file_output.write('"' + selected_node + '" [color="0.4 1.0 1.0"];\n') + + length = len(colors) + por = CountPor(length) + for i in range(length): + if ifdifferent: + filename = output + loopdotfile + LeadingZeroes(i, por) + '.dot' + file_output = open(filename, 'w') + OutputGraphHead(file_output) + if (selected_node): + file_output.write('"' + selected_node + '" [color="0.4 1.0 1.0"];\n') + OutputGraphLoopBody(loops[i], colors[i], file_output) + if ifdifferent: + OutputGraphTail(file_output) + file_output.close() + + if not ifdifferent: + OutputGraphTail(file_output) + +def OutputAltGraph(alternatives, colors, arg): + """Output graph(s) of alternatives. 
+ """ + ifdifferent = arg.different + if arg.whatrequires: + selected_node = arg.whatrequires[0] + elif arg.requires_recursive: + selected_node = arg.requires_recursive[0] + else: + selected_node = None + + output = arg.output + file_output = output + if not ifdifferent: + OutputGraphHead(file_output) + if (selected_node): + file_output.write('"' + selected_node + '" [color="0.4 1.0 1.0"];\n') + + i = 0 + length = len(colors) + por = CountPor(length) + for phrase in alternatives: + if ifdifferent: + filename = output + altdotfile + LeadingZeroes(i, por) + '.dot' + file_output = open(filename, 'w') + OutputGraphHead(file_output) + if (selected_node): + file_output.write('"' + selected_node + '" [color="0.4 1.0 1.0"];\n') + OutputGraphAltBody(phrase, alternatives[phrase], colors[i], file_output) + if ifdifferent: + OutputGraphTail(file_output) + file_output.close() + i = i + 1 + + if not ifdifferent: + OutputGraphTail(file_output) + +def BuildGraph(dict_depend): + """Build additional structures. + + Build structures used in algorithm that finds loops. And later in --pkg-... options. + """ + dict_out = {} + dict_in = {} + for packagename in dict_depend: + for pkg2 in dict_depend[packagename]: + if pkg2 not in dict_out: + dict_out[pkg2] = [] + if packagename not in dict_in: + dict_in[packagename] = [] + dict_out[pkg2].append(packagename) + dict_in[packagename].append(pkg2) + return (dict_in, dict_out) + +def RemoveNonCycle(dict_in, dict_out, arg): + """Remove non-cycle nodes from graph. + + Remove all nodes that are not present in any loop. + Linear algorithm. On each step it checks all marked nodes. + If node hasn't got any nodes dependent from it or it's not + dependent on any node, then this node cannot be present in any loop. + So we exlude this node and mark all nodes that are connected to this node. + Because only for them the situation has been changed a little. + All remained nodes are included in some loop. + """ + ifnotquiet = arg.quiet + + check = [] #items for further checks + to_remove = [] #items for remove + for pkg in dict_in: + check.append(pkg) + for pkg in dict_out: + if pkg not in check: + check.append(pkg) + + ischanged = 1 + removed = 0 + while(ischanged): + ischanged = 0 + for pkg in check: + if (pkg not in dict_in) or (pkg not in dict_out): + to_remove.append(pkg) + removed = removed + 1 + ischanged = 1 + check = [] + for pkg in to_remove: + if (pkg in dict_in): + for pkg2 in dict_in[pkg]: + dict_out[pkg2].remove(pkg) + if (len(dict_out[pkg2]) == 0): + dict_out.pop(pkg2) + if pkg2 not in check: + check.append(pkg2) + dict_in.pop(pkg) + if (pkg in dict_out): + for pkg2 in dict_out[pkg]: + dict_in[pkg2].remove(pkg) + if (len(dict_in[pkg2]) == 0): + dict_in.pop(pkg2) + if pkg2 not in check: + check.append(pkg2) + dict_out.pop(pkg) + to_remove = [] + if ifnotquiet: + print _("Non-cycle nodes removed: ") + str(removed) + print _("Cyclic packages: ") + str(len(dict_in)) + +def FindLoops(dict_depend, arg): + """Find all simple loops in oriented graph. + + First, remove all nodes, that are not present in any loop. + Then search for all loops in what has remained. 
+ """ + ifnotquiet = arg.quiet + ifverbose = arg.verbose + file_output = arg.output + + benchtime = time.clock() + (dict_in, dict_out) = BuildGraph(dict_depend) + RemoveNonCycle(dict_in, dict_out, arg) + if ifnotquiet: + benchtime1 = time.clock() - benchtime + print _("Worktime: %s seconds") % str(benchtime1) + G = DiGraph() + for pkg1 in dict_in: + for pkg2 in dict_in[pkg1]: + G.add_edge(pkg1, pkg2) + if ifnotquiet: + print _("Searching loops.") + loops = simple_cycles(G) + if ifnotquiet: + benchtime2 = time.clock() - benchtime + print _("End of search.") + print _("Loops search: %s seconds") % str(benchtime2) + + if ifverbose: + i = 1 + print _("Total: %s loops.") % str(len(loops)) + for loop in loops: + beg = 1 + for pkg in loop: + if beg: + beg = 0 + tmpstr = _("Loop ") + str(i) + ": " + pkg + else: + tmpstr = tmpstr + " -> " + pkg + print tmpstr + i = i + 1 + + return loops + +def FindAlternatives(dict_provides, arg): + """Find Alternatives. + + Select all phrases that are provided by more than one package. + """ + ifverbose = arg.verbose + ifnotquiet = arg.quiet + + if (ifnotquiet): + print _("Searching alternatives.") + altlist = {} + for phrase in dict_provides: + if len(dict_provides[phrase]) > 1: + altlist[phrase] = [] + for (packagename, r1, r2) in dict_provides[phrase]: + altlist[phrase].append(packagename) + + if ifverbose: + length = len(altlist) + i = 1 + sorted_list = sorted(altlist) + print _("Total: %d alternatives.") % length + for phrase in sorted_list: + print _("Alternative ") + str(i) + ": " + phrase + _(" is provided by:") + for packagename in altlist[phrase]: + print " -> " + packagename + i = i + 1 + + if (ifnotquiet): + print _("End of search.") + return altlist + +def FindBroken(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg): + """Find Broken packages. + + Select all unprovided packages (with unprovided dependencies or dependent from packages with unprovided dependencies. + """ + startlist = [] + for packagename in dict_depend: + for pkg in dict_depend[packagename]: + if dict_depend[packagename][pkg] == 2: + if packagename not in startlist: + startlist.append(packagename) + return RemakeDicts(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg, startlist) + +def AssignDictColors(tmpdict): + """Assign color for every loop. + """ + length = len(tmpdict) + colors = [] + for i in range(length): + colors.append ((i * 1.) / length) + return colors + +def get_temp(i, arg): + """Get numbered temporarily directory name. + """ + return arg.tmp_dir + tmp_cross_path + str(i) + '/' + +def PkgCheck(pkgname, dict_asks, dict_cross_asks): + """Check that PKG from --pkg-require or --pkg-provide is existent in repository. + + Searches PKG in file names and package names from repository. + """ + if pkgname in dict_asks: + return pkgname + else: + for filename in dict_asks: + if (pkgname == dict_asks[filename][1]): + return filename + + if pkgname in dict_cross_asks: + return pkgname + else: + for filename in dict_cross_asks: + if (pkgname == dict_cross_asks[filename][1]): + return filename + return None + +def RemakeAsks(startlist, dict_asks, dict_depend, dict_cross_asks, arg, ifbroken): + """Select needed packages, so we can rebuild everything else. 
+ """ + ifwhatrequires = arg.whatrequires + ifrequires_recursive = arg.requires_recursive + ifnotquite = arg.quiet + ifverbose = arg.verbose + + (dict_in, dict_out) = BuildGraph(dict_depend) + if (ifbroken != None): + dict_tmp = dict_out + elif (ifwhatrequires): + dict_tmp = dict_out + elif (ifrequires_recursive): + dict_tmp = dict_in + + list_selected = [] + list_selected.extend(startlist) + list_append = [] + list_append.extend(startlist) + if (ifnotquite): + if (ifbroken != None): + print _("Searching for broken packages.") + if (ifverbose): + sorted_list = sorted(startlist) + for pkgname in sorted_list: + print " -> " + pkgname + elif (ifrequires_recursive): + print _("Searching for packages REQUIRED by ") + startlist[0] + elif (ifwhatrequires): + print _("Searching for packages that REQUIRE ") + startlist[0] + #select what we need, show what we have found (if --verbose option is used) + level_cnt = 0 + ischanged = 1 + while (ischanged == 1): + if (ifverbose): + if (level_cnt > 0): + if (ifnotquite): + print _("Level %d dependency.") % level_cnt + for tmppkg in list_append: + print " -> " + tmppkg + + ischanged = 0 + tmp_append = [] + #check for every filename in custody if it in list_selected. + for name in list_append: + if name in dict_tmp: + for tmpname in dict_tmp[name]: + #if we haven't met it yet - put it undet custody + if (tmpname not in list_selected) and (tmpname not in tmp_append): + tmp_append.append(tmpname) + ischanged = 1 + + list_selected.extend(list_append) + list_append = tmp_append + level_cnt = level_cnt + 1 + #remove what has remained unselected + new_dict_asks = {} + new_dict_cross_asks = {} + for filename in list_selected: + if filename in dict_asks: + new_dict_asks[filename] = dict_asks[filename] + else: + if not filename in dict_cross_asks: + new_dict_asks[filename] = [[], ""] + else: + new_dict_cross_asks[filename] = dict_cross_asks[filename] + return (new_dict_asks, new_dict_cross_asks) + +def RemoveExternal(dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, ifshow): + """Remove dependecies external to group. + """ + new_dict_asks = {} + new_dict_provides = {} + for filename in dict_asks: + new_dict_asks[filename] = ([], filename) + for asks in dict_asks[filename][0]: + if asks[0] in dict_provides: + found = 0 + for pkg in dict_provides[asks[0]]: + if pkg[0] in dict_asks: + found = 1 + if asks[0] not in new_dict_provides: + new_dict_provides[asks[0]] = [] + if not pkg in new_dict_provides[asks[0]]: + new_dict_provides[asks[0]].append(pkg) + if (found == 1): + new_dict_asks[filename][0].append(asks) + elif asks[0] in dict_cross_provides: + new_dict_asks[filename][0].append(asks) + elif ifshow: + new_dict_asks[filename][0].append(asks) + + for filename in dict_cross_asks: + for asks in dict_cross_asks[filename][0]: + if asks[0] in dict_provides: + for pkg in dict_provides[asks[0]]: + if pkg[0] in dict_asks: + if asks[0] not in new_dict_provides: + new_dict_provides[asks[0]] = [] + if not pkg in new_dict_provides[asks[0]]: + new_dict_provides[asks[0]].append(pkg) + + return (new_dict_asks, new_dict_provides) + +def RemakeDicts(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg, brokenlist=None): + """Procedure for rebuilding packages lists. 
+
+def RemakeDicts(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg, brokenlist=None):
+    """Rebuild the package lists.
+
+    Used for the --whatrequires and --requires-recursive options
+    and for the --broken option.
+    """
+    ifnotquiet = arg.quiet
+    whatrequires = arg.whatrequires
+    requires_recursive = arg.requires_recursive
+    ifshow = arg.unprovided or arg.broken
+
+    if ifnotquiet:
+        print _("Remaking structures.")
+    if brokenlist == None:
+        if whatrequires:
+            pkgname = whatrequires[0]
+        else:
+            pkgname = requires_recursive[0]
+        filename = PkgCheck(pkgname, dict_asks, dict_cross_asks)
+        if whatrequires:
+            arg.whatrequires[0] = filename
+        else:
+            arg.requires_recursive[0] = filename
+        if not filename:
+            print _("Error: can't find package name or filename \"") + pkgname + "\"."
+            exit_proc(arg)
+        startlist = [filename]
+    else:
+        startlist = brokenlist
+
+    (dict_asks, dict_cross_asks) = RemakeAsks(startlist, dict_asks, dict_depend, dict_cross_asks, arg, brokenlist)
+    (new_dict_asks, new_dict_provides) = RemoveExternal(dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, ifshow)
+    (new_dict_cross_asks, new_dict_cross_provides) = RemoveExternal(dict_cross_asks, dict_cross_provides, dict_asks, dict_provides, ifshow)
+    (dict_depend, count_depend) = FindDepend(new_dict_provides, new_dict_asks, new_dict_cross_provides, new_dict_cross_asks, arg)
+    return (dict_depend, count_depend, new_dict_asks, new_dict_provides, new_dict_cross_asks, new_dict_cross_provides)
+
+def main(args):
+    # define the arguments namespace
+    arg = ParseCommandLine()
+    ifnotquiet = arg.quiet
+    ifverbose = arg.verbose
+    ifnograph = arg.nograph
+    ifrequires_recursive = arg.requires_recursive
+    ifwhatrequires = arg.whatrequires
+    ifloops = arg.loops
+    ifalternatives = arg.alternatives
+    ifbroken = arg.broken
+    ifoptact = ifloops or ifalternatives or ifbroken
+    ifunprovided = arg.unprovided
+
+    arg.crossurl = []
+    arg.tmp_dir = ""
+    if arg.output:
+        file_output = arg.output[0]
+    else:
+        file_output = default_output
+    arg.output = None
+    if (not ifnotquiet) and (not ifverbose) and ifnograph:
+        print _("Do not use -q/--quiet and -n/--nograph together without -v/--verbose.")
+        print _("That way there is no information to output anywhere. Nothing will be done.")
+        exit_proc(arg)
+    if ifunprovided and ifbroken:
+        print _("Do not use the -u/--unprovided and -b/--broken options together.")
+        print _("-b does everything that -u does, and a little more.")
+        exit_proc(arg)
+    arg.repository = arg.repository[0]
+    arg.repository = CheckURLPATH(arg.repository, arg)
+    if arg.cross:
+        crossrange = range(len(arg.cross))
+        for i in crossrange:
+            arg.crossurl.append(CheckURLPATH(arg.cross[i], arg))
+    CheckOptions(arg)
+    arg.tmp_dir = tempfile.mkdtemp() + '/'
+    # fetch all needed files
+    GetFile(arg.repository, synthesis_arch, arg.tmp_dir, arg)
+    PrepareSynthFile(arg.tmp_dir, arg)
+    if arg.cross:
+        for i in crossrange:
+            temp_subdir = get_temp(i, arg)
+            GetFile(arg.crossurl[i], synthesis_arch, temp_subdir, arg)
+            PrepareSynthFile(temp_subdir, arg)
+
+    # generate the dictionaries
+    dict_provides = {}
+    dict_asks = {}
+    dict_cross_provides = {}
+    dict_cross_asks = {}
+    ParseSynthFile(dict_provides, dict_asks, arg.tmp_dir, arg)
+    if arg.cross:
+        for i in crossrange:
+            temp_subdir = get_temp(i, arg)
+            ParseSynthFile(dict_cross_provides, dict_cross_asks, temp_subdir, arg)
+    (dict_depend, count_depend) = FindDepend(dict_provides, dict_asks, dict_cross_provides, dict_cross_asks, arg)
+
+    if ifrequires_recursive or ifwhatrequires:
+        answer = RemakeDicts(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg)
+        if answer:
+            (dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides) = answer
+
+    arg.output = file_output
+    CheckOutput(arg)
+    if ifoptact:  # these options are mutually exclusive
+        if ifloops:
+            loops = FindLoops(dict_depend, arg)
+            if ifnograph:
+                exit_proc(arg)
+            colors = AssignDictColors(loops)
+            OutputLoopGraph(loops, colors, arg)
+        elif ifalternatives:
+            alternatives = FindAlternatives(dict_provides, arg)
+            if ifnograph:
+                exit_proc(arg)
+            colors = AssignDictColors(alternatives)
+            OutputAltGraph(alternatives, colors, arg)
+        elif ifbroken:
+            brokengraph = FindBroken(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg)
+            if ifnograph:
+                exit_proc(arg)
+            dict_color = AssignColors(brokengraph[0], brokengraph[1], arg)
+            OutputGraph(brokengraph[0], dict_color, arg)
+    else:
+        if ifnograph:
+            exit_proc(arg)
+        dict_color = AssignColors(dict_depend, count_depend, arg)
+        OutputGraph(dict_depend, dict_color, arg)
+
+    exit_proc(arg)
+
+if __name__ == "__main__":
+    main(sys.argv)
diff --git a/urpm-repomanage.py b/urpm-repomanage.py
new file mode 100755
index 0000000..7437f3a
--- /dev/null
+++ b/urpm-repomanage.py
@@ -0,0 +1,239 @@
+#!/usr/bin/python
+'''
+" Repomanage utility for distributions using urpm
+"
+" The tool traverses a directory, builds a dict of
+" foo[(name, arch)] = [/path/to/file/that/is/highest, /path/to/equalfile]
+" and then reports newest/old packages
+"
+" Based on repomanage from yum-utils
+"
+" Copyright (C) 2011 ROSA Laboratory.
+" Written by Denis Silakov
+"
+" This program is free software: you can redistribute it and/or modify
+" it under the terms of the GNU General Public License or the GNU Lesser
+" General Public License as published by the Free Software Foundation,
+" either version 2 of the Licenses, or (at your option) any later version.
+"
+" This program is distributed in the hope that it will be useful,
+" but WITHOUT ANY WARRANTY; without even the implied warranty of
+" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+" GNU General Public License for more details.
+" +" You should have received a copy of the GNU General Public License +" and the GNU Lesser General Public License along with this program. +" If not, see . +''' + +import os +import sys +import rpm +import fnmatch +import subprocess +import string +from rpm5utils import miscutils, arch, transaction, Rpm5UtilsError +import urpmmisc + +import argparse + +import gettext +gettext.install('urpm-tools') + + +def errorprint(stuff): + print >> sys.stderr, stuff + + +def getFileList(path, ext, filelist): + """Return all files in path matching ext, store them in filelist, recurse dirs + return list object""" + + extlen = len(ext) + try: + dir_list = os.listdir(path) + except OSError, e: + errorprint(_('Error accessing directory %(path)s, %(e)s') % {"path": path,"e": str(e)}) + return [] + + for d in dir_list: + if os.path.isdir(path + '/' + d): + filelist = getFileList(path + '/' + d, ext, filelist) + else: + if string.lower(d[-extlen:]) == '%s' % (ext): + newpath = os.path.normpath(path + '/' + d) + filelist.append(newpath) + + return filelist + + +def trimRpms(rpms, excludeGlobs): + badrpms = [] + for fn in rpms: + for glob in excludeGlobs: + if fnmatch.fnmatch(fn, glob): + #~ print 'excluded: %s' % fn + if fn not in badrpms: + badrpms.append(fn) + for fn in badrpms: + if fn in rpms: + rpms.remove(fn) + + return rpms + + +def parseargs(args): + parser = argparse.ArgumentParser(description=_('manage a directory of rpm packages and report newest or oldest packages')) + + # new is only used to make sure that the user is not trying to get both + # new and old, after this old and not old will be used. + # (default = not old = new) + parser.add_argument("path", metavar="path", + help=_('path to directory with rpm packages')) + group = parser.add_mutually_exclusive_group(); + group.add_argument("-o", "--old", default=False, action="store_true", + help=_('print the older packages')) + group.add_argument("-n", "--new", default=False, action="store_true", + help=_('print the newest packages (this is the default behavior)')) + parser.add_argument("-r", "--remove-old", default=False, action="store_true", + help=_('remove older packages')) + parser.add_argument("-s", "--space", default=False, action="store_true", + help=_('space separated output, not newline')) + parser.add_argument("-k", "--keep", default=1, dest='keep', action="store", + help=_('number of newest packages to keep - defaults to 1')) + parser.add_argument("-c", "--nocheck", default=0, action="store_true", + help=_('do not check package payload signatures/digests')) + group_log = parser.add_mutually_exclusive_group(); + group_log.add_argument("-q", "--quiet", default=0, action="store_true", + help=_('be completely quiet')) + group_log.add_argument("-V", "--verbose", default=False, action="store_true", + help=_('be verbose - say which packages are decided to be old and why \ + (this info is dumped to STDERR)')) + + opts = parser.parse_args() + + return opts + + +def main(args): + + options = parseargs(args) + mydir = options.path + + rpmList = [] + rpmList = getFileList(mydir, '.rpm', rpmList) + verfile = {} + pkgdict = {} # hold all of them - put them in (n,a) = [(e,v,r),(e1,v1,r1)] + + keepnum = int(options.keep)*(-1) # the number of items to keep + + if len(rpmList) == 0: + errorprint(_('No files to process')) + sys.exit(1) + + ts = rpm.TransactionSet() + if options.nocheck: + ts.setVSFlags(~(rpm._RPMVSF_NOPAYLOAD)) + else: + ts.setVSFlags(~(rpm.RPMVSF_NOMD5|rpm.RPMVSF_NEEDPAYLOAD)) + + for pkg in rpmList: + try: + hdr = 
+        except Rpm5UtilsError, e:
+            msg = _("Error opening pkg %(pkg)s: %(err)s") % {"pkg": pkg, "err": str(e)}
+            errorprint(msg)
+            continue
+
+        pkgtuple = miscutils.pkgDistTupleFromHeader(hdr)
+        (n, a, e, v, r, d) = pkgtuple
+        del hdr
+
+        if (n, a) not in pkgdict:
+            pkgdict[(n, a)] = []
+        pkgdict[(n, a)].append((e, v, r, d))
+
+        if pkgtuple not in verfile:
+            verfile[pkgtuple] = []
+        verfile[pkgtuple].append(pkg)
+
+    for natup in pkgdict.keys():
+        evrlist = pkgdict[natup]
+        if len(evrlist) > 1:
+            evrlist = urpmmisc.unique(evrlist)
+            evrlist.sort(miscutils.compareDEVR)
+            pkgdict[natup] = evrlist
+
+    del ts
+
+    # now we have our dicts - we can return whatever by iterating over them
+
+    outputpackages = []
+
+    # a flag indicating that old packages were found
+    old_found = 0
+
+    # if new
+    if not options.old:
+        for (n, a) in pkgdict.keys():
+            evrlist = pkgdict[(n, a)]
+
+            if len(evrlist) < abs(keepnum):
+                newevrs = evrlist
+            else:
+                newevrs = evrlist[keepnum:]
+                if len(evrlist[:keepnum]) > 0:
+                    old_found = 1
+                if options.remove_old:
+                    for dropped in evrlist[:keepnum]:
+                        (e, v, r, d) = dropped
+                        pkg = str(verfile[(n, a, e, v, r, d)]).replace("['", "").replace("']", "")
+                        subprocess.call(["rm", pkg])
+                if options.verbose:
+                    for dropped in evrlist[:keepnum]:
+                        (e, v, r, d) = dropped
+                        print >> sys.stderr, _("Dropped ") + str(verfile[(n, a, e, v, r, d)])
+                        print >> sys.stderr, _(" superseded by: ")
+                        for left in newevrs:
+                            (e, v, r, d) = left
+                            print >> sys.stderr, "  " + str(verfile[(n, a, e, v, r, d)])
+
+            for (e, v, r, d) in newevrs:
+                for pkg in verfile[(n, a, e, v, r, d)]:
+                    outputpackages.append(pkg)
+
+    if options.old:
+        for (n, a) in pkgdict.keys():
+            evrlist = pkgdict[(n, a)]
+
+            if len(evrlist) < abs(keepnum):
+                continue
+
+            oldevrs = evrlist[:keepnum]
+            if len(oldevrs) > 0:
+                old_found = 1
+            for (e, v, r, d) in oldevrs:
+                for pkg in verfile[(n, a, e, v, r, d)]:
+                    outputpackages.append(pkg)
+                    if options.remove_old:
+                        subprocess.call(["rm", "-f", pkg])
+                    if options.verbose:
+                        print >> sys.stderr, _("Dropped ") + pkg
+                        print >> sys.stderr, _(" superseded by: ")
+                        for left in evrlist[keepnum:]:
+                            (e, v, r, d) = left
+                            print >> sys.stderr, "  " + str(verfile[(n, a, e, v, r, d)])
+
+    if not options.quiet:
+        outputpackages.sort()
+        for pkg in outputpackages:
+            if options.space:
+                print '%s' % pkg,
+            else:
+                print pkg
+
+    if old_found == 1:
+        sys.exit(3)
+
+if __name__ == "__main__":
+    main(sys.argv)
diff --git a/urpm-reposync.py b/urpm-reposync.py
new file mode 100755
index 0000000..b4f4328
--- /dev/null
+++ b/urpm-reposync.py
@@ -0,0 +1,1332 @@
+#!/usr/bin/python2.7
+'''
+Created on Jan 11, 2012
+
+@author: flid
+'''
+
+import rpm
+import argparse
+import sys
+import subprocess
+import re
+import os
+from urllib2 import urlopen, HTTPError, URLError
+import zlib
+import glob
+import shutil
+import platform
+import copy
+import unittest
+
+import gettext
+gettext.install('urpm-tools')
+
+
+ARCH = platform.machine()
+downloaded_rpms_dir = '/tmp/urpm-reposync.rpms'
+VERSION = "urpm-reposync 2.1"
+
+def vprint(text):
+    '''Print the message only if verbose mode is on'''
+    if command_line.verbose:
+        print(text)
+
+def qprint(text):
+    '''Print the message only if quiet mode is off and 'printonly' is off'''
+    if command_line.printonly:
+        return
+    if not command_line.quiet:
+        print(text)
+
+
+def eprint(text, fatal=False, code=1):
+    '''Print the message to stderr. Exit if fatal.'''
+    print >> sys.stderr, text
+    if fatal:
+        exit(code)
+
+def oprint(text):
+    '''Print the message only if quiet mode is off'''
+    if not command_line.quiet:
+        print(text)
+
+
+def get_command_output(command, fatal_fails=True):
+    '''Execute a command using subprocess.Popen and return its stdout output.
+    If the return code is not 0, print an error message and exit.'''
+    vprint("Executing command: " + str(command))
+    res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    output = list(res.communicate())
+    if sys.stdout.encoding:
+        output[0] = output[0].decode(sys.stdout.encoding).encode("UTF-8")
+        output[1] = output[1].decode(sys.stdout.encoding).encode("UTF-8")
+    if res.returncode != 0 and fatal_fails:
+        eprint(_("Error while calling command") + " '" + " ".join(command) + "'")
+        if output[1] != None or output[0] != None:
+            eprint(_("Error message: \n") + ((output[0].strip() + "\n") if output[0] != None else "") +
+                (output[1].strip() if output[1] != None else ""))
+        exit(1)
+    return [output[0], output[1], res.returncode]
+
+
+def parse_command_line():
+    global command_line
+    arg_parser = argparse.ArgumentParser(description=_('reposync is used to synchronize a set of packages on the local computer with the remote repository.'))
+
+    arg_parser.add_argument('--include-media', '--media', action='append', nargs='+', help=_("Use only the selected URPM media"))
+    arg_parser.add_argument('--exclude-media', action='append', nargs='+', help=_("Do not use the selected URPM media"))
+    #arg_parser.add_argument('-x', '--exclude-packages', action='store', nargs='+', help="Exclude package(s) by regex")
+    arg_parser.add_argument('-v', '--verbose', action='store_true', help=_("Verbose (print additional info)"))
+    arg_parser.add_argument('-q', '--quiet', action='store_true', help=_("Quiet operation. Senseless without --auto."))
+    arg_parser.add_argument('-a', '--auto', action='store_true', help=_("Do not ask questions, just do it!"))
+    arg_parser.add_argument('-p', '--printonly', action='store_true', help=_("Only print the list of actions to be done and do nothing more!"))
+    arg_parser.add_argument('-d', '--download', action='store_true', help=_("Only download the rpm files, but install or remove nothing."))
+    #arg_parser.add_argument('-n', '--noremove', action='store_true', help=_("Do not remove packages at all. If some installed package prevents another package from being updated - do not update it."))
+    arg_parser.add_argument('-r', '--remove', action='store_true', help=_("Remove all the packages which are not present in the repository. By default, only some of them would be removed."))
+    arg_parser.add_argument('-c', '--check', action='store_true', help=_("Download packages and check whether they can be installed to your system, but do not install them."))
+    arg_parser.add_argument('-k', '--nokernel', action='store_true', help=_("Do nothing with kernels."))
+    arg_parser.add_argument('--runselftests', action='store_true', help=_("Run self-tests and exit."))
+    arg_parser.add_argument('--detailed', action='store_true', help=_("Show detailed information about the packages that are going to be removed or installed (and why this has to be done)"))
+
+    command_line = arg_parser.parse_args(sys.argv[1:])
+    if command_line.quiet and not command_line.auto:
+        eprint(_("It's senseless to use --quiet without --auto!"), fatal=True, code=2)
+
+    if command_line.verbose:
+        command_line.detailed = True
+
+
+cmd = ['urpmq']
+
+
+class MediaSet(object):
+    def __init__(self):
+        global cmd
+        self.urls = []
+        self.media = {}
+        self.by_url = {}
+        vprint("Loading media urls...")
+        lines = get_command_output(cmd + ["--list-url", "--list-media", 'active'])[0].strip().split("\n")
+
+        for line in lines:
+            parts = line.split(" ")
+            medium = ' '.join(parts[:-1])
+            url = parts[-1]
+            if url.endswith("/"):
+                url = url[:-1]
+            if url.find('/') != -1:
+                self.media[medium] = url
+                self.by_url[parts[-1]] = medium
+                self.urls.append(url)
+        vprint("Media urls: " + str(self.urls))
+
+
+class NEVR:
+    EQUAL = rpm.RPMSENSE_EQUAL      # 8
+    GREATER = rpm.RPMSENSE_GREATER  # 4
+    LESS = rpm.RPMSENSE_LESS        # 2
+    #re_ver = re.compile('^([\d\.]+:)?([\w\d\.\-\[\]]+)(:[\d\.]+)?$')
+
+    re_dep_ver = re.compile('^([^ \[\]]+)\[([\>\<\=\!]*) ([^ ]+)\]$')
+    re_dep = re.compile('^([^ \[\]]+)$')
+    types = {None: 0,
+             '==': EQUAL,
+             '': EQUAL,
+             '=': EQUAL,
+             '>=': EQUAL | GREATER,
+             '<=': EQUAL | LESS,
+             '>': GREATER,
+             '<': LESS,
+             '!=': LESS | GREATER,
+             '<>': LESS | GREATER}
+
+    def __init__(self, N, EVR, DE=None, DT=None, FL=None, E=None):
+        self.N = N
+        self.EVR = EVR
+        self.DE = DE
+        self.DT = DT
+        self.FL = FL
+        self.E = E
+        self.VR = EVR
+
+        if E:
+            if EVR.startswith(E + ':'):
+                self.VR = EVR[len(E) + 1:]
+            else:
+                self.EVR = E + ':' + self.EVR
+
+        # try to get E
+        if not self.E and self.EVR and self.EVR.find(':') != -1:
+            items = self.EVR.split(':')
+            if items[0].find('.') == -1 and items[0].find('-') == -1:
+                self.E = items[0]
+        if not self.E and self.EVR:
+            self.E = '0'
+            self.EVR = '0:' + self.EVR
+
+        if self.DE == 'None':
+            self.DE = None
+
+    def __str__(self):
+        if self.FL:
+            for t in NEVR.types:
+                if not t:
+                    continue
+                if NEVR.types[t] == self.FL:
+                    return "%s %s %s" % (self.N, t, self.EVR)
+        if self.EVR:
+            return "%s == %s" % (self.N, self.EVR)
+
+        return "%s" % (self.N)
+
+    def __repr__(self):
+        return self.__str__()
+
+    def __eq__(self, val):
+        if not isinstance(val, NEVR):
+            raise Exception("Internal error: comparison between NEVR and " + str(type(val)))
+        return str(self) == str(val)
+
+    def __ne__(self, val):
+        return not (self == val)
+
+    @staticmethod
+    def from_depstring(s, DE_toremove=None):
+        s = s.replace('[*]', '')
+
+        if DE_toremove:
+            res = NEVR.re_dep_ver.match(s)
+            if res:
+                (name, t, val) = res.groups()
+
+                if val.endswith(':' + DE_toremove):
+                    val = val[:-(len(DE_toremove) + 1)]
+                    s = '%s[%s %s]' % (name, t, val)
+
+        res = NEVR.re_dep.match(s)
+        if res:
+            return NEVR(res.group(1), None)
+
+        res = NEVR.re_dep_ver.match(s)
+
+        if not res:
+            raise Exception('Incorrect requirement string: ' + s)
+        (name, t, val) = res.groups()
+
+        return NEVR(name, val, FL=NEVR.types[t])
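+    # Parsing sketch: synthesis dependency strings look like 'foo' or
+    # 'foo[>= 1:2.3-4]'; NEVR.from_depstring('foo[>= 1:2.3-4]') yields
+    # N='foo', EVR='1:2.3-4' and FL == EQUAL|GREATER, per the regexes above.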
+
+    re_version = re.compile("(\.)?((alpha)|(cvs)|(svn)|(r))?\d+((mdv)|(mdk)|(mnb))")
+    @staticmethod
+    def from_filename(rpmname, E=None):
+        '''Return [name, version] for the given rpm file or package name'''
+        suffix = ['.x86_64', '.noarch'] + ['.i%s86' % i for i in range(3, 6)]
+        for s in suffix:
+            if rpmname.endswith(s):
+                rpmname = rpmname[:-len(s)]
+
+        sections = rpmname.split("-")
+        if NEVR.re_version.search(sections[-1]) == None:
+            name = sections[:-3]
+            version = sections[-3:-1]
+        else:
+            name = sections[:-2]
+            version = sections[-2:]
+        return NEVR("-".join(name), "-".join(version), FL=NEVR.EQUAL, E=E)
+
+    def satisfies(self, val):
+        if self.N != val.N:
+            return False
+
+        if self.EVR == None or val.EVR == None:
+            return True
+
+        (pname, pt, pval) = (self.N, self.FL, self.EVR)
+        (rname, rt, rval) = (val.N, val.FL, val.EVR)
+
+        def cut_part(separator, val1, val2):
+            if val1 and val2 and val1.count(separator) != val2.count(separator):
+                n = max(val1.count(separator), val2.count(separator))
+                val1 = separator.join(val1.split(separator)[:n])
+                val2 = separator.join(val2.split(separator)[:n])
+            return (val1, val2)
+
+        (rval, pval) = cut_part(':', rval, pval)
+        (rval, pval) = cut_part('-', rval, pval)
+
+        res = rpm.evrCompare(rval, pval)
+
+        if res == 1:  # >
+            if pt & NEVR.GREATER:
+                return True
+            elif pt & NEVR.LESS:
+                if rt & NEVR.LESS:
+                    return True
+                else:
+                    return False
+            else:
+                if rt & NEVR.LESS:
+                    return True
+                else:
+                    return False
+
+        elif res == 0:
+            if rt & NEVR.EQUAL and pt & NEVR.EQUAL:
+                return True
+            if rt & NEVR.LESS and pt & NEVR.LESS:
+                return True
+            if rt & NEVR.GREATER and pt & NEVR.GREATER:
+                return True
+            return False
+
+        else:  # <
+            if rt & NEVR.GREATER:
+                return True
+            elif rt & NEVR.LESS:
+                if pt & NEVR.LESS:
+                    return True
+                else:
+                    return False
+            else:
+                if pt & NEVR.LESS:
+                    return True
+                else:
+                    return False
+
+
+class PackageSet:
+    tags = ['provides', 'requires', 'obsoletes', 'suggests', 'conflicts']
+    alltags = tags + ['nevr', 'arch']
+    def __init__(self):
+        self.what = {}
+        self.packages = {}
+
+    def load_from_system(self):
+        qprint(_("Loading the list of installed packages..."))
+        ts = rpm.TransactionSet()
+        mi = ts.dbMatch()
+
+        for tag in PackageSet.tags:
+            self.what[tag] = {}
+
+        for h in mi:
+            name = h['name']
+            if name == 'gpg-pubkey':
+                continue
+            if name not in self.packages:
+                self.packages[h['name']] = {}
+            else:
+                qprint(_("Duplicating ") + name + '-' + h['version'] + '-' + h['release'])
+                qprint(_("Already found: ") + name + '-' + self.packages[name]["nevr"].EVR)
+
+            E = str(h['epoch'])
+            V = h['version']
+            R = h['release']
+            DE = h['distepoch']
+            DT = h['disttag']
+
+            if E == None or E == 'None':
+                E = '0'
+
+            EVR = "%s:%s-%s" % (E, V, R)
+
+            nevr = NEVR(name, EVR, FL=NEVR.EQUAL, DE=DE, DT=DT, E=E)
+            self.packages[name]['nevr'] = nevr
+            self.packages[name]['arch'] = h['arch']
+
+            for tag in PackageSet.tags:
+                if tag not in self.packages[name]:
+                    self.packages[name][tag] = []
+                dss = h.dsFromHeader(tag[:-1] + 'name')
+                for s in dss:
+                    fl = s.Flags()
+                    # undocumented flag for special dependencies
+                    if fl & 16777216:
+                        continue
+                    fl = fl % 16
+
+                    _evr = s.EVR()
+
+                    if _evr == '':
+                        evr = NEVR(s.N(), None, FL=fl)
+                    else:
+                        evr = NEVR(s.N(), _evr, FL=fl)
+
+                    self.packages[name][tag].append(evr)
+
+                    if evr.N not in self.what[tag]:
+                        self.what[tag][evr.N] = []
+                    self.what[tag][evr.N].append((name, evr))
+
+    def load_from_repository(self):
+        url_by_synthesis_url = {}
+        global fields
+
+        def get_synthesis_by_url(url):
+            if url.startswith('file://'):
+                url = url[6:]
+            if url.startswith('/'):
+                medium = ms.by_url[url]
+                return '/var/lib/urpmi/%s/synthesis.hdlist.cz' % medium
+            else:
+                return url + "/media_info/synthesis.hdlist.cz"
+
+        medium_by_synth = {}
+        synthesis_lists = []
+        for url in ms.urls:
+            synth = get_synthesis_by_url(url)
+            synthesis_lists.append(synth)
+            url_by_synthesis_url[synth] = url
+            medium_by_synth[synth] = ms.by_url[url]
+
+        def clear_data():
+            '''Clear the data of the current package in the 'fields' dictionary'''
+            global fields
+            fields = {"provides": [], "requires": [], "obsoletes": [], "suggests": [],
+                "conflicts": [], "info": [], "summary": []}
+        arches32 = ['i%d86' % i for i in range(3, 6)]
+        for tag in PackageSet.tags:
+            self.what[tag] = {}
+
+        # the following code is not pretty, I know. But it is easy to understand and clear.
+        # If you don't like it - write something better and send it to me :)
+        for synthesis_list in synthesis_lists:
+            try:
+                #print synthesis_list
+                qprint(_("Processing medium ") + medium_by_synth[synthesis_list] + "...")
+                vprint(synthesis_list)
+                if synthesis_list.startswith("http://") or synthesis_list.startswith("ftp://"):
+                    r = urlopen(synthesis_list)
+                    s = r.read()
+                    r.close()
+                elif synthesis_list.startswith("rsync://"):
+                    tmppath = '/tmp/urpm-reposync.synthesis_lists'
+                    if not os.path.exists(tmppath):
+                        os.mkdir(tmppath)
+                    filename = tmppath + '/' + os.path.basename(synthesis_list)
+                    os.system("rsync --copy-links %s %s 1>/dev/null 2>&1" % (synthesis_list, filename))
+                    r = open(filename)
+                    s = r.read()
+                    r.close()
+                    shutil.rmtree(tmppath)
+                elif synthesis_list.startswith("/"):  # local file
+                    if not os.path.exists(synthesis_list):
+                        eprint(_('Could not read synthesis file. (File %s not found)') % synthesis_list)
+                        continue
+                    r = open(synthesis_list)
+                    s = r.read()
+                    r.close()
+                res = subprocess.Popen(['gzip', '-d'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                output = res.communicate(s)
+                clear_data()
+                for line in output[0].split('\n'):
+                    if line == '':  # there can be empty lines
+                        continue
+
+                    items = line.split("@")
+                    data = [x.strip() for x in items[2:]]
+                    fields[items[1]] = data
+
+                    if items[1] == "info":
+                        rpmname = items[2]
+                        size = int(items[4])
+                        nevr = NEVR.from_filename(items[2], E=items[3])
+                        nevr.E = items[3]
+
+                        disttagepoch = '-'
+                        if len(items) > 6:
+                            disttagepoch = items[6]
+                            nevr.DT = items[6]
+                        if len(items) > 7:
+                            disttagepoch += items[7]
+                            nevr.DE = items[7]
+
+                        arch = items[2].split('.')[-1]
+                        if arch in arches32 and ARCH in arches32:
+                            arch = ARCH
+
+                        in_repo = nevr.N in self.packages
+                        new_arch_correct = arch == ARCH
+
+                        if in_repo:
+                            if nevr.DE == self.packages[nevr.N]['nevr'].DE:
+                                ver_newer = rpm.evrCompare(nevr.EVR, self.packages[nevr.N]['nevr'].EVR) == 1
+                            else:
+                                ver_newer = (nevr.DE > self.packages[nevr.N]['nevr'].DE)
+
+                            old_arch_correct = self.packages[nevr.N]['arch'] == ARCH
+                        else:
+                            ver_newer = None
+                            old_arch_correct = None
+
+                        toinst = not in_repo or (not old_arch_correct and new_arch_correct) or \
+                            (ver_newer and old_arch_correct == new_arch_correct)
+
+                        if toinst:
+                            # remove old data
+                            if nevr.N in self.packages:
+                                for tag in PackageSet.tags:
+                                    for dep in self.packages[nevr.N][tag]:
+                                        self.what[tag][dep.N].remove((nevr.N, dep))
+                            else:
+                                self.packages[nevr.N] = {}
+
+                            self.packages[nevr.N]['nevr'] = nevr
+                            self.packages[nevr.N]["arch"] = arch
+                            self.packages[nevr.N]["synthesis_list"] = synthesis_list
+                            self.packages[nevr.N]["filename"] = rpmname
+                            self.packages[nevr.N]["size"] = size
+                            for tag in PackageSet.tags:
+                                self.packages[nevr.N][tag] = []
+                                for item in fields[tag]:
+                                    if item == '':
+                                        continue
+                                    dep = NEVR.from_depstring(item, DE_toremove=nevr.DE)
+                                    self.packages[nevr.N][tag].append(dep)
+                                    if dep.N not in self.what[tag]:
+                                        self.what[tag][dep.N] = []
+                                    self.what[tag][dep.N].append((nevr.N, dep))
+
+                            self.packages[nevr.N]['medium'] = medium_by_synth[synthesis_list]
+                        clear_data()
+            except (HTTPError, URLError):
+                eprint(_("File can not be processed! Url: ") + synthesis_list)
+
+    def whattag(self, tag, val):
+        if val.N not in self.what[tag]:
+            return []
+        found = []
+        for (pkg, dep) in self.what[tag][val.N]:
+            if dep.satisfies(val):
+                found.append(pkg)
+        return found
+
+    def whattag_revert(self, tag, val):
+        if val.N not in self.what[tag]:
+            return []
+        found = []
+        for (pkg, dep) in self.what[tag][val.N]:
+            if val.satisfies(dep):
+                found.append(pkg)
+        return found
+
+    def whatprovides(self, val):
+        return self.whattag('provides', val)
+
+    def whatobsoletes(self, val):
+        return self.whattag_revert('obsoletes', val)
+
+    def whatrequires(self, val):
+        return self.whattag_revert('requires', val)
+
+    def whatconflicts(self, val):
+        return self.whattag_revert('conflicts', val)
+
+    def whatrequires_pkg(self, pkg):
+        found = []
+        for req in self.packages[pkg]['provides']:
+            found += [(d, req) for d in self.whatrequires(req)]
+        return found
+
+
+to_update = []
+to_downgrade = []
+to_remove = []
+to_remove_pre = []
+to_append = []
+unresolved = {}
+to_append_bysource = {}
+to_remove_problems = {}
+to_remove_saved = []
+files_to_download = []
+# If one of a package's deps matches an entry in this list and the package is
+# not in the repository - don't try to save that package.
+to_remove_force_list = [
+    NEVR.from_depstring("plymouth(system-theme)"),
+    NEVR.from_depstring("mandriva-theme-screensaver"),
+    ]
+
+
+flags = {rpm.RPMCALLBACK_UNKNOWN: 'RPMCALLBACK_UNKNOWN',
+         rpm.RPMCALLBACK_INST_PROGRESS: 'RPMCALLBACK_INST_PROGRESS',
+         rpm.RPMCALLBACK_INST_START: 'RPMCALLBACK_INST_START',
+         rpm.RPMCALLBACK_INST_OPEN_FILE: 'RPMCALLBACK_INST_OPEN_FILE',
+         rpm.RPMCALLBACK_INST_CLOSE_FILE: 'RPMCALLBACK_INST_CLOSE_FILE',
+         rpm.RPMCALLBACK_TRANS_PROGRESS: 'RPMCALLBACK_TRANS_PROGRESS',
+         rpm.RPMCALLBACK_TRANS_START: 'RPMCALLBACK_TRANS_START',
+         rpm.RPMCALLBACK_TRANS_STOP: 'RPMCALLBACK_TRANS_STOP',
+         rpm.RPMCALLBACK_UNINST_PROGRESS: 'RPMCALLBACK_UNINST_PROGRESS',
+         rpm.RPMCALLBACK_UNINST_START: 'RPMCALLBACK_UNINST_START',
+         rpm.RPMCALLBACK_UNINST_STOP: 'RPMCALLBACK_UNINST_STOP',
+         rpm.RPMCALLBACK_REPACKAGE_PROGRESS: 'RPMCALLBACK_REPACKAGE_PROGRESS',
+         rpm.RPMCALLBACK_REPACKAGE_START: 'RPMCALLBACK_REPACKAGE_START',
+         rpm.RPMCALLBACK_REPACKAGE_STOP: 'RPMCALLBACK_REPACKAGE_STOP',
+         rpm.RPMCALLBACK_UNPACK_ERROR: 'RPMCALLBACK_UNPACK_ERROR',
+         rpm.RPMCALLBACK_CPIO_ERROR: 'RPMCALLBACK_CPIO_ERROR',
+         rpm.RPMCALLBACK_SCRIPT_ERROR: 'RPMCALLBACK_SCRIPT_ERROR'}
+
+rpmtsCallback_fd = None
+file_id = 0
+current_file = "NotSet"
+def runCallback(reason, amount, total, key, client_data):
+    global i, file_id, rpmtsCallback_fd, current_file
+
+    if reason in flags:
+        fl = flags[reason]
+        #if not fl.endswith('PROGRESS'):
+        vprint("rpm_callback was called: %s, %s, %s, %s, %s" % (fl, str(amount), str(total),
+            str(key), str(client_data)))
+    if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
+        vprint("Opening file: " + key)
+        current_file = key
+        file_id += 1
+        qprint("[%d/%d] %s" % (file_id, len(files_to_download), os.path.basename(key)))
+        rpmtsCallback_fd = os.open(key, os.O_RDONLY)
+        return rpmtsCallback_fd
%s") % os.path.basename(key)) + elif reason == rpm.RPMCALLBACK_INST_START: + vprint ("Closing file") + os.close(rpmtsCallback_fd) + elif reason == rpm.RPMCALLBACK_UNPACK_ERROR or \ + reason == rpm.RPMCALLBACK_CPIO_ERROR or \ + reason == rpm.RPMCALLBACK_SCRIPT_ERROR: + eprint(_('urpm-reposync: error in package %s. Data: %(data)s') %{ 'cur_file': current_file, 'data': "%s; %s, %s, %s, %s" % (flags[reason], str(amount), + str(total), str(key), str(client_data))}) + + +def get_problem_dependencies(pkg): + ''' Get all the packages to satisfy dependencies not provided by some installed package or by some action ''' + global actions + + output = [] + for req in repository.packages[pkg]['requires']: # for every package requirement + pkgs_inst = installed.whatprovides(req) + if pkgs_inst: + continue #dependency is satisfied by one of installed packages + + #look for dependency in 'actions' + pkgs_rep = repository.whatprovides(req) + for p in pkgs_rep[:]: + if p not in actions: + pkgs_rep.remove(p) + if not pkgs_rep: + output.append(req) + + vprint("Problem deps for %s: %s" %(pkg, str(output))) + return output + + +def resolve_dependency(dep, pkg): + res = repository.whatprovides(dep) + if command_line.nokernel: + for p in res[:]: + if p.startswith('kernel'): + res.remove('kernel') + + if not res: + if pkg not in unresolved: + unresolved[pkg] = [] + if str(dep) not in unresolved[pkg]: + unresolved[pkg].append(str(dep)) + return None + res = sorted(res) + vprint("Resolved dependencies: " + str(res)) + + if not pkg in to_append_bysource: + to_append_bysource[pkg] = [] + + to_append_bysource[pkg].append(res[0]) + return res[0] + +def resolve_dep_while_emulation(requirement, package): + #try to resolve the dep in repository + pkgs = repository.whatprovides(requirement) + + found = False + for p in pkgs: + if p in actions: + found = True + break + + if not found and pkgs: + vprint('NEW ACTION: ' + pkgs[0]) + actions.append(pkgs[0]) + if not package in to_append_bysource: + to_append_bysource[package] = [] + to_append_bysource[package].append(pkgs[0]) + +def emulate_install(pkg): + global actions, not_provided_packages, conflicting_packages + vprint('Emulating package installation: ' + pkg) + + emptied = [] + for p in not_provided_packages: + for req in not_provided_packages[p][:]: + + for prov in repository.packages[pkg]['provides']: + if prov.satisfies(req): + vprint("Missing dep satisfied: %s -- %s" % (p, req)) + not_provided_packages[p].remove(req) + if not not_provided_packages[p]: + emptied.append(p) + break + for p in emptied: + not_provided_packages.pop(p) + + + + conflicts = False + for confl in repository.packages[pkg]['conflicts']: + res = installed.whatprovides(confl) + if res: + conflicts = True + conflicting_packages.append( (pkg, res) ) + vprint("New conflict: %s, %s" % (str(pkg), str(res))) + + + for prov in repository.packages[pkg]['provides']: + res = installed.whatconflicts(prov) + if res: + conflicts = True + conflicting_packages.append( (res, pkg) ) + vprint("New conflict: %s, %s" % (str(res), str(pkg))) + + if conflicts: + return + + url = ms.media[repository.packages[pkg]['medium']] + url += '/' + repository.packages[pkg]['filename'] + '.rpm' + files_to_download.append(url) + + if pkg not in to_update and pkg not in to_downgrade and pkg not in to_append: + to_append.append(pkg) + + if pkg not in installed.packages: + installed.packages[pkg] = {} + + for tag in PackageSet.alltags: + installed.packages[pkg][tag] = repository.packages[pkg][tag] + + for tag in PackageSet.tags: + deps = 
+
+
+def emulate_install(pkg):
+    global actions, not_provided_packages, conflicting_packages
+    vprint('Emulating package installation: ' + pkg)
+
+    emptied = []
+    for p in not_provided_packages:
+        for req in not_provided_packages[p][:]:
+            for prov in repository.packages[pkg]['provides']:
+                if prov.satisfies(req):
+                    vprint("Missing dep satisfied: %s -- %s" % (p, req))
+                    not_provided_packages[p].remove(req)
+                    if not not_provided_packages[p]:
+                        emptied.append(p)
+                    break
+    for p in emptied:
+        not_provided_packages.pop(p)
+
+    conflicts = False
+    for confl in repository.packages[pkg]['conflicts']:
+        res = installed.whatprovides(confl)
+        if res:
+            conflicts = True
+            conflicting_packages.append((pkg, res))
+            vprint("New conflict: %s, %s" % (str(pkg), str(res)))
+
+    for prov in repository.packages[pkg]['provides']:
+        res = installed.whatconflicts(prov)
+        if res:
+            conflicts = True
+            conflicting_packages.append((res, pkg))
+            vprint("New conflict: %s, %s" % (str(res), str(pkg)))
+
+    if conflicts:
+        return
+
+    url = ms.media[repository.packages[pkg]['medium']]
+    url += '/' + repository.packages[pkg]['filename'] + '.rpm'
+    files_to_download.append(url)
+
+    if pkg not in to_update and pkg not in to_downgrade and pkg not in to_append:
+        to_append.append(pkg)
+
+    if pkg not in installed.packages:
+        installed.packages[pkg] = {}
+
+    for tag in PackageSet.alltags:
+        installed.packages[pkg][tag] = repository.packages[pkg][tag]
+
+    for tag in PackageSet.tags:
+        deps = installed.packages[pkg][tag]
+        for dep in deps:
+            if dep.N not in installed.what[tag]:
+                installed.what[tag][dep.N] = []
+            installed.what[tag][dep.N].append((pkg, dep))
+
+    actions.remove(pkg)
+
+    for req in repository.packages[pkg]['requires']:
+        provs = installed.whatprovides(req)
+        if not provs:  # nothing provides it
+            if pkg not in not_provided_packages:
+                not_provided_packages[pkg] = []
+            vprint("New missing dep: %s -- %s" % (pkg, req))
+            not_provided_packages[pkg].append(req)
+
+            resolve_dep_while_emulation(req, pkg)
+
+
+def emulate_remove(pkg, updating=False):
+    global not_provided_packages
+    vprint("Emulating package removal: " + pkg)
+    if pkg not in installed.packages:
+        vprint("Nothing to remove")
+        return
+
+    if pkg in not_provided_packages:
+        not_provided_packages.pop(pkg)
+
+    for tag in PackageSet.tags:
+        deps = installed.packages[pkg][tag]
+        for dep in deps:
+            installed.what[tag][dep.N].remove((pkg, dep))
+
+    P = copy.deepcopy(installed.packages[pkg])
+    installed.packages[pkg] = {}
+    installed.packages[pkg]['old_package'] = P
+
+    if not actions:  # do nothing while the initial packages are being removed
+        return
+
+    for dep in installed.packages[pkg]['old_package']['provides']:
+        if dep.N not in installed.what['requires']:
+            continue
+
+        for (package, requirement) in installed.what['requires'][dep.N]:
+            if dep.satisfies(requirement) and not installed.whatprovides(requirement):
+                if package not in not_provided_packages:
+                    not_provided_packages[package] = []
+                vprint("New missing dep: %s -- %s" % (package, requirement))
+                not_provided_packages[package].append(requirement)
+
+                resolve_dep_while_emulation(requirement, package)
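+
+# NOTE (editorial sketch, inferred from the code above): installed.what seems
+# to be a reverse index over the dependency tags, shaped like
+#     installed.what[tag][dep.N] -> [(package_name, dep), ...]
+# For instance, installed.what['requires']['libfoo'] would list every
+# (pkg, requirement) pair whose requirement is named 'libfoo', which is how
+# emulate_remove() finds the packages that break when a provider disappears.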
+
+
+def have_to_be_removed(pkg):
+    to_remove_problems[pkg] = []
+    for dep in installed.packages[pkg]['requires']:
+        res = installed.whatprovides(dep)
+        if not res:
+            to_remove_problems[pkg].append(_("\tRequires %s, which will not be installed.") % (str(dep)))
+            continue
+
+    for dep in installed.packages[pkg]['provides']:
+        res = installed.whatconflicts(dep)
+        if res:
+            # the '%' must stay outside _() so the untranslated msgid is looked up
+            to_remove_problems[pkg].append(_("\t%s conflicts with it") % (', '.join(res)))
+
+    for dep in installed.packages[pkg]['conflicts']:
+        res = installed.whatprovides(dep)
+        if res:
+            to_remove_problems[pkg].append(_("\tIt conflicts with %s") % (', '.join(res)))
+    return to_remove_problems[pkg]
+
+
+def process_packages():
+    global actions, to_remove, not_provided_packages, conflicting_packages
+    qprint("Computing actions list...")
+    if command_line.remove:
+        for pkg in to_remove_pre:
+            emulate_remove(pkg)
+            to_remove.append(pkg)
+
+    actions = to_update + to_downgrade
+    actions_backup = actions[:]
+    conflicting_packages = []
+
+    problems = {}
+    changed = True
+
+    while changed:
+        i = 0
+        l = len(actions)
+        changed = False
+        for act in actions[:]:
+            i = i + 1
+            vprint('[%d/%d] %s' % (i, l, act))
+            prob = get_problem_dependencies(act)
+            problems[act] = []
+            for p in prob:
+                problems[act].append((p, resolve_dependency(p, act)))
+
+            if problems[act]:
+                vprint("\nPROBLEM: %s: %s" % (act, problems[act]))
+            if not problems[act]:
+                emulate_remove(act, updating=True)
+                emulate_install(act)
+                changed = True
+        for pr in problems:
+            if len(problems[pr]) > 0:
+                for prob, resolved in problems[pr]:
+                    if resolved:
+                        vprint("Package '%s' requires '%s' via dependency '%s'" % (pr, resolved, prob))
+                        changed = True
+                        if resolved not in actions:
+                            actions.append(resolved)
+
+        if not command_line.remove:
+            for pkg in to_remove_pre[:]:
+                vprint("Checking whether to remove " + pkg)
+                res = have_to_be_removed(pkg)
+                if res:
+                    vprint("%s has to be removed because:" % (pkg))
+                    for item in res:
+                        vprint(str(item))
+                    emulate_remove(pkg)
+                    if pkg not in to_remove:
+                        to_remove.append(pkg)
+
+                    if pkg in to_remove_saved:
+                        to_remove_saved.remove(pkg)
+                    changed = True
+                    to_remove_pre.remove(pkg)
+                else:
+                    if pkg not in to_remove_saved:
+                        to_remove_saved.append(pkg)
+
+    have_to_exit = False
+    if not_provided_packages:
+        for p_name in not_provided_packages:
+            eprint('>>>ERROR: Package %s has unsatisfied dependencies: %s' %
+                   (p_name, str(not_provided_packages[p_name])))
+        have_to_exit = True
+
+    vprint('Actions left: ' + str(actions))
+    if actions:
+        for pkg in unresolved:
+            eprint(">>>ERROR: %s requires %s" % (pkg, ', '.join(unresolved[pkg])))
+        have_to_exit = True
+
+    if conflicting_packages:
+        def format_conflicts(a):
+            if type(a) is list:
+                return '[%s]' % ', '.join(a)
+            else:
+                return str(a)
+
+        for (a, b) in conflicting_packages:
+            a_text = format_conflicts(a)
+            b_text = format_conflicts(b)
+
+            eprint(">>>ERROR: %s conflicts with %s" % (a_text, b_text))
+        have_to_exit = True
+
+    if have_to_exit:
+        eprint(_(">>> Contact repository maintainers and send them this information, please."), fatal=True, code=4)
+
+
+def download_packages():
+    if not files_to_download:
+        return
+    qprint(_('Downloading files...'))
+    l = len(files_to_download)
+    i = 0
+    for url in files_to_download:
+        i += 1
+        qprint("[%d/%d] %s " % (i, l, os.path.basename(url)))
+        path = os.path.join(downloaded_rpms_dir, os.path.basename(url))
+        if os.path.isfile(path):
+            continue
+        try:
+            if url.startswith('/'):  # local file
+                shutil.copyfile(url, path)
+            else:
+                fd = urlopen(url)
+                # write in binary mode: rpm payloads are binary data
+                rpm_file = open(path, 'wb')
+                rpm_file.write(fd.read())
+                rpm_file.close()
+                fd.close()
+        except IOError, e:
+            eprint("Cannot download file %s: %s" % (url, str(e)), fatal=True, code=5)
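+
+# NOTE (editorial example, hypothetical URL): a queued entry such as
+#     http://mirror.example.com/media/main/foo-1.0-1.x86_64.rpm
+# is saved as <downloaded_rpms_dir>/foo-1.0-1.x86_64.rpm by the loop above;
+# entries starting with '/' are treated as local files and simply copied.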
+
+
+def install_packages():
+
+    def readRpmHeader(ts, filename):
+        vprint("Reading header of " + filename)
+        fd = os.open(filename, os.O_RDONLY)
+        h = ts.hdrFromFdno(fd)
+        os.close(fd)
+        return h
+
+    qprint(_("Generating transaction..."))
+    ts = rpm.TransactionSet()
+
+    # turn all the checks off -- they can cause a segfault in RPM for now
+    ts.setVSFlags(rpm.RPMVSF_NOHDRCHK | rpm.RPMVSF_NOSHA1HEADER | rpm.RPMVSF_NODSAHEADER |
+                  rpm.RPMVSF_NORSAHEADER | rpm.RPMVSF_NOMD5 | rpm.RPMVSF_NODSA |
+                  rpm.RPMVSF_NORSA | rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
+    ts.setProbFilter(rpm.RPMPROB_FILTER_OLDPACKAGE)
+
+    # flags for the ts.run execution; we need them to speed the process up
+    ts.setFlags(rpm.RPMTRANS_FLAG_NOFDIGESTS)
+
+    for file in files_to_download:
+        f = os.path.join(downloaded_rpms_dir, os.path.basename(file))
+        h = readRpmHeader(ts, f)
+        ts.addInstall(h, f, 'u')
+
+    for pkg in to_remove:
+        ts.addErase(pkg)
+
+    qprint(_("Checking dependencies..."))
+    def format_dep(dep):
+        ((name, ver, rel), (namereq, verreq), needsFlags, suggestedPackage, sense) = dep
+
+        vprint(dep)
+        t = _('requires')
+        if sense & 1:
+            t = _('conflicts with')
+
+        # build the comparison operator from the RPMSENSE bits; the flags can
+        # be combined (e.g. GREATER|EQUAL means '>='), so concatenate instead
+        # of overwriting
+        s = ''
+        if needsFlags & rpm.RPMSENSE_LESS:     # 2
+            s += '<'
+        if needsFlags & rpm.RPMSENSE_GREATER:  # 4
+            s += '>'
+        if needsFlags & rpm.RPMSENSE_EQUAL:    # 8
+            s += '='
+        if s == '<>':  # RPMSENSE_NOTEQUAL (6)
+            s = '!='
+
+        if verreq:
+            verreq = '[%s %s]' % (s, verreq)
+        else:
+            verreq = ''
+        return _("Package %(name)s-%(ver)s-%(rel)s %(t)s %(namereq)s%(verreq)s") % \
+            {'name': name, 'ver': ver, 'rel': rel, 'namereq': namereq, 'verreq': verreq, 't': t}
+
+    unresolved_dependencies = ts.check()
+    if unresolved_dependencies:
+        eprint(_("There are some unresolved dependencies: "))
+        for dep in unresolved_dependencies:
+            eprint("\t" + format_dep(dep))
+        eprint(_("Packages can not be installed. Please, contact urpm-tools developers and provide this output."), fatal=True, code=3)
+    else:
+        qprint(_("No errors found in transaction"))
+        ts.order()
+
+    if command_line.check:
+        return
+    qprint(_("Running transaction..."))
+    ts.run(runCallback, 1)
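+
+# NOTE (editorial example): with the concatenating operator logic in
+# format_dep() above, needsFlags == RPMSENSE_GREATER|RPMSENSE_EQUAL (4|8)
+# renders as '>=', and RPMSENSE_LESS|RPMSENSE_GREATER (6, i.e. NOTEQUAL)
+# renders as '!='.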
+
+
+def check_media_set():
+    def try_solve_lib_arch(pkgname):
+        '''If lib64A is installed but only libA is present in the repository
+        (or vice versa), the package does not have to be removed.'''
+        if not pkgname.startswith('lib'):
+            return None
+        if pkgname in repository.packages:
+            return None
+
+        is64 = (pkgname[3:5] == '64')
+        is32 = not is64
+
+        if is32:
+            l32 = pkgname
+            l64 = 'lib64' + pkgname[3:]
+        else:
+            l32 = 'lib' + pkgname[5:]
+            l64 = pkgname
+
+        e32 = (l32 in repository.packages)
+        e64 = (l64 in repository.packages)
+
+        if is32 and e64:  # a 32bit version is installed, but the repository has only the 64bit one
+            if ARCH == "x86_64":
+                return l64
+            else:
+                return  # a 64bit library can not work in a 32bit system
+        if is64 and e32:
+            return l32
+
+    found = []
+    for pkg in to_remove:
+        res = try_solve_lib_arch(pkg)
+        if res:
+            found.append((pkg, res))
+
+    vprint("The list of libs with incorrect arch in repository: " + str(found))
+    if found:
+        qprint(_("WARNING: Some libraries are going to be removed because the repository contains only packages with the other architecture. Maybe you missed media with the correct architecture?"))
+
+
+def print_actions():
+    if command_line.quiet:
+        return
+
+    def count_total_size():
+        total = 0
+        for pkg in to_append + to_update + to_downgrade:
+            total += repository.packages[pkg]['size']
+        return total
+
+    def bytes_to_human_readable(bytes):
+        bytes = float(bytes)
+        if bytes >= 1099511627776:
+            terabytes = bytes / 1099511627776
+            size = '%.2fT' % terabytes
+        elif bytes >= 1073741824:
+            gigabytes = bytes / 1073741824
+            size = '%.2fG' % gigabytes
+        elif bytes >= 1048576:
+            megabytes = bytes / 1048576
+            size = '%.2fM' % megabytes
+        elif bytes >= 1024:
+            kilobytes = bytes / 1024
+            size = '%.2fK' % kilobytes
+        else:
+            size = '%.2fb' % bytes
+        return size
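+
+    # NOTE (editorial example): bytes_to_human_readable(1536) -> '1.50K' and
+    # bytes_to_human_readable(3 * 1024 ** 2) -> '3.00M'; plain byte counts get
+    # a lowercase 'b' suffix: bytes_to_human_readable(512) -> '512.00b'.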
+
+    media = ms.media.keys()
+    def print_pkg_list(pkglist, tag):
+        media_contents = {}
+        for medium in media:
+            for pkg in pkglist:
+                if repository.packages[pkg]['medium'] == medium:
+                    if medium not in media_contents:
+                        media_contents[medium] = []
+                    media_contents[medium].append(pkg)
+
+        qprint(" %-30s %-15s %-15s %-10s" % (_('Package Name'), _('Current Version'), _('New Version'), _('Arch')))
+        for medium in media_contents:
+            qprint("(%s %s)" % (_("medium"), medium))
+            for pkg in sorted(media_contents[medium]):
+                nevri = installed.packages[pkg]['nevr']
+                nevrr = repository.packages[pkg]['nevr']
+
+                if nevri.E == nevrr.E:
+                    veri = nevri.VR
+                    verr = nevrr.VR
+                else:
+                    veri = nevri.EVR
+                    verr = nevrr.EVR
+
+                if nevri.DE and nevrr.DE and nevri.DE != nevrr.DE:
+                    veri += '(%s%s) ' % (nevri.DT, nevri.DE)
+                    verr += '(%s%s) ' % (nevrr.DT, nevrr.DE)
+
+                oprint("%s %-30s %-15s %-15s %-10s" % (prefix, pkg, veri, verr, installed.packages[pkg]['arch']))
+        qprint('')
+
+    prefix = ''
+    if to_update:
+        qprint(_("The following packages are going to be upgraded:"))
+        if command_line.printonly:
+            prefix = 'U'
+        print_pkg_list(to_update, 'U')
+    if to_downgrade:
+        qprint(_("The following packages are going to be downgraded:"))
+        if command_line.printonly:
+            prefix = 'D'
+        print_pkg_list(to_downgrade, 'D')
+    if to_append:
+        qprint(_("Additional packages are going to be installed:"))
+        qprint(" %-30s %-15s %-10s" % (_('Package Name'), _('Version'), _('Arch')))
+
+        if command_line.printonly:
+            prefix = 'A'
+
+        def get_append_sources(pkg):
+            out = []
+            for item in to_append_bysource:
+                if pkg in to_append_bysource[item]:
+                    out.append(item)
+            return out
+
+        for pkg in to_append:
+            nevr = repository.packages[pkg]['nevr']
+            oprint("%s %-30s %-15s %-10s" % (prefix, pkg, nevr.VR, repository.packages[pkg]['arch']))
+            if command_line.detailed:
+                qprint(_("\tRequired by %s") % (", ".join(get_append_sources(pkg))))
+
+        qprint('')
+
+    if to_remove:
+        qprint(_("The following packages are going to be removed:"))
+        qprint(" %-30s %-15s %-10s" % (_('Package Name'), _('Current Version'), _('Arch')))
+        if command_line.printonly:
+            prefix = 'R'
+        for pkg in sorted(to_remove):
+            nevr = installed.packages[pkg]['nevr']
+            oprint("%s %-30s %-15s %-10s" % (prefix, pkg, nevr.VR, installed.packages[pkg]['arch']))
+            if command_line.detailed and not command_line.remove:
+                for problem in sorted(to_remove_problems[pkg]):
+                    qprint(problem)
+        qprint('')
+
+    if to_remove_saved and command_line.detailed:
+        qprint(_("Packages which are not present in repositories, but do not have to be removed (will be saved):"))
+        qprint(" %-30s %-15s %-10s" % (_('Package Name'), _('Current Version'), _('Arch')))
+        if command_line.printonly:
+            prefix = 'S'
+        for pkg in sorted(to_remove_saved):
+            oprint("%s %-30s %-15s %-10s" % (prefix, pkg, installed.packages[pkg]['nevr'].VR, installed.packages[pkg]['arch']))
+
+    qprint(_("%d packages are going to be downloaded and installed.") % len(files_to_download))
+    qprint(_("%d packages are going to be removed.") % len(to_remove))
+    qprint(_("%s will be downloaded.") % bytes_to_human_readable(count_total_size()))
+
+
+def have_to_be_forced(pkg):
+    for dep in installed.packages[pkg]['provides']:
+        for f in to_remove_force_list:
+            if dep.satisfies(f):
+                vprint("Package %s is forced to be removed." % pkg)
+                return f
+    return None
+
+
+def Main():
+    global cmd, resolve_source, installed, repository, include_media, exclude_media
+    global not_provided_packages, installed_backup, ms, actions
+    resolve_source = False  # variable that makes download_rpm download the resolved build-deps
+    cmd = ['urpmq']
+    include_media = []
+    actions = []
+    if command_line.include_media is not None:
+        media = ''
+        for i in command_line.include_media:
+            media = ",".join([media] + i)
+            for ii in i:
+                include_media.append(ii)
+        cmd = cmd + ['--media', media[1:]]
+
+    exclude_media = []
+    if command_line.exclude_media is not None:
+        media = ''
+        for i in command_line.exclude_media:
+            media = ",".join([media] + i)
+            for ii in i:
+                exclude_media.append(ii)
+        cmd = cmd + ['--excludemedia', media[1:]]
+
+    ms = MediaSet()
+    installed = PackageSet()
+    installed.load_from_system()
+
+    repository = PackageSet()
+    repository.load_from_repository()
+
+    installed_backup = copy.deepcopy(installed)
+    not_provided_packages = {}
+
+    for inst in installed.packages:
+
+        if command_line.nokernel and inst.startswith('kernel'):
+            continue
+
+        if inst not in repository.packages:
+            if command_line.remove:
+                to_remove_pre.append(inst)
+            else:
+                res = have_to_be_forced(inst)
+                if res:
+                    emulate_remove(inst)
+                    to_remove.append(inst)
+                    to_remove_problems[inst] = [_('\tForced to be removed due to "%s" policy.') % str(res)]
+                else:
+                    to_remove_pre.append(inst)
+            continue
+
+        # compare distepochs first
+        if installed.packages[inst]["nevr"].DE is None or repository.packages[inst]["nevr"].DE is None:
+            res_epoch = 0
+        else:
+            res_epoch = rpm.evrCompare(installed.packages[inst]["nevr"].DE, repository.packages[inst]["nevr"].DE)
+
+        if res_epoch == -1:
+            to_update.append(inst)
+        elif res_epoch == 1:
+            to_downgrade.append(inst)
+        else:  # the distepochs are the same,
+            # so now the versions can be compared
+            res = rpm.evrCompare(installed.packages[inst]["nevr"].EVR, repository.packages[inst]["nevr"].EVR)
+            if res == -1:
+                to_update.append(inst)
+            elif res == 1:
+                to_downgrade.append(inst)
+            else:  # res == 0
+                pass  # do nothing
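+
+    # NOTE (editorial): rpm.evrCompare is used here like cmp(), returning
+    # -1/0/1; e.g. rpm.evrCompare('1.0-1', '1.0-2') == -1, so the installed
+    # package is queued in to_update. The distepoch (DE) is compared first so
+    # that a repository with a newer distepoch wins even if its EVR looks older.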
(y/n): ")) + sys.stdout.flush() + while(True): + res = sys.stdin.readline() + res = res.strip() + if res in [_('y'), _('yes'), 'y', 'yes']: + break + if res in [_('n'), _('no'), 'n', 'no']: + exit(0) + + download_packages() + if command_line.download: + return + install_packages() + + +if not os.path.exists(downloaded_rpms_dir): + os.makedirs(downloaded_rpms_dir) + + +class Tests(unittest.TestCase): + def setUp(self): + self.p1 = NEVR.from_depstring('a[== 1.0]') + self.p2 = NEVR.from_depstring('a[> 1.0]') + self.p3 = NEVR.from_depstring('a[< 1.0]') + self.p4 = NEVR.from_depstring('a[>= 1.0]') + self.p5 = NEVR.from_depstring('b[== 1.0]') + + self.r1 = NEVR.from_depstring('a[== 1.0]') + self.r2 = NEVR.from_depstring('a[== 1.1]') + self.r3 = NEVR.from_depstring('a[<= 1.1]') + self.r4 = NEVR.from_depstring('a[>= 1.1]') + self.r5 = NEVR.from_depstring('a[< 0.9]') + self.r6 = NEVR.from_depstring('a[> 0.9]') + self.r7 = NEVR.from_depstring('a[< 1.0]') + self.r8 = NEVR.from_depstring('b[== 1.0]') + + self.pkg1 = NEVR.from_filename("s-c-t-0.0.1-0.20091218.2-rosa.lts2012.0.x86_64") + + def test_nevr_parse(self): + self.assertEqual(self.p1.N, 'a') + self.assertEqual(self.p1.VR, '1.0') + self.assertEqual(self.p1.EVR, '1.0') + self.assertEqual(self.p1.FL, NEVR.EQUAL) + self.assertEqual(self.p2.FL, NEVR.GREATER) + self.assertEqual(self.p3.FL, NEVR.LESS) + self.assertEqual(self.p4.FL, NEVR.EQUAL | NEVR.GREATER) + + self.assertEqual(self.pkg1.N, 's-c-t') + self.assertEqual(self.pkg1.EVR, '0.0.1-0.20091218.2') + self.assertEqual(self.pkg1.FL, NEVR.EQUAL) + + def test_version_compare(self): + self.assertTrue(self.p1.satisfies(self.r1)) + self.assertTrue(self.p1.satisfies(self.r3)) + self.assertTrue(self.p1.satisfies(self.r6)) + self.assertFalse(self.p1.satisfies(self.r4)) + self.assertFalse(self.p1.satisfies(self.r5)) + self.assertFalse(self.p1.satisfies(self.r7)) + self.assertFalse(self.p1.satisfies(self.r8)) + + self.assertTrue(self.p2.satisfies(self.r2)) + self.assertTrue(self.p2.satisfies(self.r2)) + self.assertTrue(self.p2.satisfies(self.r4)) + self.assertTrue(self.p2.satisfies(self.r6)) + self.assertFalse(self.p2.satisfies(self.r1)) + self.assertFalse(self.p2.satisfies(self.r5)) + self.assertFalse(self.p2.satisfies(self.r7)) + + self.assertTrue(self.p3.satisfies(self.r3)) + self.assertTrue(self.p3.satisfies(self.r5)) + self.assertTrue(self.p3.satisfies(self.r6)) + self.assertTrue(self.p3.satisfies(self.r7)) + self.assertFalse(self.p3.satisfies(self.r1)) + self.assertFalse(self.p3.satisfies(self.r2)) + self.assertFalse(self.p3.satisfies(self.r4)) + + self.assertTrue(self.p4.satisfies(self.r1)) + self.assertTrue(self.p4.satisfies(self.r6)) + self.assertFalse(self.p4.satisfies(self.r5)) + self.assertFalse(self.p4.satisfies(self.r7)) + + self.assertTrue(self.p5.satisfies(self.r8)) + + self.assertEqual(self.p1, self.r1) + self.assertNotEqual(self.p1, self.r2) + + self.assertRaises(Exception, NEVR.from_depstring, "a [== 1.0]") + self.assertRaises(Exception, NEVR.from_depstring, "a [== 1.0 ]") + self.assertRaises(Exception, NEVR.from_depstring, "a[! 
1.0]") + self.assertRaises(Exception, NEVR.from_depstring, "a == 1.0") + + self.assertRaises(Exception, self.p1.__eq__, "a [== 1.0]") + + +if __name__ == '__main__': + parse_command_line() + + if command_line.runselftests: + suite = unittest.TestLoader().loadTestsFromTestCase(Tests) + unittest.TextTestRunner(verbosity=2).run(suite) + else: + Main() diff --git a/urpm-tools.pot b/urpm-tools.pot new file mode 100644 index 0000000..58ddc58 --- /dev/null +++ b/urpm-tools.pot @@ -0,0 +1,1101 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2012-08-21 16:34+0400\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. if not fatal_fails, do nothing. Caller have to deal with that himself +#. rpm return code is not 0 +#: urpm-reposync.py:64 urpm-downloader.py:156 urpm-downloader.py:546 +msgid "Error while calling command" +msgstr "" + +#: urpm-reposync.py:66 urpm-downloader.py:158 +msgid "Error message: \n" +msgstr "" + +#: urpm-reposync.py:74 +msgid "reposync is used to synchronize a set of packages on the local computer with the remote repository." +msgstr "" + +#: urpm-reposync.py:76 urpm-downloader.py:104 +msgid "Use only selected URPM media" +msgstr "" + +#: urpm-reposync.py:77 urpm-downloader.py:105 +msgid "Do not use selected URPM media" +msgstr "" + +#. arg_parser.add_argument('-x', '--exclude-packages', action='store',nargs = '+', help="Exclude package(s) by regex") +#: urpm-reposync.py:79 urpm-downloader.py:102 +msgid "Verbose (print additional info)" +msgstr "" + +#: urpm-reposync.py:80 +msgid "Quiet operation. Senseless without --auto." +msgstr "" + +#: urpm-reposync.py:81 +msgid "Do not ask questions, just do it!" +msgstr "" + +#: urpm-reposync.py:82 +msgid "Only print the list of actions to be done and do nothing more!" +msgstr "" + +#: urpm-reposync.py:83 +msgid "Only download the rpm files, but install or remove nothing." +msgstr "" + +#. arg_parser.add_argument('-n', '--noremove', action='store_true', help=_("Do not remove packages at all. If some installed package prevent another package from beeing updated - do not update it.")) +#: urpm-reposync.py:85 +msgid "Remove all the packages which do not present in repository. By default, only some of them would be removed." +msgstr "" + +#: urpm-reposync.py:86 +msgid "Download packages and check wether they can be installed to your system, but do not install them." +msgstr "" + +#: urpm-reposync.py:87 +msgid "Do nothing with kernels." +msgstr "" + +#: urpm-reposync.py:88 +msgid "Run self-tests end exit." +msgstr "" + +#: urpm-reposync.py:89 +msgid "Show detailed information about packages are going to be removed or installed (why does it have to be done)" +msgstr "" + +#: urpm-reposync.py:93 +msgid "It's senseless to use --quiet without --auto!" +msgstr "" + +#: urpm-reposync.py:305 +msgid "Loading the list of installed packages..." +msgstr "" + +#: urpm-reposync.py:319 +msgid "Duplicating " +msgstr "" + +#: urpm-reposync.py:320 +msgid "Already found: " +msgstr "" + +#. 
+#: urpm-reposync.py:396
+msgid "Processing medium "
+msgstr ""
+
+#: urpm-reposync.py:414
+#, python-format
+msgid "Could not read synthesis file. (File %s not found)"
+msgstr ""
+
+#: urpm-reposync.py:484
+msgid "File can not be processed! Url: "
+msgstr ""
+
+#: urpm-reposync.py:579
+#, python-format
+msgid "Removing %s"
+msgstr ""
+
+#: urpm-reposync.py:586
+msgid "urpm-reposync: error in package %(cur_file)s. Data: %(data)s"
+msgstr ""
+
+#: urpm-reposync.py:683
+#, python-format
+msgid "\tRequires %s, which will not be installed."
+msgstr ""
+
+#: urpm-reposync.py:689
+#, python-format
+msgid "\t%s conflicts with it"
+msgstr ""
+
+#: urpm-reposync.py:694
+#, python-format
+msgid "\tIt conflicts with %s"
+msgstr ""
+
+#: urpm-reposync.py:768
+msgid "Some packages can not be installed due to unresolved dependencies: "
+msgstr ""
+
+#: urpm-reposync.py:771
+msgid "Contact repository maintainers and send them this information, please."
+msgstr ""
+
+#: urpm-reposync.py:777
+msgid "Downloading files..."
+msgstr ""
+
+#: urpm-reposync.py:807
+msgid "Generating transaction..."
+msgstr ""
+
+#: urpm-reposync.py:825
+msgid "Checking dependencies..."
+msgstr ""
+
+#: urpm-reposync.py:830
+msgid "requires"
+msgstr ""
+
+#: urpm-reposync.py:832
+msgid "conflicts with"
+msgstr ""
+
+#: urpm-reposync.py:848
+#, python-format
+msgid "Package %(name)s-%(ver)s-%(rel)s %(t)s %(namereq)s%(verreq)s"
+msgstr ""
+
+#: urpm-reposync.py:854
+msgid "There are some unresolved dependencies: "
+msgstr ""
+
+#: urpm-reposync.py:857
+msgid "Packages can not be installed. Please, contact urpm-tools developers and provide this output."
+msgstr ""
+
+#: urpm-reposync.py:859
+msgid "No errors found in transaction"
+msgstr ""
+
+#: urpm-reposync.py:864
+msgid "Running transaction..."
+msgstr ""
+
+#: urpm-reposync.py:905
+msgid "WARNING: Some libraries are going to be removed because the repository contains only packages with the other architecture. Maybe you missed media with the correct architecture?"
+msgstr ""
+
+#: urpm-reposync.py:946 urpm-reposync.py:981 urpm-reposync.py:1003
+#: urpm-reposync.py:1016
+msgid "Package Name"
+msgstr ""
+
+#: urpm-reposync.py:946 urpm-reposync.py:1003 urpm-reposync.py:1016
+msgid "Current Version"
+msgstr ""
+
+#: urpm-reposync.py:946
+msgid "New Version"
+msgstr ""
+
+#: urpm-reposync.py:946 urpm-reposync.py:981 urpm-reposync.py:1003
+#: urpm-reposync.py:1016
+msgid "Arch"
+msgstr ""
+
+#: urpm-reposync.py:948
+msgid "medium"
+msgstr ""
+
+#: urpm-reposync.py:970
+msgid "The following packages are going to be upgraded:"
+msgstr ""
+
+#: urpm-reposync.py:975
+msgid "The following packages are going to be downgraded:"
+msgstr ""
+
+#: urpm-reposync.py:980
+msgid "Additional packages are going to be installed:"
+msgstr ""
+
+#: urpm-reposync.py:981
+msgid "Version"
+msgstr ""
+
+#: urpm-reposync.py:997
+#, python-format
+msgid "\tRequired by %s"
+msgstr ""
+
+#: urpm-reposync.py:1002
+msgid "The following packages are going to be removed:"
+msgstr ""
+
+#: urpm-reposync.py:1015
+msgid "Packages which are not present in repositories, but do not have to be removed (will be saved):"
+msgstr ""
+
+#: urpm-reposync.py:1022
+#, python-format
+msgid "%d packages are going to be downloaded and installed."
+msgstr ""
+
+#: urpm-reposync.py:1023
+#, python-format
+msgid "%d packages are going to be removed."
+msgstr ""
+
+#: urpm-reposync.py:1024
+#, python-format
+msgid "%s will be downloaded."
+msgstr "" + +#: urpm-reposync.py:1080 +#, python-format +msgid "\tForced to be removed dew to \"%s\" policy." +msgstr "" + +#: urpm-reposync.py:1108 +msgid "Nothing to do" +msgstr "" + +#: urpm-reposync.py:1121 +msgid "Do you want to proceed? (y/n): " +msgstr "" + +#: urpm-reposync.py:1126 +msgid "y" +msgstr "" + +#: urpm-reposync.py:1126 +msgid "yes" +msgstr "" + +#: urpm-reposync.py:1128 +msgid "n" +msgstr "" + +#: urpm-reposync.py:1128 +msgid "no" +msgstr "" + +#: urpm-repograph.py:86 +msgid "Tool for generating dependency graph for REPOSITORY packages." +msgstr "" + +#: urpm-repograph.py:90 +msgid "Search for cross-repository references in CROSS_REPO(s) repositories." +msgstr "" + +#: urpm-repograph.py:93 +msgid "Hide service messages. (About progress status etc.)" +msgstr "" + +#: urpm-repograph.py:95 +msgid "Show warnings. (About unprovided packages etc.)" +msgstr "" + +#: urpm-repograph.py:98 +msgid "Process \"requires\" package dependencies. Used by default." +msgstr "" + +#: urpm-repograph.py:100 +msgid "Process \"suggests\" package dependencies. If used without --requires then only suggests dependencies are processed." +msgstr "" + +#: urpm-repograph.py:103 +msgid "Process file dependencies." +msgstr "" + +#: urpm-repograph.py:105 +msgid "Show unprovided dependencies." +msgstr "" + +#: urpm-repograph.py:109 +msgid "Search for packages, which are required by package PKG (PKG is a file name or package name)" +msgstr "" + +#: urpm-repograph.py:111 +msgid "Search for packages, which requires package PKG (PKG is a file name or package name)" +msgstr "" + +#: urpm-repograph.py:115 +msgid "Search for all simple loops of package dependecies." +msgstr "" + +#: urpm-repograph.py:117 +msgid "Search for alternative packages providing the same feature." +msgstr "" + +#: urpm-repograph.py:119 +msgid "Search for all broken packages and anything beetween them" +msgstr "" + +#: urpm-repograph.py:121 +msgid "Output each loop or each alternative in different file. Ignored if --loops or --alternatives options are not present. OUTPUT_FILE (if present) is tracted as folder name for new files in that case." +msgstr "" + +#: urpm-repograph.py:127 +msgid "Change graph output to \"OUTPUT_FILE\". STDOUT by default." +msgstr "" + +#: urpm-repograph.py:129 +msgid "Do not output graph. Tool will not start working if --quiet, --nograph are present and --verbose is not. (If there is nothing to output - then nothing has to be done.)" +msgstr "" + +#: urpm-repograph.py:157 urpm-repodiff.py:125 +#, python-format +msgid "Error: URL to repository \"%s\" is incorrect" +msgstr "" + +#: urpm-repograph.py:179 urpm-repodiff.py:147 +#, python-format +msgid "Error: directory %s does not exist" +msgstr "" + +#: urpm-repograph.py:189 urpm-repodiff.py:157 +#, python-format +msgid "Error: \"%s\" is not correct url, path or name of repository" +msgstr "" + +#: urpm-repograph.py:216 +#, python-format +msgid "Error: directory %s already exists" +msgstr "" + +#: urpm-repograph.py:222 urpm-repograph.py:237 urpm-repodiff.py:183 +#, python-format +msgid "Error: File %s already exists" +msgstr "" + +#: urpm-repograph.py:229 +#, python-format +msgid "Error: directory %s was not created" +msgstr "" + +#: urpm-repograph.py:246 urpm-repodiff.py:192 +#, python-format +msgid "Error: File %s cannot be created" +msgstr "" + +#: urpm-repograph.py:250 urpm-repodiff.py:196 +#, python-format +msgid "Error: Path %s does not exist." 
+msgstr "" + +#: urpm-repograph.py:262 urpm-repodiff.py:218 +#, python-format +msgid "getting file %s from " +msgstr "" + +#: urpm-repograph.py:267 urpm-repodiff.py:223 +#, python-format +msgid "Error: file %s was not copied" +msgstr "" + +#: urpm-repograph.py:275 urpm-repodiff.py:231 +#, python-format +msgid "Error: file %(from)s was not downloaded to %(to)s" +msgstr "" + +#: urpm-repograph.py:288 urpm-repodiff.py:272 +msgid "Error: file not found: " +msgstr "" + +#: urpm-repograph.py:293 urpm-repodiff.py:277 +#, python-format +msgid "Error: cannot rename file %(from)s to %(to)s" +msgstr "" + +#: urpm-repograph.py:297 urpm-repograph.py:313 urpm-repograph.py:543 +#: urpm-repodiff.py:281 +#, python-format +msgid "Error: file %s is missing." +msgstr "" + +#: urpm-repograph.py:301 urpm-repodiff.py:285 +#, python-format +msgid "file %(from)s was renamed to %(to)s" +msgstr "" + +#: urpm-repograph.py:311 urpm-repograph.py:541 urpm-repodiff.py:294 +#: urpm-repodiff.py:297 +msgid "unpacking file " +msgstr "" + +#: urpm-repograph.py:371 urpm-repodiff.py:410 +msgid "REPODIFF-Warning: strange : " +msgstr "" + +#: urpm-repograph.py:406 urpm-repodiff.py:351 +#, python-format +msgid "Error: Synthesis file %s was not found." +msgstr "" + +#: urpm-repograph.py:409 +msgid "Parsing synthesis." +msgstr "" + +#: urpm-repograph.py:435 +#, python-format +msgid "Warning: Unexpected sign %(sign)s in 'provides' section of %(of)s" +msgstr "" + +#: urpm-repograph.py:451 urpm-repodiff.py:380 +msgid "Error: Failed to open synthesis file " +msgstr "" + +#: urpm-repograph.py:555 +msgid "Reading fileslist" +msgstr "" + +#: urpm-repograph.py:557 +msgid "Error: Can't find fileslist " +msgstr "" + +#: urpm-repograph.py:561 +msgid "Error: Can't read fileslist " +msgstr "" + +#: urpm-repograph.py:565 +msgid "Error: Wrong fileslist." +msgstr "" + +#: urpm-repograph.py:578 +msgid "Error: Corrupted fileslist" +msgstr "" + +#: urpm-repograph.py:608 +msgid "Warning: cross-repository dependency: " +msgstr "" + +#: urpm-repograph.py:612 urpm-repograph.py:662 +msgid "Warning: package has self-dependecies: " +msgstr "" + +#: urpm-repograph.py:658 +#, python-format +msgid "" +"Warning: cross-repository dependency:\n" +" package %(pkg)s is dependent from\n" +" <- %(from)s located in another repository" +msgstr "" + +#: urpm-repograph.py:691 +#, python-format +msgid "Warning: needed version is absent <%(ver)s> %(rel)s required by package" +msgstr "" + +#: urpm-repograph.py:708 +#, python-format +msgid "Warning: Package %(pkg)s unprovided by %(by)s" +msgstr "" + +#: urpm-repograph.py:740 +msgid "Finding dependencies." +msgstr "" + +#: urpm-repograph.py:749 +#, python-format +msgid "" +"Warning: can't find <%(ask)s> required by package\n" +" <%(pkg)s>" +msgstr "" + +#: urpm-repograph.py:812 +msgid "Total cross-referenced packages: " +msgstr "" + +#: urpm-repograph.py:816 +msgid "Total unprovided packages: " +msgstr "" + +#: urpm-repograph.py:833 +msgid "Calculating colors." +msgstr "" + +#: urpm-repograph.py:1112 +msgid "Non-cycle nodes removed: " +msgstr "" + +#: urpm-repograph.py:1113 +msgid "Cyclic packages: " +msgstr "" + +#: urpm-repograph.py:1130 +#, python-format +msgid "Worktime: %s seconds" +msgstr "" + +#: urpm-repograph.py:1136 +msgid "Searching loops." +msgstr "" + +#: urpm-repograph.py:1140 urpm-repograph.py:1188 +msgid "End of search." +msgstr "" + +#: urpm-repograph.py:1141 +#, python-format +msgid "Loops search: %s seconds" +msgstr "" + +#: urpm-repograph.py:1145 +#, python-format +msgid "Total: %s loops." 
+msgstr "" + +#: urpm-repograph.py:1151 +msgid "Loop " +msgstr "" + +#: urpm-repograph.py:1168 +msgid "Searching alternatives." +msgstr "" + +#: urpm-repograph.py:1180 +#, python-format +msgid "Total: %d alternatives." +msgstr "" + +#: urpm-repograph.py:1182 +msgid "Alternative " +msgstr "" + +#: urpm-repograph.py:1182 +msgid " is provided by:" +msgstr "" + +#: urpm-repograph.py:1260 +msgid "Searching for broken packages." +msgstr "" + +#: urpm-repograph.py:1266 +msgid "Searching for packages REQUIRED by " +msgstr "" + +#: urpm-repograph.py:1268 +msgid "Searching for packages that REQUIRE " +msgstr "" + +#: urpm-repograph.py:1276 +#, python-format +msgid "Level %d dependency." +msgstr "" + +#: urpm-repograph.py:1355 +msgid "Remaking structures." +msgstr "" + +#: urpm-repograph.py:1367 +msgid "Error: can't find package name or filename \"" +msgstr "" + +#: urpm-repograph.py:1401 +msgid "Do not use -q/--quiet and -n/--nograph without -v/--verbose together." +msgstr "" + +#: urpm-repograph.py:1402 +msgid "That way there is no information to output anywhere. Nothing will be done." +msgstr "" + +#: urpm-repograph.py:1405 +msgid "Do not use -u/--unprovided and -b/--broken options together." +msgstr "" + +#: urpm-repograph.py:1406 +msgid "-b does everything that do -u and a little more." +msgstr "" + +#: urpm-downloader.py:91 +msgid "A tool for downloading RPMs and SRPMs from URPM-based linux repositories" +msgstr "" + +#: urpm-downloader.py:92 +msgid "If none of the options -b, -s, -d turned on, it will be treated as -b" +msgstr "" + +#: urpm-downloader.py:93 +msgid "Package name(s) to download. It can contain not only package names, but (S)RPM files too. In this case package name extracted from this file will be used" +msgstr "" + +#: urpm-downloader.py:94 +msgid "Instead of downloading files, list the URLs that would be processed" +msgstr "" + +#: urpm-downloader.py:95 +msgid "When downloading RPMs, resolve dependencies and also download the required packages, if they are not already installed" +msgstr "" + +#: urpm-downloader.py:96 +msgid "When downloading RPMs, resolve dependencies and also download the required packages, even if they are already installed" +msgstr "" + +#: urpm-downloader.py:97 +msgid "Download binary RPMs" +msgstr "" + +#: urpm-downloader.py:98 +msgid "Download the source RPMs (SRPMs)" +msgstr "" + +#: urpm-downloader.py:99 +msgid "Download debug RPMs" +msgstr "" + +#: urpm-downloader.py:100 +msgid "Download debug RPMs and install" +msgstr "" + +#: urpm-downloader.py:103 +msgid "Quiet operation." +msgstr "" + +#: urpm-downloader.py:106 +msgid "Exclude package(s) by regex" +msgstr "" + +#: urpm-downloader.py:107 +msgid "Try to continue when error occurs" +msgstr "" + +#: urpm-downloader.py:108 +msgid "If the file already exists, download it again and overwrite the old one" +msgstr "" + +#: urpm-downloader.py:109 +msgid "If package dependency can be satisfied by several packages, download all of them (by default, only the first one is downloaded)" +msgstr "" + +#: urpm-downloader.py:110 +msgid "If different versions of package present in repository, process them all" +msgstr "" + +#. arg_parser.add_argument('--self-test', action='store_true', help="Test urpm-downloader end exit") +#: urpm-downloader.py:113 +msgid "Specify a destination directory for the download" +msgstr "" + +#: urpm-downloader.py:130 +msgid "Use of --verbose with --quiet is senseless. Turning verbose mode off." 
+msgstr "" + +#: urpm-downloader.py:134 +msgid "Note that resolving of SRPM dependencies is not possible until SRPM downloaded. So, it will be done despite --urls" +msgstr "" + +#: urpm-downloader.py:375 +msgid "* Downloaded: " +msgstr "" + +#: urpm-downloader.py:377 +msgid "* File exists, skipping: " +msgstr "" + +#: urpm-downloader.py:476 +msgid "Can not download SRPM for package" +msgstr "" + +#: urpm-downloader.py:499 urpm-downloader.py:532 +msgid "Can not download RPM" +msgstr "" + +#: urpm-downloader.py:504 +msgid "Resolving debug-info packages..." +msgstr "" + +#. urpmq output. RU: Нет пакета с названием +#: urpm-downloader.py:509 +msgid "No package named " +msgstr "" + +#: urpm-downloader.py:533 +msgid "Maybe you need to update urpmi database (urpmi.update -a)?" +msgstr "" + +#: urpm-downloader.py:542 +msgid "Installing " +msgstr "" + +#. return code is not 0 +#: urpm-downloader.py:553 +#, python-format +msgid "Debug package for '%s' not found" +msgstr "" + +#: urpm-downloader.py:602 +msgid "Parameters that end with '.rpm' seem to be local files, but the folowing files do not exist: " +msgstr "" + +#: urpm-downloader.py:627 +msgid "Searching src.rpm file(s) in repository..." +msgstr "" + +#: urpm-downloader.py:629 +msgid "Downloading src.rpm file(s)..." +msgstr "" + +#: urpm-downloader.py:659 +msgid "Resolving build dependencies..." +msgstr "" + +#: urpm-downloader.py:661 +msgid "Resolving dependencies..." +msgstr "" + +#: urpm-downloader.py:663 +#, python-format +msgid "Resolved %d packages" +msgstr "" + +#: urpm-downloader.py:665 +msgid "Nothing to download" +msgstr "" + +#: urpm-repomanage.py:56 +#, python-format +msgid "Error accessing directory %(path)s, %(e)s" +msgstr "" + +#: urpm-repomanage.py:86 +msgid "manage a directory of rpm packages and report newest or oldest packages" +msgstr "" + +#: urpm-repomanage.py:92 +msgid "path to directory with rpm packages" +msgstr "" + +#: urpm-repomanage.py:95 +msgid "print the older packages" +msgstr "" + +#: urpm-repomanage.py:97 +msgid "print the newest packages (this is the default behavior)" +msgstr "" + +#: urpm-repomanage.py:99 +msgid "remove older packages" +msgstr "" + +#: urpm-repomanage.py:101 +msgid "space separated output, not newline" +msgstr "" + +#: urpm-repomanage.py:103 +msgid "number of newest packages to keep - defaults to 1" +msgstr "" + +#: urpm-repomanage.py:105 +msgid "do not check package payload signatures/digests" +msgstr "" + +#: urpm-repomanage.py:108 +msgid "be completely quiet" +msgstr "" + +#: urpm-repomanage.py:110 +msgid "be verbose - say which packages are decided to be old and why (this info is dumped to STDERR)" +msgstr "" + +#: urpm-repomanage.py:131 +msgid "No files to process" +msgstr "" + +#: urpm-repomanage.py:144 +#, python-format +msgid "Error opening pkg %(pkg)s: %(err)s" +msgstr "" + +#: urpm-repomanage.py:195 urpm-repomanage.py:221 +msgid "Dropped " +msgstr "" + +#: urpm-repomanage.py:196 urpm-repomanage.py:222 +msgid " superseded by: " +msgstr "" + +#: urpm-repodiff.py:83 +msgid "Tool for comparing sets of repositories." +msgstr "" + +#: urpm-repodiff.py:85 +msgid "URL or PATH to old repositories" +msgstr "" + +#: urpm-repodiff.py:87 +msgid "URL or PATH to new repositories" +msgstr "" + +#: urpm-repodiff.py:89 +msgid "Show differences in package sizes." +msgstr "" + +#: urpm-repodiff.py:91 +msgid "Simple output format." +msgstr "" + +#: urpm-repodiff.py:93 +msgid "Hide service messages." +msgstr "" + +#: urpm-repodiff.py:95 +msgid "Show changelog difference." 
+msgstr "" + +#: urpm-repodiff.py:97 +#, python-format +msgid "Output in HTML format, if --output is not present \"%s\" will be created in current directory. --size, --simple and --changelog options are ignored." +msgstr "" + +#: urpm-repodiff.py:101 +msgid "Change standart output to \"OUTPUT_FILE\"." +msgstr "" + +#: urpm-repodiff.py:174 +#, python-format +msgid "Error: Cannot open %s for writing." +msgstr "" + +#: urpm-repodiff.py:354 +msgid "Parsing synthesis" +msgstr "" + +#: urpm-repodiff.py:389 +msgid "REPODIFF-Warning: strange format of or : " +msgstr "" + +#: urpm-repodiff.py:527 +msgid "New package: " +msgstr "" + +#: urpm-repodiff.py:542 +msgid "Generating obsoleted list." +msgstr "" + +#: urpm-repodiff.py:601 +msgid "Removed package: " +msgstr "" + +#: urpm-repodiff.py:609 +msgid " Obsoleted by " +msgstr "" + +#: urpm-repodiff.py:630 +msgid "Reading changelog" +msgstr "" + +#: urpm-repodiff.py:632 +msgid "Error: Can't find changelog " +msgstr "" + +#: urpm-repodiff.py:636 +msgid "Error: Can't read changelog " +msgstr "" + +#: urpm-repodiff.py:640 +msgid "Error: Wrong changelog." +msgstr "" + +#: urpm-repodiff.py:662 +msgid "Error: Corrupted changelog" +msgstr "" + +#: urpm-repodiff.py:756 +msgid "Generating changes list." +msgstr "" + +#: urpm-repodiff.py:770 urpm-repodiff.py:773 +#, python-format +msgid "REPODIFF-Warning: Package %s was not described in changelogs.xml" +msgstr "" + +#: urpm-repodiff.py:771 +msgid "REPODIFF-Warning: Changelogs of a package are absent in \"new\" repository." +msgstr "" + +#: urpm-repodiff.py:774 +msgid "REPODIFF-Warning: Changelogs of a package are absent." +msgstr "" + +#: urpm-repodiff.py:800 +#, python-format +msgid "Package %s has no changelog info\n" +msgstr "" + +#: urpm-repodiff.py:818 +msgid "" +"\n" +"\n" +"Updated packages:\n" +"\n" +msgstr "" + +#: urpm-repodiff.py:825 +msgid " ***DOWNGRADED***\n" +msgstr "" + +#: urpm-repodiff.py:834 +#, python-format +msgid "" +"Size Change: %d bytes\n" +"\n" +msgstr "" + +#: urpm-repodiff.py:844 +msgid " Total added packages: " +msgstr "" + +#: urpm-repodiff.py:847 +msgid " Total removed packages: " +msgstr "" + +#: urpm-repodiff.py:856 +msgid " Total updated packages: " +msgstr "" + +#: urpm-repodiff.py:858 +msgid " Total downgraded packages: " +msgstr "" + +#: urpm-repodiff.py:1316 +msgid "Creating HTML file." +msgstr "" + +#: urpm-package-cleanup.py:58 +msgid "Find problems in the rpmdb of system and correct them" +msgstr "" + +#: urpm-package-cleanup.py:62 +msgid "Query format to use for output." +msgstr "" + +#: urpm-package-cleanup.py:65 +msgid "Use non-interactive mode" +msgstr "" + +#: urpm-package-cleanup.py:68 +msgid "Orphans Options" +msgstr "" + +#: urpm-package-cleanup.py:71 +msgid "List installed packages which are not available from currently configured repositories" +msgstr "" + +#: urpm-package-cleanup.py:75 +msgid "Use only update media. This means that urpmq will search and resolve dependencies only in media marked as containing updates (e.g. which have been created with \"urpmi.addmedia --update\")." +msgstr "" + +#: urpm-package-cleanup.py:80 +msgid "Select specific media to be used, instead of defaulting to all available media (or all update media if --update is used). No rpm will be found in other media." +msgstr "" + +#: urpm-package-cleanup.py:85 +msgid "Do not use the specified media." 
+msgstr "" + +#: urpm-package-cleanup.py:87 +msgid "Dependency Problems Options" +msgstr "" + +#: urpm-package-cleanup.py:90 +msgid "List dependency problems in the local RPM database" +msgstr "" + +#: urpm-package-cleanup.py:93 +msgid "List missing suggestions of installed packages" +msgstr "" + +#: urpm-package-cleanup.py:96 +msgid "Duplicate Package Options" +msgstr "" + +#: urpm-package-cleanup.py:99 +msgid "Scan for duplicates in your rpmdb" +msgstr "" + +#: urpm-package-cleanup.py:102 +msgid "Scan for duplicates in your rpmdb and remove older " +msgstr "" + +#: urpm-package-cleanup.py:105 +msgid "disable rpm scriptlets from running when cleaning duplicates" +msgstr "" + +#: urpm-package-cleanup.py:107 +msgid "Leaf Node Options" +msgstr "" + +#: urpm-package-cleanup.py:110 +msgid "List leaf nodes in the local RPM database" +msgstr "" + +#: urpm-package-cleanup.py:113 +msgid "list all packages leaf nodes that do not match leaf-regex" +msgstr "" + +#: urpm-package-cleanup.py:117 +msgid "A package name that matches this regular expression (case insensitively) is a leaf" +msgstr "" + +#: urpm-package-cleanup.py:121 +msgid "do not list development packages as leaf nodes" +msgstr "" + +#: urpm-package-cleanup.py:124 +msgid "do not list packages with files in a bin dirs as leaf nodes" +msgstr "" + +#: urpm-package-cleanup.py:127 +msgid "Old Kernel Options" +msgstr "" + +#: urpm-package-cleanup.py:130 +msgid "Remove old kernel and kernel-devel packages" +msgstr "" + +#: urpm-package-cleanup.py:133 +msgid "Number of kernel packages to keep on the system (default 2)" +msgstr "" + +#: urpm-package-cleanup.py:137 +msgid "Do not remove kernel-devel packages when removing kernels" +msgstr "" + +#: urpm-package-cleanup.py:306 +#, python-format +msgid "Warning: neither single nor multi lib arch: %s " +msgstr "" + +#: urpm-package-cleanup.py:417 +#, python-format +msgid "Not removing kernel %(kver)s-%(krel)s because it is the running kernel" +msgstr "" + +#: urpm-package-cleanup.py:447 +#, python-format +msgid "Package %(qf)s %(prob)s" +msgstr "" + +#: urpm-package-cleanup.py:450 +msgid "Missing suggests:" +msgstr "" + +#: urpm-package-cleanup.py:458 +msgid "No Problems Found" +msgstr "" + +#: urpm-package-cleanup.py:473 +msgid "Error: Cannot remove kernels as a user, must be root" +msgstr "" + +#: urpm-package-cleanup.py:476 +msgid "Error: should keep at least 1 kernel!" +msgstr "" + +#: urpm-package-cleanup.py:529 +msgid "Error: Cannot remove packages as a user, must be root" +msgstr "" diff --git a/urpm-tools.spec b/urpm-tools.spec new file mode 100644 index 0000000..081e529 --- /dev/null +++ b/urpm-tools.spec @@ -0,0 +1,80 @@ +Name: urpm-tools +Version: 2.1 +Release: 1 +Summary: Utilities that help to work with URPM-based repositories +Group: System/Configuration/Packaging +License: GPLv2 +URL: http://wiki.rosalab.ru/index.php/Urpm-tools +Source0: %{name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{name}-%{version} + +Requires: urpmi >= 6.68 +Requires: python-rpm >= 5.3 +Requires: libxml2-python >= 2.7 +Requires: gzip +Requires: python-rpm5utils = %{version} + +%description +%{name} is a collection of utilities for URPM-based repositories. +They make URPM-based repositories easier and more powerful to use. 
+These tools include: urpm-downloader, urpm-package-cleanup, +urpm-repoclosure, urpm-repodiff, urpm-repomanage, urpm-repograph, +urpm-reposync + +%package -n python-rpm5utils +Group: Development/Python +Summary: Auxiliary modules to work with rpm +Provides: python-rpm5utils = %{version}-%{release} + +%description -n python-rpm5utils +%{name} contains some useful modules that are used by %{name}. +Mostly taken from yum. + +%prep +%setup -q -n %{name}-%{version} + +%install +rm -rf %{buildroot} +make install DESTDIR=$RPM_BUILD_ROOT +%find_lang %{name} + +%files -f %{name}.lang +%defattr(-,root,root,-) + +%{_bindir}/urpm-downloader +%{_bindir}/urpm-package-cleanup +%{_bindir}/urpm-repoclosure +%{_bindir}/urpm-repodiff +%{_bindir}/urpm-repomanage +%{_bindir}/urpm-repograph +%{_bindir}/urpm-reposync +%{_mandir}/man1/urpm-downloader.1.xz +%{_mandir}/man1/urpm-package-cleanup.1.xz +%{_mandir}/man1/urpm-repoclosure.1.xz +%{_mandir}/man1/urpm-repodiff.1.xz +%{_mandir}/man1/urpm-repomanage.1.xz +%{_mandir}/man1/urpm-repograph.1.xz +%{_mandir}/man1/urpm-reposync.1.xz + +%{_datadir}/locale/*/LC_MESSAGES/urpm-tools.mo +%doc COPYING + +%files -n python-rpm5utils +%defattr(-,root,root,-) +%dir %{py_puresitedir}/rpm5utils +%dir %{py_puresitedir}/rpm5utils/tests +%dir %{py_puresitedir}/rpm5utils/urpmgraphs +%dir %{py_puresitedir}/rpm5utils/urpmgraphs/algorithms +%dir %{py_puresitedir}/rpm5utils/urpmgraphs/algorithms/components +%dir %{py_puresitedir}/rpm5utils/urpmgraphs/classes + +%{py_puresitedir}/urpmmisc.py +%{py_puresitedir}/rpm5utils/*.py* +%{py_puresitedir}/rpm5utils/tests/*.py* +%{py_puresitedir}/rpm5utils/urpmgraphs/*.py* +%{py_puresitedir}/rpm5utils/urpmgraphs/algorithms/*.py* +%{py_puresitedir}/rpm5utils/urpmgraphs/algorithms/components/*.py* +%{py_puresitedir}/rpm5utils/urpmgraphs/classes/*.py* + +%doc rpm5utils/COPYING diff --git a/urpm-tools/AUTHORS b/urpm-tools/AUTHORS new file mode 100644 index 0000000..91a101f --- /dev/null +++ b/urpm-tools/AUTHORS @@ -0,0 +1,9 @@ +------------------- +Urpm-tools Authors +------------------- + + Anton Kirilenko + Andrey Ponomarenko + Denis Silakov + Vladimir Testov + diff --git a/urpm-tools/COPYING b/urpm-tools/COPYING new file mode 100644 index 0000000..e77696a --- /dev/null +++ b/urpm-tools/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 675 Mass Ave, Cambridge, MA 02139, USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. 
+ +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) 19yy + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) 19yy name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
diff --git a/urpm-tools/Makefile b/urpm-tools/Makefile
new file mode 100644
index 0000000..61fb63b
--- /dev/null
+++ b/urpm-tools/Makefile
@@ -0,0 +1,43 @@
+SUBDIRS = rpm5utils
+PKGNAME = urpm-tools
+PYTHON_UTILS = urpm-downloader urpm-package-cleanup urpm-repodiff urpm-repomanage urpm-repograph urpm-reposync
+PERL_UTILS = urpm-repoclosure
+
+PYTHON=python
+PYFILES = $(wildcard *.py)
+PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')
+PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix')
+PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER)
+PKGDIR = $(PYLIBDIR)/site-packages
+SHELL=/bin/bash
+all:
+	@echo "Nothing to do. Run 'make install' or 'make clean'"
+
+clean:
+	rm -f *.pyc *.pyo *~
+	rm -f test/*~
+	rm -f *.tar.gz
+
+install:
+	mkdir -p $(DESTDIR)/usr/bin/
+	mkdir -p $(DESTDIR)/usr/share/man/man1
+	for util in $(PYTHON_UTILS); do \
+		install -m 755 $$util.py $(DESTDIR)/usr/bin/$$util; \
+		install -m 664 docs/$$util.1 $(DESTDIR)/usr/share/man/man1/$$util.1; \
+	done
+
+	for util in $(PERL_UTILS); do \
+		install -m 755 $$util.pl $(DESTDIR)/usr/bin/$$util; \
+		install -m 664 docs/$$util.1 $(DESTDIR)/usr/share/man/man1/$$util.1; \
+	done
+
+	for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR) -C $$d install; [ $$? = 0 ] || exit 1; done
+
+	install -m 644 urpmmisc.py $(DESTDIR)/$(PKGDIR)/urpmmisc.py;
+
+
+	for d in `python localizer.py --list`; do\
+		mkdir -p $(DESTDIR)/usr/share/locale/$$d/LC_MESSAGES;\
+		install -m 644 locale/$$d/LC_MESSAGES/urpm-tools.mo $(DESTDIR)/usr/share/locale/$$d/LC_MESSAGES/urpm-tools.mo;\
+	done
+	
\ No newline at end of file
diff --git a/urpm-tools/README b/urpm-tools/README
new file mode 100644
index 0000000..0fd7377
--- /dev/null
+++ b/urpm-tools/README
@@ -0,0 +1,7 @@
+Urpm-tools - a set of utilities for working with URPM repositories.
+They make URPM-based repositories easier and more powerful to use.
+These tools include: urpm-downloader, urpm-package-cleanup,
+urpm-repoclosure, urpm-repodiff, urpm-repomanage, urpm-repograph,
+urpm-reposync
+
+rpm5utils is based on rpmUtils from yum, http://yum.baseurl.org
diff --git a/urpm-tools/docs/urpm-downloader.1 b/urpm-tools/docs/urpm-downloader.1
new file mode 100644
index 0000000..9524489
--- /dev/null
+++ b/urpm-tools/docs/urpm-downloader.1
@@ -0,0 +1,85 @@
+.\" urpm-downloader
+.TH "urpm-downloader" "1" "21 December 2011" "Anton Kirilenko" ""
+.SH "NAME"
+urpm-downloader - download RPMs from URPM-based Linux repositories
+.SH "SYNOPSIS"
+\fBurpm-downloader\fP [options] package(s)
+.SH "DESCRIPTION"
+.PP
+\fBurpm-downloader\fP is a tool for downloading RPMs and SRPMs from URPM-based Linux repositories.
+.PP
+\fBpackage\fP Package name(s) to download. The arguments may be not only package names but also (S)RPM files; in that case the package name extracted from the file is used.
+.PP
+.SH "GENERAL OPTIONS"
+.IP "\fB\-h, \-\-help\fP"
+Help; display a help message and then quit.
+.IP "\fB\-\-version\fP"
+Report program version and exit.
+.IP "\fB\-\-dest\-dir\fP"
+Specify a destination directory for the download.
+.IP "\fB\-v, \-\-verbose\fP"
+Verbose (print additional info).
+.IP "\fB-q, \-\-quiet\fP"
+Quiet operation.
+.IP "\fB\-\-include\-media, \-\-media\fP"
+Use only selected URPM media.
+.IP "\fB\-\-exclude\-media\fP"
+Do not use selected URPM media.
+.IP "\fB\-\-fail\-broken\fP"
+Exit if package dependencies cannot be resolved.
+.IP "\fB\-i, \-\-ignore-errors\fP" +Try to continue when error occurs + +.PP +.SH "DOWNLOAD OPTIONS" +.IP "\fB\-s, \-\-source\fP" +Download the source RPMs (SRPMs) +.IP "\fB\-u, \-\-urls\fP" +Instead of downloading files, list the URLs that would be processed + +.IP "\fB\-b, \-\-binary\fP" +Download binary RPMs +.IP "\fB\-s, \-\-source\fP" +Download the source RPMs (SRPMs) +.IP "\fB\-d, \-\-debug-info \fP" +Download debug RPMs + +.IP "\fB\-r, \-\-resolve\fP" +When downloading RPMs, resolve dependencies and also download the required packages, if they are not already installed +.IP "\fB\-a, \-\-resolve\-all\fP" +When downloading RPMs, resolve dependencies and also download the required packages, even if they are already installed +.IP "\fB\-x, \-\-exclude\-packages\fP" +Exclude package(s) by regex +.IP "\fB\-o, \-\-overwrite\fP" +If the file already exists, download it again and overwrite the old one +.IP "\fB\-\-all\-alternatives\fP" +If package dependency can be satisfied by several packages, download all of them (by default, only the first one is downloaded) +.IP "\fB\-\-all\-versions\fP" +If different versions of package present in repository, process them all +.PP +.SH "EXAMPLES" +.IP "Download RPMs for given packages (pk1, pk2, ...) into the directory 'path':" +\fBurpm-downloader --dest-dir path pkg1 pkg2\fP +.IP "Download SRPMs for given packages (pk1, pk2, ...) into the current directory:" +\fBurpm-downloader -s pkg1 pkg2\fP +.IP "Download the package with a whole dependency tree to the specified directory:" +\fBurpm-downloader -a --dest-dir path package-name\fP +.IP "You want to rebuild existing rpm. Download corresponding SRPM and all the packages missing for building:" +\fBurpm-downloader -sr --dest-dir path package.rpm\fP +.PP +.SH "EXIT CODES" +.IP \fB0\fP +Completed successfully +.IP \fB1\fP +Error calling external command (urpmq, rpm, etc.). This command output will be printed before exit +.IP \fB2\fP +Can not download SRPM +.IP \fB3\fP +Can not download RPM +.IP \fB4\fP +One or more specified rpm files not exist +.PP +.SH "AUTHORS" +.nf +See the Authors file included with this program. +.fi diff --git a/urpm-tools/docs/urpm-package-cleanup.1 b/urpm-tools/docs/urpm-package-cleanup.1 new file mode 100644 index 0000000..6afa4ff --- /dev/null +++ b/urpm-tools/docs/urpm-package-cleanup.1 @@ -0,0 +1,100 @@ +.\" package-cleanup +.TH "urpm-package-cleanup" "1" "21 December 2011" "Denis Silakov" "" +.SH "NAME" +urpm-package-cleanup - find and fix rpmdb problems +.SH "SYNOPSIS" +\fBurpm-package-cleanup\fP [options] +.SH "DESCRIPTION" +.PP +\fBurpm-package-cleanup\fP is a program for cleaning up the locally-installed RPMs. +.PP +.SH "GENERAL OPTIONS" +.IP "\fB\-h, \-\-help\fP" +Help; display a help message and then quit\&. +.IP "\fB\-v, \-\-version\fP" +Report program version and exit. +.IP "\fB\-\-leaves\fP" +List leaf nodes in the local RPM database. Leaf nodes are RPMs that +are not relied upon by any other RPM. +.IP "\fB\-\-orphans\fP" +List installed packages which are not available from currently configured +repositories. This is identical to "urpmq --not-available". +.IP "\fB\-\-oldkernels\fP" +Remove old kernel and kernel-devel packages. +.IP "\fB\-\-problems\fP" +List dependency problems in the local RPM database. +.IP "\fB\-\-dupes\fP" +Scan for duplicates in the local RPM database. +.PP +.SH "LEAVES OPTIONS" +.IP "\fB\-\-all\fP" +When listing leaf nodes also list leaf nodes that are +not libraries. 
+.IP "\fB\-\-leaf\-regex\fP" +A package name that matches this regular expression will be considered a leaf. +.IP "\fB\-\-exclude\-devel\fP" +When listing leaf nodes do not list development packages. +.IP "\fB\-\-exclude\-bin\fP" +When listing leaf nodes do not list packages with files in bin directories. +.PP +.SH "OLDKERNELS OPTIONS" +.IP "\fB\-\-count \fP" +Number of kernel packages to keep on the system (default 2) +.IP "\fB\-\-keepdevel\fP" +Do not remove kernel-devel packages when removing kernels +.PP +.SH "DUPLICATE PACKAGE OPTIONS" +.IP "\fB\-\-cleandupes\fP" +Scan for duplicates in the local RPM database and clean out the +older versions. +.IP "\fB\-\-noscripts\fP" +Disable rpm scriptlets from running when cleaning duplicates +.PP +.SH "DEPENDENCY PROBLEMS OPTIONS" +.IP "\fB\-\-suggests\fP" +List missing suggestions of installed packages + + +.SH "EXAMPLES" +.IP "List all dependency problems:" +\fBurpm-package-cleanup --problems\fP +.IP "List all packages that are not in any configured repository:" +\fBurpm-package-cleanup --orphans\fP +.IP "Remove old kernels keeping 3 and leaving old kernel-devel packages installed:" +\fBurpm-package-cleanup --oldkernels --count=3 --keepdevel\fP +.PP +.IP "List all leaf packages with no files in a bin directory whose name begins with either 'perl' or 'python':" +\fBurpm-package-cleanup --leaves --exclude-bin --leaf-regex="^(perl)|(python)"\fP +.PP +.SH "FILES" +For some actions urpm-package-cleanup invokes urpmi and relies on its +configuration file: +.PP +.nf +/etc/urpmi/urpmi.cfg +.fi + +.PP +.SH "EXIT CODES" +.IP \fB0\fP +Completed successfully +.IP \fB1\fP +Script execution error (wrong option, insufficient permissions, etc.) +.IP \fB2\fP +Unsatisfied dependencies detected +.IP \fB3\fP +Unsatisfied soft dependencies detected +.IP \fB100\fP +Illegal option value + +.PP +.SH "SEE ALSO" +.nf +.I urpmi.cfg (1) +.fi + +.PP +.SH "AUTHORS" +.nf +See the Authors file included with this program. +.fi diff --git a/urpm-tools/docs/urpm-repoclosure.1 b/urpm-tools/docs/urpm-repoclosure.1 new file mode 100644 index 0000000..d144a5d --- /dev/null +++ b/urpm-tools/docs/urpm-repoclosure.1 @@ -0,0 +1,77 @@ +.\" urpm-repoclosure +.TH "urpm-repoclosure" "1" "21 February 2012" "Andrey Ponomarenko" "" +.SH "NAME" +urpm-repoclosure - check closure of a set of RPM packages +.SH "SYNOPSIS" +\fBurpm-repoclosure\fP [options] +.SH "DESCRIPTION" +.PP +\fBurpm-repoclosure\fP a tool for checking closure of a set of RPM packages +.PP +.SH "GENERAL OPTIONS" +.IP "\fB\-h, \-help\fP" +Print this help. + +.IP "\fB\-v, \-version\fP" +Print version information. + +.IP "\fB\-hdlist \fP" +Path or URL of HDlist (synthesis) to check. + +.IP "\fB\-d, \-dir \fP" +The directory with RPM packages to check. + +.IP "\fB\-l, \-list \fP" +The list of packages to check. + +.IP "\fB\-add, \-update \fP" +The directory with RPM packages that should +be added to the repository or updated. + +.IP "\fB\-file\-deps \fP" +Read file\-deps to ignore some unresolved +dependencies. + +.IP "\fB\-s, \-static\fP" +Check statically if all required dependencies are +satisfied by provided dependencies in the set of +RPM packages. + +.IP "\fB\-dynamic\fP" +Install a set of RPM packages to the local chroot +and check if extra packages were installed. + +.IP "\fB\-r, \-check\-release\fP" +Check installation media (DVD). + +.IP "\fB\-sign, \-check\-signature\fP" +Validate package signatures. + +.IP "\fB\-noclean\fP" +Do not clean urpmi cache. + +.IP "\fB\-root \fP" +Where to install packages. + Default: /tmp/... 
+.PP
+
+.SH "EXIT CODES"
+.IP "0 \- Success. The tool ran without any errors and did not discover any issues."
+.IP "non\-zero \- Failure, or the tool ran with errors. In particular:"
+.IP "1 \- Failed to run the tool"
+.IP "2 \- Discovered dependency problems"
+
+.SH "EXAMPLES"
+.IP "Run a static test using an hdlist:"
+\fBurpm-repoclosure --hdlist=hdlist.txt\fP
+
+\fBurpm-repoclosure --hdlist=http://mirror.yandex.ru/mandriva/.../synthesis.hdlist.cz\fP
+.IP "Check closure of a local set of RPMs:"
+\fBurpm-repoclosure --dir=rpms/ --static\fP
+.IP "Check a set of RPMs specified in list.txt:"
+\fBurpm-repoclosure --list=list.txt --dynamic\fP
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/urpm-tools/docs/urpm-repodiff.1 b/urpm-tools/docs/urpm-repodiff.1
new file mode 100644
index 0000000..3b3b02b
--- /dev/null
+++ b/urpm-tools/docs/urpm-repodiff.1
@@ -0,0 +1,49 @@
+.\" urpm-repodiff
+.TH "urpm-repodiff" "1" "21 December 2011" "Vladimir Testov" "Mandriva Package Management"
+.SH "NAME"
+urpm-repodiff - diff for urpmi repositories
+.SH "SYNOPSIS"
+\fBurpm-repodiff\fP [options] --old old_repo_baseurl [old_repo_baseurl ...] --new new_repo_baseurl [new_repo_baseurl ...]
+.SH "DESCRIPTION"
+.PP
+\fBurpm-repodiff\fP is a program which lists differences between two sets of
+repositories.
+.PP
+.SH "GENERAL OPTIONS"
+.IP "\fB\-\-old, -o\fP"
+"Old" repository, or a list of "old" repositories if several are present.
+.IP "\fB\-\-new, -n\fP"
+"New" repository, or a list of "new" repositories if several are present.
+.IP "\fB\-\-quiet, -q\fP"
+Quiet mode: hide service messages.
+.PP
+.SH "USUAL OUTPUT OPTIONS"
+.IP "\fB\-\-size, -s\fP"
+Show differences in package sizes.
+.IP "\fB\-\-simple\fP"
+Simple output format.
+.IP "\fB\-\-changelog\fP"
+Show changelog difference.
+.PP
+.SH "HTML OUTPUT OPTION"
+.IP "\fB\-\-html\fP"
+Output the difference as an HTML page. When this option is used, the \fB--size, -s\fP, \fB--simple\fP and \fB--changelog\fP options are ignored.
+If the \fB--output, -o\fP option is not present, the page is written to the file 'repodiff.html' in the current directory.
+.PP
+.SH "OUTPUT OPTION"
+.IP "\fB\-\-output, -o OUTPUT_FILE\fP"
+Redirect standard output to OUTPUT_FILE.
+.SH "EXAMPLES"
+.IP "Compare packages in two local repositories:"
+\fBurpm-repodiff --old /tmp/repo-old --new /tmp/repo-new\fP
+.IP "Compare packages in two remote repositories, and two local ones:"
+\fBurpm-repodiff --old http://example.com/repo1-old --old /tmp/repo-old --new http://example.com/repo1-new --new /tmp/repo-new\fP
+.IP "Compare packages using the simple report format (no changelog difference), but report differences in package size:"
+\fBurpm-repodiff --old /tmp/repo-old --new /tmp/repo-new --size --simple\fP
+.PP
+
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/urpm-tools/docs/urpm-repograph.1 b/urpm-tools/docs/urpm-repograph.1
new file mode 100644
index 0000000..c2a0976
--- /dev/null
+++ b/urpm-tools/docs/urpm-repograph.1
@@ -0,0 +1,106 @@
+.\" urpm-repograph
+.TH "urpm-repograph" "1" "21 December 2011" "Vladimir Testov" "Mandriva Package Management"
+.SH "NAME"
+urpm-repograph - build a dependency graph of a repository
+.SH "SYNOPSIS"
+\fBurpm-repograph\fP [options] REPOSITORY
+.SH "DESCRIPTION"
+.PP
+\fBurpm-repograph\fP is a tool for generating a dependency graph for REPOSITORY packages.
+Output is in the format of the "DOT" graph language.
+It can also check for missing dependencies, track cross-repository dependencies, and search for and display dependency cycles
+(A needs B, B needs C, C needs A) and alternatives ("word" is provided by
+A, B and C). With the \fB--whatrequires\fP and \fB--requires-recursive\fP options the tool can
+select only that part of the graph which is provided (in the recursive sense) by PKG or
+requires (also in the recursive sense) PKG. Note that warnings about repository mismatches
+are not shown in the latter case.
+.PP
+.SH "GENERAL OPTIONS"
+.IP "\fBREPOSITORY\fP"
+The only required argument. URL (starting with "http://" or "ftp://")
+or PATH (absolute or relative, may start with "file://")
+of the repository, i.e. the URL or path which contains the packages and the directory "media_info",
+which is the only object of interest for this tool. (You can download the files
+"synthesis.hdlist.cz" and "files.xml.lzma" separately to a folder, for example
+"./A/media_info", and run the tool with "./A": "urpm-repograph ./A";
+"files.xml.lzma" is needed only if the \fB--file, -f\fP option is present.)
+.IP "\fB\-\-cross, -c CROSS_REPO [CROSS_REPO ...]\fP"
+Check \fBCROSS_REPO(s)\fP for cross-repository dependencies. Note that dependencies inside \fBCROSS_REPO(s)\fP
+(PKG1 from CROSS_REPO(s) needs PKG2 from CROSS_REPO(s)) are not shown, while dependencies inside \fBREPOSITORY\fP are.
+.IP "\fB\-\-quiet, -q\fP"
+Hide service messages (all kinds of status messages).
+Note that you cannot use the \fB--quiet, -q\fP and \fB--nograph, -n\fP options together without \fB--verbose, -v\fP.
+.IP "\fB\-\-verbose, -v\fP"
+Show extended information: more detailed status messages, plus warnings
+about missing dependencies, self-dependencies and cross-repository dependencies.
+Note that you cannot use the \fB--quiet, -q\fP and \fB--nograph, -n\fP options together without \fB--verbose, -v\fP.
+.IP "\fB\-\-requires, -r\fP"
+This description also covers \fB--suggests, -s\fP. These two options declare for which types of dependencies
+the graph should be built and which dependencies should be checked and processed.
+\fB--requires, -r\fP - required dependencies, as in an RPM spec file. \fB--suggests, -s\fP - suggested dependencies, as in an RPM spec file.
+If neither option is present, the tool works as if the \fB--requires, -r\fP option were present.
+.IP "\fB\-\-suggests, -s\fP"
+See the \fB--requires, -r\fP description.
+.IP "\fB\-\-file, -f\fP"
+Process file dependencies. If not present, the tool skips both checking and processing
+of file dependencies. If present, "files.xml.lzma" must be available.
+.IP "\fB\-\-unprovided, -u\fP"
+Show unprovided dependencies. Unprovided entries in the requires (and/or suggests) sections of synthesis.hdlist are shown in the final graph.
+Do not use with the \fB--broken, -b\fP option; an error is reported and the run is terminated.
+\fB--broken, -b\fP does the same as \fB--unprovided, -u\fP and more, so there is no sense in using these two options together.
+.PP
+.SH "PACKAGE SPECIFIC OPTIONS"
+Only one option in this group can be present. PKG is either a package name (e.g. urpm-tools)
+or a full package name (with version, release, etc.).
+Note that if an option from this group is present, PKG is checked first: if there is no package named PKG in \fBREPOSITORY\fP and
+(if the \fB--cross, -c\fP option is present) no package named PKG in \fBCROSS_REPO(s)\fP
+(or no cross-repository dependencies to or from PKG are actually present in \fBCROSS_REPO(s)\fP),
+then the sub-graph is not selected, a warning is shown and the tool stops.
+Also note that most warnings are suppressed in this mode (even if the \fB--verbose, -v\fP option is present).
+If the \fB--verbose, -v\fP option is present, the list of selected packages, along with some types of warnings, is written to STDOUT.
+.IP "\fB\-\-requires-recursive PKG\fP"
+Search for packages which are required by package PKG
+(in the recursive sense; for example, if PKG needs PKG2 and PKG2 needs PKG3,
+then PKG3 is also checked and processed, and so on).
+.IP "\fB\-\-whatrequires PKG\fP"
+Search for packages which require package PKG
+(in the recursive sense; for example, if PKG is needed by PKG2 and PKG2 is needed by PKG3,
+then PKG3 is also checked and processed, and so on).
+.PP
+.SH "ALTERNATIVE TASK OPTIONS"
+Only one option from this group can be present. Note that \fB--requires-recursive\fP and \fB--whatrequires\fP processing is performed first (if present).
+So, for example, you can select the subgraph connected with a specific package and then select the subgraph of broken packages from the first subgraph.
+If the \fB--loops, -l\fP, \fB--alternatives, -a\fP or \fB--broken, -b\fP option is present, a different graph is shown and additional algorithms are performed.
+.IP "\fB\-\-loops, -l\fP"
+Search for all simple loops of cyclic dependencies.
+.IP "\fB\-\-alternatives, -a\fP"
+Search for alternative packages providing the same feature.
+.IP "\fB\-\-broken, -b\fP"
+Search for broken packages and those which depend on them.
+.IP "\fB\-\-different, -d\fP"
+Output each loop or each alternative to a different file. \fBOUTPUT_FILE\fP is treated as a folder name for the new files in that case.
+Ignored if both \fB--loops, -l\fP and \fB--alternatives, -a\fP options are absent. Also ignored if the \fB--output, -o\fP option is not present.
+.PP
+.SH "OUTPUT OPTIONS"
+Only one option in this group can be present. If not specified, the graph is written to STDOUT.
+.IP "\fB\-\-output, -o OUTPUT_FILE\fP"
+Output the graph to the specified file OUTPUT_FILE. OUTPUT_FILE is treated as a directory name if the \fB--different, -d\fP option is present.
+.IP "\fB\-\-nograph, -n\fP"
+Do not output the graph.
+Note that you cannot use the \fB--quiet, -q\fP and \fB--nograph, -n\fP options together without \fB--verbose, -v\fP.
+.PP
+.SH "EXAMPLES"
+.IP "Analyze a local repository and output the graph to the file './full-graph.dot', showing service messages:"
+\fBurpm-repograph /tmp/repo -v -o ./full-graph.dot\fP
+.IP "Analyze an external repository, hide service messages, show warnings and save them into 'warnings.txt':"
+\fBurpm-repograph http://example.com/repo -qvn > warnings.txt\fP
+.IP "Analyze two external repositories, 'http://example.com/main/release' and the additional 'http://example.com/contrib/release'. Select only packages that require 'example-pkg' (recursively). Search for loops in this group of packages and output every loop to a different file in the directory '/tmp/tmp-forever':"
+\fBurpm-repograph http://example.com/main/release -c http://example.com/contrib/release --whatrequires example-pkg -qad -o /tmp/tmp-forever/\fP
+.PP
+
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/urpm-tools/docs/urpm-repomanage.1 b/urpm-tools/docs/urpm-repomanage.1
new file mode 100644
index 0000000..5667885
--- /dev/null
+++ b/urpm-tools/docs/urpm-repomanage.1
@@ -0,0 +1,56 @@
+.\" urpm-repomanage
+.TH "urpm-repomanage" "1" "21 December 2011" "Denis Silakov" "Mandriva Package Management"
+.SH "NAME"
+urpm-repomanage - report newest and oldest packages in a given set
+.SH "SYNOPSIS"
+\fBurpm-repomanage\fP [-h] [-o | -n] [-r] [-s] [-k KEEP] [-c] [-q | -V] path
+.SH "DESCRIPTION"
+.PP
+\fBurpm-repomanage\fP is a program that scans a directory of RPM packages and reports the newest or oldest packages.
+.PP
+.SH "ARGUMENTS"
+.IP "\fBpath\fP"
+Path to a directory with RPM packages. The tool traverses the directory recursively
+and analyzes all RPM packages found.
+.PP
+.SH "OPTIONS"
+.IP "\fB\-\-help, -h\fP"
+show help message and exit
+.IP "\fB\-\-old, -o\fP"
+print the older packages
+.IP "\fB\-\-new, -n\fP"
+print the newest packages (this is the default behavior)
+.IP "\fB\-\-remove-old, -r\fP"
+remove older packages
+.IP "\fB\-\-space, -s\fP"
+space-separated output, not newline
+.IP "\fB\-\-keep KEEP, -k KEEP\fP"
+number of newest packages to keep; defaults to 1
+.IP "\fB\-\-nocheck, -c\fP"
+do not check package payload signatures/digests
+.IP "\fB\-\-quiet, -q\fP"
+be completely quiet
+.IP "\fB\-\-verbose, -V\fP"
+be verbose - report which packages are considered old
+and why (this info is dumped to STDERR)
+
+.SH "EXIT CODES"
+.IP "0 \- Success. The tool ran without any errors and no old packages were found."
+.IP "1 \- No packages were found"
+.IP "2 \- Illegal option or missing argument"
+.IP "3 \- The tool ran successfully and detected old packages"
+
+.SH "EXAMPLES"
+.IP "Scan a local directory with packages and for every package name print only the file with the latest version:"
+\fBurpm-repomanage /tmp/repo\fP
+.IP "Scan a local directory with packages, detect the two latest versions of every package and print the older versions. For every old package, print the names of newer packages:"
+\fBurpm-repomanage --old -V -k 2 /tmp/repo\fP
+.IP "Remove older packages in a local directory without printing anything to the terminal:"
+\fBurpm-repomanage --remove-old -q /tmp/repo\fP
+.PP
+
+.PP
+.SH "AUTHORS"
+.nf
+See the Authors file included with this program.
+.fi
diff --git a/urpm-tools/docs/urpm-reposync.1 b/urpm-tools/docs/urpm-reposync.1
new file mode 100644
index 0000000..b08213d
--- /dev/null
+++ b/urpm-tools/docs/urpm-reposync.1
@@ -0,0 +1,69 @@
+.\" urpm-reposync
+.TH "urpm-reposync" "1" "21 December 2011" "Anton Kirilenko" ""
+.SH "NAME"
+urpm-reposync - synchronize packages on your computer with the repository
+.SH "SYNOPSIS"
+\fBurpm-reposync\fP [options]
+.SH "DESCRIPTION"
+.PP
+\fBurpm-reposync\fP is used to synchronize a set of packages on the local computer with the remote repository.
+.PP
+
+.SH "OPTIONS"
+.IP "\fB\-h, \-\-help\fP"
+Help; display a help message and then quit.
+.IP "\fB\-v, \-\-verbose\fP" +Verbose (print additional info) +.IP "\fB\-q, \-\-quiet\fP" +Quiet operation +.IP "\fB\-\-include\-media, \-\-media\fP" +Use only selected URPM media +.IP "\fB\-\-exclude\-media\fP" +Do not use selected URPM media +.IP "\fB\-a, \-\-auto\fP" +Do not ask questions, just do it! + +.IP "\fB\-\-include-media, \-\-media\fP" +Use only selected URPM media +.IP "\fB\-\-exclude-media\fP" +Do not use selected URPM media +.IP "\fB\-v, \-\-verbose\fP" +Verbose (print additional info) +.IP "\fB\-q, \-\-quiet\fP" +Quiet operation. Senseless without --auto +.IP "\fB\-a, \-\-auto\fP" +Do not ask questions, just do it! +.IP "\fB\-p, \-\-printonly\fP" +Only print the list of actions to be done and do nothing more! +.IP "\fB\-d, \-\-download\fP" +Only download the rpm files, but install or remove nothing. +.IP "\fB\-n, \-\-noremove\fP" +Do not remove packages at all. If some installed package prevent another package from beeing updated - do not update it. +.IP "\fB\-c, \-\-check\fP" +Download packages and check wether they can be installed to your system, but do not install them. +.IP "\fB\-k, \-\-nokernel\fP" +Do nothing with kernels. +.IP "\fB\-\-runselftests\fP" +Run self-tests end exit. +.IP "\fB\-\-detailed\fP" +Show detailed information about packages are going to be removed or installed (why does it have to be done) + + +.SH "EXIT CODES" +.IP \fB0\fP +Completed successfully +.IP \fB1\fP +Error calling external command (urpmq, rpm, etc.). This command output will be printed before exit +.IP \fB2\fP +Incorrect command line options combination. For example, if you try to execute it with --auto and --quiet +.IP \fB3\fP +Dependencies were resolved incorrectly. Please, contact the tool developer and provide the full program output. +.IP \fB4\fP +Inconsistent repository. Please, contact distributive maintainers and show the the output. +.IP \fB5\fP +Error while downloading rpm file. +.PP +.SH "AUTHORS" +.nf +See the Authors file included with this program. +.fi diff --git a/urpm-tools/locale/ru/LC_MESSAGES/urpm-tools.po b/urpm-tools/locale/ru/LC_MESSAGES/urpm-tools.po new file mode 100644 index 0000000..046ea84 --- /dev/null +++ b/urpm-tools/locale/ru/LC_MESSAGES/urpm-tools.po @@ -0,0 +1,1422 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2012 +# This file is distributed under the same license as the urpm-tools package. +# Anton Kirilenko , 2012. +# +msgid "" +msgstr "" +"Project-Id-Version: 1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2012-08-21 16:34+0400\n" +"PO-Revision-Date: 2012-08-21 16:35+0300\n" +"Last-Translator: Anton Kirilenko \n" +"Language-Team: RUSSIAN\n" +"Language: ru\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. if not fatal_fails, do nothing. Caller have to deal with that himself +#. rpm return code is not 0 +#: urpm-reposync.py:64 +#: urpm-downloader.py:156 +#: urpm-downloader.py:546 +msgid "Error while calling command" +msgstr "Ошибка при выполнении команды" + +#: urpm-reposync.py:66 +#: urpm-downloader.py:158 +msgid "Error message: \n" +msgstr "Сообщение об ошибке: \n" + +#: urpm-reposync.py:74 +msgid "reposync is used to synchronize a set of packages on the local computer with the remote repository." +msgstr "Инструмент reposync используется для синхронизации установленных на компьютере пакетов с удаленным репозиторием." 
+ +#: urpm-reposync.py:76 +#: urpm-downloader.py:104 +msgid "Use only selected URPM media" +msgstr "Использовать только указанные источники" + +#: urpm-reposync.py:77 +#: urpm-downloader.py:105 +msgid "Do not use selected URPM media" +msgstr "Не использовать указанные источники" + +#. arg_parser.add_argument('-x', '--exclude-packages', action='store',nargs = '+', help="Exclude package(s) by regex") +#: urpm-reposync.py:79 +#: urpm-downloader.py:102 +msgid "Verbose (print additional info)" +msgstr "Выводить при исполнении отладочную информацию" + +#: urpm-reposync.py:80 +msgid "Quiet operation. Senseless without --auto." +msgstr "Ничего не выводить на экран. Не используется без --auto." + +#: urpm-reposync.py:81 +msgid "Do not ask questions, just do it!" +msgstr "Выполнять все действия без вопросов" + +#: urpm-reposync.py:82 +msgid "Only print the list of actions to be done and do nothing more!" +msgstr "Только вывести список планируемых действий и выйти." + +#: urpm-reposync.py:83 +msgid "Only download the rpm files, but install or remove nothing." +msgstr "Только скачать пакеты, но ничего не устанавливать и не удалять." + +#. arg_parser.add_argument('-n', '--noremove', action='store_true', help=_("Do not remove packages at all. If some installed package prevent another package from beeing updated - do not update it.")) +#: urpm-reposync.py:85 +msgid "Remove all the packages which do not present in repository. By default, only some of them would be removed." +msgstr "Удалять все пакеты, которых нет в репозитории. По умолчанию инструмент пытается сохранить их, если возможно." + +#: urpm-reposync.py:86 +msgid "Download packages and check wether they can be installed to your system, but do not install them." +msgstr "Скачать пакеты и проверить, могут ли они быть установлены на текущую систему. Пакеты не будут установлены." + +#: urpm-reposync.py:87 +msgid "Do nothing with kernels." +msgstr "Ничего не делать с ядрами." + +#: urpm-reposync.py:88 +msgid "Run self-tests end exit." +msgstr "Запустить самопроверку." + +#: urpm-reposync.py:89 +msgid "Show detailed information about packages are going to be removed or installed (why does it have to be done)" +msgstr "Показывать детальную информацию о пакетах, которые будут удалены и установлены (будут объяснены решения по каждому пакету)." + +#: urpm-reposync.py:93 +msgid "It's senseless to use --quiet without --auto!" +msgstr "Использование --quiet без --auto лишено смысла!" + +#: urpm-reposync.py:305 +msgid "Loading the list of installed packages..." +msgstr "Загрузка списка установленных пакетов..." + +#: urpm-reposync.py:319 +msgid "Duplicating " +msgstr "Дублирующийся пакет " + +#: urpm-reposync.py:320 +msgid "Already found: " +msgstr "Уже найдено: " + +#. print synthesis_list +#: urpm-reposync.py:396 +msgid "Processing medium " +msgstr "Обработка источника " + +#: urpm-reposync.py:414 +#, python-format +msgid "Could not read synthesis file. (File %s not found)" +msgstr "Файл %s не найден. Невозможно обработать synthesis файл." + +#: urpm-reposync.py:484 +msgid "File can not be processed! Url: " +msgstr "Не удалось обработать файл! Url: " + +#: urpm-reposync.py:579 +#, python-format +msgid "Removing %s" +msgstr "Удаление %s" + +#: urpm-reposync.py:586 +msgid "urpm-reposync: error in package %s. Data: %(data)s" +msgstr "urpm-reposync: ошибка при работе с пакетом %s. Данные: %(data)s" + +#: urpm-reposync.py:683 +#, python-format +msgid "\tRequires %s, which will not be installed." +msgstr "\tТребует пакет %s, который не будет установлен." 
+ +#: urpm-reposync.py:689 +#, python-format +msgid "\t%s conflicts with it" +msgstr "\t%s конфликтует с этим пакетом" + +#: urpm-reposync.py:694 +#, python-format +msgid "\tIt conflicts with %s" +msgstr "\tКонфликтует с %s" + +#: urpm-reposync.py:768 +msgid "Some packages can not be installed dew to unresolved dependencies: " +msgstr "Некоторые пакеты не могут быть установлены из-за неразрешенных зависимостей:" + +#: urpm-reposync.py:771 +msgid "Contact repository maintaiers and send them this information, please." +msgstr "Пожалуйста, отправьте разработчикам дистрибутива эту информацию." + +#: urpm-reposync.py:777 +msgid "Downloading files..." +msgstr "Скачивание файлов..." + +#: urpm-reposync.py:807 +msgid "Generating transaction..." +msgstr "Создание транзакции..." + +#: urpm-reposync.py:825 +msgid "Checking dependencies..." +msgstr "Проверка зависимостей..." + +#: urpm-reposync.py:830 +msgid "requires" +msgstr "требует" + +#: urpm-reposync.py:832 +msgid "conflicts with" +msgstr "конфликтует с" + +#: urpm-reposync.py:848 +#, python-format +msgid "Package %(name)s-%(ver)s-%(rel)s %(t)s %(namereq)s%(verreq)s" +msgstr "Пакет %(name)s-%(ver)s-%(rel)s %(t)s %(namereq)s%(verreq)s" + +#: urpm-reposync.py:854 +msgid "There are some unresolved dependencies: " +msgstr "Найдены неразрешенные зависимости: " + +#: urpm-reposync.py:857 +msgid "Packages can not be installed. Please, contact urpm-tools developers and provide this output." +msgstr "Пакеты не могут быть установлены. Пожалуйста, отправьте разработчику весь вывод программы." + +#: urpm-reposync.py:859 +msgid "No errors found in transaction" +msgstr "Ошибок не найдено" + +#: urpm-reposync.py:864 +msgid "Running transaction..." +msgstr "Запуск транзакции..." + +#: urpm-reposync.py:905 +msgid "WARNING: Some libraries are going to be removed because there are only the packages with the other architecture in the repository. Maybe you missed media with the correct architecture?" +msgstr "ПРЕДУПРЕЖДЕНИЕ: Некоторые библиотеки будут удалены, потому что в репозитории присутствуют только эти библиотеки с другой архитектурой. Может быть, нужно добавить источники с правильными архитектурами?" 
+ +#: urpm-reposync.py:946 +#: urpm-reposync.py:981 +#: urpm-reposync.py:1003 +#: urpm-reposync.py:1016 +msgid "Package Name" +msgstr "Имя пакета" + +#: urpm-reposync.py:946 +#: urpm-reposync.py:1003 +#: urpm-reposync.py:1016 +msgid "Current Version" +msgstr "Текущая версия" + +#: urpm-reposync.py:946 +msgid "New Version" +msgstr "Новая версия" + +#: urpm-reposync.py:946 +#: urpm-reposync.py:981 +#: urpm-reposync.py:1003 +#: urpm-reposync.py:1016 +msgid "Arch" +msgstr "Архитектура" + +#: urpm-reposync.py:948 +msgid "medium" +msgstr "источник " + +#: urpm-reposync.py:970 +msgid "The following packages are going to be upgraded:" +msgstr "Следующие пакеты будут обновлены:" + +#: urpm-reposync.py:975 +msgid "The following packages are going to be downgraded:" +msgstr "Версии следующих пакетов будут понижены:" + +#: urpm-reposync.py:980 +msgid "Additional packages are going to be installed:" +msgstr "Дополнительные пакеты будут установлены:" + +#: urpm-reposync.py:981 +msgid "Version" +msgstr "Версия" + +#: urpm-reposync.py:997 +#, python-format +msgid "\tRequired by %s" +msgstr "\tТребуется для %s" + +#: urpm-reposync.py:1002 +msgid "The following packages are going to be removed:" +msgstr "Следующие пакеты будут удалены:" + +#: urpm-reposync.py:1015 +msgid "Packages which do not present in repositories, but do not have to be removed (will be saved):" +msgstr "Пакеты, которые отсутствуют в репозитории, но могут быть сохранены:" + +#: urpm-reposync.py:1022 +#, python-format +msgid "%d packages are going to be downloaded and installed." +msgstr "Пакетов будет скачано и установлено: %d." + +#: urpm-reposync.py:1023 +#, python-format +msgid "%d packages are going to be removed." +msgstr "Пакетов будет удалено: %d." + +#: urpm-reposync.py:1024 +#, python-format +msgid "%s will be downloaded." +msgstr "Данных будет скачано: %s." + +#: urpm-reposync.py:1080 +#, python-format +msgid "\tForced to be removed dew to \"%s\" policy." +msgstr "\tДолжен быть удален из-за правила \"%s\"." + +#: urpm-reposync.py:1108 +msgid "Nothing to do" +msgstr "В системе не требуются изменения" + +#: urpm-reposync.py:1121 +msgid "Do you want to proceed? (y/n): " +msgstr "Хотите продолжить? (д/н): " + +#: urpm-reposync.py:1126 +msgid "y" +msgstr "д" + +#: urpm-reposync.py:1126 +msgid "yes" +msgstr "да" + +#: urpm-reposync.py:1128 +msgid "n" +msgstr "н" + +#: urpm-reposync.py:1128 +msgid "no" +msgstr "нет" + +#: urpm-repograph.py:86 +msgid "Tool for generating dependency graph for REPOSITORY packages." +msgstr "Инструмент для создания графа зависимостей для пакетов из репозитория." + +#: urpm-repograph.py:90 +msgid "Search for cross-repository references in CROSS_REPO(s) repositories." +msgstr "Искать зывисимости между репозиториями в репозиториями CROSS_REPO" + +#: urpm-repograph.py:93 +msgid "Hide service messages. (About progress status etc.)" +msgstr "Не показывать служебные сообщения. (О прогрессе и т. д.)" + +#: urpm-repograph.py:95 +msgid "Show warnings. (About unprovided packages etc.)" +msgstr "Показывать предупреждения (О зависимостях, не предоставляемых ни одним пакетом из репозитория и т. д.)" + +#: urpm-repograph.py:98 +msgid "Process \"requires\" package dependencies. Used by default." +msgstr "Обрабатывать \"requires\" пакетные зависимости. Используется по умолчанию." + +#: urpm-repograph.py:100 +msgid "Process \"suggests\" package dependencies. If used without --requires then only suggests dependencies are processed." +msgstr "Обрабатывать \"suggests\" пакетные зависимости. 
Если используется без --requires, то будут обрабатываться только мягкие зависимости." + +#: urpm-repograph.py:103 +msgid "Process file dependencies." +msgstr "Обработка зависимостей по файлам..." + +#: urpm-repograph.py:105 +msgid "Show unprovided dependencies." +msgstr "Показать зависимости, не предоставленные ни одним пакетом из репозитория." + +#: urpm-repograph.py:109 +msgid "Search for packages, which are required by package PKG (PKG is a file name or package name)" +msgstr "Искать пакеты, которые нужны пакету PKG. (PKG - это имя пакета или файла)" + +#: urpm-repograph.py:111 +msgid "Search for packages, which requires package PKG (PKG is a file name or package name)" +msgstr "Искать пакеты, которым нужен пакет PKG. (PKG - это имя пакета или файла)" + +#: urpm-repograph.py:115 +msgid "Search for all simple loops of package dependecies." +msgstr "Поиск всех простых циклов в пакетных зависимостях." + +#: urpm-repograph.py:117 +msgid "Search for alternative packages providing the same feature." +msgstr "Поиск альтернативных пакетов, предоставляющих одну и ту же зависимость." + +#: urpm-repograph.py:119 +msgid "Search for all broken packages and anything beetween them" +msgstr "Искать все пакеты с нарушенными зависимостями и цепочки пакетов между ними" + +#: urpm-repograph.py:121 +msgid "Output each loop or each alternative in different file. Ignored if --loops or --alternatives options are not present. OUTPUT_FILE (if present) is tracted as folder name for new files in that case." +msgstr "Выводить каждый цикл или каждую альтернативу в отдельный файл. Игнорируется, если указано --loops или --alternatives. OUTPUT_FILE (если указан) в этом случае рассматривается как имя директории для новых файлов." + +#: urpm-repograph.py:127 +msgid "Change graph output to \"OUTPUT_FILE\". STDOUT by default." +msgstr "Перенаправить вывод графа в файл \"OUTPUT_FILE\". По умолчанию используется STDOUT." + +#: urpm-repograph.py:129 +msgid "Do not output graph. Tool will not start working if --quiet, --nograph are present and --verbose is not. (If there is nothing to output - then nothing has to be done.)" +msgstr "Не выводить граф. Инструмент не будет ничего делать, если включены --quiet и --nograph, а verbose нет. 
(Если ничего не надо выводить, то и не надо ничего делать.)" + +#: urpm-repograph.py:157 +#: urpm-repodiff.py:125 +#, python-format +msgid "Error: URL to repository \"%s\" is incorrect" +msgstr "Ошибка: Неверный URL репозитория \"%s\"" + +#: urpm-repograph.py:179 +#: urpm-repodiff.py:147 +#, python-format +msgid "Error: directory %s does not exist" +msgstr "Ошибка: директория %s не существует" + +#: urpm-repograph.py:189 +#: urpm-repodiff.py:157 +#, python-format +msgid "Error: \"%s\" is not correct url, path or name of repository" +msgstr "Ошибка: \"%s\" не является корректным URL, путем или именем репозитория" + +#: urpm-repograph.py:216 +#, python-format +msgid "Error: directory %s already exists" +msgstr "Ошибка: директория %s уже существует" + +#: urpm-repograph.py:222 +#: urpm-repograph.py:237 +#: urpm-repodiff.py:183 +#, python-format +msgid "Error: File %s already exists" +msgstr "Ошибка: Файл %s уже существует" + +#: urpm-repograph.py:229 +#, python-format +msgid "Error: directory %s was not created" +msgstr "Ошибка: директория %s не была создана" + +#: urpm-repograph.py:246 +#: urpm-repodiff.py:192 +#, python-format +msgid "Error: File %s cannot be created" +msgstr "Ошибка: Не удалось создать файл %s" + +#: urpm-repograph.py:250 +#: urpm-repodiff.py:196 +#, python-format +msgid "Error: Path %s does not exist." +msgstr "Ошибка: Путь %s не существует." + +#: urpm-repograph.py:262 +#: urpm-repodiff.py:218 +#, python-format +msgid "getting file %s from " +msgstr "получение файла %s из " + +#: urpm-repograph.py:267 +#: urpm-repodiff.py:223 +#, python-format +msgid "Error: file %s was not copied" +msgstr "Ошибка: файл %s был скопирован" + +#: urpm-repograph.py:275 +#: urpm-repodiff.py:231 +#, python-format +msgid "Error: file %(from)s was not downloaded to %(to)s" +msgstr "Ошибка: файл %(from)s не был скачан в %(to)s" + +#: urpm-repograph.py:288 +#: urpm-repodiff.py:272 +msgid "Error: file not found: " +msgstr "Ошибка: файл не найден: " + +#: urpm-repograph.py:293 +#: urpm-repodiff.py:277 +#, python-format +msgid "Error: cannot rename file %(from)s to %(to)s" +msgstr "Ошибка: не удалось переименовать файл %(from)s в %(to)s" + +#: urpm-repograph.py:297 +#: urpm-repograph.py:313 +#: urpm-repograph.py:543 +#: urpm-repodiff.py:281 +#, python-format +msgid "Error: file %s is missing." +msgstr "Ошибка: файл %s отсутствует." + +#: urpm-repograph.py:301 +#: urpm-repodiff.py:285 +#, python-format +msgid "file %(from)s was renamed to %(to)s" +msgstr "файл %(from)s был переименован в %(to)s" + +#: urpm-repograph.py:311 +#: urpm-repograph.py:541 +#: urpm-repodiff.py:294 +#: urpm-repodiff.py:297 +msgid "unpacking file " +msgstr "распаковка файла " + +#: urpm-repograph.py:371 +#: urpm-repodiff.py:410 +msgid "REPODIFF-Warning: strange : " +msgstr "REPODIFF-Предупреждение: необычное поле : " + +#: urpm-repograph.py:406 +#: urpm-repodiff.py:351 +#, python-format +msgid "Error: Synthesis file %s was not found." +msgstr "Ошибка: Synthesis файл %s не найден." + +#: urpm-repograph.py:409 +msgid "Parsing synthesis." +msgstr "Обработка synthesis файла." 
+ +#: urpm-repograph.py:435 +#, python-format +msgid "Warning: Unexpected sign %(sign)s in 'provides' section of %(of)s" +msgstr "Предупреждение: неожиданный знак %(sign)s в 'provides' секции %(of)s" + +#: urpm-repograph.py:451 +#: urpm-repodiff.py:380 +msgid "Error: Failed to open synthesis file " +msgstr "Ошибка: Не удалось открыть synthesis файл" + +#: urpm-repograph.py:555 +msgid "Reading fileslist" +msgstr "Чтение файла со списком" + +#: urpm-repograph.py:557 +msgid "Error: Can't find fileslist " +msgstr "Ошибка: Не удалось найти файл со списком" + +#: urpm-repograph.py:561 +msgid "Error: Can't read fileslist " +msgstr "Ошибка: Не удалось прочитать файл со списком" + +#: urpm-repograph.py:565 +msgid "Error: Wrong fileslist." +msgstr "Ошибка: Неправильный файл со списком." + +#: urpm-repograph.py:578 +msgid "Error: Corrupted fileslist" +msgstr "Ошибка: Поврежденный файл со списком" + +#: urpm-repograph.py:608 +msgid "Warning: cross-repository dependency: " +msgstr "Предупреждение: пакет из одного репозиттория зависит от пакета из другого: " + +#: urpm-repograph.py:612 +#: urpm-repograph.py:662 +msgid "Warning: package has self-dependecies: " +msgstr "Предупреждение: пакет зависит от себя: " + +#: urpm-repograph.py:658 +#, python-format +msgid "" +"Warning: cross-repository dependency:\n" +" package %(pkg)s is dependent from\n" +" <- %(from)s located in another repository" +msgstr "" +"Предупреждение: зависимость между репозиториями:\n" +" пакет %(pkg)s зависит от\n" +" <- %(from)s, расположенного в другом репозитории" + +#: urpm-repograph.py:691 +#, python-format +msgid "Warning: needed version is absent <%(ver)s> %(rel)s required by package" +msgstr "Предупреждение: отсутствует версия <%(ver)s> %(rel)s, требуемая пакетом" + +#: urpm-repograph.py:708 +#, python-format +msgid "Warning: Package %(pkg)s unprovided by %(by)s" +msgstr "Предупреждение: Файл %(by)s требуется пакету %(pkg)s, но не предоставляется ни одним пакетом" + +#: urpm-repograph.py:740 +msgid "Finding dependencies." +msgstr "Поиск зависимостей." + +#: urpm-repograph.py:749 +#, python-format +msgid "" +"Warning: can't find <%(ask)s> required by package\n" +" <%(pkg)s>" +msgstr "" +"Предупреждение: не удалось найти <%(ask)s>, требуемый пакетом\n" +" <%(pkg)s>" + +#: urpm-repograph.py:812 +msgid "Total cross-referenced packages: " +msgstr "Всего пакетов с кросс-платформенными зависимостями: " + +#: urpm-repograph.py:816 +msgid "Total unprovided packages: " +msgstr " Всего пакетов с ничем не предоставленными зависимостями: " + +#: urpm-repograph.py:833 +msgid "Calculating colors." +msgstr "Вычисление цветов." + +#: urpm-repograph.py:1112 +msgid "Non-cycle nodes removed: " +msgstr "Нецикличных узлов удалено: " + +#: urpm-repograph.py:1113 +msgid "Cyclic packages: " +msgstr "Зацикленных пакетов осталось: " + +#: urpm-repograph.py:1130 +#, python-format +msgid "Worktime: %s seconds" +msgstr "Время работы: %s секунд" + +#: urpm-repograph.py:1136 +msgid "Searching loops." +msgstr "Поиск циклов." + +#: urpm-repograph.py:1140 +#: urpm-repograph.py:1188 +msgid "End of search." +msgstr "Конец поиска." + +#: urpm-repograph.py:1141 +#, python-format +msgid "Loops search: %s seconds" +msgstr "Поиск циклов: %s секунд" + +#: urpm-repograph.py:1145 +#, python-format +msgid "Total: %s loops." +msgstr "Всего: %s циклов." + +#: urpm-repograph.py:1151 +msgid "Loop " +msgstr "Цикл " + +#: urpm-repograph.py:1168 +msgid "Searching alternatives." +msgstr "Поиск альтернатив." 
+ +#: urpm-repograph.py:1180 +#, python-format +msgid "Total: %d alternatives." +msgstr "Всего: %d альтернатив." + +#: urpm-repograph.py:1182 +msgid "Alternative " +msgstr "Альтернатива " + +#: urpm-repograph.py:1182 +msgid " is provided by:" +msgstr " предоставляется:" + +#: urpm-repograph.py:1260 +msgid "Searching for broken packages." +msgstr "Поиск нарушенных зависимостей." + +#: urpm-repograph.py:1266 +msgid "Searching for packages REQUIRED by " +msgstr "Поиск пакетов, требуемых " + +#: urpm-repograph.py:1268 +msgid "Searching for packages that REQUIRE " +msgstr "Поиск пакетов, требующих " + +#: urpm-repograph.py:1276 +#, python-format +msgid "Level %d dependency." +msgstr "Зависимость уровня %d." + +#: urpm-repograph.py:1355 +msgid "Remaking structures." +msgstr "Пересоздание структур." + +#: urpm-repograph.py:1367 +msgid "Error: can't find package name or filename \"" +msgstr "Ошибка: Не удалось найти имя пакета или файла \"" + +#: urpm-repograph.py:1401 +msgid "Do not use -q/--quiet and -n/--nograph without -v/--verbose together." +msgstr "Не используйте -q/--quiet совместно с -n/--nograph без -v/--verbose." + +#: urpm-repograph.py:1402 +msgid "That way there is no information to output anywhere. Nothing will be done." +msgstr "В этом случае нет информации, которую можно вывести. Ничего не будет сделано." + +#: urpm-repograph.py:1405 +msgid "Do not use -u/--unprovided and -b/--broken options together." +msgstr "Не используйте -u/--unprovided и -b/--broken вместе." + +#: urpm-repograph.py:1406 +msgid "-b does everything that do -u and a little more." +msgstr "-b делает все то же, что и -u, и немного больше." + +#: urpm-downloader.py:91 +msgid "A tool for downloading RPMs and SRPMs from URPM-based linux repositories" +msgstr "Инструмент, позволяющий скачивать RPM и SRPM пакеты из URPM репозиториев" + +#: urpm-downloader.py:92 +msgid "If none of the options -b, -s, -d turned on, it will be treated as -b" +msgstr "Если ни одна из опций -b, -s или -d не указана, то по умолчанию включается -b" + +#: urpm-downloader.py:93 +msgid "Package name(s) to download. It can contain not only package names, but (S)RPM files too. In this case package name extracted from this file will be used" +msgstr "Имена пакетов для скачивания. Можно так же использовать имена существующих (S)RPM файлов, в этом случае информация об имени пакета будет извлечена из них." + +#: urpm-downloader.py:94 +msgid "Instead of downloading files, list the URLs that would be processed" +msgstr "Выводить их URL файлов, но не скачивать их (в случае использования совместно с -a или -r src.rpm файл все равно будет скачан, так как без этого невозможно разрешить сборочные зависимости)" + +#: urpm-downloader.py:95 +msgid "When downloading RPMs, resolve dependencies and also download the required packages, if they are not already installed" +msgstr "При скачивании пакета разрешать зависимости и скачивать все необходимые пакеты, но только если они не установлены в системе." + +#: urpm-downloader.py:96 +msgid "When downloading RPMs, resolve dependencies and also download the required packages, even if they are already installed" +msgstr "При скачивании пакета разрешать зависимости и скачивать все необходимые пакеты, даже если они уже установлены в системе." 
+ +#: urpm-downloader.py:97 +msgid "Download binary RPMs" +msgstr "Скачивать бинарные пакеты (RPM)" + +#: urpm-downloader.py:98 +msgid "Download the source RPMs (SRPMs)" +msgstr "Скачать пакеты с исходными кодами (SRPM)" + +#: urpm-downloader.py:99 +msgid "Download debug RPMs" +msgstr "Скачать пакеты с отладочной информацией" + +#: urpm-downloader.py:100 +msgid "Download debug RPMs and install" +msgstr "Скачать пакеты с отладочной информацией и установить" + +#: urpm-downloader.py:103 +msgid "Quiet operation." +msgstr "Ничего не печатать в консоль" + +#: urpm-downloader.py:106 +msgid "Exclude package(s) by regex" +msgstr "Исключить пакеты по регулярному выражению" + +#: urpm-downloader.py:107 +msgid "Try to continue when error occurs" +msgstr "Пытаться игнорировать ошибки" + +#: urpm-downloader.py:108 +msgid "If the file already exists, download it again and overwrite the old one" +msgstr "Если файл уже существует, скачать его заново и заменить." + +#: urpm-downloader.py:109 +msgid "If package dependency can be satisfied by several packages, download all of them (by default, only the first one is downloaded)" +msgstr "Скачивать все пакеты, которые могут удовлетворить зависимости для данного пакета (по умолчанию скачивается лишь один)." + +#: urpm-downloader.py:110 +msgid "If different versions of package present in repository, process them all" +msgstr "Если в репозитории присутствует несколько версий пакета, обработать их все." + +#. arg_parser.add_argument('--self-test', action='store_true', help="Test urpm-downloader end exit") +#: urpm-downloader.py:113 +msgid "Specify a destination directory for the download" +msgstr "Директория, в которую будут помещены скачаные файлы" + +#: urpm-downloader.py:130 +msgid "Use of --verbose with --quiet is senseless. Turning verbose mode off." +msgstr "Использование --verbose совместно с --quiet лишено смысла. Опция --verbose будет проигнорирована." + +#: urpm-downloader.py:134 +msgid "Note that resolving of SRPM dependencies is not possible until SRPM downloaded. So, it will be done despite --urls" +msgstr "Помните, что разрешение сборочных зависимостей SRPM невозможно без скачивания этого файла, поэтому SRPM файл все равно будет скачан несмотря на --urls" + +#: urpm-downloader.py:375 +msgid "* Downloaded: " +msgstr "* Скачано: " + +#: urpm-downloader.py:377 +msgid "* File exists, skipping: " +msgstr "* Файл существует, пропускаю: " + +#: urpm-downloader.py:476 +msgid "Can not download SRPM for package" +msgstr "Не удалось скачать SRPM файл для пакета" + +#: urpm-downloader.py:499 +#: urpm-downloader.py:532 +msgid "Can not download RPM" +msgstr "Не удалось скачать RPM файл" + +#: urpm-downloader.py:504 +msgid "Resolving debug-info packages..." +msgstr "Поиск пакетов с отладочной информацией..." + +#. urpmq output. RU: Нет пакета с названием +#: urpm-downloader.py:509 +msgid "No package named " +msgstr "Нет пакета с именем " + +#: urpm-downloader.py:533 +msgid "Maybe you need to update urpmi database (urpmi.update -a)?" +msgstr "Может быть, нужно обновить базу urpmi (urpmi.update -a)?" + +#: urpm-downloader.py:542 +msgid "Installing " +msgstr "Установка " + +#. return code is not 0 +#: urpm-downloader.py:553 +#, python-format +msgid "Debug package for '%s' not found" +msgstr "Для пакета %s не найдено пакета с отладочной информацией." 
+
+#: urpm-downloader.py:602
+msgid "Parameters that end with '.rpm' seem to be local files, but the folowing files do not exist: "
+msgstr "Параметры, заканчивающиеся на '.rpm', расцениваются как файлы, но следующие файлы не существуют: "
+
+#: urpm-downloader.py:627
+msgid "Searching src.rpm file(s) in repository..."
+msgstr "Поиск подходящих src.rpm файлов в репозитории..."
+
+#: urpm-downloader.py:629
+msgid "Downloading src.rpm file(s)..."
+msgstr "Скачивание src.rpm файлов..."
+
+#: urpm-downloader.py:659
+msgid "Resolving build dependencies..."
+msgstr "Разрешение сборочных зависимостей..."
+
+#: urpm-downloader.py:661
+msgid "Resolving dependencies..."
+msgstr "Разрешение зависимостей..."
+
+#: urpm-downloader.py:663
+#, python-format
+msgid "Resolved %d packages"
+msgstr "Найдено пакетов: %d"
+
+#: urpm-downloader.py:665
+msgid "Nothing to download"
+msgstr "Нечего скачивать"
+
+#: urpm-repomanage.py:56
+#, python-format
+msgid "Error accessing directory %(path)s, %(e)s"
+msgstr "Ошибка доступа к директории %(path)s: %(e)s"
+
+#: urpm-repomanage.py:86
+msgid "manage a directory of rpm packages and report newest or oldest packages"
+msgstr "Обработать директорию с rpm пакетами и сообщить о наиболее новых и старых версиях"
+
+#: urpm-repomanage.py:92
+msgid "path to directory with rpm packages"
+msgstr "путь к директории с rpm пакетами"
+
+#: urpm-repomanage.py:95
+msgid "print the older packages"
+msgstr "напечатать более старые пакеты"
+
+#: urpm-repomanage.py:97
+msgid "print the newest packages (this is the default behavior)"
+msgstr "напечатать наиболее новые пакеты (поведение по умолчанию)"
+
+#: urpm-repomanage.py:99
+msgid "remove older packages"
+msgstr "удалить более старые пакеты"
+
+#: urpm-repomanage.py:101
+msgid "space separated output, not newline"
+msgstr "вывод разделяется пробелами, а не переводами строки"
+
+#: urpm-repomanage.py:103
+msgid "number of newest packages to keep - defaults to 1"
+msgstr "количество наиболее новых пакетов, которые надо оставить - по умолчанию 1"
+
+#: urpm-repomanage.py:105
+msgid "do not check package payload signatures/digests"
+msgstr "не проверять встроенные подписи пакетов"
+
+#: urpm-repomanage.py:108
+msgid "be completely quiet"
+msgstr "ничего не печатать"
+
+#: urpm-repomanage.py:110
+msgid "be verbose - say which packages are decided to be old and why (this info is dumped to STDERR)"
+msgstr "показывать дополнительную информацию - какие пакеты признаны устаревшими и почему (информация выводится в STDERR)"
+
+#: urpm-repomanage.py:131
+msgid "No files to process"
+msgstr "Нет файлов для обработки"
+
+#: urpm-repomanage.py:144
+#, python-format
+msgid "Error opening pkg %(pkg)s: %(err)s"
+msgstr "Ошибка открытия файла %(pkg)s: %(err)s"
+
+#: urpm-repomanage.py:195
+#: urpm-repomanage.py:221
+msgid "Dropped "
+msgstr "Убран "
+
+#: urpm-repomanage.py:196
+#: urpm-repomanage.py:222
+msgid " superseded by: "
+msgstr " заменен на: "
+
+#: urpm-repodiff.py:83
+msgid "Tool for comparing sets of repositories."
+msgstr "Инструмент для сравнения наборов репозиториев."
+
+#: urpm-repodiff.py:85
+msgid "URL or PATH to old repositories"
+msgstr "URL или пути к старым репозиториям"
+
+#: urpm-repodiff.py:87
+msgid "URL or PATH to new repositories"
+msgstr "URL или пути к новым репозиториям"
+
+#: urpm-repodiff.py:89
+msgid "Show differences in package sizes."
+msgstr "Показывать различия в размерах пакетов."
+
+#: urpm-repodiff.py:91
+msgid "Simple output format."
+msgstr "Упрощенный формат вывода."
+ +#: urpm-repodiff.py:93 +msgid "Hide service messages." +msgstr "Не показывать служебные сообщения." + +#: urpm-repodiff.py:95 +msgid "Show changelog difference." +msgstr "Показывать разницу списков изменений." + +#: urpm-repodiff.py:97 +#, python-format +msgid "Output in HTML format, if --output is not present \"%s\" will be created in current directory. --size, --simple and --changelog options are ignored." +msgstr "Вывод в формате HTML. Если --output не указан, то файл \"%s\" будет создан в текущей директории. Опции --size, --simple и --changelog будут игнорироваться." + +#: urpm-repodiff.py:101 +msgid "Change standart output to \"OUTPUT_FILE\"." +msgstr "Перенаправить вывод в \"OUTPUT_FILE\"" + +#: urpm-repodiff.py:174 +#, python-format +msgid "Error: Cannot open %s for writing." +msgstr "Ошибка: Не удалось открыть %s для записи." + +#: urpm-repodiff.py:354 +msgid "Parsing synthesis" +msgstr "Чтение synthesis файла" + +#: urpm-repodiff.py:389 +msgid "REPODIFF-Warning: strange format of or : " +msgstr "REPODIFF-Предупреждение: необычный формат или : " + +#: urpm-repodiff.py:527 +msgid "New package: " +msgstr "Новый пакет: " + +#: urpm-repodiff.py:542 +msgid "Generating obsoleted list." +msgstr "Создание списка устаревших пакетов." + +#: urpm-repodiff.py:601 +msgid "Removed package: " +msgstr "Удален пакет: " + +#: urpm-repodiff.py:609 +msgid " Obsoleted by " +msgstr " Устарел из-за добавления " + +#: urpm-repodiff.py:630 +msgid "Reading changelog" +msgstr "Чтение списка изменений" + +#: urpm-repodiff.py:632 +msgid "Error: Can't find changelog " +msgstr "Ошибка: Не удалось найти список изменений " + +#: urpm-repodiff.py:636 +msgid "Error: Can't read changelog " +msgstr "Ошибка: Не удалось прочитать список изменений " + +#: urpm-repodiff.py:640 +msgid "Error: Wrong changelog." +msgstr "Ошибка: Неправильный список изменений." + +#: urpm-repodiff.py:662 +msgid "Error: Corrupted changelog" +msgstr "Ошибка: Поврежденный список изменений" + +#: urpm-repodiff.py:756 +msgid "Generating changes list." +msgstr "Создание списка изменений." + +#: urpm-repodiff.py:770 +#: urpm-repodiff.py:773 +#, python-format +msgid "REPODIFF-Warning: Package %s was not described in changelogs.xml" +msgstr "REPODIFF-Предупреждение: Пакет %s не описан в changelogs.xml" + +#: urpm-repodiff.py:771 +msgid "REPODIFF-Warning: Changelogs of a package are absent in \"new\" repository." +msgstr "REPODIFF-Предупреждение: В репозитории \"новый\" отсутствует список изменений пакета." + +#: urpm-repodiff.py:774 +msgid "REPODIFF-Warning: Changelogs of a package are absent." +msgstr "REPODIFF-Предупреждение: У пакета отсутствует список изменений." 
+
+#: urpm-repodiff.py:800
+#, python-format
+msgid "Package %s has no changelog info\n"
+msgstr "Пакет %s не имеет списка изменений\n"
+
+#: urpm-repodiff.py:818
+msgid ""
+"\n"
+"\n"
+"Updated packages:\n"
+"\n"
+msgstr ""
+"\n"
+"\n"
+"Обновленные пакеты:\n"
+"\n"
+
+#: urpm-repodiff.py:825
+msgid " ***DOWNGRADED***\n"
+msgstr " ***УСТАНОВЛЕНА ПРЕДЫДУЩАЯ ВЕРСИЯ***\n"
+
+#: urpm-repodiff.py:834
+#, python-format
+msgid ""
+"Size Change: %d bytes\n"
+"\n"
+msgstr ""
+"Изменение размера: %d байт\n"
+"\n"
+
+#: urpm-repodiff.py:844
+msgid " Total added packages: "
+msgstr " Всего добавлено пакетов: "
+
+#: urpm-repodiff.py:847
+msgid " Total removed packages: "
+msgstr " Всего удалено пакетов: "
+
+#: urpm-repodiff.py:856
+msgid " Total updated packages: "
+msgstr " Всего обновлено пакетов: "
+
+#: urpm-repodiff.py:858
+msgid " Total downgraded packages: "
+msgstr " Всего пакетов с пониженной версией: "
+
+#: urpm-repodiff.py:1316
+msgid "Creating HTML file."
+msgstr "Создание HTML файла."
+
+#: urpm-package-cleanup.py:58
+msgid "Find problems in the rpmdb of system and correct them"
+msgstr "Найти проблемы в локальной базе RPM и исправить их"
+
+#: urpm-package-cleanup.py:62
+msgid "Query format to use for output."
+msgstr "Формат вывода."
+
+#: urpm-package-cleanup.py:65
+msgid "Use non-interactive mode"
+msgstr "Работать в неинтерактивном режиме"
+
+#: urpm-package-cleanup.py:68
+msgid "Orphans Options"
+msgstr "Осиротевшие пакеты"
+
+#: urpm-package-cleanup.py:71
+msgid "List installed packages which are not available from currently configured repositories"
+msgstr "Перечислить пакеты, недоступные в настроенных на текущий момент репозиториях"
+
+#: urpm-package-cleanup.py:75
+msgid "Use only update media. This means that urpmq will search and resolve dependencies only in media marked as containing updates (e.g. which have been created with \"urpmi.addmedia --update\")."
+msgstr "Использовать только источники обновлений. Это означает, что urpmq будет искать и разрешать зависимости, только используя источники, помеченные как источники обновлений (например, которые были добавлены при помощи \"urpmi.addmedia --update\")."
+
+#: urpm-package-cleanup.py:80
+msgid "Select specific media to be used, instead of defaulting to all available media (or all update media if --update is used). No rpm will be found in other media."
+msgstr "Выбрать особые источники вместо того, чтобы использовать все доступные по умолчанию источники (или все источники обновлений, если указан флаг --update). В других источниках пакеты искаться не будут."
+
+#: urpm-package-cleanup.py:85
+msgid "Do not use the specified media."
+msgstr "Не использовать указанные источники."
+
+#: urpm-package-cleanup.py:87
+msgid "Dependency Problems Options"
+msgstr "Проблемы с зависимостями"
+
+#: urpm-package-cleanup.py:90
+msgid "List dependency problems in the local RPM database"
+msgstr "Перечислить проблемы с зависимостями в локальной базе RPM"
+
+#: urpm-package-cleanup.py:93
+msgid "List missing suggestions of installed packages"
+msgstr "Перечислить недостающие мягкие зависимости установленных пакетов"
+
+#: urpm-package-cleanup.py:96
+msgid "Duplicate Package Options"
+msgstr "Дублирующиеся пакеты"
+
+#: urpm-package-cleanup.py:99
+msgid "Scan for duplicates in your rpmdb"
+msgstr "Найти дубликаты в локальной базе RPM"
+
+#: urpm-package-cleanup.py:102
+msgid "Scan for duplicates in your rpmdb and remove older "
+msgstr "Найти дубликаты в локальной базе RPM и удалить более старые"
+
+#: urpm-package-cleanup.py:105
+msgid "disable rpm scriptlets from running when cleaning duplicates"
+msgstr "отключить скриптлеты rpm при очистке дубликатов"
+
+#: urpm-package-cleanup.py:107
+msgid "Leaf Node Options"
+msgstr "Листовые узлы"
+
+#: urpm-package-cleanup.py:110
+msgid "List leaf nodes in the local RPM database"
+msgstr "Перечислить листовые узлы в локальной базе RPM"
+
+#: urpm-package-cleanup.py:113
+msgid "list all packages leaf nodes that do not match leaf-regex"
+msgstr "перечислить все пакеты-листовые узлы, имя которых не подходит под регулярное выражение"
+
+#: urpm-package-cleanup.py:117
+msgid "A package name that matches this regular expression (case insensitively) is a leaf"
+msgstr "Считать листовым узлом пакет, имя которого подходит под регулярное выражение (регистронезависимо)."
+
+#: urpm-package-cleanup.py:121
+msgid "do not list development packages as leaf nodes"
+msgstr "не считать devel пакеты листовыми узлами"
+
+#: urpm-package-cleanup.py:124
+msgid "do not list packages with files in a bin dirs as leaf nodes"
+msgstr "не считать пакеты, имеющие файлы в bin директориях, листовыми узлами"
+
+#: urpm-package-cleanup.py:127
+msgid "Old Kernel Options"
+msgstr "Старые ядра"
+
+#: urpm-package-cleanup.py:130
+msgid "Remove old kernel and kernel-devel packages"
+msgstr "Удалить старые ядра и их devel пакеты"
+
+#: urpm-package-cleanup.py:133
+msgid "Number of kernel packages to keep on the system (default 2)"
+msgstr "Количество пакетов с ядрами, которые надо сохранить в системе (по умолчанию 2)"
+
+#: urpm-package-cleanup.py:137
+msgid "Do not remove kernel-devel packages when removing kernels"
+msgstr "Не удалять kernel-devel пакеты при удалении ядер"
+
+#: urpm-package-cleanup.py:306
+#, python-format
+msgid "Warning: neither single nor multi lib arch: %s "
+msgstr "Предупреждение: некорректная архитектура: %s "
+
+#: urpm-package-cleanup.py:417
+#, python-format
+msgid "Not removing kernel %(kver)s-%(krel)s because it is the running kernel"
+msgstr "Ядро %(kver)s-%(krel)s не будет удалено, потому что оно запущено в данный момент"
+
+#: urpm-package-cleanup.py:447
+#, python-format
+msgid "Package %(qf)s %(prob)s"
+msgstr "Пакет %(qf)s %(prob)s"
+
+#: urpm-package-cleanup.py:450
+msgid "Missing suggests:"
+msgstr "Недостающие мягкие зависимости:"
+
+#: urpm-package-cleanup.py:458
+msgid "No Problems Found"
+msgstr "Проблем не найдено"
+
+#: urpm-package-cleanup.py:473
+msgid "Error: Cannot remove kernels as a user, must be root"
+msgstr "Ошибка: Невозможно удалить ядро, нужны права root."
+
+#: urpm-package-cleanup.py:476
+msgid "Error: should keep at least 1 kernel!"
+msgstr "Ошибка: нужно оставить хотя бы одно ядро!"
+
+#: urpm-package-cleanup.py:529
+msgid "Error: Cannot remove packages as a user, must be root"
+msgstr "Ошибка: невозможно удалить пакет, нужны права root."
+
+#~ msgid "Running transaction..."
+#~ msgstr "Запуск транзакции..."
+
+#~ msgid "Downloading packages..."
+#~ msgstr "Скачивание пакетов..."
+
+#~ msgid "Could not download packages. Urpm-download output: "
+#~ msgstr "Не удалось скачать пакеты. Вывод urpm-downloader: "
+
+#~ msgid "Output in HTML format, if --output is not present HTML will be created in current directory. --size and --simple options are ignored."
+#~ msgstr "Вывод в формате HTML. Если --output не указан, то HTML файл будет создан в текущей директории. Опции --size и --simple будут игнорироваться."
+
+#~ msgid "input is not correct url, path or name of repository"
+#~ msgstr "Введенная строка не является корректным URL, путем или именем репозитория"
+
+#~ msgid "getting file %s from"
+#~ msgstr "получение файла %s из"
+
+#~ msgid "Error: file %s was not downloaded"
+#~ msgstr "Ошибка: файл %s не был скачан"
+
+#~ msgid "file %(from)s was renamed to %(to)s"
+#~ msgstr "файл %(from)s был переименован в %(to)s"
+
+#~ msgid "Error: cannot rename file %(from)s to %(to)s"
+#~ msgstr "Ошибка: не удалось переименовать файл %(from)s в %(to)s"
+
+#~ msgid "Output in HTML format, if --output is not present"
+#~ msgstr "Вывод в формате HTML, если --output не указан"
+
+#~ msgid "usage: "
+#~ msgstr "использование: "
+
+#~ msgid ""
+#~ "URPM Repos Closure Checker [_1] for Mandriva Linux\n"
+#~ "A tool for checking closure of a set of RPM packages\n"
+#~ "Copyright (C) 2012 ROSA Laboratory\n"
+#~ "License: GNU GPL\n"
+#~ "\n"
+#~ "Usage: [_2] [options]\n"
+#~ "Example: [_2] --hdlist=hdlist.txt\n"
+#~ "\n"
+#~ "More info: [_2] --help\n"
+#~ msgstr ""
+#~ "URPM Repos Closure Checker [_1] для Mandriva Linux\n"
+#~ "Инструмент для определения замкнутости набора RPM пакетов\n"
+#~ "Copyright (C) 2012 Лаборатория РОСА\n"
+#~ "Лицензия: GNU GPL\n"
+#~ "\n"
+#~ "Использование: [_2] [options]\n"
+#~ "Пример: [_2] --hdlist=hdlist.txt\n"
+#~ "\n"
+#~ "Больше информации: [_2] --help\n"
+
+#~ msgid ""
+#~ "\n"
+#~ "NAME:\n"
+#~ "  URPM Repos Closure Checker 1.0 for Mandriva Linux\n"
+#~ "  A tool for checking closure of a set of RPM packages\n"
+#~ "\n"
+#~ "USAGE:\n"
+#~ "  [_1] --hdlist=hdlist.txt\n"
+#~ "  [_1] --hdlist=http://mirror.yandex.ru/mandriva/.../synthesis.hdlist.cz\n"
+#~ "  [_1] --dir=rpms/ --static --file-deps=file-deps.txt\n"
+#~ "  [_1] --list=list.txt --dynamic\n"
+#~ "\n"
+#~ "OPTIONS:\n"
+#~ "  -h|-help\n"
+#~ "      Print this help.\n"
+#~ "\n"
+#~ "  -v|-version\n"
+#~ "      Print version information.\n"
+#~ "\n"
+#~ "  -hdlist <path>\n"
+#~ "      Path or URL of HDlist (synthesis) to check.\n"
+#~ "\n"
+#~ "  -d|-dir <path>\n"
+#~ "      The directory with RPM packages to check.\n"
+#~ "\n"
+#~ "  -l|-list <path>\n"
+#~ "      The list of packages to check.\n"
+#~ "\n"
+#~ "  -add|-update <path>\n"
+#~ "      The directory with RPM packages that should\n"
+#~ "      be added to the repository or updated.\n"
+#~ "\n"
+#~ "  -file-deps <path>\n"
+#~ "      Read file-deps to ignore some unresolved\n"
+#~ "      dependencies.\n"
+#~ "\n"
+#~ "  -s|-static\n"
+#~ "      Check statically if all required dependencies are\n"
+#~ "      satisfied by provided dependencies in the set of\n"
+#~ "      RPM packages.\n"
+#~ "\n"
+#~ "  -dynamic\n"
+#~ "      Install a set of RPM packages to the local chroot\n"
+#~ "      and check if extra packages were installed.\n"
+#~ "\n"
+#~ "  -r|-check-release\n"
+#~ "      Check installation media (DVD).\n"
+#~ "\n"
+#~ "  -sign|-check-signature\n"
+#~ "      Validate package signatures.\n"
+#~ "\n"
+#~ "  -noclean\n"
+#~ "      Do not clean urpmi cache.\n"
+#~ "\n"
+#~ "  -root <path>\n"
+#~ "      Where to install packages.\n"
+#~ "      Default:\n"
+#~ "        /tmp/...\n"
+#~ "\n"
+#~ "EXIT CODES:\n"
+#~ "  0 - Success. The tool has run without any errors\n"
+#~ "  non-zero - Failed or the tool has run with errors. In particular:\n"
+#~ "  1 - Failed to run the tool\n"
+#~ "  2 - Discovered dependency problems\n"
+#~ "  \n"
+#~ "\n"
+#~ msgstr ""
+#~ "\n"
+#~ "ИМЯ:\n"
+#~ "  URPM Repos Closure Checker 1.0 для Mandriva Linux\n"
+#~ "  Инструмент для определения замкнутости набора RPM пакетов\n"
+#~ "\n"
+#~ "ИСПОЛЬЗОВАНИЕ:\n"
+#~ "  [_1] --hdlist=hdlist.txt\n"
+#~ "  [_1] --hdlist=http://mirror.yandex.ru/mandriva/.../synthesis.hdlist.cz\n"
+#~ "  [_1] --dir=rpms/ --static --file-deps=file-deps.txt\n"
+#~ "  [_1] --list=list.txt --dynamic\n"
+#~ "\n"
+#~ "ОПЦИИ:\n"
+#~ "  -h|-help\n"
+#~ "      Показать это сообщение.\n"
+#~ "\n"
+#~ "  -v|-version\n"
+#~ "      Показать информацию о версии программы.\n"
+#~ "\n"
+#~ "  -hdlist <путь>\n"
+#~ "      Путь к HDlist (synthesis), который надо проверить.\n"
+#~ "\n"
+#~ "  -d|-dir <путь>\n"
+#~ "      Директория с RPM пакетами, которую надо проверить.\n"
+#~ "\n"
+#~ "  -l|-list <путь>\n"
+#~ "      Список пакетов, который надо проверить.\n"
+#~ "\n"
+#~ "  -add|-update <путь>\n"
+#~ "      Директория с RPM пакетами, которые\n"
+#~ "      надо добавить в репозиторий или обновить.\n"
+#~ "\n"
+#~ "  -file-deps <путь>\n"
+#~ "      Использовать файл file-deps для игнорирования\n"
+#~ "      некоторых зависимостей.\n"
+#~ "\n"
+#~ "  -s|-static\n"
+#~ "      Статический анализ зависимостей.\n"
+#~ "\n"
+#~ "  -dynamic\n"
+#~ "      Динамический анализ зависимостей (через установку пакетов).\n"
+#~ "\n"
+#~ "  -r|-check-release\n"
+#~ "      Проверить установочный диск (CD/DVD).\n"
+#~ "\n"
+#~ "  -sign|-check-signature\n"
+#~ "      Проверить сигнатуры пакетов.\n"
+#~ "\n"
+#~ "  -noclean\n"
+#~ "      Не очищать кэш инструментария URPM.\n"
+#~ "\n"
+#~ "  -root <путь>\n"
+#~ "      Куда устанавливать пакеты.\n"
+#~ "      Путь по умолчанию:\n"
+#~ "        /tmp/...\n"
+#~ "\n"
+#~ "КОДЫ ОШИБОК:\n"
+#~ "  0 - Успех. Набор пакетов замкнут. Ошибок не произошло.\n"
+#~ "  1 - Ошибки во время выполнения программы.\n"
+#~ "  2 - Набор пакетов не замкнут.\n"
+#~ "  \n"
+#~ "\n"
+
+#~ msgid "can't open file '[_1]': [_2]\n"
+#~ msgstr "не удалось открыть файл '[_1]': [_2]\n"
+
+#~ msgid "ERROR: you should be root\n"
+#~ msgstr "ОШИБКА: требуются права администратора\n"
+
+#~ msgid "ERROR: cannot access '[_1]'\n"
+#~ msgstr "ОШИБКА: не удалось найти '[_1]'\n"
+
+#~ msgid "ERROR: the list of packages is empty\n"
+#~ msgstr "ОШИБКА: список пакетов пуст\n"
+
+#~ msgid "ERROR: file '[_1]' is not RPM package\n"
+#~ msgstr "ОШИБКА: файл '[_1]' не является RPM пакетом\n"
+
+#~ msgid "ERROR: --dir or --list option should be specified\n"
+#~ msgstr "ОШИБКА: одна из следующих опций должна быть предоставлена: --dir или --list\n"
+
+#~ msgid ""
+#~ "Extra Packages:\n"
+#~ "\n"
+#~ msgstr ""
+#~ "Дополнительные пакеты:\n"
+#~ "\n"
+
+#~ msgid " (required by: [_1])"
+#~ msgstr " (требуется пакетам: [_1])"
+
+#~ msgid ""
+#~ "Broken Packages:\n"
+#~ "\n"
+#~ msgstr ""
+#~ "Сломанные пакеты:\n"
+#~ "\n"
+
+#~ msgid "Report has been generated to:"
+#~ msgstr "Отчет сохранен в:"
+
+#~ msgid "Checking RPMs ...\n"
+#~ msgstr "Проверка RPM пакетов ...\n"
+
+#~ msgid "Checking [_1]\n"
+#~ msgstr "Проверка [_1]\n"
+
+#~ msgid " FAILED: invalid signature\n"
+#~ msgstr " ОШИБКА: некорректная сигнатура\n"
+
+#~ msgid ""
+#~ "Broken Signature:\n"
+#~ "\n"
+#~ msgstr ""
+#~ "Некорректные сигнатуры:\n"
+#~ "\n"
+
+#~ msgid "ERROR: --hdlist, --dir or --list option should be specified\n"
+#~ msgstr "ОШИБКА: одна из следующих опций должна быть предоставлена: --hdlist, --dir или --list\n"
+
+#~ msgid "Downloading HDlist ...\n"
+#~ msgstr "Загрузка HDlist-файла ...\n"
+
+#~ msgid "ERROR: cannot extract '[_1]'\n"
+#~ msgstr "ОШИБКА: не удалось распаковать '[_1]'\n"
+
+#~ msgid "ERROR: unknown format of hdlist\n"
+#~ msgstr "ОШИБКА: неизвестный формат HDlist-файла\n"
+
+#~ msgid "Checking HDlist ...\n"
+#~ msgstr "Проверка HDlist-файла ...\n"
+
+#~ msgid "Unresolved \"Required\" Dependencies ([_1]):"
+#~ msgstr "Сломанные \"Requires\" зависимости ([_1]):"
+
+#~ msgid "Unresolved \"Suggested\" Dependencies ([_1]):"
+#~ msgstr "Сломанные \"Suggests\" зависимости ([_1]):"
+
+#~ msgid "Broken Packages ([_1]):"
+#~ msgstr "Сломанные пакеты ([_1]):"
+
+#~ msgid ""
+#~ "URPM Repos Closure Checker [_1] for Mandriva Linux\n"
+#~ "Copyright (C) 2012 ROSA Laboratory\n"
+#~ "License: GPL \n"
+#~ "This program is free software: you can redistribute it and/or modify it.\n"
+#~ "\n"
+#~ "Written by Andrey Ponomarenko.\n"
+#~ msgstr ""
+#~ "URPM Repos Closure Checker [_1] для Mandriva Linux\n"
+#~ "Copyright (C) 2012 Лаборатория РОСА\n"
+#~ "Лицензия: GPL \n"
diff --git a/urpm-tools/localizer.py b/urpm-tools/localizer.py
new file mode 100755
index 0000000..36130a3
--- /dev/null
+++ b/urpm-tools/localizer.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python2.7
+# -*- coding: UTF-8 -*-
+
+import os, sys
+
+quiet = False
+if '--list' in sys.argv:
+    quiet = True
+
+def qprint(text):
+    if quiet:
+        sys.stderr.write(text + '\n')
+        sys.stderr.flush()
+        return
+    print text
+
+def dumb(cmd):
+    if quiet:
+        return cmd + ' 1>&2'
+    else:
+        return cmd
+
+walkres = os.walk('.')
+fls = []
+pos = []
+
+for path, dirs, files in walkres:
+    for file in files:
+        p = os.path.join(path, file)
+        if p.endswith(".py"):
+            fls.append(p)
+        if p.endswith(".pl"):
+            fls.append(p)
+        if p.endswith(".po"):
+            pos.append(p)
+
+if not fls:
+    qprint("No Python or Perl sources found!")
+    exit(1)
+
+
+FN = 'urpm-tools.pot'
+
+qprint("Generating " + FN)
+cmd = 
"xgettext -d urpm-tools -o " + FN + ' -c --no-wrap ' + ' '.join(fls) +os.system(dumb(cmd)) + +LIST_OUT = [] +for po in pos: + qprint("Updating " + po) + LIST_OUT.append(po.split('/')[2]) + + cmd = "msgmerge --no-wrap -U " + po + ' ' + FN + os.system(dumb(cmd)) + mo = po[:-2] + 'mo' + qprint ("Compiling " + po) + cmd = "msgfmt -o " + mo + ' ' + po + os.system(dumb(cmd)) + +if quiet: + print ' '.join(LIST_OUT) + \ No newline at end of file diff --git a/urpm-tools/rpm5utils/COPYING b/urpm-tools/rpm5utils/COPYING new file mode 100644 index 0000000..e77696a --- /dev/null +++ b/urpm-tools/rpm5utils/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 675 Mass Ave, Cambridge, MA 02139, USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. 
These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. 
If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) 19yy + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + +Also add information on how to contact you by electronic and paper mail. 
+ +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) 19yy name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/urpm-tools/rpm5utils/Makefile b/urpm-tools/rpm5utils/Makefile new file mode 100644 index 0000000..e3f87f8 --- /dev/null +++ b/urpm-tools/rpm5utils/Makefile @@ -0,0 +1,27 @@ +PYTHON=python +PACKAGE = $(shell basename `pwd`) +PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)') +PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix') +PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER) +SITEDIR = $(PYLIBDIR)/site-packages +PKGDIR = $(SITEDIR)/$(PACKAGE) + +all: + echo "Nothing to do" + +clean: + rm -f *.pyc *.pyo *~ + +install: + mkdir -p $(DESTDIR)/$(PKGDIR) + + #copy urpmgraph dir and set permissions for files and folders + cp -rf . $(DESTDIR)/$(PKGDIR) + #don't copy these files + rm -f $(DESTDIR)/$(PKGDIR)/Makefile + rm -f $(DESTDIR)/$(PKGDIR)/COPYING + find $(DESTDIR)/$(PKGDIR) -type f |xargs -l chmod 644 $1 + find $(DESTDIR)/$(PKGDIR) -type d |xargs -l chmod 775 $1 + + #compile python sources + python -m compileall $(DESTDIR)/$(PKGDIR) \ No newline at end of file diff --git a/urpm-tools/rpm5utils/__init__.py b/urpm-tools/rpm5utils/__init__.py new file mode 100644 index 0000000..ae44f70 --- /dev/null +++ b/urpm-tools/rpm5utils/__init__.py @@ -0,0 +1,10 @@ + +import rpm5utils.urpmgraphs +from rpm5utils.urpmgraphs import * + +class Rpm5UtilsError(Exception): + + """ Exception thrown for anything rpm5utils related. 
""" + + def __init__(self, args=None): + Exception.__init__(self, args) diff --git a/urpm-tools/rpm5utils/arch.py b/urpm-tools/rpm5utils/arch.py new file mode 100644 index 0000000..02ca7a4 --- /dev/null +++ b/urpm-tools/rpm5utils/arch.py @@ -0,0 +1,423 @@ + +import os + +# dict mapping arch -> ( multicompat, best personality, biarch personality ) +multilibArches = { "x86_64": ( "athlon", "x86_64", "athlon" ), + "sparc64v": ( "sparcv9v", "sparcv9v", "sparc64v" ), + "sparc64": ( "sparcv9", "sparcv9", "sparc64" ), + "ppc64": ( "ppc", "ppc", "ppc64" ), + "s390x": ( "s390", "s390x", "s390" ), + } + +arches = { + # ia32 + "athlon": "i686", + "i686": "i586", + "geode": "i586", + "i586": "i486", + "i486": "i386", + "i386": "noarch", + + # amd64 + "x86_64": "athlon", + "amd64": "x86_64", + "ia32e": "x86_64", + + # ppc + "ppc64pseries": "ppc64", + "ppc64iseries": "ppc64", + "ppc64": "ppc", + "ppc": "noarch", + + # s390{,x} + "s390x": "s390", + "s390": "noarch", + + # sparc + "sparc64v": "sparcv9v", + "sparc64": "sparcv9", + "sparcv9v": "sparcv9", + "sparcv9": "sparcv8", + "sparcv8": "sparc", + "sparc": "noarch", + + # alpha + "alphaev7": "alphaev68", + "alphaev68": "alphaev67", + "alphaev67": "alphaev6", + "alphaev6": "alphapca56", + "alphapca56": "alphaev56", + "alphaev56": "alphaev5", + "alphaev5": "alphaev45", + "alphaev45": "alphaev4", + "alphaev4": "alpha", + "alpha": "noarch", + + # arm + "armv7l": "armv6l", + "armv6l": "armv5tejl", + "armv5tejl": "armv5tel", + "armv5tel": "noarch", + + # super-h + "sh4a": "sh4", + "sh4": "noarch", + "sh3": "noarch", + + #itanium + "ia64": "noarch", + } + +def legitMultiArchesInSameLib(arch=None): + # this is completely crackrock - if anyone has a better way I + # am all ears + + arch = getBestArch(arch) + if isMultiLibArch(arch): + arch = getBaseArch(myarch=arch) + + results = [arch] + + if arch == 'x86_64' or arch.startswith('sparcv9'): + for (k, v) in arches.items(): + if v == arch: + results.append(k) + return results + + +def canCoinstall(arch1, arch2): + """Take two arches and return True if it is possible that they can be + installed together with the same nevr. Ex: arch1=i386 and arch2=i686 then + it will return False. arch1=i386 and arch2=x86_64 will return True. + It does not determine whether or not the arches make any sense. 
Just whether + they could possibly install w/o conflict""" + + # if both are a multlibarch then we can't coinstall (x86_64, ia32e) + # if both are not multilibarches then we can't coinstall (i386, i686) + + if 'noarch' in [arch1, arch2]: # noarch can never coinstall + return False + + if isMultiLibArch(arch=arch1) == isMultiLibArch(arch=arch2): + return False + # this section keeps arch1=x86_64 arch2=ppc from returning True + if arch1 in getArchList(arch2) or arch2 in getArchList(arch1): + return True + return False + +# this computes the difference between myarch and targetarch +def archDifference(myarch, targetarch): + if myarch == targetarch: + return 1 + if myarch in arches: + ret = archDifference(arches[myarch], targetarch) + if ret != 0: + return ret + 1 + return 0 + return 0 + +def score(arch): + return archDifference(canonArch, arch) + +def isMultiLibArch(arch=None): + """returns true if arch is a multilib arch, false if not""" + if arch is None: + arch = canonArch + + if arch not in arches: # or we could check if it is noarch + return 0 + + if arch in multilibArches: + return 1 + + if arches[arch] in multilibArches: + return 1 + + return 0 + +def getBestArchFromList(archlist, myarch=None): + """ + return the best arch from the list for myarch if - myarch is not given, + then return the best arch from the list for the canonArch. + """ + + if len(archlist) == 0: + return None + + if myarch is None: + myarch = canonArch + + mybestarch = getBestArch(myarch) + + bestarch = getBestArch(myarch) + if bestarch != myarch: + bestarchchoice = getBestArchFromList(archlist, bestarch) + if bestarchchoice != None and bestarchchoice != "noarch": + return bestarchchoice + + thisarch = archlist[0] + for arch in archlist[1:]: + val1 = archDifference(myarch, thisarch) + val2 = archDifference(myarch, arch) + if val1 == 0 and val2 == 0: + continue + if val1 < val2: + if val1 == 0: + thisarch = arch + if val2 < val1: + if val2 != 0: + thisarch = arch + if val1 == val2: + pass + + # thisarch should now be our bestarch + # one final check to make sure we're not returning a bad arch + val = archDifference(myarch, thisarch) + if val == 0: + return None + + return thisarch + + +def getArchList(thisarch=None): + # this returns a list of archs that are compatible with arch given + if not thisarch: + thisarch = canonArch + + archlist = [thisarch] + while thisarch in arches: + thisarch = arches[thisarch] + archlist.append(thisarch) + + # hack hack hack + # sparc64v is also sparc64 compat + if archlist[0] == "sparc64v": + archlist.insert(1,"sparc64") + + # if we're a weirdo arch - add noarch on there. + if len(archlist) == 1 and archlist[0] == thisarch: + archlist.append('noarch') + return archlist + +def _try_read_cpuinfo(): + """ Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not + mounted). 
""" + try: + lines = open("/proc/cpuinfo", "r").readlines() + return lines + except: + return [] + +def getCanonX86Arch(arch): + # + if arch == "i586": + for line in _try_read_cpuinfo(): + if line.startswith("model name") and line.find("Geode(TM)") != -1: + return "geode" + return arch + # only athlon vs i686 isn't handled with uname currently + if arch != "i686": + return arch + + # if we're i686 and AuthenticAMD, then we should be an athlon + for line in _try_read_cpuinfo(): + if line.startswith("vendor") and line.find("AuthenticAMD") != -1: + return "athlon" + # i686 doesn't guarantee cmov, but we depend on it + elif line.startswith("flags") and line.find("cmov") == -1: + return "i586" + + return arch + +def getCanonPPCArch(arch): + # FIXME: should I do better handling for mac, etc? + if arch != "ppc64": + return arch + + machine = None + for line in _try_read_cpuinfo(): + if line.find("machine") != -1: + machine = line.split(':')[1] + break + if machine is None: + return arch + + if machine.find("CHRP IBM") != -1: + return "ppc64pseries" + if machine.find("iSeries") != -1: + return "ppc64iseries" + return arch + +def getCanonSPARCArch(arch): + # Deal with sun4v, sun4u, sun4m cases + SPARCtype = None + for line in _try_read_cpuinfo(): + if line.startswith("type"): + SPARCtype = line.split(':')[1] + break + if SPARCtype is None: + return arch + + if SPARCtype.find("sun4v") != -1: + if arch.startswith("sparc64"): + return "sparc64v" + else: + return "sparcv9v" + if SPARCtype.find("sun4u") != -1: + if arch.startswith("sparc64"): + return "sparc64" + else: + return "sparcv9" + if SPARCtype.find("sun4m") != -1: + return "sparcv8" + return arch + +def getCanonX86_64Arch(arch): + if arch != "x86_64": + return arch + + vendor = None + for line in _try_read_cpuinfo(): + if line.startswith("vendor_id"): + vendor = line.split(':')[1] + break + if vendor is None: + return arch + + if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1: + return "amd64" + if vendor.find("GenuineIntel") != -1: + return "ia32e" + return arch + +def getCanonArch(skipRpmPlatform = 0): + if not skipRpmPlatform and os.access("/etc/rpm/platform", os.R_OK): + try: + f = open("/etc/rpm/platform", "r") + line = f.readline() + f.close() + (arch, vendor, opersys) = line.split("-", 2) + return arch + except: + pass + + arch = os.uname()[4] + + if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"): + return getCanonX86Arch(arch) + + if arch.startswith("ppc"): + return getCanonPPCArch(arch) + if arch.startswith("sparc"): + return getCanonSPARCArch(arch) + if arch == "x86_64": + return getCanonX86_64Arch(arch) + + return arch + +canonArch = getCanonArch() + +# this gets you the "compat" arch of a biarch pair +def getMultiArchInfo(arch = canonArch): + if arch in multilibArches: + return multilibArches[arch] + if arch in arches and arches[arch] != "noarch": + return getMultiArchInfo(arch = arches[arch]) + return None + +# get the best usual userspace arch for the arch we're on. this is +# our arch unless we're on an arch that uses the secondary as its +# userspace (eg ppc64, sparc64) +def getBestArch(myarch=None): + if myarch: + arch = myarch + else: + arch = canonArch + + if arch.startswith("sparc64"): + arch = multilibArches[arch][1] + + if arch.startswith("ppc64"): + arch = 'ppc' + + return arch + +def getBaseArch(myarch=None): + """returns 'base' arch for myarch, if specified, or canonArch if not. 
+ base arch is the arch before noarch in the arches dict if myarch is not + a key in the multilibArches.""" + + if not myarch: + myarch = canonArch + + if myarch not in arches: # this is dumb, but + return myarch + + if myarch.startswith("sparc64"): + return "sparc" + elif myarch.startswith("ppc64"): + return "ppc" + elif myarch.startswith("arm"): + return "arm" + + if isMultiLibArch(arch=myarch): + if myarch in multilibArches: + return myarch + else: + return arches[myarch] + + if myarch in arches: + basearch = myarch + value = arches[basearch] + while value != 'noarch': + basearch = value + value = arches[basearch] + + return basearch + + +class ArchStorage(object): + """class for keeping track of what arch we have set and doing various + permutations based on it""" + def __init__(self): + self.canonarch = None + self.basearch = None + self.bestarch = None + self.compatarches = [] + self.archlist = [] + self.multilib = False + self.setup_arch() + + def setup_arch(self, arch=None, archlist_includes_compat_arch=True): + if arch: + self.canonarch = arch + else: + self.canonarch = getCanonArch() + + self.basearch = getBaseArch(myarch=self.canonarch) + self.archlist = getArchList(thisarch=self.canonarch) + + if not archlist_includes_compat_arch: # - do we bother including i686 and below on x86_64 + limit_archlist = [] + for a in self.archlist: + if isMultiLibArch(a) or a == 'noarch': + limit_archlist.append(a) + self.archlist = limit_archlist + + self.bestarch = getBestArch(myarch=self.canonarch) + self.compatarches = getMultiArchInfo(arch=self.canonarch) + self.multilib = isMultiLibArch(arch=self.canonarch) + self.legit_multi_arches = legitMultiArchesInSameLib(arch = self.canonarch) + + def get_best_arch_from_list(self, archlist, fromarch=None): + if not fromarch: + fromarch = self.canonarch + return getBestArchFromList(archlist, myarch=fromarch) + + def score(self, arch): + return archDifference(self.canonarch, arch) + + def get_arch_list(self, arch): + if not arch: + return self.archlist + return getArchList(thisarch=arch) diff --git a/urpm-tools/rpm5utils/miscutils.py b/urpm-tools/rpm5utils/miscutils.py new file mode 100644 index 0000000..165ec75 --- /dev/null +++ b/urpm-tools/rpm5utils/miscutils.py @@ -0,0 +1,455 @@ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+# Copyright 2003 Duke University + +import rpm +import types +import gzip +import os +import sys +import locale +import signal + +import rpm5utils.transaction + +def rpmOutToStr(arg): + if type(arg) != types.StringType: + # and arg is not None: + arg = str(arg) + + return arg + + +def compareEVR((e1, v1, r1), (e2, v2, r2)): + # return 1: a is newer than b + # 0: a and b are the same version + # -1: b is newer than a + if e1 is None: + e1 = '0' + else: + e1 = str(e1) + if v1 is None: + v1 = '0' + else: + v1 = str(v1) + if r1 is None: + r1 = '0' + else: + r1 = str(r1) + + if e2 is None: + e2 = '0' + else: + e2 = str(e2) + if v2 is None: + v2 = '0' + else: + v2 = str(v2) + if r2 is None: + r2 = '0' + else: + r2 = str(r2) + #~ print '%s, %s, %s vs %s, %s, %s' % (e1, v1, r1, e2, v2, r2) + rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) + #~ print '%s, %s, %s vs %s, %s, %s = %s' % (e1, v1, r1, e2, v2, r2, rc) + return rc + +def compareDEVR((d1, e1, v1, r1), (d2, e2, v2, r2)): + # return 1: a is newer than b + # 0: a and b are the same version + # -1: b is newer than a + if d1 is None: + d1 = '0' + if d2 is None: + d2 = '0' + + if d1 > d2: + return 1 + if d1 < d2: + return -1 + + rc = compareEVR((e1, v1, r1), (e2, v2, r2)) + return rc + +def compareVerOnly(v1, v2): + """compare version strings only using rpm vercmp""" + return compareEVR(('', v1, ''), ('', v2, '')) + +def checkSig(ts, package): + """Takes a transaction set and a package, check it's sigs, + return 0 if they are all fine + return 1 if the gpg key can't be found + return 2 if the header is in someway damaged + return 3 if the key is not trusted + return 4 if the pkg is not gpg or pgp signed""" + + value = 0 + currentflags = ts.setVSFlags(0) + fdno = os.open(package, os.O_RDONLY) + try: + hdr = ts.hdrFromFdno(fdno) + except rpm.error, e: + if str(e) == "public key not availaiable": + value = 1 + if str(e) == "public key not available": + value = 1 + if str(e) == "public key not trusted": + value = 3 + if str(e) == "error reading package header": + value = 2 + else: + error, siginfo = getSigInfo(hdr) + if error == 101: + os.close(fdno) + del hdr + value = 4 + else: + del hdr + + try: + os.close(fdno) + except OSError, e: # if we're not opened, don't scream about it + pass + + ts.setVSFlags(currentflags) # put things back like they were before + return value + +def getSigInfo(hdr): + """checks signature from an hdr hand back signature information and/or + an error code""" + + locale.setlocale(locale.LC_ALL, 'C') + string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|' + siginfo = hdr.sprintf(string) + if siginfo != '(none)': + error = 0 + sigtype, sigdate, sigid = siginfo.split(',') + else: + error = 101 + sigtype = 'MD5' + sigdate = 'None' + sigid = 'None' + + infotuple = (sigtype, sigdate, sigid) + return error, infotuple + +def pkgTupleFromHeader(hdr): + """return a pkgtuple (n, a, e, v, r) from a hdr object, converts + None epoch to 0, as well.""" + + name = hdr['name'] + + # RPMTAG_SOURCEPACKAGE: RPMTAG_SOURCERPM is not necessarily there for + # e.g. 
gpg-pubkeys imported with older rpm versions + # http://lists.baseurl.org/pipermail/yum/2009-January/022275.html + if hdr[rpm.RPMTAG_SOURCERPM] or hdr[rpm.RPMTAG_SOURCEPACKAGE] != 1: + arch = hdr['arch'] + else: + arch = 'src' + + ver = hdr['version'] + rel = hdr['release'] + epoch = hdr['epoch'] + if epoch is None: + epoch = '0' + pkgtuple = (name, arch, epoch, ver, rel) + return pkgtuple + +def pkgDistTupleFromHeader(hdr): + """the same as above, but appends DistEpoch to the tuple""" + + (n,a,e,v,r) = pkgTupleFromHeader(hdr) + d = hdr['distepoch'] + if d is None: + d = '0' + + pkgtuple = (n,a,e,v,r,d) + return pkgtuple + +def rangeCheck(reqtuple, pkgtuple): + """returns true if the package epoch-ver-rel satisfy the range + requested in the reqtuple: + ex: foo >= 2.1-1""" + # we only ever get here if we have a versioned prco + # nameonly shouldn't ever raise it + #(reqn, reqf, (reqe, reqv, reqr)) = reqtuple + (n, a, e, v, r) = pkgtuple + return rangeCompare(reqtuple, (n, rpm.RPMSENSE_EQUAL, (e, v, r))) + +def rangeCompare(reqtuple, provtuple): + """returns true if provtuple satisfies reqtuple""" + (reqn, reqf, (reqe, reqv, reqr)) = reqtuple + (n, f, (e, v, r)) = provtuple + if reqn != n: + return 0 + + # unversioned satisfies everything + if not f or not reqf: + return 1 + + # and you thought we were done having fun + # if the requested release is left out then we have + # to remove release from the package prco to make sure the match + # is a success - ie: if the request is EQ foo 1:3.0.0 and we have + # foo 1:3.0.0-15 then we have to drop the 15 so we can match + if reqr is None: + r = None + if reqe is None: + e = None + if reqv is None: # just for the record if ver is None then we're going to segfault + v = None + + # if we just require foo-version, then foo-version-* will match + if r is None: + reqr = None + + rc = compareEVR((e, v, r), (reqe, reqv, reqr)) + + # does not match unless + if rc >= 1: + if (reqf & rpm.RPMSENSE_GREATER) or (reqf & rpm.RPMSENSE_EQUAL): + return 1 + + if rc == 0: + if (reqf & rpm.RPMSENSE_EQUAL): + return 1 + + if rc <= -1: + if (reqf & rpm.RPMSENSE_LESS) or (reqf & rpm.RPMSENSE_EQUAL): + return 1 + + return 0 + + +########### +# Title: Remove duplicates from a sequence +# Submitter: Tim Peters +# From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560 + +def unique(s): + """Return a list of the elements in s, but without duplicates. + + For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3], + unique("abcabc") some permutation of ["a", "b", "c"], and + unique(([1, 2], [2, 3], [1, 2])) some permutation of + [[2, 3], [1, 2]]. + + For best speed, all sequence elements should be hashable. Then + unique() will usually work in linear time. + + If not possible, the sequence elements should enjoy a total + ordering, and if list(s).sort() doesn't raise TypeError it's + assumed that they do enjoy a total ordering. Then unique() will + usually work in O(N*log2(N)) time. + + If that's not possible either, the sequence elements must support + equality-testing. Then unique() will usually work in quadratic + time. + """ + + n = len(s) + if n == 0: + return [] + + # Try using a dict first, as that's the fastest and will usually + # work. If it doesn't work, it will usually fail quickly, so it + # usually doesn't cost much to *try* it. It requires that all the + # sequence elements be hashable, and support equality comparison. 
+ u = {} + try: + for x in s: + u[x] = 1 + except TypeError: + del u # move on to the next method + else: + return u.keys() + + # We can't hash all the elements. Second fastest is to sort, + # which brings the equal elements together; then duplicates are + # easy to weed out in a single pass. + # NOTE: Python's list.sort() was designed to be efficient in the + # presence of many duplicate elements. This isn't true of all + # sort functions in all languages or libraries, so this approach + # is more effective in Python than it may be elsewhere. + try: + t = list(s) + t.sort() + except TypeError: + del t # move on to the next method + else: + assert n > 0 + last = t[0] + lasti = i = 1 + while i < n: + if t[i] != last: + t[lasti] = last = t[i] + lasti += 1 + i += 1 + return t[:lasti] + + # Brute force is all that's left. + u = [] + for x in s: + if x not in u: + u.append(x) + return u + + +def splitFilename(filename): + """ + Pass in a standard style rpm fullname + + Return a name, version, release, epoch, arch, e.g.:: + foo-1.0-1.i386.rpm returns foo, 1.0, 1, i386 + 1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64 + """ + + if filename[-4:] == '.rpm': + filename = filename[:-4] + + archIndex = filename.rfind('.') + arch = filename[archIndex+1:] + + relIndex = filename[:archIndex].rfind('-') + rel = filename[relIndex+1:archIndex] + + verIndex = filename[:relIndex].rfind('-') + ver = filename[verIndex+1:relIndex] + + epochIndex = filename.find(':') + if epochIndex == -1: + epoch = '' + else: + epoch = filename[:epochIndex] + + name = filename[epochIndex + 1:verIndex] + return name, ver, rel, epoch, arch + + +def rpm2cpio(fdno, out=sys.stdout, bufsize=2048): + """Performs roughly the equivalent of rpm2cpio(8). + Reads the package from fdno, and dumps the cpio payload to out, + using bufsize as the buffer size.""" + ts = rpm5utils.transaction.initReadOnlyTransaction() + hdr = ts.hdrFromFdno(fdno) + del ts + + compr = hdr[rpm.RPMTAG_PAYLOADCOMPRESSOR] or 'gzip' + #XXX FIXME + #if compr == 'bzip2': + # TODO: someone implement me! + #el + if compr != 'gzip': + raise rpm5utils.Rpm5UtilsError, \ + 'Unsupported payload compressor: "%s"' % compr + f = gzip.GzipFile(None, 'rb', None, os.fdopen(fdno, 'rb', bufsize)) + while 1: + tmp = f.read(bufsize) + if tmp == "": break + out.write(tmp) + f.close() + +def formatRequire (name, version, flags): + ''' + Return a human readable requirement string (ex. foobar >= 2.0) + @param name: requirement name (ex. foobar) + @param version: requirent version (ex. 
2.0)
+    @param flags: binary flags ( 0010 = equal, 0100 = greater than, 1000 = less than )
+    '''
+    s = name
+
+    if flags and (type(flags) == type(0) or type(flags) == type(0L)): # Flag must be set and an int (or a long, now)
+        if flags & (rpm.RPMSENSE_LESS | rpm.RPMSENSE_GREATER |
+                    rpm.RPMSENSE_EQUAL):
+            s = s + " "
+            if flags & rpm.RPMSENSE_LESS:
+                s = s + "<"
+            if flags & rpm.RPMSENSE_GREATER:
+                s = s + ">"
+            if flags & rpm.RPMSENSE_EQUAL:
+                s = s + "="
+            if version:
+                s = "%s %s" %(s, version)
+    return s
+
+
+def flagToString(flags):
+    flags = flags & 0xf
+
+    if flags == 0: return None
+    elif flags == 2: return 'LT'
+    elif flags == 4: return 'GT'
+    elif flags == 8: return 'EQ'
+    elif flags == 10: return 'LE'
+    elif flags == 12: return 'GE'
+
+    return flags
+
+def stringToVersion(verstring):
+    if verstring in [None, '']:
+        return (None, None, None)
+    i = verstring.find(':')
+    if i != -1:
+        try:
+            epoch = str(long(verstring[:i]))
+        except ValueError:
+            # look, garbage in the epoch field, how fun, kill it
+            epoch = '0' # this is our fallback, deal
+    else:
+        epoch = '0'
+    j = verstring.find('-')
+    if j != -1:
+        if verstring[i + 1:j] == '':
+            version = None
+        else:
+            version = verstring[i + 1:j]
+        release = verstring[j + 1:]
+    else:
+        if verstring[i + 1:] == '':
+            version = None
+        else:
+            version = verstring[i + 1:]
+        release = None
+    return (epoch, version, release)
+
+def hdrFromPackage(ts, package):
+    """hand back the rpm header or raise an Error if the pkg is fubar"""
+    try:
+        fdno = os.open(package, os.O_RDONLY)
+    except OSError, e:
+        raise rpm5utils.Rpm5UtilsError, 'Unable to open file'
+
+    # XXX: We should start a readonly ts here, so we don't get the options
+    # from the other one (sig checking, etc)
+    try:
+        hdr = ts.hdrFromFdno(fdno)
+    except rpm.error, e:
+        os.close(fdno)
+        raise rpm5utils.Rpm5UtilsError, "RPM Error opening Package"
+    if type(hdr) != rpm.hdr:
+        os.close(fdno)
+        raise rpm5utils.Rpm5UtilsError, "RPM Error opening Package (type)"
+
+    os.close(fdno)
+    return hdr
+
+def checkSignals():
+    if hasattr(rpm, "checkSignals") and hasattr(rpm, 'signalsCaught'):
+        if rpm.signalsCaught([signal.SIGINT,
+                              signal.SIGTERM,
+                              signal.SIGPIPE,
+                              signal.SIGQUIT,
+                              signal.SIGHUP]):
+            sys.exit(1)
+
diff --git a/urpm-tools/rpm5utils/tests/updates-test.py b/urpm-tools/rpm5utils/tests/updates-test.py
new file mode 100644
index 0000000..44ab5a2
--- /dev/null
+++ b/urpm-tools/rpm5utils/tests/updates-test.py
@@ -0,0 +1,63 @@
+
+import rpm5utils.updates
+import rpm5utils.arch
+
+instlist = [('foo', 'i386', '0', '1', '1'),
+            ('do', 'i386', '0', '2', '3'),
+            ('glibc', 'i386', '0', '1', '1'),
+            ('bar', 'noarch', '0', '2', '1'),
+            ('baz', 'i686', '0', '2', '3'),
+            ('baz', 'x86_64', '0','1','4'),
+            ('foo', 'i686', '0', '1', '1'),
+            ('cyrus-sasl','sparcv9', '0', '1', '1')]
+
+availlist = [('foo', 'i686', '0', '1', '3'),
+             ('do', 'noarch', '0', '3', '3'),
+             ('do', 'noarch', '0', '4', '3'),
+             ('foo', 'i386', '0', '1', '3'),
+             ('foo', 'i686', '0', '1', '2'),
+             ('glibc', 'i686', '0', '1', '2'),
+             ('glibc', 'i386', '0', '1', '2'),
+             ('bar', 'noarch', '0', '2', '2'),
+             ('baz', 'noarch', '0', '2', '4'),
+             ('baz', 'i686', '0', '2', '4'),
+             ('baz', 'x86_64', '0', '1', '5'),
+             ('baz', 'ppc', '0', '1', '5'),
+             ('cyrus-sasl','sparcv9', '0', '1', '2'),
+             ('cyrus-sasl','sparc64', '0', '1', '2'),]
+
+obslist = {('quux', 'noarch', '0', '1', '3'): [('bar', None, (None, None, None))],
+
+           ('quuxish', 'noarch', '0', '1', '3'):[('foo', 'GE', ('0', '1', None))],
+           }
+
+
+up = rpm5utils.updates.Updates(instlist, availlist)
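For reference, the filename and version-string helpers defined earlier in this file behave as follows; a sketch, assuming the module imports cleanly, and note that the epoch result relies on the verstring[:i] slice in stringToVersion:

    from rpm5utils.miscutils import splitFilename, stringToVersion

    print splitFilename('bar-9-123a.ia64.rpm')   # -> ('bar', '9', '123a', '', 'ia64')
    print stringToVersion('1:3.0.0-15')          # -> ('1', '3.0.0', '15')
    print stringToVersion('3.0.0')               # -> ('0', '3.0.0', None)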
+up.debug=1 +up.exactarch=1 +#up.myarch = 'sparc64' +up._is_multilib = rpm5utils.arch.isMultiLibArch(up.myarch) +up._archlist = rpm5utils.arch.getArchList(up.myarch) +print up._archlist +up._multilib_compat_arches = rpm5utils.arch.getMultiArchInfo(up.myarch) +up.doUpdates() +up.condenseUpdates() + +for tup in up.updatesdict.keys(): + (old_n, old_a, old_e, old_v, old_r) = tup + for (n, a, e, v, r) in up.updatesdict[tup]: + print '%s.%s %s:%s-%s updated by %s.%s %s:%s-%s' % (old_n, + old_a, old_e, old_v, old_r, n, a, e, v, r) + +up.rawobsoletes = obslist +up.doObsoletes() +for tup in up.obsoletes.keys(): + (old_n, old_a, old_e, old_v, old_r) = tup + for (n, a, e, v, r) in up.obsoletes[tup]: + print '%s.%s %s:%s-%s obsoletes %s.%s %s:%s-%s' % (old_n, + old_a, old_e, old_v, old_r, n, a, e, v, r) + + + + + diff --git a/urpm-tools/rpm5utils/transaction.py b/urpm-tools/rpm5utils/transaction.py new file mode 100644 index 0000000..83393e1 --- /dev/null +++ b/urpm-tools/rpm5utils/transaction.py @@ -0,0 +1,192 @@ +# +# Client code for Update Agent +# Copyright (c) 1999-2002 Red Hat, Inc. Distributed under GPL. +# +# Adrian Likins +# Some Edits by Seth Vidal +# +# a couple of classes wrapping up transactions so that we +# can share transactions instead of creating new ones all over +# + +import rpm +import miscutils + +read_ts = None +ts = None + +# wrapper/proxy class for rpm.Transaction so we can +# instrument it, etc easily +class TransactionWrapper: + def __init__(self, root='/'): + self.ts = rpm.TransactionSet(root) + self._methods = ['check', + 'order', + 'addErase', + 'addInstall', + 'run', + 'pgpImportPubkey', + 'pgpPrtPkts', + 'problems', + 'setFlags', + 'setVSFlags', + 'setProbFilter', + 'hdrFromFdno', + 'next', + 'clean'] + self.tsflags = [] + self.open = True + + def __del__(self): + # Automatically close the rpm transaction when the reference is lost + self.close() + + def close(self): + if self.open: + self.ts.closeDB() + self.ts = None + self.open = False + + def dbMatch(self, *args, **kwds): + if 'patterns' in kwds: + patterns = kwds.pop('patterns') + else: + patterns = [] + + mi = self.ts.dbMatch(*args, **kwds) + for (tag, tp, pat) in patterns: + mi.pattern(tag, tp, pat) + return mi + + def __getattr__(self, attr): + if attr in self._methods: + return self.getMethod(attr) + else: + raise AttributeError, attr + + def __iter__(self): + return self.ts + + def getMethod(self, method): + # in theory, we can override this with + # profile/etc info + return getattr(self.ts, method) + + # push/pop methods so we dont lose the previous + # set value, and we can potentiall debug a bit + # easier + def pushVSFlags(self, flags): + self.tsflags.append(flags) + self.ts.setVSFlags(self.tsflags[-1]) + + def popVSFlags(self): + del self.tsflags[-1] + self.ts.setVSFlags(self.tsflags[-1]) + + def addTsFlag(self, flag): + curflags = self.ts.setFlags(0) + self.ts.setFlags(curflags | flag) + + def getTsFlags(self): + curflags = self.ts.setFlags(0) + self.ts.setFlags(curflags) + return curflags + + def isTsFlagSet(self, flag): + val = self.getTsFlags() + return bool(flag & val) + + def setScriptFd(self, fd): + self.ts.scriptFd = fd.fileno() + +# def addProblemFilter(self, filt): +# curfilter = self.ts.setProbFilter(0) +# self.ts.setProbFilter(cutfilter | filt) + + def test(self, cb, conf={}): + """tests the ts we've setup, takes a callback function and a conf dict + for flags and what not""" + + origflags = self.getTsFlags() + self.addTsFlag(rpm.RPMTRANS_FLAG_TEST) + # FIXME GARBAGE - remove once this is 
reimplemented elsehwere + # KEEPING FOR API COMPLIANCE ONLY + if conf.get('diskspacecheck') == 0: + self.ts.setProbFilter(rpm.RPMPROB_FILTER_DISKSPACE) + tserrors = self.ts.run(cb.callback, '') + self.ts.setFlags(origflags) + + reserrors = [] + if tserrors: + for (descr, (etype, mount, need)) in tserrors: + reserrors.append(descr) + + return reserrors + + + def returnLeafNodes(self, headers=False): + """returns a list of package tuples (n,a,e,v,r) that are not required by + any other package on the system + If headers is True then it will return a list of (header, index) tuples + """ + + req = {} + orphan = [] + + mi = self.dbMatch() + if mi is None: # this is REALLY unlikely but let's just say it for the moment + return orphan + + # prebuild the req dict + for h in mi: + if h['name'] == 'gpg-pubkey': + continue + if not h[rpm.RPMTAG_REQUIRENAME]: + continue + tup = miscutils.pkgTupleFromHeader(h) + for r in h[rpm.RPMTAG_REQUIRENAME]: + if r not in req: + req[r] = set() + req[r].add(tup) + + + mi = self.dbMatch() + if mi is None: + return orphan + + def _return_all_provides(hdr): + """ Return all the provides, via yield. """ + # These are done one by one, so that we get lazy loading + for prov in hdr[rpm.RPMTAG_PROVIDES]: + yield prov + for prov in hdr[rpm.RPMTAG_FILENAMES]: + yield prov + + for h in mi: + if h['name'] == 'gpg-pubkey': + continue + preq = 0 + tup = miscutils.pkgTupleFromHeader(h) + for p in _return_all_provides(h): + if p in req: + # Don't count a package that provides its require + s = req[p] + if len(s) > 1 or tup not in s: + preq = preq + 1 + break + + if preq == 0: + if headers: + orphan.append((h, mi.instance())) + else: + orphan.append(h) + #~ orphan.append(tup) + + return orphan + + +def initReadOnlyTransaction(root='/'): + read_ts = TransactionWrapper(root=root) + read_ts.pushVSFlags((rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)) + return read_ts + diff --git a/urpm-tools/rpm5utils/updates.py b/urpm-tools/rpm5utils/updates.py new file mode 100644 index 0000000..4ef2849 --- /dev/null +++ b/urpm-tools/rpm5utils/updates.py @@ -0,0 +1,723 @@ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# Copyright 2004 Duke University + +import rpm5utils +import rpm5utils.miscutils +import rpm5utils.arch + +def _vertup_cmp(tup1, tup2): + return rpm5utils.miscutils.compareEVR(tup1, tup2) +class Updates: + """ + This class computes and keeps track of updates and obsoletes. + initialize, add installed packages, add available packages (both as + unique lists of name, arch, ver, rel, epoch tuples), add an optional dict + of obsoleting packages with obsoletes and what they obsolete ie:: + foo, i386, 0, 1.1, 1: bar >= 1.1. 
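A minimal sketch of the workflow this docstring describes, using noarch tuples so the outcome does not depend on the host architecture; 'foo' and the version numbers are invented:

    import rpm5utils.updates

    inst  = [('foo', 'noarch', '0', '1.0', '1')]
    avail = [('foo', 'noarch', '0', '1.1', '1')]
    up = rpm5utils.updates.Updates(inst, avail)
    up.doUpdates()
    print up.updatesdict
    # -> {('foo', 'noarch', '0', '1.0', '1'): [('foo', 'noarch', '0', '1.1', '1')]}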
+ """ + + def __init__(self, instlist, availlist): + + self.installed = instlist # list of installed pkgs (n, a, e, v, r) + self.available = availlist # list of available pkgs (n, a, e, v, r) + + self.rawobsoletes = {} # dict of obsoleting package->[what it obsoletes] + self._obsoletes_by_name = None + self.obsoleted_dict = {} # obsoleted pkgtup -> [ obsoleting pkgtups ] + self.obsoleting_dict = {} # obsoleting pkgtup -> [ obsoleted pkgtups ] + + self.exactarch = 1 # don't change archs by default + self.exactarchlist = set(['kernel', 'kernel-smp', 'glibc', + 'kernel-hugemem', + 'kernel-enterprise', 'kernel-bigmem', + 'kernel-BOOT']) + + self.myarch = rpm5utils.arch.canonArch # set this if you want to + # test on some other arch + # otherwise leave it alone + self._is_multilib = rpm5utils.arch.isMultiLibArch(self.myarch) + + self._archlist = rpm5utils.arch.getArchList(self.myarch) + + self._multilib_compat_arches = rpm5utils.arch.getMultiArchInfo(self.myarch) + + # make some dicts from installed and available + self.installdict = self.makeNADict(self.installed, 1) + self.availdict = self.makeNADict(self.available, 0, # Done in doUpdate + filter=self.installdict) + + # holder for our updates dict + self.updatesdict = {} + self.updating_dict = {} + #debug, ignore me + self.debug = 0 + self.obsoletes = {} + + def _delFromDict(self, dict_, keys, value): + for key in keys: + if key not in dict_: + continue + dict_[key] = filter(value.__ne__, dict_[key]) + if not dict_[key]: + del dict_[key] + + def _delFromNADict(self, dict_, pkgtup): + (n, a, e, v, r) = pkgtup + for aa in (a, None): + if (n, aa) in dict_: + dict_[(n, aa)] = filter((e,v,r).__ne__, dict_[(n, aa)]) + if not dict_[(n, aa)]: + del dict_[(n, aa)] + + def delPackage(self, pkgtup): + """remove available pkgtup that is no longer available""" + if pkgtup not in self.available: + return + self.available.remove(pkgtup) + self._delFromNADict(self.availdict, pkgtup) + + self._delFromDict(self.updating_dict, self.updatesdict.get(pkgtup, []), pkgtup) + self._delFromDict(self.updatesdict, self.updating_dict.get(pkgtup, []), pkgtup) + + if pkgtup in self.rawobsoletes: + if self._obsoletes_by_name: + for name, flag, version in self.rawobsoletes[pkgtup]: + self._delFromDict(self._obsoletes_by_name, [name], (flag, version, pkgtup)) + del self.rawobsoletes[pkgtup] + + self._delFromDict(self.obsoleted_dict, self.obsoleting_dict.get(pkgtup, []), pkgtup) + self._delFromDict(self.obsoleting_dict, self.obsoleted_dict.get(pkgtup, []), pkgtup) + + def debugprint(self, msg): + if self.debug: + print msg + + def makeNADict(self, pkglist, Nonelists, filter=None): + """return lists of (e,v,r) tuples as value of a dict keyed on (n, a) + optionally will return a (n, None) entry with all the a for that + n in tuples of (a,e,v,r)""" + + returndict = {} + for (n, a, e, v, r) in pkglist: + if filter and (n, None) not in filter: + continue + if (n, a) not in returndict: + returndict[(n, a)] = [] + if (e,v,r) in returndict[(n, a)]: + continue + returndict[(n, a)].append((e,v,r)) + + if Nonelists: + if (n, None) not in returndict: + returndict[(n, None)] = [] + if (a,e,v,r) in returndict[(n, None)]: + continue + returndict[(n, None)].append((a, e, v, r)) + + return returndict + + + def returnNewest(self, evrlist): + """takes a list of (e, v, r) tuples and returns the newest one""" + if len(evrlist)==0: + raise rpm5utils.Rpm5UtilsError, "Zero Length List in returnNewest call" + + if len(evrlist)==1: + return evrlist[0] + + (new_e, new_v, new_r) = evrlist[0] # we'll call 
the first ones 'newest' + + for (e, v, r) in evrlist[1:]: + rc = rpm5utils.miscutils.compareEVR((e, v, r), (new_e, new_v, new_r)) + if rc > 0: + new_e = e + new_v = v + new_r = r + return (new_e, new_v, new_r) + + + def returnHighestVerFromAllArchsByName(self, name, archlist, pkglist): + """returns a list of package tuples in a list (n, a, e, v, r) + takes a package name, a list of archs, and a list of pkgs in + (n, a, e, v, r) form.""" + returnlist = [] + high_vertup = None + for pkgtup in pkglist: + (n, a, e, v, r) = pkgtup + # FIXME: returnlist used to _possibly_ contain things not in + # archlist ... was that desired? + if name == n and a in archlist: + vertup = (e, v, r) + if (high_vertup is None or + (_vertup_cmp(high_vertup, vertup) < 0)): + high_vertup = vertup + returnlist = [] + if vertup == high_vertup: + returnlist.append(pkgtup) + + return returnlist + + def condenseUpdates(self): + """remove any accidental duplicates in updates""" + + for tup in self.updatesdict: + if len(self.updatesdict[tup]) > 1: + mylist = self.updatesdict[tup] + self.updatesdict[tup] = rpm5utils.miscutils.unique(mylist) + + + def checkForObsolete(self, pkglist, newest=1): + """accept a list of packages to check to see if anything obsoletes them + return an obsoleted_dict in the format of makeObsoletedDict""" + if self._obsoletes_by_name is None: + self._obsoletes_by_name = {} + for pkgtup, obsoletes in self.rawobsoletes.iteritems(): + for name, flag, version in obsoletes: + self._obsoletes_by_name.setdefault(name, []).append( + (flag, version, pkgtup) ) + + obsdict = {} # obseleting package -> [obsoleted package] + + for pkgtup in pkglist: + name = pkgtup[0] + for obs_flag, obs_version, obsoleting in self._obsoletes_by_name.get(name, []): + if obs_flag in [None, 0] and name == obsoleting[0]: continue + if rpm5utils.miscutils.rangeCheck( (name, obs_flag, obs_version), pkgtup): + obsdict.setdefault(obsoleting, []).append(pkgtup) + + if not obsdict: + return {} + + obslist = obsdict.keys() + if newest: + obslist = self._reduceListNewestByNameArch(obslist) + + returndict = {} + for new in obslist: + for old in obsdict[new]: + if old not in returndict: + returndict[old] = [] + returndict[old].append(new) + + return returndict + + def doObsoletes(self): + """figures out what things available obsolete things installed, returns + them in a dict attribute of the class.""" + + obsdict = {} # obseleting package -> [obsoleted package] + # this needs to keep arch in mind + # if foo.i386 obsoletes bar + # it needs to obsoletes bar.i386 preferentially, not bar.x86_64 + # if there is only one bar and only one foo then obsolete it, but try to + # match the arch. 
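The obsolete handling sketched in these comments consumes rawobsoletes entries shaped like the class docstring example; a hypothetical run (assuming the module imports cleanly, with invented package names):

    import rpm5utils.updates

    # 'quux' obsoletes the installed 'bar' unconditionally (flag None)
    up = rpm5utils.updates.Updates([('bar', 'i386', '0', '1.0', '1')],
                                   [('quux', 'i386', '0', '2', '1')])
    up.rawobsoletes = {('quux', 'i386', '0', '2', '1'):
                       [('bar', None, (None, None, None))]}
    up.doObsoletes()
    print up.obsoletes
    # -> {('quux', 'i386', '0', '2', '1'): [('bar', 'i386', '0', '1.0', '1')]}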
+ + # look through all the obsoleting packages look for multiple archs per name + # if you find it look for the packages they obsolete + # + obs_arches = {} + for (n, a, e, v, r) in self.rawobsoletes: + if n not in obs_arches: + obs_arches[n] = [] + obs_arches[n].append(a) + + for pkgtup in self.rawobsoletes: + (name, arch, epoch, ver, rel) = pkgtup + for (obs_n, flag, (obs_e, obs_v, obs_r)) in self.rawobsoletes[(pkgtup)]: + if (obs_n, None) in self.installdict: + for (rpm_a, rpm_e, rpm_v, rpm_r) in self.installdict[(obs_n, None)]: + if flag in [None, 0] or \ + rpm5utils.miscutils.rangeCheck((obs_n, flag, (obs_e, obs_v, obs_r)), + (obs_n, rpm_a, rpm_e, rpm_v, rpm_r)): + # make sure the obsoleting pkg is not already installed + willInstall = 1 + if (name, None) in self.installdict: + for (ins_a, ins_e, ins_v, ins_r) in self.installdict[(name, None)]: + pkgver = (epoch, ver, rel) + installedver = (ins_e, ins_v, ins_r) + if self.returnNewest((pkgver, installedver)) == installedver: + willInstall = 0 + break + if rpm_a != arch and rpm_a in obs_arches[name]: + willInstall = 0 + if willInstall: + if pkgtup not in obsdict: + obsdict[pkgtup] = [] + obsdict[pkgtup].append((obs_n, rpm_a, rpm_e, rpm_v, rpm_r)) + self.obsoletes = obsdict + self.makeObsoletedDict() + + def makeObsoletedDict(self): + """creates a dict of obsoleted packages -> [obsoleting package], this + is to make it easier to look up what package obsoletes what item in + the rpmdb""" + self.obsoleted_dict = {} + for new in self.obsoletes: + for old in self.obsoletes[new]: + if old not in self.obsoleted_dict: + self.obsoleted_dict[old] = [] + self.obsoleted_dict[old].append(new) + self.obsoleting_dict = {} + for obsoleted, obsoletings in self.obsoleted_dict.iteritems(): + for obsoleting in obsoletings: + self.obsoleting_dict.setdefault(obsoleting, []).append(obsoleted) + + def doUpdates(self): + """check for key lists as populated then commit acts of evil to + determine what is updated and/or obsoleted, populate self.updatesdict + """ + + + # best bet is to chew through the pkgs and throw out the new ones early + # then deal with the ones where there are a single pkg installed and a + # single pkg available + # then deal with the multiples + + # we should take the whole list as a 'newlist' and remove those entries + # which are clearly: + # 1. updates + # 2. identical to the ones in ourdb + # 3. 
not in our archdict at all + + simpleupdate = [] + complexupdate = [] + + updatedict = {} # (old n, a, e, v, r) : [(new n, a, e, v, r)] + # make the new ones a list b/c while we _shouldn't_ + # have multiple updaters, we might and well, it needs + # to be solved one way or the other + newpkgs = self.availdict + + archlist = self._archlist + for (n, a) in newpkgs.keys(): + if a not in archlist: + # high log here + del newpkgs[(n, a)] + continue + + # remove the older stuff - if we're doing an update we only want the + # newest evrs + for (n, a) in newpkgs: + (new_e,new_v,new_r) = self.returnNewest(newpkgs[(n, a)]) + for (e, v, r) in newpkgs[(n, a)][:]: + if (new_e, new_v, new_r) != (e, v, r): + newpkgs[(n, a)].remove((e, v, r)) + + for (n, a) in newpkgs: + # simple ones - look for exact matches or older stuff + if (n, a) in self.installdict: + for (rpm_e, rpm_v, rpm_r) in self.installdict[(n, a)]: + try: + (e, v, r) = self.returnNewest(newpkgs[(n,a)]) + except rpm5utils.Rpm5UtilsError: + continue + else: + rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r)) + if rc <= 0: + try: + newpkgs[(n, a)].remove((e, v, r)) + except ValueError: + pass + + # Now we add the (n, None) entries back... + for na in newpkgs.keys(): + all_arches = map(lambda x: (na[1], x[0], x[1], x[2]), newpkgs[na]) + newpkgs.setdefault((na[0], None), []).extend(all_arches) + + # get rid of all the empty dict entries: + for nakey in newpkgs.keys(): + if len(newpkgs[nakey]) == 0: + del newpkgs[nakey] + + + # ok at this point our newpkgs list should be thinned, we should have only + # the newest e,v,r's and only archs we can actually use + for (n, a) in newpkgs: + if a is None: # the None archs are only for lookups + continue + + if (n, None) in self.installdict: + installarchs = [] + availarchs = [] + for (a, e, v ,r) in newpkgs[(n, None)]: + availarchs.append(a) + for (a, e, v, r) in self.installdict[(n, None)]: + installarchs.append(a) + + if len(availarchs) > 1 or len(installarchs) > 1: + self.debugprint('putting %s in complex update' % n) + complexupdate.append(n) + else: + #log(4, 'putting %s in simple update list' % name) + self.debugprint('putting %s in simple update' % n) + simpleupdate.append((n, a)) + + # we have our lists to work with now + + # simple cases + for (n, a) in simpleupdate: + # try to be as precise as possible + if n in self.exactarchlist: + if (n, a) in self.installdict: + (rpm_e, rpm_v, rpm_r) = self.returnNewest(self.installdict[(n, a)]) + if (n, a) in newpkgs: + (e, v, r) = self.returnNewest(newpkgs[(n, a)]) + rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r)) + if rc > 0: + # this is definitely an update - put it in the dict + if (n, a, rpm_e, rpm_v, rpm_r) not in updatedict: + updatedict[(n, a, rpm_e, rpm_v, rpm_r)] = [] + updatedict[(n, a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r)) + + else: + # we could only have 1 arch in our rpmdb and 1 arch of pkg + # available - so we shouldn't have to worry about the lists, here + # we just need to find the arch of the installed pkg so we can + # check it's (e, v, r) + (rpm_a, rpm_e, rpm_v, rpm_r) = self.installdict[(n, None)][0] + if (n, None) in newpkgs: + for (a, e, v, r) in newpkgs[(n, None)]: + rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r)) + if rc > 0: + # this is definitely an update - put it in the dict + if (n, rpm_a, rpm_e, rpm_v, rpm_r) not in updatedict: + updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)] = [] + updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r)) + + + # 
complex cases + + # we're multilib/biarch + # we need to check the name.arch in two different trees + # one for the multiarch itself and one for the compat arch + # ie: x86_64 and athlon(i686-i386) - we don't want to descend + # x86_64->i686 + # however, we do want to descend x86_64->noarch, sadly. + + archlists = [] + if self._is_multilib: + if self.myarch in rpm5utils.arch.multilibArches: + biarches = [self.myarch] + else: + biarches = [self.myarch, rpm5utils.arch.arches[self.myarch]] + biarches.append('noarch') + + multicompat = self._multilib_compat_arches[0] + multiarchlist = rpm5utils.arch.getArchList(multicompat) + archlists = [ set(biarches), set(multiarchlist) ] + # archlists = [ biarches, multiarchlist ] + else: + archlists = [ set(archlist) ] + # archlists = [ archlist ] + + for n in complexupdate: + for thisarchlist in archlists: + # we need to get the highest version and the archs that have it + # of the installed pkgs + tmplist = [] + for (a, e, v, r) in self.installdict[(n, None)]: + tmplist.append((n, a, e, v, r)) + + highestinstalledpkgs = self.returnHighestVerFromAllArchsByName(n, + thisarchlist, tmplist) + hipdict = self.makeNADict(highestinstalledpkgs, 0) + + + if n in self.exactarchlist: + tmplist = [] + for (a, e, v, r) in newpkgs[(n, None)]: + tmplist.append((n, a, e, v, r)) + highestavailablepkgs = self.returnHighestVerFromAllArchsByName(n, + thisarchlist, tmplist) + + hapdict = self.makeNADict(highestavailablepkgs, 0) + + for (n, a) in hipdict: + if (n, a) in hapdict: + self.debugprint('processing %s.%s' % (n, a)) + # we've got a match - get our versions and compare + (rpm_e, rpm_v, rpm_r) = hipdict[(n, a)][0] # only ever going to be first one + (e, v, r) = hapdict[(n, a)][0] # there can be only one + rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r)) + if rc > 0: + # this is definitely an update - put it in the dict + if (n, a, rpm_e, rpm_v, rpm_r) not in updatedict: + updatedict[(n, a, rpm_e, rpm_v, rpm_r)] = [] + updatedict[(n, a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r)) + else: + self.debugprint('processing %s' % n) + # this is where we have to have an arch contest if there + # is more than one arch updating with the highest ver + instarchs = [] + for (n,a) in hipdict: + instarchs.append(a) + + rpm_a = rpm5utils.arch.getBestArchFromList(instarchs, myarch=self.myarch) + if rpm_a is None: + continue + + tmplist = [] + for (a, e, v, r) in newpkgs[(n, None)]: + tmplist.append((n, a, e, v, r)) + highestavailablepkgs = self.returnHighestVerFromAllArchsByName(n, + thisarchlist, tmplist) + + hapdict = self.makeNADict(highestavailablepkgs, 0) + availarchs = [] + for (n,a) in hapdict: + availarchs.append(a) + a = rpm5utils.arch.getBestArchFromList(availarchs, myarch=self.myarch) + if a is None: + continue + + (rpm_e, rpm_v, rpm_r) = hipdict[(n, rpm_a)][0] # there can be just one + (e, v, r) = hapdict[(n, a)][0] # just one, I'm sure, I swear! 
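The arch contest above delegates to rpm5utils.arch.getBestArchFromList; assuming the yum-derived semantics carried over into this copy, it picks the candidate closest to myarch, roughly like this:

    from rpm5utils.arch import getBestArchFromList

    # i686 is the nearer match for an i686 host
    print getBestArchFromList(['i386', 'i686'], myarch='i686')   # -> 'i686'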
+ rc = rpm5utils.miscutils.compareEVR((e, v, r), (rpm_e, rpm_v, rpm_r)) + if rc > 0: + # this is definitely an update - put it in the dict + if (n, rpm_a, rpm_e, rpm_v, rpm_r) not in updatedict: + updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)] = [] + updatedict[(n, rpm_a, rpm_e, rpm_v, rpm_r)].append((n, a, e, v, r)) + + self.updatesdict = updatedict + self.makeUpdatingDict() + + def makeUpdatingDict(self): + """creates a dict of available packages -> [installed package], this + is to make it easier to look up what package will be updating what + in the rpmdb""" + self.updating_dict = {} + for old in self.updatesdict: + for new in self.updatesdict[old]: + if new not in self.updating_dict: + self.updating_dict[new] = [] + self.updating_dict[new].append(old) + + def reduceListByNameArch(self, pkglist, name=None, arch=None): + """returns a set of pkg naevr tuples reduced based on name or arch""" + returnlist = [] + + if name or arch: + for (n, a, e, v, r) in pkglist: + if name: + if name == n: + returnlist.append((n, a, e, v, r)) + continue + if arch: + if arch == a: + returnlist.append((n, a, e, v, r)) + continue + else: + returnlist = pkglist + + return returnlist + + + def getUpdatesTuples(self, name=None, arch=None): + """returns updates for packages in a list of tuples of: + (updating naevr, installed naevr)""" + returnlist = [] + for oldtup in self.updatesdict: + for newtup in self.updatesdict[oldtup]: + returnlist.append((newtup, oldtup)) + + # self.reduceListByNameArch() for double tuples + tmplist = [] + if name: + for ((n, a, e, v, r), oldtup) in returnlist: + if name != n: + tmplist.append(((n, a, e, v, r), oldtup)) + if arch: + for ((n, a, e, v, r), oldtup) in returnlist: + if arch != a: + tmplist.append(((n, a, e, v, r), oldtup)) + + for item in tmplist: + try: + returnlist.remove(item) + except ValueError: + pass + + return returnlist + + def getUpdatesList(self, name=None, arch=None): + """returns updating packages in a list of (naevr) tuples""" + returnlist = [] + + for oldtup in self.updatesdict: + for newtup in self.updatesdict[oldtup]: + returnlist.append(newtup) + + returnlist = self.reduceListByNameArch(returnlist, name, arch) + + return returnlist + + # NOTE: This returns obsoleters and obsoletees, but narrows based on + # _obsoletees_ (unlike getObsoletesList). Look at getObsoletersTuples + def getObsoletesTuples(self, newest=0, name=None, arch=None): + """returns obsoletes for packages in a list of tuples of: + (obsoleting naevr, installed naevr). You can specify name and/or + arch of the installed package to narrow the results. + You can also specify newest=1 to get the set of newest pkgs (name, arch) + sorted, that obsolete something""" + + tmplist = [] + obslist = self.obsoletes.keys() + if newest: + obslist = self._reduceListNewestByNameArch(obslist) + + for obstup in obslist: + for rpmtup in self.obsoletes[obstup]: + tmplist.append((obstup, rpmtup)) + + # self.reduceListByNameArch() for double tuples + returnlist = [] + if name or arch: + for (obstup, (n, a, e, v, r)) in tmplist: + if name: + if name == n: + returnlist.append((obstup, (n, a, e, v, r))) + continue + if arch: + if arch == a: + returnlist.append((obstup, (n, a, e, v, r))) + continue + else: + returnlist = tmplist + + return returnlist + + # NOTE: This returns obsoleters and obsoletees, but narrows based on + # _obsoleters_ (like getObsoletesList). 
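The query helpers defined here report what doUpdates computed; repeating the noarch sketch from above as a self-contained snippet:

    import rpm5utils.updates

    up = rpm5utils.updates.Updates([('foo', 'noarch', '0', '1.0', '1')],
                                   [('foo', 'noarch', '0', '1.1', '1')])
    up.doUpdates()
    print up.getUpdatesList(name='foo')
    # -> [('foo', 'noarch', '0', '1.1', '1')]
    print up.getUpdatesTuples()
    # -> [(('foo', 'noarch', '0', '1.1', '1'), ('foo', 'noarch', '0', '1.0', '1'))]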
+ def getObsoletersTuples(self, newest=0, name=None, arch=None): + """returns obsoletes for packages in a list of tuples of: + (obsoleting naevr, installed naevr). You can specify name and/or + arch of the obsoleting package to narrow the results. + You can also specify newest=1 to get the set of newest pkgs (name, arch) + sorted, that obsolete something""" + + tmplist = [] + obslist = self.obsoletes.keys() + if newest: + obslist = self._reduceListNewestByNameArch(obslist) + + for obstup in obslist: + for rpmtup in self.obsoletes[obstup]: + tmplist.append((obstup, rpmtup)) + + # self.reduceListByNameArch() for double tuples + returnlist = [] + if name or arch: + for ((n, a, e, v, r), insttup) in tmplist: + if name: + if name == n: + returnlist.append(((n, a, e, v, r), insttup)) + continue + if arch: + if arch == a: + returnlist.append(((n, a, e, v, r), insttup)) + continue + else: + returnlist = tmplist + + return returnlist + + # NOTE: This returns _obsoleters_, and narrows based on that (unlike + # getObsoletesTuples, but like getObsoletersTuples) + def getObsoletesList(self, newest=0, name=None, arch=None): + """returns obsoleting packages in a list of naevr tuples of just the + packages that obsolete something that is installed. You can specify + name and/or arch of the obsoleting packaging to narrow the results. + You can also specify newest=1 to get the set of newest pkgs (name, arch) + sorted, that obsolete something""" + + tmplist = self.obsoletes.keys() + if newest: + tmplist = self._reduceListNewestByNameArch(tmplist) + + returnlist = self.reduceListByNameArch(tmplist, name, arch) + + return returnlist + + def getObsoletedList(self, newest=0, name=None): + """returns a list of pkgtuples obsoleting the package in name""" + returnlist = [] + for new in self.obsoletes: + for obstup in self.obsoletes[new]: + (n, a, e, v, r) = obstup + if n == name: + returnlist.append(new) + continue + return returnlist + + + + def getOthersList(self, name=None, arch=None): + """returns a naevr tuple of the packages that are neither installed + nor an update - this may include something that obsoletes an installed + package""" + updates = {} + inst = {} + tmplist = [] + + for pkgtup in self.getUpdatesList(): + updates[pkgtup] = 1 + + for pkgtup in self.installed: + inst[pkgtup] = 1 + + for pkgtup in self.available: + if pkgtup not in updates and pkgtup not in inst: + tmplist.append(pkgtup) + + returnlist = self.reduceListByNameArch(tmplist, name, arch) + + return returnlist + + + + def _reduceListNewestByNameArch(self, tuplelist): + """return list of newest packages based on name, arch matching + this means(in name.arch form): foo.i386 and foo.noarch are not + compared to each other for highest version only foo.i386 and + foo.i386 will be compared""" + highdict = {} + done = False + for pkgtup in tuplelist: + (n, a, e, v, r) = pkgtup + if (n, a) not in highdict: + highdict[(n, a)] = pkgtup + else: + pkgtup2 = highdict[(n, a)] + done = True + (n2, a2, e2, v2, r2) = pkgtup2 + rc = rpm5utils.miscutils.compareEVR((e,v,r), (e2, v2, r2)) + if rc > 0: + highdict[(n, a)] = pkgtup + + if not done: + return tuplelist + + return highdict.values() + + +# def getProblems(self): +# """return list of problems: +# - Packages that are both obsoleted and updated. +# - Packages that have multiple obsoletes. 
+# - Packages that _still_ have multiple updates +# """ + + diff --git a/urpm-tools/rpm5utils/urpmgraphs/__init__.py b/urpm-tools/rpm5utils/urpmgraphs/__init__.py new file mode 100644 index 0000000..095491a --- /dev/null +++ b/urpm-tools/rpm5utils/urpmgraphs/__init__.py @@ -0,0 +1,66 @@ +""" +NetworkX +======== + + NetworkX (NX) is a Python package for the creation, manipulation, and + study of the structure, dynamics, and functions of complex networks. + + https://networkx.lanl.gov/ + +Using +----- + + Just write in Python + + >>> import networkx as nx + >>> G=nx.Graph() + >>> G.add_edge(1,2) + >>> G.add_node("spam") + >>> print(G.nodes()) + [1, 2, 'spam'] + >>> print(G.edges()) + [(1, 2)] +""" +# Copyright (C) 2004-2010 by +# Aric Hagberg +# Dan Schult +# Pieter Swart +# All rights reserved. +# BSD license. +# +# Add platform dependent shared library path to sys.path +# + +from __future__ import absolute_import + +import sys +if sys.version_info[:2] < (2, 6): + m = "Python version 2.6 or later is required for NetworkX (%d.%d detected)." + raise ImportError(m % sys.version_info[:2]) +del sys + +# Release data + +# these packages work with Python >= 2.6 +from rpm5utils.urpmgraphs.exception import * +import rpm5utils.urpmgraphs.classes +from rpm5utils.urpmgraphs.classes import * +import rpm5utils.urpmgraphs.convert +from rpm5utils.urpmgraphs.convert import * +#import urpmgraphs.relabel +#from urpmgraphs.relabel import * +#import urpmgraphs.generators +#from urpmgraphs.generators import * +#from urpmgraphs.readwrite import * +#import urpmgraphs.readwrite +#Need to test with SciPy, when available +import rpm5utils.urpmgraphs.algorithms +from rpm5utils.urpmgraphs.algorithms import * +#import urpmgraphs.linalg +#from urpmgraphs.linalg import * +#from urpmgraphs.tests.test import run as test +#import urpmgraphs.utils + +#import urpmgraphs.drawing +#from urpmgraphs.drawing import * + diff --git a/urpm-tools/rpm5utils/urpmgraphs/algorithms/__init__.py b/urpm-tools/rpm5utils/urpmgraphs/algorithms/__init__.py new file mode 100644 index 0000000..9e6d007 --- /dev/null +++ b/urpm-tools/rpm5utils/urpmgraphs/algorithms/__init__.py @@ -0,0 +1,2 @@ +from rpm5utils.urpmgraphs.algorithms.components import * +from rpm5utils.urpmgraphs.algorithms.cycles import * diff --git a/urpm-tools/rpm5utils/urpmgraphs/algorithms/components/__init__.py b/urpm-tools/rpm5utils/urpmgraphs/algorithms/components/__init__.py new file mode 100644 index 0000000..ae16a0c --- /dev/null +++ b/urpm-tools/rpm5utils/urpmgraphs/algorithms/components/__init__.py @@ -0,0 +1,2 @@ +#from urpmgraphs.algorithms.components.connected import * +from rpm5utils.urpmgraphs.algorithms.components.strongly_connected import * diff --git a/urpm-tools/rpm5utils/urpmgraphs/algorithms/components/strongly_connected.py b/urpm-tools/rpm5utils/urpmgraphs/algorithms/components/strongly_connected.py new file mode 100644 index 0000000..c9db4b6 --- /dev/null +++ b/urpm-tools/rpm5utils/urpmgraphs/algorithms/components/strongly_connected.py @@ -0,0 +1,321 @@ +# -*- coding: utf-8 -*- +""" +Strongly connected components. +""" +__authors__ = "\n".join(['Eben Kenah', + 'Aric Hagberg (hagberg@lanl.gov)' + 'Christopher Ellison']) +# Copyright (C) 2004-2010 by +# Aric Hagberg +# Dan Schult +# Pieter Swart +# All rights reserved. +# BSD license. 
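A quick sketch of the main entry point defined just below, assuming the urpmgraphs package imports as committed; node names are arbitrary:

    from rpm5utils import urpmgraphs

    G = urpmgraphs.DiGraph()
    G.add_edges_from([('a', 'b'), ('b', 'a'), ('b', 'c')])
    print urpmgraphs.strongly_connected_components(G)
    # -> [['a', 'b'], ['c']]  (largest component first; order inside a component may vary)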
+ +__all__ = ['number_strongly_connected_components', + 'strongly_connected_components', + 'strongly_connected_component_subgraphs', + 'is_strongly_connected', + 'strongly_connected_components_recursive', + 'kosaraju_strongly_connected_components', + 'condensation', + ] + +import rpm5utils as nx + +def strongly_connected_components(G): + """Return nodes in strongly connected components of graph. + + Parameters + ---------- + G : NetworkX Graph + An directed graph. + + Returns + ------- + comp : list of lists + A list of nodes for each component of G. + The list is ordered from largest connected component to smallest. + + See Also + -------- + connected_components + + Notes + ----- + Uses Tarjan's algorithm with Nuutila's modifications. + Nonrecursive version of algorithm. + + References + ---------- + .. [1] Depth-first search and linear graph algorithms, R. Tarjan + SIAM Journal of Computing 1(2):146-160, (1972). + + .. [2] On finding the strongly connected components in a directed graph. + E. Nuutila and E. Soisalon-Soinen + Information Processing Letters 49(1): 9-14, (1994).. + """ + preorder={} + lowlink={} + scc_found={} + scc_queue = [] + scc_list=[] + i=0 # Preorder counter + for source in G: + if source not in scc_found: + queue=[source] + while queue: + v=queue[-1] + if v not in preorder: + i=i+1 + preorder[v]=i + done=1 + v_nbrs=G[v] + for w in v_nbrs: + if w not in preorder: + queue.append(w) + done=0 + break + if done==1: + lowlink[v]=preorder[v] + for w in v_nbrs: + if w not in scc_found: + if preorder[w]>preorder[v]: + lowlink[v]=min([lowlink[v],lowlink[w]]) + else: + lowlink[v]=min([lowlink[v],preorder[w]]) + queue.pop() + if lowlink[v]==preorder[v]: + scc_found[v]=True + scc=[v] + while scc_queue and preorder[scc_queue[-1]]>preorder[v]: + k=scc_queue.pop() + scc_found[k]=True + scc.append(k) + scc_list.append(scc) + else: + scc_queue.append(v) + scc_list.sort(key=len,reverse=True) + return scc_list + + +def kosaraju_strongly_connected_components(G,source=None): + """Return nodes in strongly connected components of graph. + + Parameters + ---------- + G : NetworkX Graph + An directed graph. + + Returns + ------- + comp : list of lists + A list of nodes for each component of G. + The list is ordered from largest connected component to smallest. + + See Also + -------- + connected_components + + Notes + ----- + Uses Kosaraju's algorithm. + """ + components=[] + G=G.reverse(copy=False) + post=list(nx.dfs_postorder_nodes(G,source=source)) + G=G.reverse(copy=False) + seen={} + while post: + r=post.pop() + if r in seen: + continue + c=nx.dfs_preorder_nodes(G,r) + new=[v for v in c if v not in seen] + seen.update([(u,True) for u in new]) + components.append(new) + components.sort(key=len,reverse=True) + return components + + +def strongly_connected_components_recursive(G): + """Return nodes in strongly connected components of graph. + + Recursive version of algorithm. + + Parameters + ---------- + G : NetworkX Graph + An directed graph. + + Returns + ------- + comp : list of lists + A list of nodes for each component of G. + The list is ordered from largest connected component to smallest. + + See Also + -------- + connected_components + + Notes + ----- + Uses Tarjan's algorithm with Nuutila's modifications. + + References + ---------- + .. [1] Depth-first search and linear graph algorithms, R. Tarjan + SIAM Journal of Computing 1(2):146-160, (1972). + + .. [2] On finding the strongly connected components in a directed graph. + E. Nuutila and E. 
Soisalon-Soinen + Information Processing Letters 49(1): 9-14, (1994).. + """ + def visit(v,cnt): + root[v]=cnt + visited[v]=cnt + cnt+=1 + stack.append(v) + for w in G[v]: + if w not in visited: visit(w,cnt) + if w not in component: + root[v]=min(root[v],root[w]) + if root[v]==visited[v]: + component[v]=root[v] + tmpc=[v] # hold nodes in this component + while stack[-1]!=v: + w=stack.pop() + component[w]=root[v] + tmpc.append(w) + stack.remove(v) + scc.append(tmpc) # add to scc list + scc=[] + visited={} + component={} + root={} + cnt=0 + stack=[] + for source in G: + if source not in visited: + visit(source,cnt) + + scc.sort(key=len,reverse=True) + return scc + + +def strongly_connected_component_subgraphs(G): + """Return strongly connected components as subgraphs. + + Parameters + ---------- + G : NetworkX Graph + A graph. + + Returns + ------- + glist : list + A list of graphs, one for each strongly connected component of G. + + See Also + -------- + connected_component_subgraphs + + Notes + ----- + The list is ordered from largest strongly connected component to smallest. + """ + cc=strongly_connected_components(G) + graph_list=[] + for c in cc: + graph_list.append(G.subgraph(c)) + return graph_list + + +def number_strongly_connected_components(G): + """Return number of strongly connected components in graph. + + Parameters + ---------- + G : NetworkX graph + A directed graph. + + Returns + ------- + n : integer + Number of strongly connected components + + See Also + -------- + connected_components + + Notes + ----- + For directed graphs only. + """ + return len(strongly_connected_components(G)) + + +def is_strongly_connected(G): + """Test directed graph for strong connectivity. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + connected : bool + True if the graph is strongly connected, False otherwise. + + See Also + -------- + strongly_connected_components + + Notes + ----- + For directed graphs only. + """ + if not G.is_directed(): + raise nx.NetworkXError("""Not allowed for undirected graph G. + See is_connected() for connectivity test.""") + + if len(G)==0: + raise nx.NetworkXPointlessConcept( + """Connectivity is undefined for the null graph.""") + + return len(strongly_connected_components(G)[0])==len(G) + + +def condensation(G): + """Returns the condensation of G. + + The condensation of G is the graph with each of the strongly connected + components contracted into a single node. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + cG : NetworkX DiGraph + The condensation of G. + + Notes + ----- + After contracting all strongly connected components to a single node, + the resulting graph is a directed acyclic graph. + + """ + scc = strongly_connected_components(G) + mapping = dict([(n,tuple(sorted(c))) for c in scc for n in c]) + cG = nx.DiGraph() + for u in mapping: + cG.add_node(mapping[u]) + for _,v,d in G.edges_iter(u, data=True): + if v not in mapping[u]: + cG.add_edge(mapping[u], mapping[v]) + return cG + diff --git a/urpm-tools/rpm5utils/urpmgraphs/algorithms/cycles.py b/urpm-tools/rpm5utils/urpmgraphs/algorithms/cycles.py new file mode 100644 index 0000000..1abc168 --- /dev/null +++ b/urpm-tools/rpm5utils/urpmgraphs/algorithms/cycles.py @@ -0,0 +1,122 @@ +""" +======================== +Cycle finding algorithms +======================== + +""" +# Copyright (C) 2010 by +# Aric Hagberg +# Dan Schult +# Pieter Swart +# All rights reserved. +# BSD license. 
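Before the cycle finder below, a small check of the component helpers defined above, under the same import assumptions as the previous sketch:

    from rpm5utils import urpmgraphs

    G = urpmgraphs.DiGraph()
    G.add_edges_from([('a', 'b'), ('b', 'a'), ('b', 'c')])
    print urpmgraphs.number_strongly_connected_components(G)   # -> 2
    print urpmgraphs.is_strongly_connected(G)                  # -> False ('c' has no path back)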
+import rpm5utils as nx +from collections import defaultdict + +__all__ = ['simple_cycles'] + +__author__ = "\n".join(['Jon Olav Vik ', + 'Aric Hagberg ']) + + +def simple_cycles(G): + """Find simple cycles (elementary circuits) of a directed graph. + + An simple cycle, or elementary circuit, is a closed path where no + node appears twice, except that the first and last node are the same. + Two elementary circuits are distinct if they are not cyclic permutations + of each other. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Returns + ------- + A list of circuits, where each circuit is a list of nodes, with the first + and last node being the same. + + Example: + >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]) + >>> nx.simple_cycles(G) + [[0, 0], [0, 1, 2, 0], [0, 2, 0], [1, 2, 1], [2, 2]] + + See Also + -------- + cycle_basis (for undirected graphs) + + Notes + ----- + The implementation follows pp. 79-80 in [1]_. + + The time complexity is O((n+e)(c+1)) for n nodes, e edges and c + elementary circuits. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + http://dx.doi.org/10.1137/0204007 + + See Also + -------- + cycle_basis + """ + # Jon Olav Vik, 2010-08-09 + def _unblock(thisnode): + """Recursively unblock and remove nodes from B[thisnode].""" + if blocked[thisnode]: + blocked[thisnode] = False + while B[thisnode]: + _unblock(B[thisnode].pop()) + + def circuit(thisnode, startnode, component): + closed = False # set to True if elementary path is closed + path.append(thisnode) + blocked[thisnode] = True + for nextnode in component[thisnode]: # direct successors of thisnode + if nextnode == startnode: + result.append(path + [startnode]) + closed = True + elif not blocked[nextnode]: + if circuit(nextnode, startnode, component): + closed = True + if closed: + _unblock(thisnode) + else: + for nextnode in component[thisnode]: + if thisnode not in B[nextnode]: # TODO: use set for speedup? + B[nextnode].append(thisnode) + path.pop() # remove thisnode from path + return closed + + if not G.is_directed(): + raise nx.NetworkXError(\ + "simple_cycles() not implemented for undirected graphs.") + path = [] # stack of nodes in current path + blocked = defaultdict(bool) # vertex: blocked from search? + B = defaultdict(list) # graph portions that yield no elementary circuit + result = [] # list to accumulate the circuits found + # Johnson's algorithm requires some ordering of the nodes. + # They might not be sortable so we assign an arbitrary ordering. 
+ ordering=dict(zip(G,range(len(G)))) + for s in ordering: + # Build the subgraph induced by s and following nodes in the ordering + subgraph = G.subgraph(node for node in G + if ordering[node] >= ordering[s]) + # Find the strongly connected component in the subgraph + # that contains the least node according to the ordering + strongcomp = nx.strongly_connected_components(subgraph) + mincomp=min(strongcomp, + key=lambda nodes: min(ordering[n] for n in nodes)) + component = G.subgraph(mincomp) + if component: + # smallest node in the component according to the ordering + startnode = min(component,key=ordering.__getitem__) + for node in component: + blocked[node] = False + B[node][:] = [] + dummy=circuit(startnode, startnode, component) + + return result diff --git a/urpm-tools/rpm5utils/urpmgraphs/classes/__init__.py b/urpm-tools/rpm5utils/urpmgraphs/classes/__init__.py new file mode 100644 index 0000000..f43dc33 --- /dev/null +++ b/urpm-tools/rpm5utils/urpmgraphs/classes/__init__.py @@ -0,0 +1,3 @@ +from rpm5utils.urpmgraphs.classes.graph import Graph +from rpm5utils.urpmgraphs.classes.digraph import DiGraph +from rpm5utils.urpmgraphs.classes.function import * diff --git a/urpm-tools/rpm5utils/urpmgraphs/classes/digraph.py b/urpm-tools/rpm5utils/urpmgraphs/classes/digraph.py new file mode 100644 index 0000000..a50c756 --- /dev/null +++ b/urpm-tools/rpm5utils/urpmgraphs/classes/digraph.py @@ -0,0 +1,996 @@ +"""Base class for directed graphs.""" +# Copyright (C) 2004-2011 by +# Aric Hagberg +# Dan Schult +# Pieter Swart +# All rights reserved. +# BSD license. +from copy import deepcopy +import rpm5utils as nx +from rpm5utils.urpmgraphs.classes.graph import Graph +from rpm5utils.urpmgraphs.exception import NetworkXError +#import urpmgraphs.convert as convert +__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)', + 'Pieter Swart (swart@lanl.gov)', + 'Dan Schult(dschult@colgate.edu)']) + +class DiGraph(Graph): + """ + Base class for directed graphs. + + A DiGraph stores nodes and edges with optional data, or attributes. + + DiGraphs hold directed edges. Self loops are allowed but multiple + (parallel) edges are not. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + data : input graph + Data to initialize graph. If data=None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a NumPy matrix + or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + MultiGraph + MultiDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.DiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2,3]) + >>> G.add_nodes_from(range(100,110)) + >>> H=nx.Graph() + >>> H.add_path([0,1,2,3,4,5,6,7,8,9]) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. 
+ + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> G.add_edge(1, 2) + + a list of edges, + + >>> G.add_edges_from([(1,2),(1,3)]) + + or a collection of edges, + + >>> G.add_edges_from(H.edges()) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. There are no errors when adding + nodes or edges that already exist. + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.DiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.node + + >>> G.add_node(1, time='5pm') + >>> G.add_nodes_from([3], time='2pm') + >>> G.node[1] + {'time': '5pm'} + >>> G.node[1]['room'] = 714 + >>> G.nodes(data=True) + [(1, {'room': 714, 'time': '5pm'}), (3, {'time': '2pm'})] + + Warning: adding a node to G.node does not add it to the graph. + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edge. + + >>> G.add_edge(1, 2, weight=4.7 ) + >>> G.add_edges_from([(3,4),(4,5)], color='red') + >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})]) + >>> G[1][2]['weight'] = 4.7 + >>> G.edge[1][2]['weight'] = 4 + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n<3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + >>> G[1] # adjacency dict keyed by neighbor to edge attributes + ... # Note: you should not change this dict manually! + {2: {'color': 'blue', 'weight': 4}} + + The fastest way to traverse all edges of a graph is via + adjacency_iter(), but the edges() method is often more convenient. + + >>> for n,nbrsdict in G.adjacency_iter(): + ... for nbr,eattr in nbrsdict.items(): + ... if 'weight' in eattr: + ... (n,nbr,eattr['weight']) + (1, 2, 4) + (2, 3, 8) + >>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ] + [(1, 2, 4), (2, 3, 8)] + + **Reporting:** + + Simple graph information is obtained using methods. + Iterator versions of many reporting methods exist for efficiency. + Methods exist for reporting nodes(), edges(), neighbors() and degree() + as well as the number of nodes and edges. + + For details on these and other miscellaneous methods, see below. + """ + def __init__(self, data=None, **attr): + """Initialize a graph with edges, name, graph attributes. + + Parameters + ---------- + data : input graph + Data to initialize graph. If data=None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a NumPy matrix + or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. + name : string, optional (default='') + An optional name for the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. 
+ + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name='my graph') + >>> e = [(1,2),(2,3),(3,4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G=nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + self.graph = {} # dictionary for graph attributes + self.node = {} # dictionary for node attributes + # We store two adjacency lists: + # the predecessors of node n are stored in the dict self.pred + # the successors of node n are stored in the dict self.succ=self.adj + self.adj = {} # empty adjacency dictionary + self.pred = {} # predecessor + self.succ = self.adj # successor + + # attempt to load graph with data + if data is not None: + convert.to_networkx_graph(data,create_using=self) + # load graph attributes (must be after convert) + self.graph.update(attr) + self.edge=self.adj + + + def add_node(self, n, attr_dict=None, **attr): + """Add a single node n and update node attributes. + + Parameters + ---------- + n : node + A node can be any hashable Python object except None. + attr_dict : dictionary, optional (default= no attributes) + Dictionary of node attributes. Key/value pairs will + update existing data associated with the node. + attr : keyword arguments, optional + Set or change attributes using key=value. + + See Also + -------- + add_nodes_from + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_node(1) + >>> G.add_node('Hello') + >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) + >>> G.add_node(K3) + >>> G.number_of_nodes() + 3 + + Use keywords set/change node attributes: + + >>> G.add_node(1,size=10) + >>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649)) + + Notes + ----- + A hashable object is one that can be used as a key in a Python + dictionary. This includes strings, numbers, tuples of strings + and numbers, etc. + + On many platforms hashable items also include mutables such as + NetworkX Graphs, though one should be careful that the hash + doesn't change on mutables. + """ + # set up attribute dict + if attr_dict is None: + attr_dict=attr + else: + try: + attr_dict.update(attr) + except AttributeError: + raise NetworkXError(\ + "The attr_dict argument must be a dictionary.") + if n not in self.succ: + self.succ[n] = {} + self.pred[n] = {} + self.node[n] = attr_dict + else: # update attr even if node already exists + self.node[n].update(attr_dict) + + + def add_nodes_from(self, nodes, **attr): + """Add multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). + OR + A container of (node, attribute dict) tuples. + Node attributes are updated using the attribute dict. + attr : keyword arguments, optional (default= no attributes) + Update attributes for all nodes in nodes. + Node attributes specified in nodes as a tuple + take precedence over attributes specified generally. + + See Also + -------- + add_node + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_nodes_from('Hello') + >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) + >>> G.add_nodes_from(K3) + >>> sorted(G.nodes(),key=str) + [0, 1, 2, 'H', 'e', 'l', 'o'] + + Use keywords to update specific node attributes for every node. + + >>> G.add_nodes_from([1,2], size=10) + >>> G.add_nodes_from([3,4], weight=0.4) + + Use (node, attrdict) tuples to update attributes for specific + nodes. 
+ + >>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})]) + >>> G.node[1]['size'] + 11 + >>> H = nx.Graph() + >>> H.add_nodes_from(G.nodes(data=True)) + >>> H.node[1]['size'] + 11 + + """ + for n in nodes: + try: + newnode=n not in self.succ + except TypeError: + nn,ndict = n + if nn not in self.succ: + self.succ[nn] = {} + self.pred[nn] = {} + newdict = attr.copy() + newdict.update(ndict) + self.node[nn] = newdict + else: + olddict = self.node[nn] + olddict.update(attr) + olddict.update(ndict) + continue + if newnode: + self.succ[n] = {} + self.pred[n] = {} + self.node[n] = attr.copy() + else: + self.node[n].update(attr) + + + def add_edge(self, u, v, attr_dict=None, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by providing + a dictionary with key/value pairs. See examples below. + + Parameters + ---------- + u,v : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + attr_dict : dictionary, optional (default= no attributes) + Dictionary of edge attributes. Key/value pairs will + update existing data associated with the edge. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + Adding an edge that already exists updates the edge data. + + NetworkX algorithms designed for weighted graphs use as + the edge weight a numerical value assigned to the keyword + 'weight'. + + Examples + -------- + The following all add the edge e=(1,2) to graph G: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = (1,2) + >>> G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container + + Associate data to edges using keywords: + + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + """ + # set up attribute dict + if attr_dict is None: + attr_dict=attr + else: + try: + attr_dict.update(attr) + except AttributeError: + raise NetworkXError(\ + "The attr_dict argument must be a dictionary.") + # add nodes + if u not in self.succ: + self.succ[u]={} + self.pred[u]={} + self.node[u] = {} + if v not in self.succ: + self.succ[v]={} + self.pred[v]={} + self.node[v] = {} + # add the edge + datadict=self.adj[u].get(v,{}) + datadict.update(attr_dict) + self.succ[u][v]=datadict + self.pred[v][u]=datadict + + + def has_successor(self, u, v): + """Return True if node u has successor v. + + This is true if graph has the edge u->v. + """ + return (u in self.succ and v in self.succ[u]) + + def has_predecessor(self, u, v): + """Return True if node u has predecessor v. + + This is true if graph has the edge u<-v. + """ + return (u in self.pred and v in self.pred[u]) + + def successors_iter(self,n): + """Return an iterator over successor nodes of n. + + neighbors_iter() and successors_iter() are the same. 
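+
+        For example (a minimal illustrative sketch; this doctest is not
+        part of the upstream docstring):
+
+        >>> G = nx.DiGraph()
+        >>> G.add_edge('a', 'b')
+        >>> list(G.successors_iter('a'))
+        ['b']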
+ """ + try: + return iter(self.succ[n].keys()) + except KeyError: + raise NetworkXError("The node %s is not in the digraph."%(n,)) + + def predecessors_iter(self,n): + """Return an iterator over predecessor nodes of n.""" + try: + return iter(self.pred[n].keys()) + except KeyError: + raise NetworkXError("The node %s is not in the digraph."%(n,)) + + def successors(self, n): + """Return a list of successor nodes of n. + + neighbors() and successors() are the same function. + """ + return list(self.successors_iter(n)) + + def predecessors(self, n): + """Return a list of predecessor nodes of n.""" + return list(self.predecessors_iter(n)) + + + # digraph definitions + neighbors = successors + neighbors_iter = successors_iter + + def edges_iter(self, nbunch=None, data=False): + """Return an iterator over the edges. + + Edges are returned as tuples with optional data + in the order (node, neighbor, data). + + Parameters + ---------- + nbunch : iterable container, optional (default= all nodes) + A container of nodes. The container will be iterated + through once. + data : bool, optional (default=False) + If True, return edge attribute dict in 3-tuple (u,v,data). + + Returns + ------- + edge_iter : iterator + An iterator of (u,v) or (u,v,d) tuples of edges. + + See Also + -------- + edges : return a list of edges + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> [e for e in G.edges_iter()] + [(0, 1), (1, 2), (2, 3)] + >>> list(G.edges_iter(data=True)) # default data is {} (empty dict) + [(0, 1, {}), (1, 2, {}), (2, 3, {})] + >>> list(G.edges_iter([0,2])) + [(0, 1), (2, 3)] + >>> list(G.edges_iter(0)) + [(0, 1)] + + """ + if nbunch is None: + nodes_nbrs=iter(self.adj.items()) + else: + nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) + if data: + for n,nbrs in nodes_nbrs: + for nbr,data in nbrs.items(): + yield (n,nbr,data) + else: + for n,nbrs in nodes_nbrs: + for nbr in nbrs: + yield (n,nbr) + + # alias out_edges to edges + out_edges_iter=edges_iter + out_edges=Graph.edges + + def in_edges_iter(self, nbunch=None, data=False): + """Return an iterator over the incoming edges. + + Parameters + ---------- + nbunch : iterable container, optional (default= all nodes) + A container of nodes. The container will be iterated + through once. + data : bool, optional (default=False) + If True, return edge attribute dict in 3-tuple (u,v,data). + + Returns + ------- + in_edge_iter : iterator + An iterator of (u,v) or (u,v,d) tuples of incoming edges. + + See Also + -------- + edges_iter : return an iterator of edges + """ + if nbunch is None: + nodes_nbrs=iter(self.pred.items()) + else: + nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch)) + if data: + for n,nbrs in nodes_nbrs: + for nbr,data in nbrs.items(): + yield (nbr,n,data) + else: + for n,nbrs in nodes_nbrs: + for nbr in nbrs: + yield (nbr,n) + + def in_edges(self, nbunch=None, data=False): + """Return a list of the incoming edges. + + See Also + -------- + edges : return a list of edges + """ + return list(self.in_edges_iter(nbunch, data)) + + def degree_iter(self, nbunch=None, weighted=False): + """Return an iterator for (node, degree). + + The node degree is the number of edges adjacent to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. 
+ weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd_iter : an iterator + The iterator returns two-tuples of (node, degree). + + See Also + -------- + degree, in_degree, out_degree, in_degree_iter, out_degree_iter + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph + >>> G.add_path([0,1,2,3]) + >>> list(G.degree_iter(0)) # node 0 with degree 1 + [(0, 1)] + >>> list(G.degree_iter([0,1])) + [(0, 1), (1, 2)] + + """ + if nbunch is None: + nodes_nbrs=zip(iter(self.succ.items()),iter(self.pred.items())) + else: + nodes_nbrs=zip( + ((n,self.succ[n]) for n in self.nbunch_iter(nbunch)), + ((n,self.pred[n]) for n in self.nbunch_iter(nbunch))) + + if weighted: + # edge weighted graph - degree is sum of edge weights + for (n,succ),(n2,pred) in nodes_nbrs: + yield (n, + sum((succ[nbr].get('weight',1) for nbr in succ))+ + sum((pred[nbr].get('weight',1) for nbr in pred))) + else: + for (n,succ),(n2,pred) in nodes_nbrs: + yield (n,len(succ)+len(pred)) + + + def in_degree_iter(self, nbunch=None, weighted=False): + """Return an iterator for (node, in-degree). + + The node in-degree is the number of edges pointing in to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd_iter : an iterator + The iterator returns two-tuples of (node, in-degree). + + See Also + -------- + degree, in_degree, out_degree, out_degree_iter + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_path([0,1,2,3]) + >>> list(G.in_degree_iter(0)) # node 0 with degree 0 + [(0, 0)] + >>> list(G.in_degree_iter([0,1])) + [(0, 0), (1, 1)] + + """ + if nbunch is None: + nodes_nbrs=iter(self.pred.items()) + else: + nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch)) + + if weighted: + # edge weighted graph - degree is sum of edge weights + for n,nbrs in nodes_nbrs: + yield (n, sum(data.get('weight',1) for data in nbrs.values())) + else: + for n,nbrs in nodes_nbrs: + yield (n,len(nbrs)) + + + def out_degree_iter(self, nbunch=None, weighted=False): + """Return an iterator for (node, out-degree). + + The node out-degree is the number of edges pointing out of the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd_iter : an iterator + The iterator returns two-tuples of (node, out-degree). + + See Also + -------- + degree, in_degree, out_degree, in_degree_iter + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_path([0,1,2,3]) + >>> list(G.out_degree_iter(0)) # node 0 with degree 1 + [(0, 1)] + >>> list(G.out_degree_iter([0,1])) + [(0, 1), (1, 1)] + + """ + if nbunch is None: + nodes_nbrs=iter(self.succ.items()) + else: + nodes_nbrs=((n,self.succ[n]) for n in self.nbunch_iter(nbunch)) + + if weighted: + # edge weighted graph - degree is sum of edge weights + for n,nbrs in nodes_nbrs: + yield (n, sum(data.get('weight',1) for data in nbrs.values())) + else: + for n,nbrs in nodes_nbrs: + yield (n,len(nbrs)) + + + def in_degree(self, nbunch=None, weighted=False): + """Return the in-degree of a node or nodes. 
+ + The node in-degree is the number of edges pointing in to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd : dictionary, or number + A dictionary with nodes as keys and in-degree as values or + a number if a single node is specified. + + See Also + -------- + degree, out_degree, in_degree_iter + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph + >>> G.add_path([0,1,2,3]) + >>> G.in_degree(0) + 0 + >>> G.in_degree([0,1]) + {0: 0, 1: 1} + >>> list(G.in_degree([0,1]).values()) + [0, 1] + """ + if nbunch in self: # return a single node + return next(self.in_degree_iter(nbunch,weighted=weighted))[1] + else: # return a dict + return dict(self.in_degree_iter(nbunch,weighted=weighted)) + + def out_degree(self, nbunch=None, weighted=False): + """Return the out-degree of a node or nodes. + + The node out-degree is the number of edges pointing out of the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd : dictionary, or number + A dictionary with nodes as keys and out-degree as values or + a number if a single node is specified. + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph + >>> G.add_path([0,1,2,3]) + >>> G.out_degree(0) + 1 + >>> G.out_degree([0,1]) + {0: 1, 1: 1} + >>> list(G.out_degree([0,1]).values()) + [1, 1] + + + """ + if nbunch in self: # return a single node + return next(self.out_degree_iter(nbunch,weighted=weighted))[1] + else: # return a dict + return dict(self.out_degree_iter(nbunch,weighted=weighted)) + + def clear(self): + """Remove all nodes and edges from the graph. + + This also removes the name, and all graph, node, and edge attributes. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.clear() + >>> G.nodes() + [] + >>> G.edges() + [] + + """ + self.succ.clear() + self.pred.clear() + self.node.clear() + self.graph.clear() + + + def is_multigraph(self): + """Return True if graph is a multigraph, False otherwise.""" + return False + + + def is_directed(self): + """Return True if graph is directed, False otherwise.""" + return True + + def to_directed(self): + """Return a directed copy of the graph. + + Returns + ------- + G : DiGraph + A deepcopy of the graph. + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=DiGraph(G) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, http://docs.python.org/library/copy.html. + + Examples + -------- + >>> G = nx.Graph() # or MultiGraph, etc + >>> G.add_path([0,1]) + >>> H = G.to_directed() + >>> H.edges() + [(0, 1), (1, 0)] + + If already directed, return a (deep) copy + + >>> G = nx.DiGraph() # or MultiDiGraph, etc + >>> G.add_path([0,1]) + >>> H = G.to_directed() + >>> H.edges() + [(0, 1)] + """ + return deepcopy(self) + + def to_undirected(self, reciprocal=False): + """Return an undirected representation of the digraph. 
+
+        Parameters
+        ----------
+        reciprocal : bool (optional)
+            If True only keep edges that appear in both directions
+            in the original digraph.
+
+        Returns
+        -------
+        G : Graph
+            An undirected graph with the same name and nodes and
+            with edge (u,v,data) if either (u,v,data) or (v,u,data)
+            is in the digraph.  If both edges exist in digraph and
+            their edge data is different, only one edge is created
+            with an arbitrary choice of which edge data to use.
+            You must check and correct for this manually if desired.
+
+        Notes
+        -----
+        If edges in both directions (u,v) and (v,u) exist in the
+        graph, attributes for the new undirected edge will be a combination of
+        the attributes of the directed edges.  The edge data is updated
+        in the (arbitrary) order that the edges are encountered.  For
+        more customized control of the edge attributes use add_edge().
+
+        This returns a "deepcopy" of the edge, node, and
+        graph attributes which attempts to completely copy
+        all of the data and references.
+
+        This is in contrast to the similar G=DiGraph(D) which returns a
+        shallow copy of the data.
+
+        See the Python copy module for more information on shallow
+        and deep copies, http://docs.python.org/library/copy.html.
+        """
+        H=Graph()
+        H.name=self.name
+        H.add_nodes_from(self)
+        if reciprocal is True:
+            H.add_edges_from( (u,v,deepcopy(d))
+                              for u,nbrs in self.adjacency_iter()
+                              for v,d in nbrs.items()
+                              if v in self.pred[u])
+        else:
+            H.add_edges_from( (u,v,deepcopy(d))
+                              for u,nbrs in self.adjacency_iter()
+                              for v,d in nbrs.items() )
+        H.graph=deepcopy(self.graph)
+        H.node=deepcopy(self.node)
+        return H
+
+
+    def reverse(self, copy=True):
+        """Return the reverse of the graph.
+
+        The reverse is a graph with the same nodes and edges
+        but with the directions of the edges reversed.
+
+        Parameters
+        ----------
+        copy : bool optional (default=True)
+            If True, return a new DiGraph holding the reversed edges.
+            If False, the reverse is created in place on the original
+            graph (this changes the original graph).
+        """
+        if copy:
+            H = self.__class__(name="Reverse of (%s)"%self.name)
+            H.pred=self.succ.copy()
+            H.adj=self.pred.copy()
+            H.succ=H.adj
+            H.graph=self.graph.copy()
+            H.node=self.node.copy()
+        else:
+            self.pred,self.succ=self.succ,self.pred
+            self.adj=self.succ
+            H=self
+        return H
+
+
+    def subgraph(self, nbunch):
+        """Return the subgraph induced on nodes in nbunch.
+
+        The induced subgraph of the graph contains the nodes in nbunch
+        and the edges between those nodes.
+
+        Parameters
+        ----------
+        nbunch : list, iterable
+            A container of nodes which will be iterated through once.
+
+        Returns
+        -------
+        G : Graph
+            A subgraph of the graph with the same edge attributes.
+
+        Notes
+        -----
+        The graph, edge or node attributes just point to the original graph.
+        So changes to the node or edge structure will not be reflected in
+        the original graph while changes to the attributes will.
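+
+        For example (a minimal illustrative sketch; this doctest is not
+        part of the upstream docstring), an attribute assigned through
+        the subgraph is visible in the original graph because the edge
+        data dictionaries are shared:
+
+        >>> G = nx.DiGraph()
+        >>> G.add_edge(0, 1)
+        >>> H = G.subgraph([0, 1])
+        >>> H[0][1]['capacity'] = 7
+        >>> G[0][1]['capacity']
+        7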
+
+        To create a subgraph with its own copy of the edge/node attributes use:
+        nx.Graph(G.subgraph(nbunch))
+
+        If edge attributes are containers, a deep copy can be obtained using:
+        G.subgraph(nbunch).copy()
+
+        For an in-place reduction of a graph to a subgraph you can remove nodes:
+        G.remove_nodes_from([n for n in G if n not in set(nbunch)])
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2,3])
+        >>> H = G.subgraph([0,1,2])
+        >>> H.edges()
+        [(0, 1), (1, 2)]
+        """
+        bunch = self.nbunch_iter(nbunch)
+        # create new graph and copy subgraph into it
+        H = self.__class__()
+        # namespace shortcuts for speed
+        H_succ=H.succ
+        H_pred=H.pred
+        self_succ=self.succ
+        # add nodes
+        for n in bunch:
+            H_succ[n]={}
+            H_pred[n]={}
+        # add edges
+        for u in H_succ:
+            Hnbrs=H_succ[u]
+            for v,datadict in self_succ[u].items():
+                if v in H_succ:
+                    # add both representations of edge: u-v and v-u
+                    Hnbrs[v]=datadict
+                    H_pred[v][u]=datadict
+        # copy node and attribute dictionaries
+        for n in H:
+            H.node[n]=self.node[n]
+        H.graph=self.graph
+        return H
diff --git a/urpm-tools/rpm5utils/urpmgraphs/classes/function.py b/urpm-tools/rpm5utils/urpmgraphs/classes/function.py
new file mode 100644
index 0000000..296653a
--- /dev/null
+++ b/urpm-tools/rpm5utils/urpmgraphs/classes/function.py
@@ -0,0 +1,375 @@
+"""
+Functional interface to graph methods and assorted utilities.
+
+"""
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+                            'Pieter Swart (swart@lanl.gov)',
+                            'Dan Schult(dschult@colgate.edu)'])
+#    Copyright (C) 2004-2010 by
+#    Aric Hagberg
+#    Dan Schult
+#    Pieter Swart
+#    All rights reserved.
+#    BSD license.
+#
+import rpm5utils as nx
+
+# functional style helpers
+
+
+__all__ = ['nodes', 'edges', 'degree', 'degree_histogram', 'neighbors',
+           'number_of_nodes', 'number_of_edges', 'density',
+           'nodes_iter', 'edges_iter', 'is_directed','info',
+           'freeze','is_frozen','subgraph','create_empty_copy',
+           'set_node_attributes','get_node_attributes',
+           'set_edge_attributes','get_edge_attributes']
+
+def nodes(G):
+    """Return a copy of the graph nodes in a list."""
+    return G.nodes()
+
+def nodes_iter(G):
+    """Return an iterator over the graph nodes."""
+    return G.nodes_iter()
+
+def edges(G,nbunch=None):
+    """Return list of edges adjacent to nodes in nbunch.
+
+    Return all edges if nbunch is unspecified or nbunch=None.
+
+    For digraphs, edges=out_edges
+    """
+    return G.edges(nbunch)
+
+def edges_iter(G,nbunch=None):
+    """Return iterator over edges adjacent to nodes in nbunch.
+
+    Return all edges if nbunch is unspecified or nbunch=None.
+
+    For digraphs, edges=out_edges
+    """
+    return G.edges_iter(nbunch)
+
+def degree(G,nbunch=None,weighted=False):
+    """Return degree of single node or of nbunch of nodes.
+    If nbunch is omitted, then return degrees of *all* nodes.
+    """
+    return G.degree(nbunch,weighted=weighted)
+
+def neighbors(G,n):
+    """Return a list of nodes connected to node n. """
+    return G.neighbors(n)
+
+def number_of_nodes(G):
+    """Return the number of nodes in the graph."""
+    return G.number_of_nodes()
+
+def number_of_edges(G):
+    """Return the number of edges in the graph. """
+    return G.number_of_edges()
+
+def density(G):
+    r"""Return the density of a graph.
+
+    The density for undirected graphs is
+
+    .. math::
+
+       d = \frac{2m}{n(n-1)},
+
+    and for directed graphs is
+
+    .. math::
+
+       d = \frac{m}{n(n-1)},
+
+    where `n` is the number of nodes and `m` is the number of edges in `G`.
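+
+    For instance (a minimal illustrative sketch; this doctest is not
+    part of the upstream docstring), a path on three nodes has two of
+    the three possible undirected edges:
+
+    >>> G = nx.Graph()
+    >>> G.add_edges_from([(0,1),(1,2)])
+    >>> density(G)
+    0.6666666666666666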
+
+    Notes
+    -----
+    The density is 0 for a graph without edges and 1.0 for a complete graph.
+
+    The density of multigraphs can be higher than 1.
+
+    """
+    n=number_of_nodes(G)
+    m=number_of_edges(G)
+    if m==0: # includes cases n==0 and n==1
+        d=0.0
+    else:
+        if G.is_directed():
+            d=m/float(n*(n-1))
+        else:
+            d= m*2.0/float(n*(n-1))
+    return d
+
+def degree_histogram(G):
+    """Return a list of the frequency of each degree value.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+       A graph
+
+    Returns
+    -------
+    hist : list
+       A list of frequencies of degrees.
+       The degree values are the index in the list.
+
+    Notes
+    -----
+    Note: the bins are width one, hence len(list) can be large
+    (Order(number_of_edges))
+    """
+    degseq=list(G.degree().values())
+    dmax=max(degseq)+1
+    freq= [ 0 for d in range(dmax) ]
+    for d in degseq:
+        freq[d] += 1
+    return freq
+
+def is_directed(G):
+    """ Return True if graph is directed."""
+    return G.is_directed()
+
+
+def freeze(G):
+    """Modify graph to prevent addition of nodes or edges.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    Examples
+    --------
+    >>> G=nx.Graph()
+    >>> G.add_path([0,1,2,3])
+    >>> G=nx.freeze(G)
+    >>> try:
+    ...    G.add_edge(4,5)
+    ... except nx.NetworkXError as e:
+    ...    print(str(e))
+    Frozen graph can't be modified
+
+    Notes
+    -----
+    This does not prevent modification of edge data.
+
+    To "unfreeze" a graph you must make a copy.
+
+    See Also
+    --------
+    is_frozen
+
+    """
+    def frozen(*args):
+        raise nx.NetworkXError("Frozen graph can't be modified")
+    G.add_node=frozen
+    G.add_nodes_from=frozen
+    G.remove_node=frozen
+    G.remove_nodes_from=frozen
+    G.add_edge=frozen
+    G.add_edges_from=frozen
+    G.remove_edge=frozen
+    G.remove_edges_from=frozen
+    G.clear=frozen
+    G.frozen=True
+    return G
+
+def is_frozen(G):
+    """Return True if graph is frozen.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    See Also
+    --------
+    freeze
+    """
+    try:
+        return G.frozen
+    except AttributeError:
+        return False
+
+def subgraph(G, nbunch):
+    """Return the subgraph induced on nodes in nbunch.
+
+    Parameters
+    ----------
+    G : graph
+       A NetworkX graph
+
+    nbunch : list, iterable
+       A container of nodes that will be iterated through once (thus
+       it should be an iterator or be iterable).  Each element of the
+       container should be a valid node type: any hashable type except
+       None.  If nbunch is None, return all edges data in the graph.
+       Nodes in nbunch that are not in the graph will be (quietly)
+       ignored.
+
+    Notes
+    -----
+    subgraph(G) calls G.subgraph()
+
+    """
+    return G.subgraph(nbunch)
+
+def create_empty_copy(G,with_nodes=True):
+    """Return a copy of the graph G with all of the edges removed.
+
+    Parameters
+    ----------
+    G : graph
+       A NetworkX graph
+
+    with_nodes : bool (default=True)
+       Include nodes.
+
+    Notes
+    -----
+    Graph, node, and edge data is not propagated to the new graph.
+    """
+    H=G.__class__()
+    if with_nodes:
+        H.add_nodes_from(G)
+    return H
+
+
+def info(G, n=None):
+    """Print short summary of information for the graph G or the node n.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+       A graph
+    n : node (any hashable)
+       A node in the graph G
+    """
+    info='' # append this all to a string
+    if n is None:
+        info+="Name: %s\n"%G.name
+        type_name = [type(G).__name__]
+        info+="Type: %s\n"%",".join(type_name)
+        info+="Number of nodes: %d\n"%G.number_of_nodes()
+        info+="Number of edges: %d\n"%G.number_of_edges()
+        nnodes=G.number_of_nodes()
+        if len(G) > 0:
+            if G.is_directed():
+                info+="Average in degree: %8.4f\n"%\
+                    (sum(G.in_degree().values())/float(nnodes))
+                info+="Average out degree: %8.4f"%\
+                    (sum(G.out_degree().values())/float(nnodes))
+            else:
+                s=sum(G.degree().values())
+                info+="Average degree: %8.4f"%\
+                    (float(s)/float(nnodes))
+
+    else:
+        if n not in G:
+            raise nx.NetworkXError("node %s not in graph"%(n,))
+        info+="Node %s has the following properties:\n"%n
+        info+="Degree: %d\n"%G.degree(n)
+        info+="Neighbors: "
+        info+=' '.join(str(nbr) for nbr in G.neighbors(n))
+    return info
+
+def set_node_attributes(G,name,attributes):
+    """Set node attributes from dictionary of nodes and values
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+
+    name : string
+       Attribute name
+
+    attributes: dict
+       Dictionary of attributes keyed by node.
+
+    Examples
+    --------
+    >>> G=nx.path_graph(3)
+    >>> bb=nx.betweenness_centrality(G)
+    >>> nx.set_node_attributes(G,'betweenness',bb)
+    >>> G.node[1]['betweenness']
+    1.0
+    """
+    for node,value in attributes.items():
+        G.node[node][name]=value
+
+def get_node_attributes(G,name):
+    """Get node attributes from graph
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+
+    name : string
+       Attribute name
+
+    Returns
+    -------
+    Dictionary of attributes keyed by node.
+
+    Examples
+    --------
+    >>> G=nx.Graph()
+    >>> G.add_nodes_from([1,2,3],color='red')
+    >>> color=nx.get_node_attributes(G,'color')
+    >>> color[1]
+    'red'
+    """
+    return dict( (n,d[name]) for n,d in G.node.items() if name in d)
+
+
+def set_edge_attributes(G,name,attributes):
+    """Set edge attributes from dictionary of edge tuples and values
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+
+    name : string
+       Attribute name
+
+    attributes: dict
+       Dictionary of attributes keyed by edge (tuple).
+
+    Examples
+    --------
+    >>> G=nx.path_graph(3)
+    >>> bb=nx.edge_betweenness_centrality(G)
+    >>> nx.set_edge_attributes(G,'betweenness',bb)
+    >>> G[1][2]['betweenness']
+    4.0
+    """
+    for (u,v),value in attributes.items():
+        G[u][v][name]=value
+
+def get_edge_attributes(G,name):
+    """Get edge attributes from graph
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+
+    name : string
+       Attribute name
+
+    Returns
+    -------
+    Dictionary of attributes keyed by edge (tuple).
+
+    Examples
+    --------
+    >>> G=nx.Graph()
+    >>> G.add_path([1,2,3],color='red')
+    >>> color=nx.get_edge_attributes(G,'color')
+    >>> color[(1,2)]
+    'red'
+    """
+    return dict( ((u,v),d[name]) for u,v,d in G.edges(data=True) if name in d)
diff --git a/urpm-tools/rpm5utils/urpmgraphs/classes/graph.py b/urpm-tools/rpm5utils/urpmgraphs/classes/graph.py
new file mode 100644
index 0000000..2e0a2d8
--- /dev/null
+++ b/urpm-tools/rpm5utils/urpmgraphs/classes/graph.py
@@ -0,0 +1,1804 @@
+"""Base class for undirected graphs.
+
+The Graph class allows any hashable object as a node
+and can associate key/value attribute pairs with each undirected edge.
+
+Self-loops are allowed but multiple edges are not (see MultiGraph).
+
+For directed graphs see DiGraph and MultiDiGraph.
+"""
+#    Copyright (C) 2004-2011 by
+#    Aric Hagberg
+#    Dan Schult
+#    Pieter Swart
+#    All rights reserved.
+#    BSD license.
+from copy import deepcopy
+import rpm5utils as nx
+# NetworkXError and convert are used below, so they must actually be imported
+from rpm5utils.urpmgraphs.exception import NetworkXError
+import rpm5utils.urpmgraphs.convert as convert
+
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+                            'Pieter Swart (swart@lanl.gov)',
+                            'Dan Schult(dschult@colgate.edu)'])
+
+class Graph(object):
+    """
+    Base class for undirected graphs.
+
+    A Graph stores nodes and edges with optional data, or attributes.
+
+    Graphs hold undirected edges.  Self loops are allowed but multiple
+    (parallel) edges are not.
+
+    Nodes can be arbitrary (hashable) Python objects with optional
+    key/value attributes.
+
+    Edges are represented as links between nodes with optional
+    key/value attributes.
+
+    Parameters
+    ----------
+    data : input graph
+        Data to initialize graph.  If data=None (default) an empty
+        graph is created.  The data can be an edge list, or any
+        NetworkX graph object.  If the corresponding optional Python
+        packages are installed the data can also be a NumPy matrix
+        or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
+    attr : keyword arguments, optional (default= no attributes)
+        Attributes to add to graph as key=value pairs.
+
+    See Also
+    --------
+    DiGraph
+    MultiGraph
+    MultiDiGraph
+
+    Examples
+    --------
+    Create an empty graph structure (a "null graph") with no nodes and
+    no edges.
+
+    >>> G = nx.Graph()
+
+    G can be grown in several ways.
+
+    **Nodes:**
+
+    Add one node at a time:
+
+    >>> G.add_node(1)
+
+    Add the nodes from any container (a list, dict, set or
+    even the lines from a file or the nodes from another graph).
+
+    >>> G.add_nodes_from([2,3])
+    >>> G.add_nodes_from(range(100,110))
+    >>> H=nx.Graph()
+    >>> H.add_path([0,1,2,3,4,5,6,7,8,9])
+    >>> G.add_nodes_from(H)
+
+    In addition to strings and integers any hashable Python object
+    (except None) can represent a node, e.g. a customized node object,
+    or even another Graph.
+
+    >>> G.add_node(H)
+
+    **Edges:**
+
+    G can also be grown by adding edges.
+
+    Add one edge,
+
+    >>> G.add_edge(1, 2)
+
+    a list of edges,
+
+    >>> G.add_edges_from([(1,2),(1,3)])
+
+    or a collection of edges,
+
+    >>> G.add_edges_from(H.edges())
+
+    If some edges connect nodes not yet in the graph, the nodes
+    are added automatically.  There are no errors when adding
+    nodes or edges that already exist.
+
+    **Attributes:**
+
+    Each graph, node, and edge can hold key/value attribute pairs
+    in an associated attribute dictionary (the keys must be hashable).
+    By default these are empty, but can be added or changed using
+    add_edge, add_node or direct manipulation of the attribute
+    dictionaries named graph, node and edge respectively.
+
+    >>> G = nx.Graph(day="Friday")
+    >>> G.graph
+    {'day': 'Friday'}
+
+    Add node attributes using add_node(), add_nodes_from() or G.node
+
+    >>> G.add_node(1, time='5pm')
+    >>> G.add_nodes_from([3], time='2pm')
+    >>> G.node[1]
+    {'time': '5pm'}
+    >>> G.node[1]['room'] = 714
+    >>> G.nodes(data=True)
+    [(1, {'room': 714, 'time': '5pm'}), (3, {'time': '2pm'})]
+
+    Warning: adding a node to G.node does not add it to the graph.
+
+    Add edge attributes using add_edge(), add_edges_from(), subscript
+    notation, or G.edge.
+
+    >>> G.add_edge(1, 2, weight=4.7)
+    >>> G.add_edges_from([(3,4),(4,5)], color='red')
+    >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
+    >>> G[1][2]['weight'] = 4.7
+    >>> G.edge[1][2]['weight'] = 4
+
+    **Shortcuts:**
+
+    Many common graph features allow python syntax to speed reporting.
+ + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n<3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + >>> G[1] # adjacency dict keyed by neighbor to edge attributes + ... # Note: you should not change this dict manually! + {2: {'color': 'blue', 'weight': 4}} + + The fastest way to traverse all edges of a graph is via + adjacency_iter(), but the edges() method is often more convenient. + + >>> for n,nbrsdict in G.adjacency_iter(): + ... for nbr,eattr in nbrsdict.items(): + ... if 'weight' in eattr: + ... (n,nbr,eattr['weight']) + (1, 2, 4) + (2, 1, 4) + (2, 3, 8) + (3, 2, 8) + >>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ] + [(1, 2, 4), (2, 3, 8)] + + **Reporting:** + + Simple graph information is obtained using methods. + Iterator versions of many reporting methods exist for efficiency. + Methods exist for reporting nodes(), edges(), neighbors() and degree() + as well as the number of nodes and edges. + + For details on these and other miscellaneous methods, see below. + """ + def __init__(self, data=None, **attr): + """Initialize a graph with edges, name, graph attributes. + + Parameters + ---------- + data : input graph + Data to initialize graph. If data=None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a NumPy matrix + or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. + name : string, optional (default='') + An optional name for the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name='my graph') + >>> e = [(1,2),(2,3),(3,4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G=nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + self.graph = {} # dictionary for graph attributes + self.node = {} # empty node dict (created before convert) + self.adj = {} # empty adjacency dict + # attempt to load graph with data + if data is not None: + convert.to_networkx_graph(data,create_using=self) + # load graph attributes (must be after convert) + self.graph.update(attr) + self.edge = self.adj + + @property + def name(self): + return self.graph.get('name','') + @name.setter + def name(self, s): + self.graph['name']=s + + def __str__(self): + """Return the graph name. + + Returns + ------- + name : string + The name of the graph. + + Examples + -------- + >>> G = nx.Graph(name='foo') + >>> str(G) + 'foo' + """ + return self.name + + def __iter__(self): + """Iterate over the nodes. Use the expression 'for n in G'. + + Returns + ------- + niter : iterator + An iterator over all nodes in the graph. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + """ + return iter(self.adj.keys()) + + def __contains__(self,n): + """Return True if n is a node, False otherwise. Use the expression + 'n in G'. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> 1 in G + True + """ + try: + return n in self.adj + except TypeError: + return False + + def __len__(self): + """Return the number of nodes. Use the expression 'len(G)'. 
+
+        Returns
+        -------
+        nnodes : int
+            The number of nodes in the graph.
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2,3])
+        >>> len(G)
+        4
+
+        """
+        return len(self.adj)
+
+    def __getitem__(self, n):
+        """Return a dict of neighbors of node n.  Use the expression 'G[n]'.
+
+        Parameters
+        ----------
+        n : node
+           A node in the graph.
+
+        Returns
+        -------
+        adj_dict : dictionary
+           The adjacency dictionary for nodes connected to n.
+
+        Notes
+        -----
+        G[n] is similar to G.neighbors(n) but the internal data dictionary
+        is returned instead of a list.
+
+        Assigning G[n] will corrupt the internal graph data structure.
+        Use G[n] for reading data only.
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2,3])
+        >>> G[0]
+        {1: {}}
+        """
+        return self.adj[n]
+
+
+    def add_node(self, n, attr_dict=None, **attr):
+        """Add a single node n and update node attributes.
+
+        Parameters
+        ----------
+        n : node
+            A node can be any hashable Python object except None.
+        attr_dict : dictionary, optional (default= no attributes)
+            Dictionary of node attributes.  Key/value pairs will
+            update existing data associated with the node.
+        attr : keyword arguments, optional
+            Set or change attributes using key=value.
+
+        See Also
+        --------
+        add_nodes_from
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_node(1)
+        >>> G.add_node('Hello')
+        >>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
+        >>> G.add_node(K3)
+        >>> G.number_of_nodes()
+        3
+
+        Use keywords to set/change node attributes:
+
+        >>> G.add_node(1,size=10)
+        >>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649))
+
+        Notes
+        -----
+        A hashable object is one that can be used as a key in a Python
+        dictionary. This includes strings, numbers, tuples of strings
+        and numbers, etc.
+
+        On many platforms hashable items also include mutables such as
+        NetworkX Graphs, though one should be careful that the hash
+        doesn't change on mutables.
+        """
+        # set up attribute dict
+        if attr_dict is None:
+            attr_dict=attr
+        else:
+            try:
+                attr_dict.update(attr)
+            except AttributeError:
+                raise NetworkXError(\
+                    "The attr_dict argument must be a dictionary.")
+        if n not in self.adj:
+            self.adj[n] = {}
+            self.node[n] = attr_dict
+        else: # update attr even if node already exists
+            self.node[n].update(attr_dict)
+
+
+    def add_nodes_from(self, nodes, **attr):
+        """Add multiple nodes.
+
+        Parameters
+        ----------
+        nodes : iterable container
+            A container of nodes (list, dict, set, etc.).
+            OR
+            A container of (node, attribute dict) tuples.
+            Node attributes are updated using the attribute dict.
+        attr : keyword arguments, optional (default= no attributes)
+            Update attributes for all nodes in nodes.
+            Node attributes specified in nodes as a tuple
+            take precedence over attributes specified generally.
+
+        See Also
+        --------
+        add_node
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_nodes_from('Hello')
+        >>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
+        >>> G.add_nodes_from(K3)
+        >>> sorted(G.nodes(),key=str)
+        [0, 1, 2, 'H', 'e', 'l', 'o']
+
+        Use keywords to update specific node attributes for every node.
+
+        >>> G.add_nodes_from([1,2], size=10)
+        >>> G.add_nodes_from([3,4], weight=0.4)
+
+        Use (node, attrdict) tuples to update attributes for specific
+        nodes.
+
+        >>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})])
+        >>> G.node[1]['size']
+        11
+        >>> H = nx.Graph()
+        >>> H.add_nodes_from(G.nodes(data=True))
+        >>> H.node[1]['size']
+        11
+
+        """
+        for n in nodes:
+            try:
+                newnode=n not in self.adj
+            except TypeError:
+                nn,ndict = n
+                if nn not in self.adj:
+                    self.adj[nn] = {}
+                    newdict = attr.copy()
+                    newdict.update(ndict)
+                    self.node[nn] = newdict
+                else:
+                    olddict = self.node[nn]
+                    olddict.update(attr)
+                    olddict.update(ndict)
+                continue
+            if newnode:
+                self.adj[n] = {}
+                self.node[n] = attr.copy()
+            else:
+                self.node[n].update(attr)
+
+    def remove_node(self,n):
+        """Remove node n.
+
+        Removes the node n and all adjacent edges.
+        Attempting to remove a non-existent node will raise an exception.
+
+        Parameters
+        ----------
+        n : node
+           A node in the graph
+
+        Raises
+        -------
+        NetworkXError
+           If n is not in the graph.
+
+        See Also
+        --------
+        remove_nodes_from
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2])
+        >>> G.edges()
+        [(0, 1), (1, 2)]
+        >>> G.remove_node(1)
+        >>> G.edges()
+        []
+
+        """
+        adj = self.adj
+        try:
+            nbrs = list(adj[n].keys()) # keys handles self-loops (allow mutation later)
+            del self.node[n]
+        except KeyError: # NetworkXError if n not in self
+            raise NetworkXError("The node %s is not in the graph."%(n,))
+        for u in nbrs:
+            del adj[u][n]   # remove all edges n-u in graph
+        del adj[n]          # now remove node
+
+
+    def remove_nodes_from(self, nodes):
+        """Remove multiple nodes.
+
+        Parameters
+        ----------
+        nodes : iterable container
+            A container of nodes (list, dict, set, etc.).  If a node
+            in the container is not in the graph it is silently
+            ignored.
+
+        See Also
+        --------
+        remove_node
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2])
+        >>> e = G.nodes()
+        >>> e
+        [0, 1, 2]
+        >>> G.remove_nodes_from(e)
+        >>> G.nodes()
+        []
+
+        """
+        adj = self.adj
+        for n in nodes:
+            try:
+                del self.node[n]
+                for u in list(adj[n].keys()):   # keys() handles self-loops
+                    del adj[u][n]               # (allows mutation of dict in loop)
+                del adj[n]
+            except KeyError:
+                pass
+
+
+    def nodes_iter(self, data=False):
+        """Return an iterator over the nodes.
+
+        Parameters
+        ----------
+        data : boolean, optional (default=False)
+               If False the iterator returns nodes.  If True
+               return a two-tuple of node and node data dictionary
+
+        Returns
+        -------
+        niter : iterator
+            An iterator over nodes.  If data=True the iterator gives
+            two-tuples containing (node, node data dictionary)
+
+        Notes
+        -----
+        If the node data is not required it is simpler and equivalent
+        to use the expression 'for n in G'.
+
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2])
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2])
+
+        >>> [d for n,d in G.nodes_iter(data=True)]
+        [{}, {}, {}]
+        """
+        if data:
+            return iter(self.node.items())
+        return iter(self.adj.keys())
+
+    def nodes(self, data=False):
+        """Return a list of the nodes in the graph.
+
+        Parameters
+        ----------
+        data : boolean, optional (default=False)
+               If False return a list of nodes.  If True return a
+               two-tuple of node and node data dictionary
+
+        Returns
+        -------
+        nlist : list
+            A list of nodes.  If data=True a list of two-tuples containing
+            (node, node data dictionary).
+ + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2]) + >>> G.nodes() + [0, 1, 2] + >>> G.add_node(1, time='5pm') + >>> G.nodes(data=True) + [(0, {}), (1, {'time': '5pm'}), (2, {})] + """ + return list(self.nodes_iter(data=data)) + + def number_of_nodes(self): + """Return the number of nodes in the graph. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + order, __len__ which are identical + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2]) + >>> len(G) + 3 + """ + return len(self.adj) + + def order(self): + """Return the number of nodes in the graph. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + number_of_nodes, __len__ which are identical + + """ + return len(self.adj) + + def has_node(self, n): + """Return True if the graph contains the node n. + + Parameters + ---------- + n : node + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2]) + >>> G.has_node(0) + True + + It is more readable and simpler to use + + >>> 0 in G + True + + """ + try: + return n in self.adj + except TypeError: + return False + + def add_edge(self, u, v, attr_dict=None, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by providing + a dictionary with key/value pairs. See examples below. + + Parameters + ---------- + u,v : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + attr_dict : dictionary, optional (default= no attributes) + Dictionary of edge attributes. Key/value pairs will + update existing data associated with the edge. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + Adding an edge that already exists updates the edge data. + + NetworkX algorithms designed for weighted graphs use as + the edge weight a numerical value assigned to the keyword + 'weight'. + + Examples + -------- + The following all add the edge e=(1,2) to graph G: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = (1,2) + >>> G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container + + Associate data to edges using keywords: + + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + """ + # set up attribute dictionary + if attr_dict is None: + attr_dict=attr + else: + try: + attr_dict.update(attr) + except AttributeError: + raise NetworkXError(\ + "The attr_dict argument must be a dictionary.") + # add nodes + if u not in self.adj: + self.adj[u] = {} + self.node[u] = {} + if v not in self.adj: + self.adj[v] = {} + self.node[v] = {} + # add the edge + datadict=self.adj[u].get(v,{}) + datadict.update(attr_dict) + self.adj[u][v] = datadict + self.adj[v][u] = datadict + + + def add_edges_from(self, ebunch, attr_dict=None, **attr): + """Add all the edges in ebunch. + + Parameters + ---------- + ebunch : container of edges + Each edge given in the container will be added to the + graph. 
The edges must be given as 2-tuples (u,v) or
+            3-tuples (u,v,d) where d is a dictionary containing edge
+            data.
+        attr_dict : dictionary, optional (default= no attributes)
+            Dictionary of edge attributes.  Key/value pairs will
+            update existing data associated with each edge.
+        attr : keyword arguments, optional
+            Edge data (or labels or objects) can be assigned using
+            keyword arguments.
+
+
+        See Also
+        --------
+        add_edge : add a single edge
+        add_weighted_edges_from : convenient way to add weighted edges
+
+        Notes
+        -----
+        Adding the same edge twice has no effect but any edge data
+        will be updated when each duplicate edge is added.
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples
+        >>> e = zip(range(0,3),range(1,4))
+        >>> G.add_edges_from(e) # Add the path graph 0-1-2-3
+
+        Associate data to edges
+
+        >>> G.add_edges_from([(1,2),(2,3)], weight=3)
+        >>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
+        """
+        # set up attribute dict
+        if attr_dict is None:
+            attr_dict=attr
+        else:
+            try:
+                attr_dict.update(attr)
+            except AttributeError:
+                raise NetworkXError(\
+                    "The attr_dict argument must be a dictionary.")
+        # process ebunch
+        for e in ebunch:
+            ne=len(e)
+            if ne==3:
+                u,v,dd = e
+            elif ne==2:
+                u,v = e
+                dd = {}
+            else:
+                raise NetworkXError(\
+                    "Edge tuple %s must be a 2-tuple or 3-tuple."%(e,))
+            if u not in self.adj:
+                self.adj[u] = {}
+                self.node[u] = {}
+            if v not in self.adj:
+                self.adj[v] = {}
+                self.node[v] = {}
+            datadict=self.adj[u].get(v,{})
+            datadict.update(attr_dict)
+            datadict.update(dd)
+            self.adj[u][v] = datadict
+            self.adj[v][u] = datadict
+
+
+    def add_weighted_edges_from(self, ebunch, **attr):
+        """Add all the edges in ebunch as weighted edges with specified
+        weights.
+
+        Parameters
+        ----------
+        ebunch : container of edges
+            Each edge given in the list or container will be added
+            to the graph. The edges must be given as 3-tuples (u,v,w)
+            where w is a number.
+        attr : keyword arguments, optional (default= no attributes)
+            Edge attributes to add/update for all edges.
+
+        See Also
+        --------
+        add_edge : add a single edge
+        add_edges_from : add multiple edges
+
+        Notes
+        -----
+        Adding the same edge twice has no effect but any edge data
+        will be updated when each duplicate edge is added.
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_weighted_edges_from([(0,1,3.0),(1,2,7.5)])
+        """
+        self.add_edges_from(((u,v,{'weight':d}) for u,v,d in ebunch),**attr)
+
+    def remove_edge(self, u, v):
+        """Remove the edge between u and v.
+
+        Parameters
+        ----------
+        u,v: nodes
+            Remove the edge between nodes u and v.
+
+        Raises
+        ------
+        NetworkXError
+            If there is not an edge between u and v.
+
+        See Also
+        --------
+        remove_edges_from : remove a collection of edges
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, etc
+        >>> G.add_path([0,1,2,3])
+        >>> G.remove_edge(0,1)
+        >>> e = (1,2)
+        >>> G.remove_edge(*e) # unpacks e from an edge tuple
+        >>> e = (2,3,{'weight':7}) # an edge with attribute data
+        >>> G.remove_edge(*e[:2]) # select first part of edge tuple
+        """
+        try:
+            del self.adj[u][v]
+            if u != v:  # self-loop needs only one entry removed
+                del self.adj[v][u]
+        except KeyError:
+            raise NetworkXError("The edge %s-%s is not in the graph"%(u,v))
+
+
+
+    def remove_edges_from(self, ebunch):
+        """Remove all edges specified in ebunch.
+
+        Parameters
+        ----------
+        ebunch: list or container of edge tuples
+            Each edge given in the list or container will be removed
+            from the graph. The edges can be:
+
+                - 2-tuples (u,v) edge between u and v.
+                - 3-tuples (u,v,k) where k is ignored.
+
+        See Also
+        --------
+        remove_edge : remove a single edge
+
+        Notes
+        -----
+        Will fail silently if an edge in ebunch is not in the graph.
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2,3])
+        >>> ebunch=[(1,2),(2,3)]
+        >>> G.remove_edges_from(ebunch)
+        """
+        for e in ebunch:
+            u,v = e[:2]  # ignore edge data if present
+            if u in self.adj and v in self.adj[u]:
+                del self.adj[u][v]
+                if u != v:  # self loop needs only one entry removed
+                    del self.adj[v][u]
+
+
+    def has_edge(self, u, v):
+        """Return True if the edge (u,v) is in the graph.
+
+        Parameters
+        ----------
+        u,v : nodes
+            Nodes can be, for example, strings or numbers.
+            Nodes must be hashable (and not None) Python objects.
+
+        Returns
+        -------
+        edge_ind : bool
+            True if edge is in the graph, False otherwise.
+
+        Examples
+        --------
+        Can be called either using two nodes u,v or edge tuple (u,v)
+
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2,3])
+        >>> G.has_edge(0,1)  # using two nodes
+        True
+        >>> e = (0,1)
+        >>> G.has_edge(*e)  #  e is a 2-tuple (u,v)
+        True
+        >>> e = (0,1,{'weight':7})
+        >>> G.has_edge(*e[:2])  # e is a 3-tuple (u,v,data_dictionary)
+        True
+
+        The following are all equivalent:
+
+        >>> G.has_edge(0,1)
+        True
+        >>> 1 in G[0]  # though this gives KeyError if 0 not in G
+        True
+
+        """
+        try:
+            return v in self.adj[u]
+        except KeyError:
+            return False
+
+
+    def neighbors(self, n):
+        """Return a list of the nodes connected to the node n.
+
+        Parameters
+        ----------
+        n : node
+           A node in the graph
+
+        Returns
+        -------
+        nlist : list
+            A list of nodes that are adjacent to n.
+
+        Raises
+        ------
+        NetworkXError
+            If the node n is not in the graph.
+
+        Notes
+        -----
+        It is usually more convenient (and faster) to access the
+        adjacency dictionary as G[n]:
+
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_edge('a','b',weight=7)
+        >>> G['a']
+        {'b': {'weight': 7}}
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2,3])
+        >>> G.neighbors(0)
+        [1]
+
+        """
+        try:
+            return list(self.adj[n].keys())
+        except KeyError:
+            raise NetworkXError("The node %s is not in the graph."%(n,))
+
+    def neighbors_iter(self, n):
+        """Return an iterator over all neighbors of node n.
+
+        Examples
+        --------
+        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_path([0,1,2,3])
+        >>> [n for n in G.neighbors_iter(0)]
+        [1]
+
+        Notes
+        -----
+        It is faster to use the idiom "in G[0]", e.g.
+
+        >>> G = nx.path_graph(4)
+        >>> [n for n in G[0]]
+        [1]
+        """
+        try:
+            return iter(self.adj[n].keys())
+        except KeyError:
+            raise NetworkXError("The node %s is not in the graph."%(n,))
+
+    def edges(self, nbunch=None, data=False):
+        """Return a list of edges.
+
+        Edges are returned as tuples with optional data
+        in the order (node, neighbor, data).
+
+        Parameters
+        ----------
+        nbunch : iterable container, optional (default= all nodes)
+            A container of nodes.  The container will be iterated
+            through once.
+        data : bool, optional (default=False)
+            Return two tuples (u,v) (False) or three-tuples (u,v,data) (True).
+ + Returns + -------- + edge_list: list of edge tuples + Edges that are adjacent to any node in nbunch, or a list + of all edges if nbunch is not specified. + + See Also + -------- + edges_iter : return an iterator over the edges + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.edges() + [(0, 1), (1, 2), (2, 3)] + >>> G.edges(data=True) # default edge data is {} (empty dictionary) + [(0, 1, {}), (1, 2, {}), (2, 3, {})] + >>> G.edges([0,3]) + [(0, 1), (3, 2)] + >>> G.edges(0) + [(0, 1)] + + """ + return list(self.edges_iter(nbunch, data)) + + def edges_iter(self, nbunch=None, data=False): + """Return an iterator over the edges. + + Edges are returned as tuples with optional data + in the order (node, neighbor, data). + + Parameters + ---------- + nbunch : iterable container, optional (default= all nodes) + A container of nodes. The container will be iterated + through once. + data : bool, optional (default=False) + If True, return edge attribute dict in 3-tuple (u,v,data). + + Returns + ------- + edge_iter : iterator + An iterator of (u,v) or (u,v,d) tuples of edges. + + See Also + -------- + edges : return a list of edges + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + + Examples + -------- + >>> G = nx.Graph() # or MultiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> [e for e in G.edges_iter()] + [(0, 1), (1, 2), (2, 3)] + >>> list(G.edges_iter(data=True)) # default data is {} (empty dict) + [(0, 1, {}), (1, 2, {}), (2, 3, {})] + >>> list(G.edges_iter([0,3])) + [(0, 1), (3, 2)] + >>> list(G.edges_iter(0)) + [(0, 1)] + + """ + seen={} # helper dict to keep track of multiply stored edges + if nbunch is None: + nodes_nbrs = iter(self.adj.items()) + else: + nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) + if data: + for n,nbrs in nodes_nbrs: + for nbr,data in nbrs.items(): + if nbr not in seen: + yield (n,nbr,data) + seen[n]=1 + else: + for n,nbrs in nodes_nbrs: + for nbr in nbrs: + if nbr not in seen: + yield (n,nbr) + seen[n] = 1 + del seen + + + def get_edge_data(self, u, v, default=None): + """Return the attribute dictionary associated with edge (u,v). + + Parameters + ---------- + u,v : nodes + default: any Python object (default=None) + Value to return if the edge (u,v) is not found. + + Returns + ------- + edge_dict : dictionary + The edge attribute dictionary. + + Notes + ----- + It is faster to use G[u][v]. + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G[0][1] + {} + + Warning: Assigning G[u][v] corrupts the graph data structure. + But it is safe to assign attributes to that dictionary, + + >>> G[0][1]['weight'] = 7 + >>> G[0][1]['weight'] + 7 + >>> G[1][0]['weight'] + 7 + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.get_edge_data(0,1) # default edge data is {} + {} + >>> e = (0,1) + >>> G.get_edge_data(*e) # tuple form + {} + >>> G.get_edge_data('a','b',default=0) # edge not in graph, return 0 + 0 + """ + try: + return self.adj[u][v] + except KeyError: + return default + + def adjacency_list(self): + """Return an adjacency list representation of the graph. + + The output adjacency list is in the order of G.nodes(). + For directed graphs, only outgoing adjacencies are included. 
+ + Returns + ------- + adj_list : lists of lists + The adjacency structure of the graph as a list of lists. + + See Also + -------- + adjacency_iter + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.adjacency_list() # in order given by G.nodes() + [[1], [0, 2], [1, 3], [2]] + + """ + return list(map(list,iter(self.adj.values()))) + + def adjacency_iter(self): + """Return an iterator of (node, adjacency dict) tuples for all nodes. + + This is the fastest way to look at every edge. + For directed graphs, only outgoing adjacencies are included. + + Returns + ------- + adj_iter : iterator + An iterator of (node, adjacency dictionary) for all nodes in + the graph. + + See Also + -------- + adjacency_list + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> [(n,nbrdict) for n,nbrdict in G.adjacency_iter()] + [(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})] + + """ + return iter(self.adj.items()) + + def degree(self, nbunch=None, weighted=False): + """Return the degree of a node or nodes. + + The node degree is the number of edges adjacent to that node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd : dictionary, or number + A dictionary with nodes as keys and degree as values or + a number if a single node is specified. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.degree(0) + 1 + >>> G.degree([0,1]) + {0: 1, 1: 2} + >>> list(G.degree([0,1]).values()) + [1, 2] + + """ + if nbunch in self: # return a single node + return next(self.degree_iter(nbunch,weighted=weighted))[1] + else: # return a dict + return dict(self.degree_iter(nbunch,weighted=weighted)) + + def degree_iter(self, nbunch=None, weighted=False): + """Return an iterator for (node, degree). + + The node degree is the number of edges adjacent to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + weighted : bool, optional (default=False) + If True return the sum of edge weights adjacent to the node. + + Returns + ------- + nd_iter : an iterator + The iterator returns two-tuples of (node, degree). + + See Also + -------- + degree + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> list(G.degree_iter(0)) # node 0 with degree 1 + [(0, 1)] + >>> list(G.degree_iter([0,1])) + [(0, 1), (1, 2)] + + """ + if nbunch is None: + nodes_nbrs = iter(self.adj.items()) + else: + nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) + + if weighted: + # edge weighted graph - degree is sum of nbr edge weights + for n,nbrs in nodes_nbrs: + yield (n, sum((nbrs[nbr].get('weight',1) for nbr in nbrs)) + + (n in nbrs and nbrs[n].get('weight',1))) + else: + for n,nbrs in nodes_nbrs: + yield (n,len(nbrs)+(n in nbrs)) # return tuple (n,degree) + + + def clear(self): + """Remove all nodes and edges from the graph. + + This also removes the name, and all graph, node, and edge attributes. 
+ + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.clear() + >>> G.nodes() + [] + >>> G.edges() + [] + + """ + self.name = '' + self.adj.clear() + self.node.clear() + self.graph.clear() + + def copy(self): + """Return a copy of the graph. + + Returns + ------- + G : Graph + A copy of the graph. + + See Also + -------- + to_directed: return a directed copy of the graph. + + Notes + ----- + This makes a complete copy of the graph including all of the + node or edge attributes. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> H = G.copy() + + """ + return deepcopy(self) + + def is_multigraph(self): + """Return True if graph is a multigraph, False otherwise.""" + return False + + + def is_directed(self): + """Return True if graph is directed, False otherwise.""" + return False + + def to_directed(self): + """Return a directed representation of the graph. + + Returns + ------- + G : DiGraph + A directed graph with the same name, same nodes, and with + each edge (u,v,data) replaced by two directed edges + (u,v,data) and (v,u,data). + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=DiGraph(G) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, http://docs.python.org/library/copy.html. + + Examples + -------- + >>> G = nx.Graph() # or MultiGraph, etc + >>> G.add_path([0,1]) + >>> H = G.to_directed() + >>> H.edges() + [(0, 1), (1, 0)] + + If already directed, return a (deep) copy + + >>> G = nx.DiGraph() # or MultiDiGraph, etc + >>> G.add_path([0,1]) + >>> H = G.to_directed() + >>> H.edges() + [(0, 1)] + """ + from urpmgraphs import DiGraph + G=DiGraph() + G.name=self.name + G.add_nodes_from(self) + G.add_edges_from( ((u,v,deepcopy(data)) + for u,nbrs in self.adjacency_iter() + for v,data in nbrs.items()) ) + G.graph=deepcopy(self.graph) + G.node=deepcopy(self.node) + return G + + def to_undirected(self): + """Return an undirected copy of the graph. + + Returns + ------- + G : Graph/MultiGraph + A deepcopy of the graph. + + See Also + -------- + copy, add_edge, add_edges_from + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar G=DiGraph(D) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, http://docs.python.org/library/copy.html. + + Examples + -------- + >>> G = nx.Graph() # or MultiGraph, etc + >>> G.add_path([0,1]) + >>> H = G.to_directed() + >>> H.edges() + [(0, 1), (1, 0)] + >>> G2 = H.to_undirected() + >>> G2.edges() + [(0, 1)] + """ + return deepcopy(self) + + def subgraph(self, nbunch): + """Return the subgraph induced on nodes in nbunch. + + The induced subgraph of the graph contains the nodes in nbunch + and the edges between those nodes. + + Parameters + ---------- + nbunch : list, iterable + A container of nodes which will be iterated through once. + + Returns + ------- + G : Graph + A subgraph of the graph with the same edge attributes. + + Notes + ----- + The graph, edge or node attributes just point to the original graph. 
So changes to the node or edge structure will not be reflected in
+ the original graph while changes to the attributes will.
+
+ To create a subgraph with its own copy of the edge/node attributes use:
+ nx.Graph(G.subgraph(nbunch))
+
+ If edge attributes are containers, a deep copy can be obtained using:
+ G.subgraph(nbunch).copy()
+
+ For an in-place reduction of a graph to a subgraph you can remove nodes:
+ G.remove_nodes_from([ n for n in G if n not in set(nbunch)])
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> H = G.subgraph([0,1,2])
+ >>> H.edges()
+ [(0, 1), (1, 2)]
+ """
+ bunch =self.nbunch_iter(nbunch)
+ # create new graph and copy subgraph into it
+ H = self.__class__()
+ # namespace shortcuts for speed
+ H_adj=H.adj
+ self_adj=self.adj
+ # add nodes and edges (undirected method)
+ for n in bunch:
+ Hnbrs={}
+ H_adj[n]=Hnbrs
+ for nbr,d in self_adj[n].items():
+ if nbr in H_adj:
+ # add both representations of edge: n-nbr and nbr-n
+ Hnbrs[nbr]=d
+ H_adj[nbr][n]=d
+ # copy node and attribute dictionaries
+ for n in H:
+ H.node[n]=self.node[n]
+ H.graph=self.graph
+ return H
+
+
+ def nodes_with_selfloops(self):
+ """Return a list of nodes with self loops.
+
+ A node with a self loop has an edge with both ends adjacent
+ to that node.
+
+ Returns
+ -------
+ nodelist : list
+ A list of nodes with self loops.
+
+ See Also
+ --------
+ selfloop_edges, number_of_selfloops
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edge(1,1)
+ >>> G.add_edge(1,2)
+ >>> G.nodes_with_selfloops()
+ [1]
+ """
+ return [ n for n,nbrs in self.adj.items() if n in nbrs ]
+
+ def selfloop_edges(self, data=False):
+ """Return a list of selfloop edges.
+
+ A selfloop edge has the same node at both ends.
+
+ Parameters
+ -----------
+ data : bool, optional (default=False)
+ Return selfloop edges as two tuples (u,v) (data=False)
+ or three-tuples (u,v,data) (data=True)
+
+ Returns
+ -------
+ edgelist : list of edge tuples
+ A list of all selfloop edges.
+
+ See Also
+ --------
+ nodes_with_selfloops, number_of_selfloops
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edge(1,1)
+ >>> G.add_edge(1,2)
+ >>> G.selfloop_edges()
+ [(1, 1)]
+ >>> G.selfloop_edges(data=True)
+ [(1, 1, {})]
+ """
+ if data:
+ return [ (n,n,nbrs[n])
+ for n,nbrs in self.adj.items() if n in nbrs ]
+ else:
+ return [ (n,n)
+ for n,nbrs in self.adj.items() if n in nbrs ]
+
+
+ def number_of_selfloops(self):
+ """Return the number of selfloop edges.
+
+ A selfloop edge has the same node at both ends.
+
+ Returns
+ -------
+ nloops : int
+ The number of selfloops.
+
+ See Also
+ --------
+ nodes_with_selfloops, selfloop_edges
+
+ Examples
+ --------
+ >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edge(1,1)
+ >>> G.add_edge(1,2)
+ >>> G.number_of_selfloops()
+ 1
+ """
+ return len(self.selfloop_edges())
+
+
+ def size(self, weighted=False):
+ """Return the number of edges.
+
+ Parameters
+ ----------
+ weighted : boolean, optional (default=False)
+ If True return the sum of the edge weights.
+
+ Returns
+ -------
+ nedges : int
+ The number of edges in the graph.
+ + See Also + -------- + number_of_edges + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.size() + 3 + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge('a','b',weight=2) + >>> G.add_edge('b','c',weight=4) + >>> G.size() + 2 + >>> G.size(weighted=True) + 6.0 + """ + s=sum(self.degree(weighted=weighted).values())/2 + if weighted: + return float(s) + else: + return int(s) + + def number_of_edges(self, u=None, v=None): + """Return the number of edges between two nodes. + + Parameters + ---------- + u,v : nodes, optional (default=all edges) + If u and v are specified, return the number of edges between + u and v. Otherwise return the total number of all edges. + + Returns + ------- + nedges : int + The number of edges in the graph. If nodes u and v are specified + return the number of edges between those nodes. + + See Also + -------- + size + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.number_of_edges() + 3 + >>> G.number_of_edges(0,1) + 1 + >>> e = (0,1) + >>> G.number_of_edges(*e) + 1 + """ + if u is None: return int(self.size()) + if v in self.adj[u]: + return 1 + else: + return 0 + + + def add_star(self, nodes, **attr): + """Add a star. + + The first node in nodes is the middle of the star. It is connected + to all other nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in star. + + See Also + -------- + add_path, add_cycle + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_star([0,1,2,3]) + >>> G.add_star([10,11,12],weight=2) + + """ + nlist = list(nodes) + v=nlist[0] + edges=((v,n) for n in nlist[1:]) + self.add_edges_from(edges, **attr) + + def add_path(self, nodes, **attr): + """Add a path. + + Parameters + ---------- + nodes : iterable container + A container of nodes. A path will be constructed from + the nodes (in order) and added to the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in path. + + See Also + -------- + add_star, add_cycle + + Examples + -------- + >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> G.add_path([10,11,12],weight=7) + + """ + nlist = list(nodes) + edges=list(zip(nlist[:-1],nlist[1:])) + self.add_edges_from(edges, **attr) + + def add_cycle(self, nodes, **attr): + """Add a cycle. + + Parameters + ---------- + nodes: iterable container + A container of nodes. A cycle will be constructed from + the nodes (in order) and added to the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in cycle. + + See Also + -------- + add_path, add_star + + Examples + -------- + >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_cycle([0,1,2,3]) + >>> G.add_cycle([10,11,12],weight=7) + + """ + nlist = list(nodes) + edges=list(zip(nlist,nlist[1:]+[nlist[0]])) + self.add_edges_from(edges, **attr) + + + def nbunch_iter(self, nbunch=None): + """Return an iterator of nodes contained in nbunch that are + also in the graph. + + The nodes in nbunch are checked for membership in the graph + and if not are silently ignored. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. 
The container will be iterated
+ through once.
+
+ Returns
+ -------
+ niter : iterator
+ An iterator over nodes in nbunch that are also in the graph.
+ If nbunch is None, iterate over all nodes in the graph.
+
+ Raises
+ ------
+ NetworkXError
+ If nbunch is not a node or a sequence of nodes.
+ If a node in nbunch is not hashable.
+
+ See Also
+ --------
+ Graph.__iter__
+
+ Notes
+ -----
+ When nbunch is an iterator, the returned iterator yields values
+ directly from nbunch, becoming exhausted when nbunch is exhausted.
+
+ To test whether nbunch is a single node, one can use
+ "if nbunch in self:", even after processing with this routine.
+
+ If nbunch is not a node or a (possibly empty) sequence/iterator
+ or None, a NetworkXError is raised. Also, if any object in
+ nbunch is not hashable, a NetworkXError is raised.
+ """
+ if nbunch is None: # include all nodes via iterator
+ bunch=iter(self.adj.keys())
+ elif nbunch in self: # if nbunch is a single node
+ bunch=iter([nbunch])
+ else: # if nbunch is a sequence of nodes
+ def bunch_iter(nlist,adj):
+ try:
+ for n in nlist:
+ if n in adj:
+ yield n
+ except TypeError as e:
+ message=e.args[0]
+# sys.stdout.write(message)
+ # capture error for non-sequence/iterator nbunch.
+ if 'iter' in message:
+ raise NetworkXError(\
+ "nbunch is not a node or a sequence of nodes.")
+ # capture error for unhashable node.
+ elif 'hashable' in message:
+ raise NetworkXError(\
+ "Node %s in the sequence nbunch is not a valid node."%n)
+ else:
+ raise
+ bunch=bunch_iter(nbunch,self.adj)
+ return bunch
diff --git a/urpm-tools/rpm5utils/urpmgraphs/convert.py b/urpm-tools/rpm5utils/urpmgraphs/convert.py
new file mode 100644
index 0000000..571b47a
--- /dev/null
+++ b/urpm-tools/rpm5utils/urpmgraphs/convert.py
@@ -0,0 +1,708 @@
+"""
+This module provides functions to convert
+NetworkX graphs to and from other formats.
+
+The preferred way of converting data to a NetworkX graph
+is through the graph constructor. The constructor calls
+the to_networkx_graph() function which attempts to guess the
+input type and convert it automatically.
+
+Examples
+--------
+
+Create a 10 node random graph from a numpy matrix
+
+>>> import numpy
+>>> a=numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
+>>> D=nx.DiGraph(a)
+
+or equivalently
+
+>>> D=nx.to_networkx_graph(a,create_using=nx.DiGraph())
+
+Create a graph with a single edge from a dictionary of dictionaries
+
+>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
+>>> G=nx.Graph(d)
+
+
+See Also
+--------
+nx_pygraphviz, nx_pydot
+
+"""
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'])
+# Copyright (C) 2006-2011 by
+# Aric Hagberg
+# Dan Schult
+# Pieter Swart
+# All rights reserved.
+# BSD license.
+
+import warnings
+import rpm5utils as nx
+
+__all__ = ['to_networkx_graph',
+
+ 'from_dict_of_dicts', 'to_dict_of_dicts',
+ 'from_dict_of_lists', 'to_dict_of_lists',
+ 'from_edgelist', 'to_edgelist',
+ 'from_numpy_matrix', 'to_numpy_matrix',
+ 'to_numpy_recarray'
+ ]
+
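+# A minimal round trip through this module (an illustrative sketch, assuming
+# the Graph class is reachable through the "import rpm5utils as nx" alias above):
+#   G = nx.Graph({0: {1: {}}})   # dict-of-dicts in
+#   to_dict_of_dicts(G)          # -> {0: {1: {}}, 1: {0: {}}}
+#   to_edgelist(G)               # -> [(0, 1, {})]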
+def _prep_create_using(create_using):
+ """Return a graph object ready to be populated.
+
+ If create_using is None return the default (just networkx.Graph())
+ If create_using.clear() works, assume it returns a graph object.
+ Otherwise raise an exception because create_using is not a networkx graph.
+
+ """
+ if create_using is None:
+ G=nx.Graph()
+ else:
+ G=create_using
+ try:
+ G.clear()
+ except:
+ raise TypeError("Input graph is not a networkx graph type")
+ return G
+
+def to_networkx_graph(data,create_using=None,multigraph_input=False):
+ """Make a NetworkX graph from a known data structure.
+
+ The preferred way to call this is automatically
+ from the class constructor
+
+ >>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
+ >>> G=nx.Graph(d)
+
+ instead of the equivalent
+
+ >>> G=nx.from_dict_of_dicts(d)
+
+ Parameters
+ ----------
+ data : an object to be converted
+ Current known types are:
+ any NetworkX graph
+ dict-of-dicts
+ dict-of-lists
+ list of edges
+ numpy matrix
+ numpy ndarray
+ scipy sparse matrix
+ pygraphviz agraph
+
+ create_using : NetworkX graph
+ Use specified graph for result. Otherwise a new graph is created.
+
+ multigraph_input : bool (default False)
+ If True and data is a dict_of_dicts,
+ try to create a multigraph assuming dict_of_dict_of_lists.
+ If data and create_using are both multigraphs then create
+ a multigraph from a multigraph.
+
+ """
+ # NX graph
+ if hasattr(data,"adj"):
+ try:
+ result= from_dict_of_dicts(data.adj,\
+ create_using=create_using,\
+ multigraph_input=data.is_multigraph())
+ if hasattr(data,'graph') and isinstance(data.graph,dict):
+ result.graph=data.graph.copy()
+ if hasattr(data,'node') and isinstance(data.node,dict):
+ result.node=dict( (n,dd.copy()) for n,dd in data.node.items() )
+ return result
+ except:
+ raise nx.NetworkXError("Input is not a correct NetworkX graph.")
+
+ # pygraphviz agraph
+ if hasattr(data,"is_strict"):
+ try:
+ return nx.from_agraph(data,create_using=create_using)
+ except:
+ raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
+
+ # dict of dicts/lists
+ if isinstance(data,dict):
+ try:
+ return from_dict_of_dicts(data,create_using=create_using,\
+ multigraph_input=multigraph_input)
+ except:
+ try:
+ return from_dict_of_lists(data,create_using=create_using)
+ except:
+ raise TypeError("Input is not a known type.")
+
+ # list or generator of edges
+ if (isinstance(data,list)
+ or hasattr(data,'next')
+ or hasattr(data, '__next__')):
+ try:
+ return from_edgelist(data,create_using=create_using)
+ except:
+ raise nx.NetworkXError("Input is not a valid edge list")
+
+ # numpy matrix or ndarray
+ try:
+ import numpy
+ if isinstance(data,numpy.matrix) or \
+ isinstance(data,numpy.ndarray):
+ try:
+ return from_numpy_matrix(data,create_using=create_using)
+ except:
+ raise nx.NetworkXError(\
+ "Input is not a correct numpy matrix or array.")
+ except ImportError:
+ warnings.warn('numpy not found, skipping conversion test.',
+ ImportWarning)
+
+ # scipy sparse matrix - any format
+ # (note: from_scipy_sparse_matrix is not shipped with this trimmed copy;
+ # if scipy data reaches this branch, the resulting NameError is masked by
+ # the bare except below and re-raised as a NetworkXError)
+ try:
+ import scipy
+ if hasattr(data,"format"):
+ try:
+ return from_scipy_sparse_matrix(data,create_using=create_using)
+ except:
+ raise nx.NetworkXError(\
+ "Input is not a correct scipy sparse matrix type.")
+ except ImportError:
+ warnings.warn('scipy not found, skipping conversion test.',
+ ImportWarning)
+
+
+ raise nx.NetworkXError(\
+ "Input is not a known data type for conversion.")
+
+ return
+
+
+def convert_to_undirected(G):
+ """Return a new undirected representation of the graph G.
+
+ """
+ return G.to_undirected()
+
+
+def convert_to_directed(G):
+ """Return a new directed representation of the graph G.
+
+ """
+ return G.to_directed()
+
+
+def to_dict_of_lists(G,nodelist=None):
+ """Return adjacency representation of graph as a dictionary of lists.
+ + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + Notes + ----- + Completely ignores edge data for MultiGraph and MultiDiGraph. + + """ + if nodelist is None: + nodelist=G + + d = {} + for n in nodelist: + d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist] + return d + +def from_dict_of_lists(d,create_using=None): + """Return a graph from a dictionary of lists. + + Parameters + ---------- + d : dictionary of lists + A dictionary of lists adjacency representation. + + create_using : NetworkX graph + Use specified graph for result. Otherwise a new graph is created. + + Examples + -------- + >>> dol= {0:[1]} # single edge (0,1) + >>> G=nx.from_dict_of_lists(dol) + + or + >>> G=nx.Graph(dol) # use Graph constructor + + """ + G=_prep_create_using(create_using) + G.add_nodes_from(d) + if G.is_multigraph() and not G.is_directed(): + # a dict_of_lists can't show multiedges. BUT for undirected graphs, + # each edge shows up twice in the dict_of_lists. + # So we need to treat this case separately. + seen={} + for node,nbrlist in d.items(): + for nbr in nbrlist: + if nbr not in seen: + G.add_edge(node,nbr) + seen[node]=1 # don't allow reverse edge to show up + else: + G.add_edges_from( ((node,nbr) for node,nbrlist in d.items() + for nbr in nbrlist) ) + return G + + +def to_dict_of_dicts(G,nodelist=None,edge_data=None): + """Return adjacency representation of graph as a dictionary of dictionaries. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + edge_data : list, optional + If provided, the value of the dictionary will be + set to edge_data for all edges. This is useful to make + an adjacency matrix type representation with 1 as the edge data. + If edgedata is None, the edgedata in G is used to fill the values. + If G is a multigraph, the edgedata is a dict for each pair (u,v). + + """ + dod={} + if nodelist is None: + if edge_data is None: + for u,nbrdict in G.adjacency_iter(): + dod[u]=nbrdict.copy() + else: # edge_data is not None + for u,nbrdict in G.adjacency_iter(): + dod[u]=dod.fromkeys(nbrdict, edge_data) + else: # nodelist is not None + if edge_data is None: + for u in nodelist: + dod[u]={} + for v,data in ((v,data) for v,data in G[u].items() if v in nodelist): + dod[u][v]=data + else: # nodelist and edge_data are not None + for u in nodelist: + dod[u]={} + for v in ( v for v in G[u] if v in nodelist): + dod[u][v]=edge_data + return dod + +def from_dict_of_dicts(d,create_using=None,multigraph_input=False): + """Return a graph from a dictionary of dictionaries. + + Parameters + ---------- + d : dictionary of dictionaries + A dictionary of dictionaries adjacency representation. + + create_using : NetworkX graph + Use specified graph for result. Otherwise a new graph is created. + + multigraph_input : bool (default False) + When True, the values of the inner dict are assumed + to be containers of edge data for multiple edges. + Otherwise this routine assumes the edge data are singletons. + + Examples + -------- + >>> dod= {0: {1:{'weight':1}}} # single edge (0,1) + >>> G=nx.from_dict_of_dicts(dod) + + or + >>> G=nx.Graph(dod) # use Graph constructor + + """ + G=_prep_create_using(create_using) + G.add_nodes_from(d) + # is dict a MultiGraph or MultiDiGraph? 
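+ # Illustrative multigraph_input shapes (assumed examples, not part of the
+ # original code): with multigraph_input=True the inner values are keyed
+ # containers of edge data, e.g. d = {0: {1: {'key0': {'weight': 1}}}},
+ # while with multigraph_input=False d = {0: {1: {'weight': 1}}} is one edge.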
+ if multigraph_input: + # make a copy of the list of edge data (but not the edge data) + if G.is_directed(): + if G.is_multigraph(): + G.add_edges_from( (u,v,key,data) + for u,nbrs in d.items() + for v,datadict in nbrs.items() + for key,data in datadict.items() + ) + else: + G.add_edges_from( (u,v,data) + for u,nbrs in d.items() + for v,datadict in nbrs.items() + for key,data in datadict.items() + ) + else: # Undirected + if G.is_multigraph(): + seen=set() # don't add both directions of undirected graph + for u,nbrs in d.items(): + for v,datadict in nbrs.items(): + if (u,v) not in seen: + G.add_edges_from( (u,v,key,data) + for key,data in datadict.items() + ) + seen.add((v,u)) + else: + seen=set() # don't add both directions of undirected graph + for u,nbrs in d.items(): + for v,datadict in nbrs.items(): + if (u,v) not in seen: + G.add_edges_from( (u,v,data) + for key,data in datadict.items() ) + seen.add((v,u)) + + else: # not a multigraph to multigraph transfer + if G.is_multigraph() and not G.is_directed(): + # d can have both representations u-v, v-u in dict. Only add one. + # We don't need this check for digraphs since we add both directions, + # or for Graph() since it is done implicitly (parallel edges not allowed) + seen=set() + for u,nbrs in d.items(): + for v,data in nbrs.items(): + if (u,v) not in seen: + G.add_edge(u,v,attr_dict=data) + seen.add((v,u)) + else: + G.add_edges_from( ( (u,v,data) + for u,nbrs in d.items() + for v,data in nbrs.items()) ) + return G + +def to_edgelist(G,nodelist=None): + """Return a list of edges in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + """ + if nodelist is None: + return G.edges(data=True) + else: + return G.edges(nodelist,data=True) + +def from_edgelist(edgelist,create_using=None): + """Return a graph from a list of edges. + + Parameters + ---------- + edgelist : list or iterator + Edge tuples + + create_using : NetworkX graph + Use specified graph for result. Otherwise a new graph is created. + + Examples + -------- + >>> edgelist= [(0,1)] # single edge (0,1) + >>> G=nx.from_edgelist(edgelist) + + or + >>> G=nx.Graph(edgelist) # use Graph constructor + + """ + G=_prep_create_using(create_using) + G.add_edges_from(edgelist) + return G + +def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, + multigraph_weight=sum, weight='weight'): + """Return the graph adjacency matrix as a NumPy matrix. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy matrix. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data type, optional + A valid single NumPy data type used to initialize the array. + This must be a simple type such as int or numpy.float64 and + not a compound data type (see to_numpy_recarray) + If None, then the NumPy default is used. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + multigraph_weight : {sum, min, max}, optional + An operator that determines how weights in multigraphs are handled. + The default is to sum the weights of the multiple edges. + + weight: string, optional + Edge data key corresponding to the edge weight. + + Returns + ------- + M : NumPy matrix + Graph adjacency matrix. 
+ + See Also + -------- + to_numpy_recarray, from_numpy_matrix + + Notes + ----- + The matrix entries are assigned with weight edge attribute. When + an edge does not have the weight attribute, the value of the entry is 1. + For multiple edges, the values of the entries are the sums of the edge + attributes for each edge. + + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0,1,weight=2) + >>> G.add_edge(1,0) + >>> G.add_edge(2,2,weight=3) + >>> G.add_edge(2,2) + >>> nx.to_numpy_matrix(G, nodelist=[0,1,2]) + matrix([[ 0., 2., 0.], + [ 1., 0., 0.], + [ 0., 0., 4.]]) + + """ + try: + import numpy as np + except ImportError: + raise ImportError(\ + "to_numpy_matrix() requires numpy: http://scipy.org/ ") + + if nodelist is None: + nodelist = G.nodes() + + nodeset = set(nodelist) + if len(nodelist) != len(nodeset): + msg = "Ambiguous ordering: `nodelist` contained duplicates." + raise nx.NetworkXError(msg) + + nlen=len(nodelist) + undirected = not G.is_directed() + index=dict(zip(nodelist,range(nlen))) + + if G.is_multigraph(): + # Handle MultiGraphs and MultiDiGraphs + # array of nan' to start with, any leftover nans will be converted to 0 + # nans are used so we can use sum, min, max for multigraphs + M = np.zeros((nlen,nlen), dtype=dtype, order=order)+np.nan + # use numpy nan-aware operations + operator={sum:np.nansum, min:np.nanmin, max:np.nanmax} + try: + op=operator[multigraph_weight] + except: + raise ValueError('multigraph_weight must be sum, min, or max') + + for u,v,attrs in G.edges_iter(data=True): + if (u in nodeset) and (v in nodeset): + i,j = index[u],index[v] + e_weight = attrs.get(weight, 1) + M[i,j] = op([e_weight,M[i,j]]) + if undirected: + M[j,i] = M[i,j] + # convert any nans to zeros + M = np.asmatrix(np.nan_to_num(M)) + else: + # Graph or DiGraph, this is much faster than above + M = np.zeros((nlen,nlen), dtype=dtype, order=order) + for u,nbrdict in G.adjacency_iter(): + for v,d in nbrdict.items(): + try: + M[index[u],index[v]]=d.get(weight,1) + except KeyError: + pass + M = np.asmatrix(M) + return M + + +def from_numpy_matrix(A,create_using=None): + """Return a graph from numpy matrix. + + The numpy matrix is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + A : numpy matrix + An adjacency matrix representation of a graph + + create_using : NetworkX graph + Use specified graph for result. The default is Graph() + + Notes + ----- + If the numpy matrix has a single data type for each matrix entry it + will be converted to an appropriate Python data type. + + If the numpy matrix has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. 
+ + See Also + -------- + to_numpy_matrix, to_numpy_recarray + + Examples + -------- + Simple integer weights on edges: + + >>> import numpy + >>> A=numpy.matrix([[1,1],[2,1]]) + >>> G=nx.from_numpy_matrix(A) + + User defined compound data type on edges: + + >>> import numpy + >>> dt=[('weight',float),('cost',int)] + >>> A=numpy.matrix([[(1.0,2)]],dtype=dt) + >>> G=nx.from_numpy_matrix(A) + >>> G.edges(data=True) + [(0, 0, {'cost': 2, 'weight': 1.0})] + """ + kind_to_python_type={'f':float, + 'i':int, + 'u':int, + 'b':bool, + 'c':complex, + 'S':str, + 'V':'void'} + + try: # Python 3.x + blurb = chr(1245) # just to trigger the exception + kind_to_python_type['U']=str + except ValueError: # Python 2.6+ + kind_to_python_type['U']=unicode + + # This should never fail if you have created a numpy matrix with numpy... + try: + import numpy as np + except ImportError: + raise ImportError(\ + "from_numpy_matrix() requires numpy: http://scipy.org/ ") + + G=_prep_create_using(create_using) + n,m=A.shape + if n!=m: + raise nx.NetworkXError("Adjacency matrix is not square.", + "nx,ny=%s"%(A.shape,)) + dt=A.dtype + try: + python_type=kind_to_python_type[dt.kind] + except: + raise TypeError("Unknown numpy data type: %s"%dt) + + # make sure we get isolated nodes + G.add_nodes_from(range(n)) + # get a list of edges + x,y=np.asarray(A).nonzero() + + # handle numpy constructed data type + if python_type is 'void': + fields=sorted([(offset,dtype,name) for name,(dtype,offset) in + A.dtype.fields.items()]) + for (u,v) in zip(x,y): + attr={} + for (offset,dtype,name),val in zip(fields,A[u,v]): + attr[name]=kind_to_python_type[dtype.kind](val) + G.add_edge(u,v,attr) + else: # basic data type + G.add_edges_from( ((u,v,{'weight':python_type(A[u,v])}) + for (u,v) in zip(x,y)) ) + return G + + +def to_numpy_recarray(G,nodelist=None, + dtype=[('weight',float)], + order=None): + """Return the graph adjacency matrix as a NumPy recarray. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy matrix. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data-type, optional + A valid NumPy named dtype used to initialize the NumPy recarray. + The data type names are assumed to be keys in the graph edge attribute + dictionary. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + Returns + ------- + M : NumPy recarray + The graph with specified edge data as a Numpy recarray + + Notes + ----- + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edge(1,2,weight=7.0,cost=5) + >>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)]) + >>> print(A.weight) + [[ 0. 7.] + [ 7. 0.]] + >>> print(A.cost) + [[0 5] + [5 0]] + """ + try: + import numpy as np + except ImportError: + raise ImportError(\ + "to_numpy_matrix() requires numpy: http://scipy.org/ ") + + if G.is_multigraph(): + raise nx.NetworkXError("Not implemented for multigraphs.") + + if nodelist is None: + nodelist = G.nodes() + + nodeset = set(nodelist) + if len(nodelist) != len(nodeset): + msg = "Ambiguous ordering: `nodelist` contained duplicates." 
+ raise nx.NetworkXError(msg) + + nlen=len(nodelist) + undirected = not G.is_directed() + index=dict(zip(nodelist,range(nlen))) + M = np.zeros((nlen,nlen), dtype=dtype, order=order) + + names=M.dtype.names + for u,v,attrs in G.edges_iter(data=True): + if (u in nodeset) and (v in nodeset): + i,j = index[u],index[v] + values=tuple([attrs[n] for n in names]) + M[i,j] = values + if undirected: + M[j,i] = M[i,j] + + return M.view(np.recarray) diff --git a/urpm-tools/rpm5utils/urpmgraphs/exception.py b/urpm-tools/rpm5utils/urpmgraphs/exception.py new file mode 100644 index 0000000..c2dd580 --- /dev/null +++ b/urpm-tools/rpm5utils/urpmgraphs/exception.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +""" +********** +Exceptions +********** + +Base exceptions and errors for NetworkX. + +""" +__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult(dschult@colgate.edu)\nLoïc Séguin-C. """ +# Copyright (C) 2004-2008 by +# Aric Hagberg +# Dan Schult +# Pieter Swart +# All rights reserved. +# BSD license. +# + +# Exception handling + +# the root of all Exceptions +class NetworkXException(Exception): + """Base class for exceptions in NetworkX.""" + +class NetworkXError(NetworkXException): + """Exception for a serious error in NetworkX""" + +class NetworkXPointlessConcept(NetworkXException): + """Harary, F. and Read, R. "Is the Null Graph a Pointless Concept?" +In Graphs and Combinatorics Conference, George Washington University. +New York: Springer-Verlag, 1973. +""" + +class NetworkXAlgorithmError(NetworkXException): + """Exception for unexpected termination of algorithms.""" + +class NetworkXUnfeasible(NetworkXAlgorithmError): + """Exception raised by algorithms trying to solve a problem + instance that has no feasible solution.""" + +class NetworkXNoPath(NetworkXUnfeasible): + """Exception for algorithms that should return a path when running + on graphs where such a path does not exist.""" + +class NetworkXUnbounded(NetworkXAlgorithmError): + """Exception raised by algorithms trying to solve a maximization + or a minimization problem instance that is unbounded.""" + + diff --git a/urpm-tools/urpm-downloader.py b/urpm-tools/urpm-downloader.py new file mode 100755 index 0000000..b0bf573 --- /dev/null +++ b/urpm-tools/urpm-downloader.py @@ -0,0 +1,675 @@ +#!/usr/bin/python2.7 +# -*- coding: UTF-8 -*- +''' +" urpm-downloader for URPM-based linux +" A tool for downloading RPMs from URPM-based linux repositories. +" +" Copyright (C) 2011 ROSA Laboratory. +" Written by Anton Kirilenko +" +" PLATFORMS +" ========= +" Linux +" +" REQUIREMENTS +" ============ +" - python 2.7 +" - python-rpm 5.3 +" - urpmi 6.68 +" +" This program is free software: you can redistribute it and/or modify +" it under the terms of the GNU General Public License or the GNU Lesser +" General Public License as published by the Free Software Foundation, +" either version 2 of the Licenses, or (at your option) any later version. +" +" This program is distributed in the hope that it will be useful, +" but WITHOUT ANY WARRANTY; without even the implied warranty of +" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +" GNU General Public License for more details. +" +" You should have received a copy of the GNU General Public License +" and the GNU Lesser General Public License along with this program. +" If not, see . 
+'''
+
+
+import argparse
+import sys
+import subprocess
+import os
+import re
+from urllib import urlretrieve
+import rpm
+from urllib2 import urlopen, HTTPError, URLError
+import shutil
+
+import gettext
+#gettext.install('urpm-tools', 'locale', unicode=True, names=['gettext'])
+gettext.install('urpm-tools')
+
+#t = gettext.translation('urpm-tools', 'locale', fallback=True)
+#_ = t.ugettext
+
+def vprint(text):
+ '''Print the message only if verbose mode is on'''
+ if(command_line_arguments.verbose):
+ print(text)
+
+def qprint(text):
+ '''Print the message only if quiet mode is off'''
+ if(not command_line_arguments.quiet):
+ print(text)
+
+
+def eprint(text, fatal=False, code=1):
+ '''Print the message to stderr. Exit if fatal'''
+ print >> sys.stderr, text
+ if (fatal):
+ exit(code)
+
+
+def url_exists(url):
+ '''Return True if the given url or local path exists. Otherwise, return False.'''
+ if(url.startswith("file://") or url.startswith("/")):
+ return os.path.isfile(url)
+
+ #try to open file
+ try:
+ r = urlopen(url)
+ return True
+ except (HTTPError,URLError):
+ return False
+
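+# Illustrative behaviour of url_exists (assumed examples, not part of the
+# original code):
+#   url_exists('/bin/ls')                     # -> True, checked via os.path.isfile
+#   url_exists('http://example.com/none.rpm') # -> False on HTTPError/URLError
+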
+def parse_command_line():
+ ''' Parse command line, adjust some flags and warn in some cases'''
+ global command_line_arguments
+ arg_parser = argparse.ArgumentParser(description=_('A tool for downloading RPMs and SRPMs from URPM-based linux repositories'),
+ epilog=_("If none of the options -b, -s, -d is turned on, it will be treated as -b"))
+ arg_parser.add_argument('packages', action='store',nargs = '+', help=_("Package name(s) to download. It can contain not only package names, but (S)RPM files too. In this case, the package name extracted from the file will be used"))
+ arg_parser.add_argument('-u', '--urls', action='store_true', help=_("Instead of downloading files, list the URLs that would be processed"))
+ arg_parser.add_argument('-r', '--resolve', action='store_true', help=_("When downloading RPMs, resolve dependencies and also download the required packages, if they are not already installed"))
+ arg_parser.add_argument('-a', '--resolve-all', action='store_true', help=_("When downloading RPMs, resolve dependencies and also download the required packages, even if they are already installed"))
+ arg_parser.add_argument('-b', '--binary', action='store_true', help=_("Download binary RPMs"))
+ arg_parser.add_argument('-s', '--source', action='store_true', help=_("Download the source RPMs (SRPMs)"))
+ arg_parser.add_argument('-d', '--debug-info', action='store_true', help=_("Download debug RPMs"))
+ arg_parser.add_argument('-D', '--debug-info-install', action='store_true', help=_("Download debug RPMs and install them"))
+ arg_parser.add_argument('--version', action='version', version=VERSION)
+ arg_parser.add_argument('-v', '--verbose', action='store_true', help=_("Verbose (print additional info)"))
+ arg_parser.add_argument('-q', '--quiet', action='store_true', help=_("Quiet operation."))
+ arg_parser.add_argument('--include-media', '--media', action='append',nargs = '+', help=_("Use only selected URPM media"))
+ arg_parser.add_argument('--exclude-media', action='append',nargs = '+', help=_("Do not use selected URPM media"))
+ arg_parser.add_argument('-x', '--exclude-packages', action='store',nargs = '+', help=_("Exclude package(s) by regex"))
+ arg_parser.add_argument('-i', '--ignore-errors', action='store_true', help=_("Try to continue when an error occurs"))
+ arg_parser.add_argument('-o', '--overwrite', action='store_true', help=_("If the file already exists, download it again and overwrite the old one"))
+ arg_parser.add_argument('--all-alternatives', action='store_true', help=_("If a package dependency can be satisfied by several packages, download all of them (by default, only the first one is downloaded)"))
+ arg_parser.add_argument('--all-versions', action='store_true', help=_("If different versions of a package are present in the repository, process them all"))
+ #arg_parser.add_argument('--self-test', action='store_true', help="Test urpm-downloader and exit")
+
+ arg_parser.add_argument('--dest-dir', action='store', help=_("Specify a destination directory for the download"))
+
+ command_line_arguments = arg_parser.parse_args(sys.argv[1:])
+
+ if(command_line_arguments.debug_info_install):
+ command_line_arguments.debug_info = True
+
+ if(not command_line_arguments.debug_info and not command_line_arguments.source):
+ command_line_arguments.binary = True
+
+ if(command_line_arguments.resolve_all):
+ command_line_arguments.resolve = True
+
+ if(command_line_arguments.exclude_packages is None):
+ command_line_arguments.exclude_packages = []
+
+ if(command_line_arguments.verbose and command_line_arguments.quiet):
+ eprint(_("Using --verbose with --quiet makes no sense. Turning verbose mode off."))
+ command_line_arguments.verbose = False
+
+ if(command_line_arguments.resolve and command_line_arguments.source and command_line_arguments.urls):
+ eprint(_("Note that resolving of SRPM dependencies is not possible until the SRPM is downloaded, so it will be done despite --urls"))
+
+ if(command_line_arguments.dest_dir is not None):
+ if(not os.path.exists(command_line_arguments.dest_dir) or not os.path.isdir(command_line_arguments.dest_dir)):
+ os.mkdir(command_line_arguments.dest_dir)
+ else:
+ command_line_arguments.dest_dir = os.getcwd()
+
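+# Typical invocations (an illustrative sketch; 'foo' is a placeholder name):
+#   urpm-downloader -b -r foo     # binary RPM for 'foo' plus not-installed deps
+#   urpm-downloader -s --urls foo # print SRPM URLs instead of downloading
+#   urpm-downloader -D foo        # download debug-info RPMs and install them
+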
+def get_command_output(command, fatal_fails=True):
+ '''Execute command using subprocess.Popen and return [stdout, stderr, returncode]. If
+ the return code is not 0 and fatal_fails is set, print an error message and exit'''
+ vprint("Executing command: " + str(command))
+ res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output = list(res.communicate())
+ vprint('Output: ' + str(output))
+ if sys.stdout.encoding:
+ if output[0]:
+ output[0] = output[0].decode(sys.stdout.encoding).encode("UTF-8")
+ if output[1]:
+ output[1] = output[1].decode(sys.stdout.encoding).encode("UTF-8")
+
+ if(res.returncode != 0 and fatal_fails): # if not fatal_fails, do nothing. The caller has to deal with it himself
+ eprint(_("Error while calling command") + " '" + " ".join(command) + "'")
+ if(output[1] != None or output[0] != None):
+ eprint(_("Error message: \n")+ ((output[0].strip() + "\n") if output[0]!=None else "") +
+ (output[1].strip() if output[1]!=None else "") )
+ exit(1)
+ return [output[0], output[1], res.returncode]
+
+
+def parse_packages(pkgs_list, toresolve):
+ ''' Takes a list of package names, some of which are alternatives (like 'pkg1|pkg2'),
+ and returns a list of package names without '|' '''
+ output = []
+ for pkg in pkgs_list:
+ pkgs = pkg.split("|")
+ if(len(pkgs)>1):
+ vprint("Alternatives found: " + str(pkgs))
+ if(command_line_arguments.all_alternatives): # download all the alternatives
+ for p in pkgs:
+ output.append(p)
+ else: # download only the first package (first in alphabetical order)
+ #check if one of the packages is already in the 'toresolve' list
+ already_presents = False
+ for p in pkgs:
+ if(p in toresolve or p in output):
+ already_presents = True
+ break
+ #if not - add the first package
+ if(not already_presents):
+ output.append(sorted(pkgs)[0])
+ if(len(pkgs)>1):
+ vprint("Selected: " + sorted(pkgs)[0])
+ return output
+
+
+def get_installed_packages():
+ '''Fill 'installed_packages' with installed package data so that it looks like
+ {pkg_name:[[version1,release1], [version2,release2], ...], ...} '''
+ global installed_packages, installed_loaded
+ if(installed_loaded):
+ return
+ installed_loaded = True
+ installed_packages = {}
+
+ ts = rpm.TransactionSet()
+ mi = ts.dbMatch()
+ for h in mi:
+ if(h['name'] not in installed_packages):
+ installed_packages[h['name']] = []
+ installed_packages[h['name']].append( [h['version'], h['release']] )
+ vprint("The list of installed packages loaded")
+
+def check_what_to_skip(package_names):
+ ''' Take a list of package names and return the packages from it that do not need to be downloaded '''
+
+ def should_be_excluded(pkg):
+ for line in command_line_arguments.exclude_packages:
+ if(re.search(line, pkg) is not None):
+ return True
+ return False
+
+ vprint("Check package to skip...")
+ pkgs = package_names[:]
+ to_skip = []
+ # remove packages that have to be excluded due to command line arguments
+ for pkg in pkgs[:]:
+ if(should_be_excluded(pkg)):
+ pkgs.remove(pkg)
+ to_skip.append(pkg)
+
+ if(command_line_arguments.resolve_all):
+ return to_skip
+
+ # Skip packages that are already installed and have the same version
+ get_installed_packages()
+
+ #remove from to_skip candidates all the packages which are not installed
+ for pkg in pkgs[:]:
+ if(pkg not in installed_packages):
+ pkgs.remove(pkg)
+
+ vprint("Retrieving possible downloading package versions...")
+ res = get_command_output(cmd + ['--sources'] + pkgs)
+ urls = res[0].strip().split('\n')
+ vprint("A list of urls retrieved: " + str(urls))
+ to_download = {}
+ rpms = {}
+ for url in urls: # collect data
+ res = get_package_fields(url)
+ if(res[0] not in rpms):
+ rpms[res[0]] = []
+ rpms[res[0]].append(res[1:4])
+
+
+ if(not command_line_arguments.all_versions):
+ vprint("Removing urls of the older versions...")
+ for pkg in rpms.keys()[:]: # filter
+ L = rpms[pkg]
+ while(len(L) > 1):
+ if(rpm.evrCompare(L[0][0], L[1][0]) == 1):
+ del L[1]
+ else:
+ del L[0]
+
+ # regroup data: to_download[pkg_name] = [ver-rel1, ver-rel2, ...]
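+ # Illustrative shapes at this point (assumed example, not from the
+ # original code):
+ #   rpms        = {'foo': [['1.0-1', '.x86_64.rpm', 'http://mirror/media/']]}
+ #   to_download = {'foo': ['1.0-1']}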
+
+ for pkg in rpms:
+ if(pkg not in to_download):
+ to_download[pkg] = []
+ for item in rpms[pkg]:
+ to_download[pkg].append(item[0]) # item[0] == version
+
+ vprint("Checking what to skip...")
+
+ for pkg in pkgs:
+ installed_versions = ['-'.join(i) for i in installed_packages[pkg]]
+ #print pkg, str(installed_versions)
+ for ver in to_download[pkg][:]:
+ if (ver in installed_versions):
+ to_download[pkg].remove(ver)
+ if(len(to_download[pkg]) == 0):
+ to_download.pop(pkg)
+ to_skip.append(pkg)
+ vprint("Skipping " + pkg)
+ return to_skip
+
+
+def resolve_packages(package_names):
+ '''Returns a list of packages recursively resolved from the given list'''
+ global installed_packages
+
+ resolved_packages = []
+ def _resolve_packages(pkg_names):
+ toresolve = []
+ pkgs = parse_packages(pkg_names, toresolve)
+ to_skip = check_what_to_skip(pkgs)
+ for pkg in pkgs[:]:
+ if(pkg in resolved_packages or (pkg in to_skip and (pkg not in package_names or resolve_source))):
+ # don't resolve its dependencies.
+ pkgs.remove(pkg)
+ else:
+ resolved_packages.append(pkg)
+ toresolve.append(pkg)
+
+ if (len(toresolve) == 0):
+ return
+ vprint ("Resolving " + str(toresolve))
+ names = get_command_output(['urpmq', "--requires-recursive"] + toresolve)[0].strip().split("\n")
+ _resolve_packages(names)
+
+ _resolve_packages(package_names)
+ return resolved_packages
+
+def get_srpm_names(pkgs):
+ '''Get a list of srpm names for every given package name. Returns a dictionary {package_name_1:[srpm_name_1, srpm_name_2,...], ...}'''
+ srpms = {}
+ cmd_tmp = cmd[:] + ['--sourcerpm'] + pkgs
+ names = get_command_output(cmd_tmp)[0]
+
+ for line in names.split("\n"):
+ line = line.strip()
+ if(line == ''):
+ continue
+ n = line.split(":")[0].strip()
+ v = ":".join((line.split(":")[1:])).strip()
+ if(n not in srpms):
+ srpms[n] = []
+ srpms[n].append(v)
+ return srpms
+
+
+def get_srpm_url(url):
+ if(url.startswith("file://") or url.startswith("/")):
+ return url
+ tmp = url.split("/")
+ tmp[-4] = "SRPMS"
+ del tmp[-3]
+ return "/".join(tmp)
+
+
+def list_srpm_urls():
+ # srpm_urls has to be global as well, otherwise the cached list is lost
+ # between calls and the NameError below forces a reload every time
+ global cmd, srpm_urls_loaded, srpm_urls
+ try:
+ srpm_urls_loaded
+ return srpm_urls
+ except:
+ srpm_urls_loaded = True
+ vprint("Loading list of SRPM URLs...")
+ re_slash = re.compile("/")
+ lines = get_command_output(cmd + ["--list-url"])[0].strip().split("\n")
+ media = get_command_output(cmd + ["--list-media", 'active'])[0].strip().split("\n")
+
+ srpm_urls = []
+ for line in lines:
+ parts = line.split(" ")
+ medium = ' '.join(parts[:-1])
+ if medium not in media:
+ continue
+ url = parts[-1]
+ if(url.endswith("/")):
+ url = url[:-1]
+ if(re_slash.search(url) is not None):
+ srpm_urls.append(get_srpm_url(url))
+
+ return srpm_urls
+
+def try_download(url):
+ ''' Try to download a file. Return None on success, or the IOError on failure '''
+ path = os.path.join(command_line_arguments.dest_dir, os.path.basename(url))
+ vprint("Trying to download file " + url)
+ try:
+ if(not os.path.exists(path) or command_line_arguments.overwrite):
+ #(path, msg) = urlretrieve(url, path)
+ if(url.startswith('/')): # local file
+ shutil.copyfile(url, path)
+ else:
+ fd = urlopen(url)
+ file = open(path, 'w')
+ file.write(fd.read())
+ file.close()
+ fd.close()
+ qprint (_("* Downloaded: ") + url)
+ else:
+ qprint (_("* File exists, skipping: ") + url)
+ return None
+ except IOError, e:
+ return e
+
+def get_package_fields(rpmname):
+ ''' Return [name, version, suffix, path(prefix)] for given rpm file or package name '''
+ suffix = ""
+ path = os.path.dirname(rpmname)
+ if(path):
"/" + + filename = False + rpmname = os.path.basename(rpmname) + if(rpmname.endswith(".rpm")): + suffix = ".rpm" + rpmname = rpmname[:-4] + filename = True + + if(rpmname.endswith(".src")): + suffix = ".src" + suffix + rpmname = rpmname[:-4] + name = rpmname.split("-")[:-2] + version = rpmname.split("-")[-2:] + else: + re_version = re.compile("(\.)?((alpha)|(cvs)|(svn)|(r))?\d+((mdv)|(mdk)|(mnb))") + if(filename): + parts = rpmname.split('.') + suffix = "." + parts[-1] + suffix + rpmname = '.'.join(parts[:-1]) # remove the architecture part + sections = rpmname.split("-") + if(re_version.search(sections[-1]) == None): + name = sections[:-3] + version = sections[-3:-1] + suffix = "-" + sections[-1] + suffix + else: + name = sections[:-2] + version = sections[-2:] + return ["-".join(name), "-".join(version), suffix, path] + + +#url = 'ftp://ftp.sunet.se/pub/Linux/distributions/mandrakelinux/official/2011/x86_64/media/contrib/release/lib64oil0.3_0-0.3.17-2mdv2011.0.x86_64.rpm' +#url = 'ftp://ftp.sunet.se/pub/Linux/distributions/mandrakelinux/official/2011/x86_64/media/contrib/release/liboil-tools-0.3.17-2mdv2011.0.x86_64.rpm' +#res = get_package_fields(url) +#print res +#exit() + + +def filter_versions(rpm_list): + ''' When different versions of one package given, remove older version and returns only the newest one for every package. ''' + if(command_line_arguments.all_versions): + return rpm_list + + rpms = {} + vprint("Filtering input: " + str(rpm_list)) + for srpm in rpm_list: # collect data + res = get_package_fields(srpm) + if(res[0] not in rpms): + rpms[res[0]] = [] + rpms[res[0]].append(res[1:4]) + + for pkg in rpms.keys()[:]: # filter + L = rpms[pkg] + while(len(L)> 1): + if(rpm.evrCompare(L[0][0], L[1][0]) == 1): + del L[1] + else: + del L[0] + + output = [] + for pkg in rpms: # assembling package names + output.append ( rpms[pkg][0][2] + pkg + "-" + rpms[pkg][0][0] + rpms[pkg][0][1]) + vprint ("Filtering output: " + str(output)) + return output + +def download_srpm(package, srpms): + '''download the srpm with a given name. Try to find it in the repository. Returns a list of downloaded file names''' + vprint("downloading srpm(s) for package " + package) + + srpm_urls = list_srpm_urls() + downloaded = [] + for srpm in filter_versions(srpms[package]): + count = 0 + for srpm_url in srpm_urls: + url = srpm_url + "/" + srpm + if(command_line_arguments.urls): # a correct url have to be printed! 
+ if(not url_exists(url)): + continue + qprint(url) + if(not command_line_arguments.resolve): + count += 1 + break + + if(try_download(url) == None): + count += 1 + downloaded.append(os.path.join(command_line_arguments.dest_dir, os.path.basename(url))) + break + + if(count == 0): + eprint(_("Can not download SRPM for package") + srpm) + if(not command_line_arguments.ignore_errors): + exit(2) + + return downloaded + + +def download_rpm(pkgs_to_download): + global resolve_source, downloaded_debug_pkgs + vprint("downloading packages " + ", ".join (pkgs_to_download)) + cmd_bin = cmd[:] + ['--sources'] + pkgs_to_download + urls = get_command_output(cmd_bin)[0].strip().split("\n") + + urls = filter_versions(urls) + + if(command_line_arguments.binary or resolve_source): + for url in urls: + if(command_line_arguments.urls): + qprint(url) + continue + + res = try_download(url) + if(res != None): + eprint(_("Can not download RPM") + "%s\n(%s)" % (url, res) ) + if(not command_line_arguments.ignore_errors): + exit(3) + if(command_line_arguments.debug_info): + pkgs_to_download_debug = [p+"-debug" for p in pkgs_to_download[:]] + qprint(_("Resolving debug-info packages...")) + cmd_debug = ['urpmq', '--media', 'debug', '--sources'] + pkgs_to_download_debug + res = get_command_output(cmd_debug, fatal_fails=False) + + # urpmq output. RU: Нет пакета с названием + text = _("No package named ") + vprint("Removing missed debug packages from query...") + removed = [] + if(res[2] != 0): # return code is not 0 + + for line in res[1].split("\n"): + if line.startswith(text): + pkg = line[len(text):] + pkgs_to_download_debug.remove(pkg) + removed.append(pkg) + + vprint("Removed %d packages" % len(removed)) + vprint(removed) + + cmd_debug = ['urpmq', '--media', 'debug', '--sources'] + pkgs_to_download_debug + urls = get_command_output(cmd_debug)[0].strip().split("\n") + urls = filter_versions(urls) + for url in urls: + if(command_line_arguments.urls): + qprint(url) + continue + res = try_download(url) + if(res != None): + eprint(_("Can not download RPM") + "%s:\n(%s)\n" % (os.path.basename(url), res) + + _("Maybe you need to update urpmi database (urpmi.update -a)?") ) + if(not command_line_arguments.ignore_errors): + exit(2) + else: + path = os.path.join(command_line_arguments.dest_dir, os.path.basename(url)) + downloaded_debug_pkgs.append(path) + + if(command_line_arguments.debug_info_install): + for pkg in downloaded_debug_pkgs: + qprint(_('Installing ') + os.path.basename(str(pkg)) + "...") + command = ['rpm', '-i', pkg] + res = get_command_output(command,fatal_fails=False) + if(res[2] != 0): # rpm return code is not 0 + qprint(_('Error while calling command') + ' "' + ' '.join(command) + '":\n' + res[1].strip()) + + +def filter_debug_rpm_urls(input_urls): + command = ['urpmq', '--media', 'debug', '--sources', pkg_name + "-debug"] + res = get_command_output(command, fatal_fails=False) + if(res[2] != 0): # return code is not 0 + qprint(_("Debug package for '%s' not found") % pkg_name) + return [] + names = res[0].strip().split("\n") + if(command_line_arguments.all_versions): + return names + + get_installed_packages() + #print names + #print installed_packages[pkg_name] + urls = [] + for n in names: + res = get_package_fields(os.path.basename(n)) + version = "-".join(res[1].split("-")[0:2] ) + if(pkg_name not in installed_packages): + break + for inst_pkg in installed_packages[pkg_name]: + if(version == inst_pkg[0] + "-" + inst_pkg[1]): + urls.append(n) + break + return urls + + +def Main(): + global cmd, 
resolve_source
+ resolve_source = False # flag that makes download_rpm download resolved build-deps
+ cmd = ['urpmq']
+ if(command_line_arguments.include_media != None):
+ media = ''
+ for i in command_line_arguments.include_media:
+ media = ",".join([media]+i)
+ cmd = cmd + ['--media', media[1:]]
+
+ if(command_line_arguments.exclude_media != None):
+ media = ''
+ for i in command_line_arguments.exclude_media:
+ media = ",".join([media]+i)
+ cmd = cmd + ['--excludemedia', media[1:]]
+
+ missing_files = []
+ for pkg in command_line_arguments.packages[:]:
+ if(pkg.endswith(".rpm")):
+ if(not os.path.exists(pkg) or not os.path.isfile(pkg)):
+ missing_files.append(pkg)
+ continue
+ name = get_rpm_tag_from_file("name", pkg)
+ command_line_arguments.packages.remove(pkg)
+ command_line_arguments.packages.append(name)
+
+ if(missing_files):
+ eprint(_("Parameters that end with '.rpm' seem to be local files, but the following files do not exist: ") + ", ".join(missing_files))
+ if(not command_line_arguments.ignore_errors):
+ exit(4)
+
+ if(command_line_arguments.source):
+ download(command_line_arguments.packages, True)
+
+ if(command_line_arguments.binary or (not command_line_arguments.source and command_line_arguments.debug_info)):
+ download(command_line_arguments.packages, False)
+
+
+def get_rpm_tag_from_file(tag, file):
+ rpm_ts = rpm.TransactionSet()
+ fd = os.open(file, os.O_RDONLY)
+ rpm_hdr = rpm_ts.hdrFromFdno(fd)
+ os.close(fd)
+ return rpm_hdr.sprintf("%{" + tag + "}").strip()
+
+
+def download(packages, src):
+ global resolve_source
+ pkgs_to_download = packages
+
+ if(src):
+ if(command_line_arguments.urls):
+ qprint(_("Searching src.rpm file(s) in repository..."))
+ else:
+ qprint(_("Downloading src.rpm file(s)..."))
+ srpms = get_srpm_names(packages)
+ #for pkg in packages[:]:
+ #if (pkg not in srpms):
+ #eprint("Package " + pkg + " not found!")
+ #if(not command_line_arguments.ignore_errors):
+ # exit(1)
+ #else:
+ # eprint ("Package is dequeued.")
+ #packages.remove(pkg)
+
+ srpms_list= []
+ for package in packages:
+ srpms_list = srpms_list + download_srpm(package, srpms)
+
+ if(len(srpms_list) == 0):
+ return
+
+ if(command_line_arguments.resolve):
+ resolve_source = True
+ pkgs = []
+ lines = get_command_output(cmd + ['--requires-recursive'] + srpms_list)[0].strip().split("\n")
+ pkgs = parse_packages(lines, [])
+ download(pkgs, False)
+ resolve_source = False
+
+ else:
+ pkgs_to_download = packages
+ if(command_line_arguments.resolve):
+ if(resolve_source):
+ qprint(_("Resolving build dependencies..."))
+ else:
+ qprint(_("Resolving dependencies..."))
+ pkgs_to_download = resolve_packages(packages)
+ qprint (_("Resolved %d packages") % len(pkgs_to_download))
+ if(len(pkgs_to_download) == 0):
+ qprint(_("Nothing to download"))
+ return
+ download_rpm(pkgs_to_download)
+
+
+downloaded_debug_pkgs = []
+installed_loaded=False
+VERSION = "urpm-downloader 2.2.4"
+if __name__ == '__main__':
+ parse_command_line()
+ Main()
diff --git a/urpm-tools/urpm-package-cleanup.py b/urpm-tools/urpm-package-cleanup.py
new file mode 100755
index 0000000..7a1fc5d
--- /dev/null
+++ b/urpm-tools/urpm-package-cleanup.py
@@ -0,0 +1,556 @@
+#!/usr/bin/python
+'''
+" Package cleanup utility for distributions using urpm
+" Based on package-cleanup from yum-utils
+"
+" Copyright (C) 2011 ROSA Laboratory.
+" Written by Denis Silakov +" +" This program is free software: you can redistribute it and/or modify +" it under the terms of the GNU General Public License or the GNU Lesser +" General Public License as published by the Free Software Foundation, +" either version 2 of the Licenses, or (at your option) any later version. +" +" This program is distributed in the hope that it will be useful, +" but WITHOUT ANY WARRANTY; without even the implied warranty of +" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +" GNU General Public License for more details. +" +" You should have received a copy of the GNU General Public License +" and the GNU Lesser General Public License along with this program. +" If not, see . +''' + +import sys + +import logging +import os +import re +import subprocess +import string +import urpmmisc +import types + +from rpm5utils import miscutils, arch, transaction +import argparse +import rpm + +import gettext +gettext.install('urpm-tools') + +def exactlyOne(l): + return len(filter(None, l)) == 1 + + +class PackageCleanup(): + NAME = 'urpm-package-cleanup' + VERSION = '0.1' + USAGE = """ + urpm-package-cleanup: helps find problems in the rpmdb of system and correct them + + usage: urpm-package-cleanup --problems or --leaves or --orphans or --oldkernels + """ + def __init__(self): + self.addCmdOptions() + self.main() + + def addCmdOptions(self): + self.ArgParser = argparse.ArgumentParser(description=_('Find problems in the rpmdb of system and correct them')) + self.ArgParser.add_argument("--qf", "--queryformat", dest="qf", + action="store", + default='%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}', + help=_("Query format to use for output.")) + self.ArgParser.add_argument("--auto", default=False, + dest="auto",action="store_true", + help=_('Use non-interactive mode')) + self.ArgParser.add_argument("--version", action='version', version=self.VERSION) + + probgrp = self.ArgParser.add_argument_group(_('Orphans Options')) + probgrp.add_argument("--orphans", default=False, + dest="orphans",action="store_true", + help=_('List installed packages which are not available from'\ + ' currently configured repositories')) + probgrp.add_argument("--update", default=False, + dest="update",action="store_true", + help=_('Use only update media. This means that urpmq will search'\ + ' and resolve dependencies only in media marked as containing updates'\ + ' (e.g. which have been created with "urpmi.addmedia --update").')) + + probgrp.add_argument("--media", metavar='media', nargs='+', + help=_('Select specific media to be used, instead of defaulting to all available '\ + 'media (or all update media if --update is used). 
No rpm will be found in ' + 'other media.')) + + probgrp.add_argument("--excludemedia", metavar='media', nargs='+', + help=_('Do not use the specified media.')) + + probgrp = self.ArgParser.add_argument_group(_('Dependency Problems Options')) + probgrp.add_argument("--problems", default=False, + dest="problems", action="store_true", + help=_('List dependency problems in the local RPM database')) + probgrp.add_argument("--suggests", default=False, + dest="suggests", action="store_true", + help=_('List missing suggestions of installed packages')) + + + dupegrp = self.ArgParser.add_argument_group(_('Duplicate Package Options')) + dupegrp.add_argument("--dupes", default=False, + dest="dupes", action="store_true", + help=_('Scan for duplicates in your rpmdb')) + dupegrp.add_argument("--cleandupes", default=False, + dest="cleandupes", action="store_true", + help=_('Scan for duplicates in your rpmdb and remove older ')) + dupegrp.add_argument("--noscripts", default=False, + dest="noscripts", action="store_true", + help=_("disable rpm scriptlets from running when cleaning duplicates")) + + leafgrp = self.ArgParser.add_argument_group(_('Leaf Node Options')) + leafgrp.add_argument("--leaves", default=False, dest="leaves", + action="store_true", + help=_('List leaf nodes in the local RPM database')) + leafgrp.add_argument("--all", default=False, dest="all_nodes", + action="store_true", + help=_('list all packages leaf nodes that do not match'\ + ' leaf-regex')) + leafgrp.add_argument("--leaf-regex", + default="(^(compat-)?lib(?!reoffice).+|.*libs?[\d-]*|.*-data$)", + help=_('A package name that matches this regular expression' \ + ' (case insensitively) is a leaf')) + leafgrp.add_argument("--exclude-devel", default=False, + action="store_true", + help=_('do not list development packages as leaf nodes')) + leafgrp.add_argument("--exclude-bin", default=False, + action="store_true", + help=_('do not list packages with files in a bin dirs as '\ + 'leaf nodes')) + + kernelgrp = self.ArgParser.add_argument_group(_('Old Kernel Options')) + kernelgrp.add_argument("--oldkernels", default=False, + dest="kernels",action="store_true", + help=_("Remove old kernel and kernel-devel packages")) + kernelgrp.add_argument("--count",default=2,dest="kernelcount", + action="store", + help=_('Number of kernel packages to keep on the '\ + 'system (default 2)')) + kernelgrp.add_argument("--keepdevel", default=False, dest="keepdevel", + action="store_true", + help=_('Do not remove kernel-devel packages when ' + 'removing kernels')) + + def _removePkg(self, pkg): + """remove given package""" + # No smart behavior yet, simply call urpme for the package + pkgName = pkg['name'] + "-" + pkg['version'] + if pkg['release']: + pkgName += '-' + pkg['release'] + eraseOpts = string.join(self.tsflags, " ") + if eraseOpts: + subprocess.call(['urpme', pkgName, eraseOpts]) + else: + subprocess.call(['urpme', pkgName]) + + + @staticmethod + def _genDeptup(name, flags, version): + """ Given random stuff, generate a usable dep tuple. """ + + if flags == 0: + flags = None + + if type(version) is types.StringType: + (r_e, r_v, r_r) = miscutils.stringToVersion(version) + # would this ever be a ListType? 
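+        # stringToVersion() is expected to split an EVR string such as
+        # "1:2.3-4" into its (epoch, version, release) parts, e.g. ('1', '2.3', '4').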
+ elif type(version) in (types.TupleType, types.ListType): + (r_e, r_v, r_r) = version + else: + # FIXME: This isn't always type(version) is types.NoneType: + # ...not sure what it is though, come back to this + r_e = r_v = r_r = None + + deptup = (name, urpmmisc.share_data(flags), + (urpmmisc.share_data(r_e), urpmmisc.share_data(r_v), + urpmmisc.share_data(r_r))) + return urpmmisc.share_data(deptup) + + def _getProvides(self, req, flags, ver): + """searches the rpmdb for what provides the arguments + returns a list of pkg objects of providing packages, possibly empty""" + + ts = rpm.TransactionSet() + mi = ts.dbMatch('provides', req) + + deptup = self._genDeptup(req, flags, ver) + if deptup in self._get_pro_cache: + return self._get_pro_cache[deptup] + r_v = deptup[2][1] + + result = { } + + for po in mi: + prov_idx = 0 + for prov in po['provides']: + if prov != req: + prov_idx += 1 + continue + + prov_ver = po['provideversion'][prov_idx] + prov_flags = po['provideflags'][prov_idx] + prov_idx += 1 + + if req[0] == '/' and r_v is None: + result[po] = [(req, None, (None, None, None))] + continue + + if deptup[2][1] is None and deptup[2][2] is None and deptup[2][0] is None: + result[po] = [(req, None, (None, None, None))] + else: + provtup = (req, prov_flags, (po['epoch'], po['version'], po['release'])) + matched = miscutils.rangeCompare(deptup, provtup) + if not matched: + print "NOT MATCHED " + str(deptup) + " VS " + str(provtup) + + if matched: + result[po] = [(req, None, (None, None, None))] + + self._get_pro_cache[deptup] = result + + # Check if we have dependency on file not listed + # directly in PROVIDES + if not result and req[0] == '/' and r_v is None: + mi = ts.dbMatch('filepaths', req) + for po in mi: + result[po] = [(req, None, (None, None, None))] + + return result + + def _find_missing_deps(self, pkgs): + """find any missing dependencies for any installed package in pkgs""" + + providers = {} # To speed depsolving, don't recheck deps that have + # already been checked + problems = [] + missing_suggests = [] + + for po in pkgs: + req_idx = 0; + for req in po['requires']: + ver = po['requireversion'][req_idx] + flags = po['requireflags'][req_idx] + req_idx += 1 + + if req.startswith('rpmlib'): continue # ignore rpmlib deps + if (req,flags,ver) not in providers: + resolve_sack = self._getProvides(req,flags,ver) + else: + resolve_sack = providers[(req,flags,ver)] + + if len(resolve_sack) < 1: + #~ flags = yum.depsolve.flags.get(flags, flags) + missing = miscutils.formatRequire(req,ver,flags) + # RPMSENSE_MISSINGOK == (1 << 19) + if req in po['suggests'] or flags & (1 << 19): + missing_suggests.append((po, "suggests %s" % missing)) + else: + problems.append((po, "requires %s" % missing)) + + else: + # Store the resolve_sack so that we can re-use it if another + # package has the same requirement + providers[(req,flags,ver)] = resolve_sack + + return [problems, missing_suggests] + + def _find_installed_duplicates(self, ignore_kernel=True): + """find installed duplicate packages returns a dict of + pkgname = [[dupe1, dupe2], [dupe3, dupe4]] """ + + multipkgs = {} + singlepkgs = {} + results = {} + + ts = rpm.TransactionSet() + mi = ts.dbMatch() + + for pkg in mi: + # just skip kernels and everyone is happier + if ignore_kernel: + if 'kernel' in pkg['provides_names']: + continue + if pkg['name'].startswith('kernel'): + continue + + # public keys from different repos may have different versions + if pkg['name'].startswith('gpg-pubkey'): + continue + + name = pkg['name'] + if name in 
multipkgs or name in singlepkgs:
+                continue
+
+            pkgs = ts.dbMatch( 'name', name )
+
+            for po in pkgs:
+                if name not in multipkgs:
+                    multipkgs[name] = []
+                if name not in singlepkgs:
+                    singlepkgs[name] = []
+
+                if arch.isMultiLibArch(arch=po['arch']):
+                    multipkgs[name].append(po)
+                elif po['arch'] == 'noarch':
+                    multipkgs[name].append(po)
+                    singlepkgs[name].append(po)
+                elif not arch.isMultiLibArch(arch=po['arch']):
+                    singlepkgs[name].append(po)
+                else:
+                    print _("Warning: neither single nor multi lib arch: %s ") % po['arch']
+
+        for (name, pkglist) in multipkgs.items() + singlepkgs.items():
+            if len(pkglist) <= 1:
+                continue
+
+            if name not in results:
+                results[name] = []
+            if pkglist not in results[name]:
+                results[name].append(pkglist)
+
+        return results
+
+    def _remove_old_dupes(self):
+        """add older duplicate pkgs to be removed in the transaction"""
+        dupedict = self._find_installed_duplicates()
+
+        removedupes = []
+        for (name, dupelists) in dupedict.items():
+            for dupelist in dupelists:
+                dupelist.sort()
+                for lowpo in dupelist[0:-1]:
+                    removedupes.append(lowpo)
+
+        # No smart behavior yet, simply call urpme for every package
+        for po in removedupes:
+            self._removePkg(po)
+
+    def _should_show_leaf(self, po, leaf_regex, exclude_devel, exclude_bin):
+        """
+        Determine if the given pkg should be displayed as a leaf or not.
+
+        Return True if the pkg should be shown, False if not.
+        """
+
+        if po['name'] == 'gpg-pubkey':
+            return False
+        name = po['name']
+        if exclude_devel and name.endswith('devel'):
+            return False
+        if exclude_bin:
+            for file_name in po['filepaths']:
+                if file_name.find('bin') != -1:
+                    return False
+        if leaf_regex.match(name):
+            return True
+        return False
+
+    def _get_kernels(self):
+        """return a list of all installed kernels, sorted newest to oldest"""
+
+        ts = rpm.TransactionSet()
+        mi = ts.dbMatch('provides', 'kernel')
+        kernlist = []
+
+        for h in mi:
+            kernlist.append(h)
+
+        kernlist.sort()
+        kernlist.reverse()
+        return kernlist
+
+    def _get_old_kernel_devel(self, kernels, removelist):
+        """ List all kernel devel packages that either belong to kernel versions that
+        are no longer installed or to kernel versions that are in the removelist"""
+
+        devellist = []
+        ts = rpm.TransactionSet()
+        mi = ts.dbMatch('provides', 'kernel-devel')
+
+        for po in mi:
+            # For all kernel-devel packages see if there is a matching kernel
+            # in kernels but not in removelist
+            keep = False
+            for kernel in kernels:
+                if kernel in removelist:
+                    continue
+                (kname, karch, kepoch, kver, krel) = (kernel['name'], kernel['arch'], kernel['epoch'], kernel['version'], kernel['release'])
+                (dname, darch, depoch, dver, drel) = (po['name'], po['arch'], po['epoch'], po['version'], po['release'])
+                if (karch, kepoch, kver, krel) == (darch, depoch, dver, drel):
+                    keep = True
+            if not keep:
+                devellist.append(po)
+        return devellist
+
+    def _remove_old_kernels(self, count, keepdevel):
+        """Remove old kernels, keep at most count kernels (and always keep the running
+        kernel)"""
+
+        count = int(count)
+        kernels = self._get_kernels()
+        runningkernel = os.uname()[2]
+        # Vanilla kernels don't have a release, only a version
+        if '-' in runningkernel:
+            splt = runningkernel.split('-')
+            if len(splt) == 2:
+                (kver, krel) = splt
+            else: # Handle cases where a custom build kernel has an extra '-' in the release
+                kver = splt[0]
+                krel = "-".join(splt[1:])
+            if krel.split('.')[-1] == os.uname()[-1]:
+                krel = ".".join(krel.split('.')[:-1])
+        else:
+            kver = runningkernel
+            krel = ""
+        remove = kernels[count:]
+
+        toremove = []
+        # Remove running
kernel from remove list + for kernel in remove: + if kernel['version'] == kver and krel.startswith(kernel['release']): + print _("Not removing kernel %(kver)s-%(krel)s because it is the running kernel") % {'kver': kver, 'krel': krel} + else: + toremove.append(kernel) + + + # Now extend the list with all kernel-devel pacakges that either + # have no matching kernel installed or belong to a kernel that is to + # be removed + if not keepdevel: + toremove.extend(self._get_old_kernel_devel(kernels, toremove)) + + for po in toremove: + self._removePkg(po) + + + def main(self): + opts = self.ArgParser.parse_args(sys.argv[1:]) + if not exactlyOne([opts.problems, opts.dupes, opts.leaves, opts.kernels, + opts.orphans, opts.cleandupes]): + print self.ArgParser.format_help() + sys.exit(1) + + self.tsflags = [] + + if opts.problems: + ts = rpm.TransactionSet() + mi = ts.dbMatch() + self._get_pro_cache = {} + (issues, missing_suggests) = self._find_missing_deps(mi) + for (pkg, prob) in issues: + print _('Package %(qf)s %(prob)s') % {'qf': pkg.sprintf(opts.qf), 'prob': prob} + + if( opts.suggests ): + print _("Missing suggests:") + for (pkg, prob) in missing_suggests: + print 'Package %s %s' % (pkg.sprintf(opts.qf), prob) + + if issues: + sys.exit(2) + else: + if (not opts.suggests) or (len(missing_suggests) == 0): + print _('No Problems Found') + sys.exit(0) + else: + sys.exit(3) + + if opts.dupes: + dupes = self._find_installed_duplicates() + for name, pkglists in dupes.items(): + for pkglist in pkglists: + for pkg in pkglist: + print '%s' % pkg.sprintf(opts.qf) + sys.exit(0) + + if opts.kernels: + if os.geteuid() != 0: + print _("Error: Cannot remove kernels as a user, must be root") + sys.exit(1) + if int(opts.kernelcount) < 1: + print _("Error: should keep at least 1 kernel!") + sys.exit(100) + if opts.auto: + self.tsflags.append('--auto') + + self._remove_old_kernels(opts.kernelcount, opts.keepdevel) + sys.exit(0) + #~ self.run_with_package_names.add('yum-utils') + #~ if hasattr(self, 'doUtilBuildTransaction'): + #~ errc = self.doUtilBuildTransaction() + #~ if errc: + #~ sys.exit(errc) + #~ else: + #~ try: + #~ self.buildTransaction() + #~ except yum.Errors.YumBaseError, e: + #~ self.logger.critical("Error building transaction: %s" % e) + #~ sys.exit(1) +#~ + #~ if len(self.tsInfo) < 1: + #~ print 'No old kernels to remove' + #~ sys.exit(0) +#~ + #~ sys.exit(self.doUtilTransaction()) + + + if opts.leaves: + self._ts = transaction.TransactionWrapper() + leaves = self._ts.returnLeafNodes() + leaf_reg = re.compile(opts.leaf_regex, re.IGNORECASE) + for po in sorted(leaves): + if opts.all_nodes or \ + self._should_show_leaf(po, leaf_reg, opts.exclude_devel, + opts.exclude_bin): + print po.sprintf(opts.qf) + + sys.exit(0) + + if opts.orphans: + """ Just a wrapper that invokes urpmq """ + aux_opts = "" + if opts.excludemedia: + aux_opts = " --excludemedia " + " ".join(opts.excludemedia) + if opts.media: + aux_opts += " --media " + " ".join(opts.media) + if opts.update: + aux_opts += " --update " + + subprocess.call(["urpmq", "--not-available", aux_opts]) + sys.exit(0) + + if opts.cleandupes: + if os.geteuid() != 0: + print _("Error: Cannot remove packages as a user, must be root") + sys.exit(1) + if opts.noscripts: + self.tsflags.append('--noscripts') + if opts.auto: + self.tsflags.append('--auto') + + self._remove_old_dupes() + #~ self.run_with_package_names.add('yum-utils') + + #~ if hasattr(self, 'doUtilBuildTransaction'): + #~ errc = self.doUtilBuildTransaction() + #~ if errc: + #~ sys.exit(errc) + #~ 
else: + #~ try: + #~ self.buildTransaction() + #~ except yum.Errors.YumBaseError, e: + #~ self.logger.critical("Error building transaction: %s" % e) + #~ sys.exit(1) + + #~ if len(self.tsInfo) < 1: + #~ print 'No duplicates to remove' + #~ sys.exit(0) + +if __name__ == '__main__': +# setup_locale() + util = PackageCleanup() diff --git a/urpm-tools/urpm-repoclosure.pl b/urpm-tools/urpm-repoclosure.pl new file mode 100755 index 0000000..1689c6d --- /dev/null +++ b/urpm-tools/urpm-repoclosure.pl @@ -0,0 +1,1167 @@ +#!/usr/bin/perl +######################################################## +# URPM Repos Closure Checker 1.3.1 for Linux +# A tool for checking closure of a set of RPM packages +# +# Copyright (C) 2012 ROSA Laboratory +# Written by Andrey Ponomarenko +# +# PLATFORMS +# ========= +# Linux (ROSA, Mandriva) +# +# REQUIREMENTS +# ============ +# - urpmi +# - Perl 5 (>=5.8) +# - Wget +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +######################################################## +use Getopt::Long; +Getopt::Long::Configure ("posix_default", "no_ignore_case"); +use Cwd qw(abs_path cwd); +use File::Path qw(mkpath rmtree); +use File::Temp qw(tempdir); +use File::Copy qw(copy move); +use Data::Dumper; +use Locale::gettext; +use strict; + +my $TOOL_VERSION = "1.3.1"; +my $CmdName = get_filename($0); + +my ($Help, $ShowVersion, $RPMlist, $RPMdir, $StaticMode, +$DynamicMode, $CheckRelease, $CheckSignature, $SelectRepos, +$NoClean, $Root, $HDlist, $FileDeps, $ResDir, $AddRPMs); + +textdomain("urpm-tools"); + +sub gettext_(@) +{ + my ($Str, @Params) = @_; + if(not $Str) { + return ""; + } + $Str = gettext($Str); + foreach my $N (1 .. $#Params+1) + { + my $P = $Params[$N-1]; + $Str=~s/\[_$N\]/$P/g; + } + return $Str; +} + +my $ShortUsage = gettext_("URPM Repos Closure Checker [_1] for Mandriva Linux +A tool for checking closure of a set of RPM packages +Copyright (C) 2012 ROSA Laboratory +License: GNU GPL + +Usage: [_2] [options] +Example: [_2] --hdlist=hdlist.txt + +More info: [_2] --help\n", $TOOL_VERSION, $CmdName); + +if($#ARGV==-1) { + print $ShortUsage."\n"; + exit(0); +} + +GetOptions("h|help!" => \$Help, + "v|version!" => \$ShowVersion, + "l|list=s" => \$RPMlist, + "d|dir=s" => \$RPMdir, + "hdlist=s" => \$HDlist, + "add=s" => \$AddRPMs, + "file-deps=s" => \$FileDeps, + "s|static!" => \$StaticMode, + "dynamic!" => \$DynamicMode, + "r|check-release!" => \$CheckRelease, + "sign|check-signature!" => \$CheckSignature, + "media=s" => \$SelectRepos, + "noclean!" => \$NoClean, + "root=s" => \$Root, + "o|res=s" => \$ResDir +) or ERR_MESSAGE(); + +my %EXIT_CODES = ( + "SUCCESS" => 0, + "ERROR" => 1, + "FAILED" => 2 +); + +my $HelpMessage = gettext_(" +NAME: + URPM Repos Closure Checker 1.0 for Mandriva Linux + A tool for checking closure of a set of RPM packages + +USAGE: + [_1] --hdlist=hdlist.txt + [_1] --hdlist=http://mirror.yandex.ru/mandriva/.../synthesis.hdlist.cz + [_1] --dir=rpms/ --static --file-deps=file-deps.txt + [_1] --list=list.txt --dynamic + +OPTIONS: + -h|-help + Print this help. 
+
+  -v|-version
+      Print version information.
+
+  -hdlist
+      Path or URL of HDlist (synthesis) to check.
+
+  -d|-dir
+      The directory with RPM packages to check.
+
+  -l|-list
+      The list of packages to check.
+
+  -add|-update
+      The directory with RPM packages that should
+      be added to the repository or updated.
+
+  -file-deps
+      Read file-deps to ignore some unresolved
+      dependencies.
+
+  -s|-static
+      Check statically if all required dependencies are
+      satisfied by provided dependencies in the set of
+      RPM packages.
+
+  -dynamic
+      Install a set of RPM packages to the local chroot
+      and check if extra packages were installed.
+
+  -r|-check-release
+      Check installation media (DVD).
+
+  -sign|-check-signature
+      Validate package signatures.
+
+  -noclean
+      Do not clean urpmi cache.
+
+  -root
+      Where to install packages.
+      Default:
+          /tmp/...
+
+EXIT CODES:
+    0 - Success. The tool has run without any errors
+    non-zero - Failed or the tool has run with errors. In particular:
+        1 - Failed to run the tool
+        2 - Discovered dependency problems
+
+\n", $CmdName);
+
+sub HELP_MESSAGE() {
+    print $HelpMessage;
+}
+
+sub ERR_MESSAGE()
+{
+    print $ShortUsage;
+    exit(1);
+}
+
+my %Cache;
+my $RPM_CACHE = "/var/cache/urpmi/rpms";
+my $TMP_DIR = tempdir(CLEANUP=>1);
+my %InstalledPackage;
+my %RequiredBy;
+my $TEST_MEDIA = "test_media";
+my %Packages;
+my %BrokenSignature;
+my %InstallFailed;
+my $RESULTS_DIR = "repoclosure_reports";
+
+sub appendFile($$)
+{
+    my ($Path, $Content) = @_;
+    return if(not $Path);
+    if(my $Dir = get_dirname($Path)) {
+        mkpath($Dir);
+    }
+    open(FILE, ">>".$Path) || die gettext_("can't open file \'[_1]\': [_2]\n", $Path, $!);
+    print FILE $Content;
+    close(FILE);
+}
+
+sub writeFile($$)
+{
+    my ($Path, $Content) = @_;
+    return if(not $Path);
+    if(my $Dir = get_dirname($Path)) {
+        mkpath($Dir);
+    }
+    open(FILE, ">".$Path) || die gettext_("can't open file \'[_1]\': [_2]\n", $Path, $!);
+    print FILE $Content;
+    close(FILE);
+}
+
+sub readFile($)
+{
+    my $Path = $_[0];
+    return "" if(not $Path or not -f $Path);
+    open(FILE, $Path);
+    local $/ = undef;
+    my $Content = <FILE>;
+    close(FILE);
+    return $Content;
+}
+
+sub get_filename($)
+{ # much faster than basename() from File::Basename module
+    if($_[0]=~/([^\/\\]+)[\/\\]*\Z/) {
+        return $1;
+    }
+    return "";
+}
+
+sub get_dirname($)
+{ # much faster than dirname() from File::Basename module
+    if($_[0]=~/\A(.*?)[\/\\]+[^\/\\]*[\/\\]*\Z/) {
+        return $1;
+    }
+    return "";
+}
+
+sub searchRPMs($)
+{
+    my $Path = $_[0];
+    if(not $Path or not -d $Path) {
+        return ();
+    }
+    my @RPMs = split("\n", `find $Path -type f -name "*.rpm"`); # -maxdepth 1
+    return sort {lc($a) cmp lc($b)} @RPMs;
+}
+
+sub addMedia($)
+{
+    my $Dir = $_[0];
+    if(not $Dir or not -d $Dir) {
+        return;
+    }
+    my %Media = map {$_=>1} split(/\n+/, `urpmq --list-media`);
+    if($Media{$TEST_MEDIA}) {
+        removeMedia();
+    }
+    $Dir = abs_path($Dir);
+    system("/usr/sbin/urpmi.addmedia $TEST_MEDIA $Dir");
+    system("/usr/sbin/urpmi.update $TEST_MEDIA");
+}
+
+sub removeMedia() {
+    system("/usr/sbin/urpmi.removemedia $TEST_MEDIA");
+}
+
+sub installPackage($)
+{
+    my $Package = $_[0];
+    my $Cmd = "/usr/sbin/urpmi";
+    if($CheckRelease)
+    { # from CD or DVD
+        $Cmd .= " --media=$TEST_MEDIA";
+    }
+    elsif($SelectRepos)
+    {
+        if(-d $SelectRepos) {
+            $Cmd .= " --media=$TEST_MEDIA";
+        }
+        else {
+            $Cmd .= " --media=$SelectRepos";
+        }
+    }
+    # create root where to install packages
+    if(not -d $TMP_DIR."/root") {
+        mkpath($TMP_DIR."/root");
+    }
+    if(not $CheckRelease) {
+        $Cmd .= " --no-install";
+    }
+    
if($Root) { + $Cmd .= " --root=\"$Root\""; + } + else { + $Cmd .= " --root=\"$TMP_DIR/root\""; + } + $Cmd .= " --noclean --auto --force"; + $Cmd .= " $Package"; + print "Running $Cmd\n"; + my $LogPath = $TMP_DIR."/ilog.txt"; + system($Cmd." >$LogPath 2>&1"); + my $Log = readFile($LogPath); + appendFile("$RESULTS_DIR/install-log.txt", $Log); + $Log=~s/The following packages have to be removed (.|\n)*\Z//g; + if($Log=~/ (unsatisfied|conflicts with|missing) ([\w\-\/]*)/i) + { + my ($Reason, $Dep) = ($1, $2); + $InstallFailed{getPName($Package)}=1; + print " FAILED: due to $Reason $Dep\n"; + } + if($CheckRelease) + { # installed + while($Log=~s/(installing\s+)([^\/\s]+\.rpm)(\s|\Z)/$1/) + { + my $RpmName = $2; + print " $RpmName\n"; + } + } + else + { # downloaded + while($Log=~s/(\/)([^\/\s]+\.rpm)(\s|\Z)/$1$3/) + { + my $RpmName = $2; + print " $RpmName\n"; + $RequiredBy{getPName($RPM_CACHE."/".$RpmName)}=getPName($Package); + } + } +} + +sub get_RPMname($) +{ + my $Path = $_[0]; + my $Name = get_filename($Path); + if($Cache{"get_RPMname"}{$Name}) { + return $Cache{"get_RPMname"}{$Name}; + } + if(not $Path or not -f $Path) { + return ""; + } + return ($Cache{"get_RPMname"}{$Name} = `rpm -qp --queryformat \%{name} \"$Path\"`); +} + +sub sepDep($) +{ + my $Dep = $_[0]; + if($Dep=~/\A(.+?)(\s+|\[)(=|==|<=|>=|<|>)\s+(.+?)(\]|\Z)/) + { + my ($N, $O, $V) = ($1, $3, $4); + # canonify version (1:3.2.5-5:2011.0) + return ($N, $O, $V); + } + else { + return ($Dep, "", ""); + } +} + +sub showDep($$$) +{ + my ($N, $O, $V) = @_; + if($O and $V) { + return $N." ".$O." ".$V; + } + else { + return $N + } +} + +sub sepVersion($) +{ + my $V = $_[0]; + if($V=~/\A(.+)(\-[^\-\:]+)(\:[^\:]+|)\Z/) + { # 3.2.5-5:2011.0 + return ($1, $2, $3); + } + return ($V, "", ""); +} + +sub simpleVersion($) +{ # x.y.z-r:n to x.y.z.r.n + my $V = $_[0]; + $V=~s/[\-:]/\./g; # -5:2011.0 + $V=~s/[a-z]+/\./ig; # 10-12mdk + $V=~s/\.\Z//g; + return $V; +} + +sub formatVersions(@) +{ # V1 - provided + # V2 - required + my ($V1, $V2) = @_; + my ($E1, $E2) = (); + if($V1=~s/\A([^\-\:]+)\://) { + $E1 = $1; + } + if($V2=~s/\A([^\-\:]+)\://) { + $E2 = $1; + } + my ($V1_M, $V1_R, $V1_RR) = sepVersion($V1); + my ($V2_M, $V2_R, $V2_RR) = sepVersion($V2); + if(not $V2_RR) { + $V1_RR = ""; + } + if(not $V2_R) { + $V1_R = ""; + } + $V1 = $V1_M.$V1_R.$V1_RR; + $V2 = $V2_M.$V2_R.$V2_RR; + if(defined $E1 and defined $E2) + { + $V1 = $E1.".".$V1; + $V2 = $E2.".".$V2; + } + return (simpleVersion($V1), simpleVersion($V2)); +} + +sub cmpVersions($$) +{ # compare two versions + # 3.2.5-5:2011.0 + # NOTE: perl 5.00503 and 5.12 + my ($V1, $V2) = formatVersions(@_); + return 0 if($V1 eq $V2); + my @V1Parts = split(/\./, $V1); + my @V2Parts = split(/\./, $V2); + for (my $i = 0; $i <= $#V1Parts && $i <= $#V2Parts; $i++) + { + my $N1 = $V1Parts[$i]; + my $N2 = $V2Parts[$i]; + if(defined $N1 and not defined $N2) { + return 1; + } + elsif(not defined $N1 and defined $N2) { + return -1; + } + if(my $R = cmpNums($N1, $N2)) { + return $R; + } + } + return -1 if($#V1Parts < $#V2Parts); + return 1 if($#V1Parts > $#V2Parts); + return 0; +} + +sub cmpNums($$) +{ + my ($N1, $N2) = @_; + # 00503 + # 12 + if($N1 eq $N2) { + return 0; + } + while($N1=~s/\A0([0]*[1-9]+)/$1/) { + $N2.="0"; + } + while($N2=~s/\A0([0]*[1-9]+)/$1/) { + $N1.="0"; + } + return int($N1)<=>int($N2); +} + +sub checkDeps($$$$) +{ + my ($N, $O, $V, $Provides) = @_; + if(not $O or not $V) + { # requires any version + return 1; + } + foreach my $OP (keys(%{$Provides})) + { + if(not $OP) + { # provides 
any version + return 1; + } + foreach my $VP (keys(%{$Provides->{$OP}})) + { + if($O eq "=" or $O eq "==") + { + if(cmpVersions($VP, $V)==0) + { # requires the same version + return 1; + } + } + elsif($O eq "<=") + { + if(cmpVersions($VP, $V)<=0) { + return 1; + } + } + elsif($O eq ">=") + { + if(cmpVersions($VP, $V)>=0) { + return 1; + } + } + elsif($O eq "<") + { + if(cmpVersions($VP, $V)<0) { + return 1; + } + } + elsif($O eq ">") + { + if(cmpVersions($VP, $V)>0) { + return 1; + } + } + } + } + return 0; +} + +sub checkSignature($) +{ + my $Path = $_[0]; + my $Info = `rpm --checksig $Path`; + if($Info!~/ OK(\s|\Z)/) { + $BrokenSignature{getPName($Path)}=1; + return 0; + } + return 1; +} + +sub checkRoot() +{ + if(not -w "/usr") { + print STDERR gettext_("ERROR: you should be root\n"); + exit(1); + } +} + +sub readRPMlist($$) +{ + my ($Path, $Type) = @_; + if(not -f $Path) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $Path); + exit(1); + } + my @RPMs = split(/\s+/, readFile($Path)); + if($#RPMs==-1) { + print STDERR gettext_("ERROR: the list of packages is empty\n"); + exit(1); + } + if($Type eq "RPMs") + { + foreach my $P (@RPMs) + { + if($P!~/\.rpm\Z/) + { + print STDERR gettext_("ERROR: file \'[_1]\' is not RPM package\n", $P); + exit(1); + } + elsif(not -f $P) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $P); + exit(1); + } + } + } + return @RPMs; +} + +sub checkRelease() +{ + checkRoot(); + if(not $RPMdir and not $RPMlist) + { + print STDERR gettext_("ERROR: --dir or --list option should be specified\n"); + exit(1); + } + clearCache(); + my @RPMs = (); + if($RPMlist) + { + @RPMs = readRPMlist($RPMlist, "RPMs"); + $RPMdir = get_dirname($RPMs[0]); + if(not $RPMdir) { + $RPMdir = "."; + } + } + else + { + if(not -d $RPMdir) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $RPMdir); + exit(1); + } + @RPMs = searchRPMs($RPMdir); + } + addMedia($RPMdir); + foreach my $Path (@RPMs) + { # add to cache + if(not -f $RPM_CACHE."/".get_filename($Path)) { + # copy($Path, $RPM_CACHE); + } + } + foreach my $Path (@RPMs) + { + installPackage($Path); + $Packages{get_filename($Path)} = 1; + } + removeMedia(); + checkResult(); +} + +sub dynamicCheck() +{ + checkRoot(); + if(not $RPMdir and not $RPMlist) + { + print STDERR gettext_("ERROR: --dir or --list option should be specified\n"); + exit(1); + } + clearCache(); + my @RPMs = (); + if($RPMdir) + { # --dir option + if(not -d $RPMdir) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $RPMdir); + exit(1); + } + @RPMs = searchRPMs($RPMdir); + foreach my $Path (@RPMs) + { # add to cache + copy($Path, $RPM_CACHE); + } + if(-d $SelectRepos) { + addMedia($SelectRepos); + } + foreach my $Path (@RPMs) + { + installPackage($Path); + $Packages{get_RPMname($Path)} = 1; + $Packages{get_filename($Path)} = 1; + } + if(-d $SelectRepos) { + removeMedia(); + } + } + elsif($RPMlist) + { + @RPMs = readRPMlist($RPMlist, "Names"); + if(-d $SelectRepos) { + addMedia($SelectRepos); + } + foreach my $Name (@RPMs) + { + installPackage($Name); + $Packages{$Name} = 1; + } + if(-d $SelectRepos) { + removeMedia(); + } + } + checkResult(); +} + +sub getPName($) +{ # package ID + my $Path = $_[0]; + if($RPMdir or not -f $Path) + { # input: RPMs + return get_filename($Path); + } + else + { # input: RPM names + return get_RPMname($Path); + } +} + +sub isInstalled($) +{ + my $Name = $_[0]; + if($InstallFailed{$Name}) { + return 0; + } + if(not $CheckRelease) { + if(not $InstalledPackage{$Name}) { + return 0; + } + } + 
return 1; +} + +sub checkResult() +{ + my (%ExtraPackages, %BrokenPackages) = (); + foreach my $Path (searchRPMs($RPM_CACHE)) + { # extra + my $Name = getPName($Path); + $InstalledPackage{$Name} = 1; + if(not $Packages{$Name}) { + $ExtraPackages{$Name} = $Path; + } + } + foreach my $Name (keys(%Packages)) + { # broken + if(not isInstalled($Name)) { + $BrokenPackages{$Name}=1; + } + } + if(my @Names = sort {lc($a) cmp lc($b)} keys(%ExtraPackages)) + { + my $Report = gettext_("Extra Packages:\n\n"); + foreach my $Name (@Names) + { + $Report .= $Name; + if(my $Req = $RequiredBy{$Name}) { + $Report .= gettext_(" (required by: [_1])", $Req); + } + $Report .= "\n"; + } + print $Report; + writeFile("$RESULTS_DIR/extra-packages.txt", $Report); + } + if(my @Names = sort {lc($a) cmp lc($b)} keys(%BrokenPackages)) + { + my $Report = gettext_("Broken Packages:\n\n"); + foreach my $Name (@Names) { + $Report .= "$Name\n"; + } + print $Report; + writeFile("$RESULTS_DIR/broken-packages.txt", $Report); + } + print gettext_("Report has been generated to:"); + print "\n $RESULTS_DIR/extra-packages.txt\n $RESULTS_DIR/broken-packages.txt\n"; + if(keys(%ExtraPackages) or keys(%BrokenPackages)) + { + exit($EXIT_CODES{"FAILED"}); + } + else { + exit($EXIT_CODES{"SUCCESS"}); + } +} + +sub sigCheck() +{ + if(not $RPMdir and not $RPMlist) + { + print STDERR gettext_("ERROR: --dir or --list option should be specified\n"); + exit(1); + } + print gettext_("Checking RPMs ...\n"); + my @RPMs = (); + if($RPMdir) + { + if(not -d $RPMdir) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $RPMdir); + exit(1); + } + @RPMs = searchRPMs($RPMdir); + } + elsif($RPMlist) { + @RPMs = readRPMlist($RPMlist, "RPMs"); + } + foreach my $Path (@RPMs) + { + print gettext_("Checking [_1]\n", get_filename($Path)); + if(not checkSignature($Path)) { + print gettext_(" FAILED: invalid signature\n"); + } + } + if(my @Names = sort {lc($a) cmp lc($b)} keys(%BrokenSignature)) + { + my $Report = gettext_("Broken Signature:\n\n"); + foreach my $Name (@Names) { + $Report .= "$Name\n"; + } + print $Report; + writeFile("$RESULTS_DIR/report.txt", $Report); + } + print gettext_("Report has been generated to:"); + print "\n $RESULTS_DIR/report.txt\n"; + if(keys(%BrokenSignature)) { + exit($EXIT_CODES{"FAILED"}); + } + else { + exit($EXIT_CODES{"SUCCESS"}); + } +} + +sub readLineNum($$) +{ + my ($Path, $Num) = @_; + return "" if(not $Path or not -f $Path); + open (FILE, $Path); + foreach (1 ... 
$Num) {
+        <FILE>;
+    }
+    my $Line = <FILE>;
+    close(FILE);
+    return $Line;
+}
+
+sub cmd_find($$$$)
+{
+    my ($Path, $Type, $Name, $MaxDepth) = @_;
+    return () if(not $Path or not -e $Path);
+    my $Cmd = "find \"$Path\"";
+    if($MaxDepth) {
+        $Cmd .= " -maxdepth $MaxDepth";
+    }
+    if($Type) {
+        $Cmd .= " -type $Type";
+    }
+    if($Name) {
+        if($Name=~/\]/) {
+            $Cmd .= " -regex \"$Name\"";
+        }
+        else {
+            $Cmd .= " -name \"$Name\"";
+        }
+    }
+    return split(/\n/, `$Cmd`);
+}
+
+sub readDeps($$$)
+{
+    my ($Path, $Dep, $RPMdep) = @_;
+    my $Name = get_filename($Path);
+    foreach my $Type ("provides", "suggests", "requires")
+    {
+        foreach my $D (split("\n", `rpm -qp -$Type $Path`))
+        {
+            my ($N, $O, $V) = sepDep($D);
+            $Dep->{$Type}{$N}{$O}{$V}=$Name;
+            $RPMdep->{$Type}{$Name}{$N}=1;
+        }
+    }
+}
+
+sub staticCheck()
+{
+    if(not $RPMdir and not $HDlist and not $RPMlist)
+    {
+        print STDERR gettext_("ERROR: --hdlist, --dir or --list option should be specified\n");
+        exit(1);
+    }
+    my (%Dep, %RPMdep, %AddedRPMs) = ();
+    if($AddRPMs)
+    {
+        if(not -d $AddRPMs)
+        {
+            print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $AddRPMs);
+            exit(1);
+        }
+        if(my @AddedRPMs = searchRPMs($AddRPMs))
+        {
+            foreach my $Path (@AddedRPMs)
+            {
+                readDeps($Path, \%Dep, \%RPMdep);
+                if(my $Name = get_RPMname($Path)) {
+                    $AddedRPMs{$Name}=1;
+                }
+            }
+        }
+    }
+    if($RPMdir or $RPMlist)
+    {
+        print gettext_("Checking RPMs ...\n");
+        my @RPMs = ();
+        if($RPMdir)
+        {
+            if(not -d $RPMdir)
+            {
+                print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $RPMdir);
+                exit(1);
+            }
+            @RPMs = searchRPMs($RPMdir);
+        }
+        elsif($RPMlist) {
+            @RPMs = readRPMlist($RPMlist, "RPMs");
+        }
+        foreach my $Path (@RPMs)
+        {
+            if($AddRPMs)
+            {
+                if(my $Name = get_RPMname($Path))
+                {
+                    if($AddedRPMs{$Name})
+                    { # already added
+                        next;
+                    }
+                }
+            }
+            readDeps($Path, \%Dep, \%RPMdep);
+        }
+    }
+    elsif($HDlist)
+    {
+        my $Content = readFile($HDlist);
+        if($HDlist=~/(http|https|ftp):\/\//)
+        {
+            print gettext_("Downloading HDlist ...\n");
+            my $DownloadTo = $TMP_DIR."/extract/".get_filename($HDlist);
+            $DownloadTo=~s/\.cz/\.gz/g; # cz == gz
+            my $Dir = get_dirname($DownloadTo);
+            mkdir($Dir);
+            system("wget -U '' --no-check-certificate \"$HDlist\" --connect-timeout=5 --tries=1 --output-document=\"$DownloadTo\" >/dev/null 2>&1");
+            if(not -f $DownloadTo
+            or not -s $DownloadTo) {
+                print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $HDlist);
+                exit(1);
+            }
+
+            my %Extract = (
+                "xz"=>"unxz",
+                "lzma"=>"unlzma",
+                "gz"=>"gunzip"
+            );
+            if($DownloadTo=~/\.(gz|xz|lzma)\Z/)
+            {
+                my ($Format, $Cmd) = ($1, $Extract{$1});
+                if($Cmd) {
+                    system("cd $Dir && $Cmd $DownloadTo");
+                }
+                my @Files = cmd_find($Dir, "f", "", "");
+                if(not @Files) {
+                    print STDERR gettext_("ERROR: cannot extract \'[_1]\'\n", $HDlist);
+                    exit(1);
+                }
+                $DownloadTo = $Files[0];
+            }
+            if(my $Line = readLineNum($DownloadTo, 1))
+            {
+                if($Line!~/\A\@\w+\@/) {
+                    print STDERR gettext_("ERROR: unknown format of hdlist\n");
+                    exit(1);
+                }
+            }
+            $Content = readFile($DownloadTo);
+        }
+        else
+        {
+            if(not -f $HDlist)
+            {
+                print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $HDlist);
+                exit(1);
+            }
+            $Content = readFile($HDlist);
+        }
+        print gettext_("Checking HDlist ...\n");
+        my $Name = "";
+        foreach (reverse(split(/\n/, $Content)))
+        {
+            $_=~s/\A\@//g;
+            my @Parts = split("\@", $_);
+            my $Type = shift(@Parts);
+            if($Type eq "info")
+            {
+                $Name = $Parts[0];
+                next;
+            }
+            if($AddRPMs)
+            {
+                if(my $PName = parse_RPMname($Name))
+                {
+                    if($AddedRPMs{$PName})
+                    { # already added
+                        next;
+                    }
+                }
+            }
+            
if($Type=~/\A(requires|provides|suggests)\Z/) + { + foreach my $D (@Parts) + { + my ($N, $O, $V) = sepDep($D); + $N=~s/\[\*\]//g;# /sbin/ldconfig[*] + $Dep{$Type}{$N}{$O}{$V}=$Name; + $RPMdep{$Type}{$Name}{$D} = 1; + } + } + } + } + my %IgnoreDeps = (); + if($FileDeps) + { + if(not -f $FileDeps) + { + print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $FileDeps); + exit(1); + } + %IgnoreDeps = map {$_=>1} split(/\s+/, readFile($FileDeps)); + } + my (%Unresolved, %UnresolvedSuggested, %Broken) = (); + foreach my $N (sort {lc($a) cmp lc($b)} keys(%{$Dep{"requires"}})) + { + foreach my $O (keys(%{$Dep{"requires"}{$N}})) + { + foreach my $V (keys(%{$Dep{"requires"}{$N}{$O}})) + { + if(not defined $Dep{"provides"}{$N} + or not checkDeps($N, $O, $V, $Dep{"provides"}{$N})) + { # unresolved + if($N=~/\A(rpmlib|executable)\(.+\)\Z/) + { # rpmlib(PayloadIsLzma), ... + # executable(rm), ... + next; + } + if($IgnoreDeps{$N}) { + next; + } + my $Name = $Dep{"requires"}{$N}{$O}{$V}; + if($RPMdep{"suggests"}{$Name}{$N}) { + $UnresolvedSuggested{$N}{$O}{$V} = $Name; + } + else { + $Unresolved{$N}{$O}{$V} = $Name; + } + $Broken{$Name}=1; + } + } + } + } + my $Report = ""; + if(my @Ns = sort {lc($a) cmp lc($b)} keys(%Unresolved)) + { + $Report .= "\n".gettext_("Unresolved \"Required\" Dependencies ([_1]):", $#Ns+1)."\n\n"; + foreach my $N (@Ns) + { + foreach my $O (keys(%{$Unresolved{$N}})) + { + foreach my $V (keys(%{$Unresolved{$N}{$O}})) + { + $Report .= showDep($N, $O, $V)." (".gettext_("required by [_1]", $Unresolved{$N}{$O}{$V}).")\n"; + } + } + } + } + if(my @Ns = sort {lc($a) cmp lc($b)} keys(%UnresolvedSuggested)) + { + if($Report) { + $Report .= "\n"; + } + $Report .= "\n".gettext_("Unresolved \"Suggested\" Dependencies ([_1]):", $#Ns+1)."\n\n"; + foreach my $N (@Ns) + { + foreach my $O (keys(%{$UnresolvedSuggested{$N}})) + { + foreach my $V (keys(%{$UnresolvedSuggested{$N}{$O}})) + { + $Report .= showDep($N, $O, $V)." 
(required by ".$UnresolvedSuggested{$N}{$O}{$V}.")\n";
+                }
+            }
+        }
+    }
+    if(my @Ns = sort {lc($a) cmp lc($b)} keys(%Broken))
+    {
+        $Report .= "\n".gettext_("Broken Packages ([_1]):", $#Ns+1)."\n\n";
+        foreach my $N (@Ns) {
+            $Report .= parse_RPMname($N)."\n";
+        }
+    }
+    if($Report)
+    {
+        print $Report."\n";
+        writeFile("$RESULTS_DIR/report.txt", $Report);
+    }
+    writeFile("$RESULTS_DIR/debug/rpm-provides.txt", Dumper($RPMdep{"provides"}));
+    writeFile("$RESULTS_DIR/debug/rpm-requires.txt", Dumper($RPMdep{"requires"}));
+    writeFile("$RESULTS_DIR/debug/rpm-suggests.txt", Dumper($RPMdep{"suggests"}));
+    print gettext_("Report has been generated to:");
+    print "\n  $RESULTS_DIR/report.txt\n";
+    if(keys(%Unresolved)) {
+        exit($EXIT_CODES{"FAILED"});
+    }
+    else {
+        exit($EXIT_CODES{"SUCCESS"});
+    }
+}
+
+sub parse_RPMname($)
+{
+    my $Name = $_[0];
+    if($Name=~/\d(mdv|mdk|rosa(\.\w+|))\d+/)
+    { # plexus-interactivity-1.0-0.1.a5.2.2.5mdv2011.0.i586
+        $Name=~s/\-[^\-]+\Z//;
+        $Name=~s/\-[^\-]+\Z//;
+    }
+    else
+    { # x11-server-source-1.10.3-1-mdv2011.0.i586
+        $Name=~s/\-[^\-]+\Z//;
+        $Name=~s/\-[^\-]+\Z//;
+        $Name=~s/\-[^\-]+\Z//;
+    }
+    return $Name;
+}
+
+sub clearCache()
+{
+    if(not $NoClean)
+    {
+        rmtree($RPM_CACHE);
+        mkpath($RPM_CACHE);
+    }
+}
+
+sub scenario()
+{
+    if($Help)
+    {
+        HELP_MESSAGE();
+        exit(0);
+    }
+    if($ShowVersion)
+    {
+        print gettext_("URPM Repos Closure Checker [_1] for Mandriva Linux\nCopyright (C) 2012 ROSA Laboratory\nLicense: GPL <http://www.gnu.org/licenses/>\nThis program is free software: you can redistribute it and/or modify it.\n\nWritten by Andrey Ponomarenko.\n", $TOOL_VERSION);
+        exit(0);
+    }
+    if($HDlist) {
+        $StaticMode = 1;
+    }
+    if($Root)
+    {
+        if(not -d $Root) {
+            print STDERR gettext_("ERROR: cannot access \'[_1]\'\n", $Root);
+            exit(1);
+        }
+    }
+    if($ResDir) {
+        $RESULTS_DIR = $ResDir;
+    }
+    if(-d $RESULTS_DIR)
+    {
+        # print "Removing old $RESULTS_DIR directory\n";
+        rmtree($RESULTS_DIR);
+    }
+    if($CheckSignature)
+    {
+        if(not $ResDir) {
+            $RESULTS_DIR .= "/signature";
+        }
+        sigCheck();
+        exit(0);
+    }
+    if($StaticMode)
+    {
+        if(not $ResDir) {
+            $RESULTS_DIR .= "/static";
+        }
+        staticCheck();
+    }
+    if($CheckRelease)
+    {
+        if(not $ResDir) {
+            $RESULTS_DIR .= "/release";
+        }
+        checkRelease();
+        exit(0);
+    }
+    if($DynamicMode)
+    {
+        if(not $ResDir) {
+            $RESULTS_DIR .= "/dynamic";
+        }
+        dynamicCheck();
+    }
+    exit(0);
+}
+
+scenario();
+
diff --git a/urpm-tools/urpm-repodiff.py b/urpm-tools/urpm-repodiff.py
new file mode 100755
index 0000000..bd0b836
--- /dev/null
+++ b/urpm-tools/urpm-repodiff.py
@@ -0,0 +1,1379 @@
+#!/usr/bin/python
+'''
+" Repodiff utility for finding differences between different repositories
+"
+" The tool downloads, unpacks and parses synthesis.hdlist.cz and
+" changelog.xml.lzma to generate lists of newly added packages,
+" packages removed from the new repository, and updated packages.
+" The tool outputs data to standard output or to a file.
+" It can show if a removed package is obsoleted by some package
+" in the new repositories. Also the tool can output data in the
+" format of an HTML table.
+"
+" REQUIREMENTS
+" ============
+" - urpmi
+" - python-2.7
+" - lzma
+" - gzip
+" - libxml2 python library
+" - rpm python library
+"
+" Copyright (C) 2012 ROSA Laboratory.
+" Written by Vladimir Testov
+"
+" This program is free software: you can redistribute it and/or modify
+" it under the terms of the GNU General Public License or the GNU Lesser
+" General Public License as published by the Free Software Foundation,
+" either version 2 of the Licenses, or (at your option) any later version.
+"
+" This program is distributed in the hope that it will be useful,
+" but WITHOUT ANY WARRANTY; without even the implied warranty of
+" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+" GNU General Public License for more details.
+"
+" You should have received a copy of the GNU General Public License
+" and the GNU Lesser General Public License along with this program.
+" If not, see <http://www.gnu.org/licenses/>.
+'''
+
+import argparse
+import urllib
+import tempfile
+import os
+import subprocess
+import re
+import libxml2
+import sys
+from datetime import date
+import rpm
+import shutil
+import urllib2
+import urpmmisc
+
+import gettext
+gettext.install('urpm-tools')
+
+old_dir = "old"
+new_dir = "new"
+htmlname = "repodiff.html"
+
+synthtags = ["provides", "requires", "obsoletes", "conflicts", "suggests",
+             "summary", "info"]
+
+minus_check = re.compile('-')
+re_search_unver = re.compile("([^\[\]]+)[\[\]]")
+re_search_verrel = re.compile("\[(== |> |< |>= |<= )([\{\}+=0-9a-zA-Z_\.]*:)?([[\{\}+=0-9a-zA-Z_\.]+)(-[[\{\}+=0-9a-zA-Z_\.]+)?([^\[\]]*)\]$")
+
+synthesis_arch = "synthesis.hdlist.cz"
+synthesis_arch_renamed = "synthesis.hdlist.gz"
+synthesis_file = "synthesis.hdlist"
+changelog_arch = "changelog.xml.lzma"
+changelog_file = "changelog.xml"
+default_output = "sys.stdout"
+timeout = 5
+
+def ParseCommandLine():
+    """Parse arguments.
+
+    Parse arguments from the command line.
+    Return these arguments.
+    """
+    parser = argparse.ArgumentParser(
+        description=_("Tool for comparing sets of repositories."))
+    parser.add_argument("--old", "-o", action="store", nargs='+', required=True,
+        metavar="OLD_REPO", help=_("URL or PATH to old repositories"))
+    parser.add_argument("--new", "-n", action="store", nargs='+', required=True,
+        metavar="NEW_REPO", help=_("URL or PATH to new repositories"))
+    parser.add_argument("--size", "-s", action="store_true",
+        help=_("Show differences in package sizes."))
+    parser.add_argument("--simple", action="store_false",
+        help=_("Simple output format."))
+    parser.add_argument("--quiet", "-q", action="store_false",
+        help=_("Hide service messages."))
+    parser.add_argument("--changelog", "-c", action="store_true",
+        help=_("Show changelog difference."))
+    parser.add_argument("--html", action="store_true",
+        help=_("Output in HTML format, if --output is not present\
+ \"%s\" will be created in current directory. \
+ --size, --simple and --changelog options are ignored.") % htmlname)
+    parser.add_argument("--output", "-out", action="store", nargs=1, default='',
+        metavar="OUTPUT_FILE", help=_("Change standard output to \"OUTPUT_FILE\"."))
+    return parser.parse_args()
+
+def exit_proc(arg):
+    """
+    Remove temporary files and close the output before exiting.
+    """
+    err_tmpdir = arg.temp_dir
+    err_output = arg.output
+
+    if err_output != None:
+        err_output.close()
+    if os.path.isdir(err_tmpdir):
+        shutil.rmtree(err_tmpdir)
+    exit(0)
+
+def CheckURL(url, arg):
+    """URL check.
+
+    Check that the URL is reachable.
+    """
+    try:
+        urllib2.urlopen(url, None, timeout)
+    except:
+        print _("Error: URL to repository \"%s\" is incorrect") % url
+        exit_proc(arg)
+
+def CheckArgs(urlpath, arg):
+    """Trivial checks.
+
+    Check that url or path is correct.
+ """ + if (urlpath.startswith("http://") or urlpath.startswith("ftp://")): + if not urlpath.endswith('/'): + urlpath = urlpath + '/' + tmp_url = urlpath + "media_info/" + CheckURL(tmp_url, arg) + elif (os.path.isdir(urlpath)) or urlpath.startswith("file://"): + if urlpath.startswith("file://./"): + urlpath = urlpath[7:] + else: + urlpath = urlpath[6:] + if not urlpath.endswith('/'): + urlpath = urlpath + '/' + urlpath = urlpath + "media_info/" + if not os.path.isdir(urlpath): + print _("Error: directory %s does not exist") % urlpath + exit_proc(arg) + else: + (e1,e2,urltmp) = urpmmisc.GetUrlFromRepoName(urlpath) + if (urltmp): + if not urltmp.endswith('/'): + urltmp = urltmp + '/' + urlpath = urltmp + "media_info/" + CheckURL(urlpath, arg) + else: + print _("Error: \"%s\" is not correct url, path or name of repository") % urlpath + exit_proc(arg) + return urlpath + +def CheckOutput(arg): + """Check output file. + + Check if the file can be created and redirect standart output to this file. + """ + file_output = arg.output + ifhtml = arg.html + + if (file_output == default_output): + if(ifhtml): + try: + arg.output = open(htmlname, "w") + except: + print _("Error: Cannot open %s for writing.") % htmlname + exit_proc(arg) + return + else: + arg.output = sys.stdout + return + + if(file_output != ''): + if(os.path.isfile(file_output)): + print _("Error: File %s already exists") % file_output + arg.output = None + exit_proc(arg) + else: + dirname = os.path.dirname(file_output) + if(dirname == '') or (os.path.exists(dirname)): + try: + arg.output = open(file_output, "w") + except IOError: + print _("Error: File %s cannot be created") % file_output + arg.output = None + exit_proc(arg) + else: + print _("Error: Path %s does not exist.") % dirname + arg.output = None + exit_proc(arg) + +def CheckParam(arg): + """Check parameters. + + Ignore some parameters in HTML-case. + """ + if arg.html: + arg.size = 0 + arg.simple = 0 + arg.changelog = 0 + +def GetFile(urlpath, filename, localdir, arg): + """Donwload archive. + """ + ifnotquiet = arg.quiet + + if not os.path.isdir(localdir): + os.makedirs(os.path.realpath(localdir)) + if ifnotquiet: + print (_("getting file %s from ") % filename) + "\n " + urlpath + filename + if os.path.isdir(urlpath): + try: + shutil.copyfile(urlpath + filename, localdir + filename) + except: + print _("Error: file %s was not copied") % filename + exit_proc(arg) + else: + try: + file_from = urllib2.urlopen(urllib2.Request(urlpath + filename), None, timeout) + file_to = open(localdir + filename, "w") + shutil.copyfileobj(file_from, file_to) + except: + print _("Error: file %(from)s was not downloaded to %(to)s") %{"from": urlpath + filename, "to": localdir + filename} + exit_proc(arg) + file_from.close() + file_to.close() + +def GetFiles(arg): + """Get all needed files. 
+ """ + ifchangelog = arg.changelog + file_dir = [] + file_name = [] + file_path = [] + for i in range(len(arg.old)): + file_name.append(synthesis_arch) + file_dir.append(arg.temp_old[i]) + file_path.append(arg.old[i] + "media_info/") + if ifchangelog: + file_name.append(changelog_arch) + file_dir.append(arg.temp_old[i]) + file_path.append(arg.old[i] + "media_info/") + + for i in range(len(arg.new)): + file_name.append(synthesis_arch) + file_dir.append(arg.temp_new[i]) + file_path.append(arg.new[i] + "media_info/") + if ifchangelog: + file_name.append(changelog_arch) + file_dir.append(arg.temp_new[i]) + file_path.append(arg.new[i] + "media_info/") + + for i in range(len(file_name)): + GetFile(file_path[i], file_name[i], file_dir[i], arg) + +def RenameSynthFile(localdir, arg): + """Rename. + + Rename Synthesis file so zgip can understand format. + """ + ifnotquiet = arg.quiet + + if not os.path.isfile(localdir + synthesis_arch): + print _("Error: file not found: ") + localdir + synthesis_arch + exit_proc(arg) + try: + os.rename(localdir + synthesis_arch, localdir + synthesis_arch_renamed) + except OSError: + print _("Error: cannot rename file %(from)s to %(to)s") % {"from": synthesis_arch, "to": synthesis_arch_renamed} + + exit_proc(arg) + if not os.path.isfile(localdir + synthesis_arch_renamed): + print _("Error: file %s is missing.") % (localdir + synthesis_arch_renamed) + exit_proc(arg) + else: + if ifnotquiet: + print _("file %(from)s was renamed to %(to)s") % {"from": synthesis_arch, "to": synthesis_arch_renamed} + +def UnpackFiles(files_dir, ifchangelog, ifnotquiet): + """Unpack. + + Unpack needed files in selected directory. + """ + if ifchangelog: + if ifnotquiet: + print _("unpacking file ") + changelog_arch + subprocess.call(["lzma", "-df", files_dir + changelog_arch]) + if ifnotquiet: + print _("unpacking file ") + synthesis_arch_renamed + subprocess.call(["gzip", "-df", files_dir + synthesis_arch_renamed]) + +def ParseVersion(names_list): + """Parse version info is present. + + Parse version information from the field. e.g. provided_name[>= 1.2.3-4.5.6] + is parsed to (provided_name, sign, (epoch, version, release)) + """ + new_names_list = [] + for name in names_list: + match = re_search_unver.match(name) + if match: + tmp_entry = match.group(1) + else: + tmp_entry = name + match = re_search_verrel.search(name) + if match: + sign = match.group(1)[:-1] + epoch = match.group(2) + if epoch: + epoch = epoch[:-1] + else: + epoch = '' + version = match.group(3) + release = match.group(4) + if release: + release = release[1:] + else: + release = '' + verrel = (epoch, version, release) + else: + sign = '' + verrel = ('','','') + new_names_list.append((tmp_entry, sign, verrel)) + return new_names_list + +def ParseSynthesis(synthfile, pkgdict, arg): + """Collect info about packages. + + Parse synthesis.hdlist file (or add new entries to pkgdict). 
+
+    pkgdict is a dictionary with format:
+        pkgdict[name]=(verrel,(s0,s1,s2))
+    where:
+        name - is package name parsed from package filename
+        verrel - is tuple (version, release, epoch)
+        s0[] - is package info
+        s1 - is package summary
+        s2[] - is list of obsoleted packages
+    """
+    ifnotquiet = arg.quiet
+
+    if not os.path.isfile(synthfile):
+        print _("Error: Synthesis file %s was not found.") % synthfile
+        exit_proc(arg)
+    if ifnotquiet:
+        print _("Parsing synthesis")
+    try:
+        synth = open(synthfile)
+        tmp = ['', '', '']
+        for synthline in synth:
+            if synthline.endswith('\n'):
+                synthline = synthline[:-1]
+            tmpline = synthline.split('@')
+            tag = tmpline[1]
+            if tag == synthtags[2]:
+                tmp[2] = tmpline[2:]
+            elif tag == synthtags[5]:
+                tmp[1] = '@'.join(tmpline[2:])
+            elif tag == synthtags[6]:
+                tmp[0] = tmpline[2:]
+                disttagepoch = ChkTagEpoch(tmp[0])
+                tmp[2] = ParseVersion(tmp[2])
+                (name, version, release) = RPMNameFilter(tmp[0][0], disttagepoch) # disttag + distepoch
+                verrel = (version, release, tmp[0][1])
+                if(not name in pkgdict):
+                    pkgdict[name] = (verrel, (tmp[0], tmp[1], tmp[2]))
+                elif(compare_versions(pkgdict[name][0], verrel) == -1):
+                    pkgdict[name] = (verrel, (tmp[0], tmp[1], tmp[2]))
+                tmp = ['', '', '']
+        synth.close()
+    except IOError:
+        print _("Error: Failed to open synthesis file ") + synthfile
+        exit_proc(arg)
+
+def ChkDist(disttag, distepoch):
+    """No minus in tag and epoch.
+
+    Trivial check that neither disttag nor distepoch has '-' in its name.
+    """
+    if minus_check.search(disttag) or minus_check.search(distepoch):
+        print _("REPODIFF-Warning: strange format of <disttag> or <distepoch>: ") +\
+            disttag + distepoch
+
+def ChkTagEpoch(i):
+    """No minus in tag and epoch.
+
+    Trivial check that neither disttag nor distepoch has '-' in its name.
+    """
+    if len(i) == 4:
+        return '-'
+    elif len(i) == 5:
+        disttag = i[4]
+        distepoch = ''
+        ChkDist(disttag, distepoch)
+        return disttag + distepoch
+    elif len(i) == 6:
+        disttag = i[4]
+        distepoch = i[5]
+        ChkDist(disttag, distepoch)
+        return disttag + distepoch
+    else:
+        print _("REPODIFF-Warning: strange <info>: ") + str(i)
+
+def RPMNameFilter(rpmname, disttagepoch):
+    """Parse name and verrel.
+
+    Function that parses the name, version and release of a package.
+    """
+    string = rpmname.split('-')
+    lastpart = string.pop()
+    tmp = lastpart.split('.')
+    tmp.pop()
+    lastpart = '.'.join(tmp)
+    if (lastpart[0].isdigit() or (not lastpart.startswith(disttagepoch))) and\
+            (not lastpart.isdigit()):
+        name = '-'.join(string[:-1])
+        ver = string[-1]
+        rel = lastpart
+    else:
+        name = '-'.join(string[:-2])
+        ver = string[-2]
+        rel = string[-1]
+    return (name, ver, rel)
+
+def compare_versions(first_entry, second_entry):
+    """Compare two verrel tuples.
+
+    first_entry and second_entry are verrel tuples
+        verrel = (version, release, epoch).
+    Return 1 if the first argument is higher,
+    0 if they are equivalent,
+    -1 if the second argument is higher.
+    """
+    (version1, release1, first_epoch) = first_entry
+    (version2, release2, second_epoch) = second_entry
+    return(rpm.labelCompare((first_epoch, version1, release1),
+        (second_epoch, version2, release2)))
+
+def ParsePackage(arg):
+    """Processing files, parsing synthesis, getting pkgdict.
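+
+    Returns the pair (pkgdict_old, pkgdict_new), built from every directory in
+    arg.temp_old and arg.temp_new respectively.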
+ + pkgdict is a dictionary with format: + pkgdict[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - is package info + s1 - is package summary + s2[] - is list of obsoleted packages + """ + ifchangelog = arg.changelog + ifnotquiet = arg.quiet + pkgdict_old = {} + for directory in arg.temp_old: + RenameSynthFile(directory, arg) + UnpackFiles(directory, ifchangelog, ifnotquiet) + ParseSynthesis(directory + synthesis_file, pkgdict_old, arg) + pkgdict_new = {} + for directory in arg.temp_new: + RenameSynthFile(directory, arg) + UnpackFiles(directory, ifchangelog, ifnotquiet) + ParseSynthesis(directory + synthesis_file, pkgdict_new, arg) + return pkgdict_old, pkgdict_new + +def CreateDicts(dict_old, dict_new): + """Creating dictionaries. + + Creating dictionaries for new, updated and removed(deleted) packages + from two dictionaries: old and new, for old and new repositories. + + dict_old, dict_new are dictionaries with format: + pkgdict[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - is package info + s1 - is package summary + s2[] - is list of obsoleted packages + + dict_new_packages and dict_del_packages have the same format. + dict_upd_packages has format: + dict_upd_packages[name]=((verrel_old,(so0,so1,so2)), + (verrel_new,(sn0,sn1,sn2)),ifdowngraded) + or + dict_upd_packages[name]=(dict_old[name],dict_new[name],ifdowngraded) + """ + dict_new_packages = {} + dict_del_packages = {} + dict_upd_packages = {} + + for name in dict_new: + if(name in dict_old): #updated or downgraded + compare_result = compare_versions(dict_new[name][0], + dict_old[name][0]) + if(compare_result > 0): #updated + dict_upd_packages[name] = (dict_old[name], dict_new[name], 0) + elif(compare_result < 0): #downgraded ? + dict_upd_packages[name] = (dict_old[name], dict_new[name], 1) + else: #new + dict_new_packages[name] = dict_new[name] + for name in dict_old: + if(not name in dict_new): #removed + dict_del_packages[name] = dict_old[name] + return (dict_new_packages, dict_del_packages, dict_upd_packages) + +def ProcessNewPackages(dict_new_packages, file_output): + """Processing newly added packages. + + dict_new_packages[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - is package info + s1 - is package summary + s2[] - is list of obsoleted packages + """ + sorted_list = sorted(dict_new_packages) + for name in sorted_list: + file_output.write(_("New package: ") + dict_new_packages[name][1][0][0] +\ + "\n " + dict_new_packages[name][1][1] + "\n\n") + +def GenerateDictObsoleted(dict_new, ifnotquiet): + """Generate Dictionary of obsoleted packages. 
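+
+    The result maps an obsoleted name to the packages obsoleting it, e.g.
+    (illustrative) obsoleted_by['foo'] = [('bar-2.0-1.i586', '>=', ('0', '1.0', '1'))].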
+ + pkgdict[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - package info + s1 - package summary + s2[] - list of packages obsoleted by current package + """ + if ifnotquiet: + print _("Generating obsoleted list.") + obsoleted_by = {} + for name in dict_new: + for (obsolete, sign, verrel) in dict_new[name][1][2]: + if(not obsolete in obsoleted_by): + obsoleted_by[obsolete] = [] + obsoleted_by[obsolete].append((dict_new[name][1][0][0], sign, verrel)) + return obsoleted_by + +def compare_verrel(verrel1, sign, verrel2): + if (sign == ''): + return 1 + (e1, v1, r1) = verrel1 + (e2, v2, r2) = verrel2 + # checks + if (v2 == '') or (v1 == ''): + return 1 + if (e1 == '') or (e2 == ''): + e1 = '0' + e2 = '0' + if (r1 == '') or (r2 == ''): + r1 = '0' + r2 = '0' + # compare + compare = rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) + if (sign == "=="): + if (compare == 0): + return 1 + elif (sign == ">"): + if (compare == 1): + return 1 + elif (sign == "<"): + if (compare == -1): + return 1 + elif (sign == ">="): + if (compare > -1): + return 1 + elif (sign == "<="): + if (compare < 1): + return 1 + return 0 + +def ProcessDelPackages(dict_del_packages, dict_obsoleted, file_output): + """Process deleted packages. + + Printing every deleted package. Show if package is obsoleted. + pkgdict[name]=(verrel,(s0,s1,s2)) + where: + name - is package name parsed from package filename + verrel - is tuple (version, release, epoch) + s0[] - is package info + s1 - is package summary + s2[] - is list of obsoleted packages + + dict_obsoleted is dictionary + dict_obsoleted[name]=[obs1, ...] + """ + sorted_list = sorted(dict_del_packages) + for name in sorted_list: + file_output.write(_("Removed package: ") + dict_del_packages[name][1][0][0] + '\n') + if (name in dict_obsoleted): + tmp_list = [] + for (obsolete, sign, verrel) in dict_obsoleted[name]: + if (compare_verrel(dict_del_packages[name][0], sign, verrel)): + tmp_list.append(obsolete) + sorted_obsolete = sorted(tmp_list) + for obs_package_name in sorted_obsolete: + file_output.write(_(" Obsoleted by ") + obs_package_name + '\n') + +def ParseLogfile(dict_log, logfile, dict_upd_packages, mode, arg): + """Parse Changelog. + + mode == 0 - for old changelog: we search only for 1st entry in changelog + mode == 1 - for new changelog: we collect entries from changelog untill + we find remembered entry from changelog + + Parse changelog.xml to compare changes between updated packages. + dict_log - is dictionary with format: + dict_log[name] = + [(verrel, (time,name,text)), (verrel,[(time,name,text),...])] + + dict_upd_packages[name] = [old_pkg[name],new_pkg[name],ifdowngraded] + or dict_upd_packages[name] = + [(verler,(s0,s1,s2)),(verrel,(s0,s1,s2)),ifdowngraded] + """ + ifnotquiet = arg.quiet + + if ifnotquiet: + print _("Reading changelog") + if not os.path.isfile(logfile): + print _("Error: Can't find changelog ") + logfile + exit_proc(arg) + doc = libxml2.parseFile(logfile) + if (not doc): + print _("Error: Can't read changelog ") + logfile + "." 
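+        # libxml2.parseFile() gave us nothing usable, so clean up and bail out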
+        exit_proc(arg)
+    root = doc.children
+    if root.name != "media_info":
+        print _("Error: Wrong changelog.")
+        doc.freeDoc()
+        exit_proc(arg)
+    tag_changelog = root.children
+    while(tag_changelog):
+        if(tag_changelog.name != "changelogs"):
+            tag_changelog = tag_changelog.next
+            continue
+
+        tag_property = tag_changelog.properties
+        pkgname = ''
+        disttag = ''
+        distepoch = ''
+        while(tag_property):
+            if (tag_property.name == "fn"):
+                pkgname = tag_property.content
+            elif (tag_property.name == "disttag"):
+                disttag = tag_property.content
+            elif (tag_property.name == "distepoch"):
+                distepoch = tag_property.content
+            tag_property = tag_property.next
+        if (pkgname == ''):
+            print _("Error: Corrupted changelog")
+            doc.freeDoc()
+            exit_proc(arg)
+        disttagepoch = disttag + distepoch
+        if (disttagepoch == ''):
+            disttagepoch = '-'
+        (result_key, version, release) = RPMNameFilter(pkgname, disttagepoch)
+        verrel = (version, release, "-1")
+        # skip entry if it wasn't updated
+        if result_key not in dict_upd_packages:
+            tag_changelog = tag_changelog.next
+            continue
+        ifdowngraded = dict_upd_packages[result_key][2]
+        # skip entry if its name is not in the dictionary
+        if(dict_upd_packages[result_key][mode][1][0][0] != pkgname):
+            tag_changelog = tag_changelog.next
+            continue
+        # skip entry if it has been found already with appropriate version
+        if(result_key in dict_log) and (dict_log[result_key][mode]):
+            tag_changelog = tag_changelog.next
+            continue
+        # if the "old" repository does not have a changelog for the package
+        if(mode == 1) and (not result_key in dict_log):
+            dict_log[result_key] = []
+            dict_log[result_key].append([])
+            dict_log[result_key].append([])
+            dict_log[result_key][0] = (verrel, [])
+
+        log_current = tag_changelog.children
+        result_changelog = []
+        while(log_current):
+            if(log_current.name != "log"):
+                log_current = log_current.next
+                continue
+
+            if(log_current.properties.name == "time"):
+                entry_time = log_current.properties.content
+            else:
+                entry_time = 0
+
+            if(mode == 1) and (not ifdowngraded) and\
+              (entry_time <= dict_log[result_key][0][1][0]):
+                break
+            log_child = log_current.children
+            while(log_child):
+                if(log_child.name == "log_name"):
+                    entry_name = log_child.content
+                elif(log_child.name == "log_text"):
+                    entry_text = log_child.content
+                log_child = log_child.next
+            result_changelog.append((entry_time, entry_name, entry_text))
+            if(mode == ifdowngraded):
+                break
+            log_current = log_current.next
+        if(mode == 0):
+            dict_log[result_key] = []
+            dict_log[result_key].append([])
+            dict_log[result_key].append([])
+            if not ifdowngraded:
+                dict_log[result_key][0] = (verrel, result_changelog[0])
+            else:
+                dict_log[result_key][0] = (verrel, result_changelog)
+        else:
+            if not ifdowngraded:
+                dict_log[result_key][1] = (verrel, result_changelog)
+            else: #special actions for downgraded packages
+                new_result = []
+                time_to_stop = result_changelog[0][0]
+                tmp_change = dict_log[result_key][0][1] #changelog list
+                if tmp_change: #changelog is not empty
+                    i = 0
+                    length = len(tmp_change)
+                    while i < length:
+                        if tmp_change[i][0] <= time_to_stop:
+                            i = i + 1
+                            break
+                        new_result.append(tmp_change[i])
+                        i = i + 1
+                dict_log[result_key][1] = (verrel, new_result)
+        tag_changelog = tag_changelog.next
+    doc.freeDoc()
+
+def GenerateLogfileDiff(dict_upd_packages, arg):
+    """Changelog difference list.
+
+    Generate changelog difference list.
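+    For each updated package the result holds the changelog entries that
+    appeared after the version shipped in the "old" repositories (or, for
+    a downgraded package, the old entries newer than the version it was
+    downgraded to).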
+    dict_upd_packages[name] = [old_pkg[name],new_pkg[name],ifdowngraded]
+    or dict_upd_packages[name] = [(verrel,(s0,s1,s2)),(verrel,(s0,s1,s2)),ifdowngraded]
+    """
+    ifnotquiet = arg.quiet
+    temp_old = arg.temp_old
+    temp_new = arg.temp_new
+
+    if ifnotquiet:
+        print _("Generating changes list.")
+    dict_logfile_diff = {}
+    dict_log = {}
+
+    for old_dir in temp_old:
+        ParseLogfile(dict_log, old_dir + changelog_file, dict_upd_packages, 0, arg)
+    for new_dir in temp_new:
+        ParseLogfile(dict_log, new_dir + changelog_file, dict_upd_packages, 1, arg)
+
+    for name in dict_upd_packages:
+        if(name in dict_log):
+            if dict_log[name][1]:
+                entry = dict_log[name][1][1]
+            else:
+                print _("REPODIFF-Warning: Package %s was not described in changelogs.xml") % name
+                entry = [(0, '', _("REPODIFF-Warning: Changelogs of a package are absent in \"new\" repository."))]
+        else:
+            print _("REPODIFF-Warning: Package %s was not described in changelogs.xml") % name
+            entry = [(0, '', _("REPODIFF-Warning: Changelogs of a package are absent."))]
+        dict_logfile_diff[name] = entry
+
+    return dict_logfile_diff
+
+def ChangelogPrint(changes_list, file_output):
+    """Changelog difference.
+
+    Output changes in changelog.
+    changes_list is list with format:
+    changes_list = [(time,author,text)]
+    """
+    for entry in changes_list:
+        file_output.write("* " + str(date.fromtimestamp(float(entry[0]))) +\
+            " " + entry[1] + '\n' + entry[2] + '\n\n')
+
+def PrintLogfileDiff(package_name, dict_logfile_diff, file_output):
+    """Changelog difference.
+
+    Output changes in changelog.
+    dict_logfile_diff is dictionary with format:
+    dict_logfile_diff[name] = [(time,author,text)]
+    """
+    if package_name in dict_logfile_diff:
+        ChangelogPrint(dict_logfile_diff[package_name], file_output)
+    else:
+        file_output.write(_("Package %s has no changelog info\n") % package_name)
+
+def ProcessUpdPackages(dict_upd_packages, dict_logfile_diff, arg):
+    """Process updated packages.
+
+    ifsizes - is indicator: should we (1) or should we not (0) print
+    difference in package sizes.
+    ifnotsimple - is indicator: should we (0) or should we not (1) print
+    difference in changelogs.
+    Process updated packages and output all the needed info.
+    dict_upd_packages[name] = [old_pkg[name],new_pkg[name],ifdowngraded]
+    or dict_upd_packages[name] = [(verrel,(s0,s1,s2)),(verrel,(s0,s1,s2)),ifdowngraded]
+    """
+    ifnotsimple = arg.simple
+    file_output = arg.output
+    ifchangelog = arg.changelog
+    ifsizes = arg.size
+
+    file_output.write(_("\n\nUpdated packages:\n\n"))
+    sorted_list = sorted(dict_upd_packages)
+    for name in sorted_list:
+        package = dict_upd_packages[name][1][1][0][0]
+        if ifnotsimple:
+            file_output.write(package + '\n' + '-'*len(package) + '\n')
+            if dict_upd_packages[name][2]:
+                file_output.write(_(" ***DOWNGRADED***\n"))
+            if ifchangelog:
+                PrintLogfileDiff(name, dict_logfile_diff, file_output)
+        else:
+            old_package = dict_upd_packages[name][0][1][0][0]
+            file_output.write(name + ": " + old_package + " -> " + package + '\n')
+        if(ifsizes):
+            sizediff = int(dict_upd_packages[name][1][1][0][2]) - \
+                int(dict_upd_packages[name][0][1][0][2])
+            file_output.write(_("Size Change: %d bytes\n\n") % sizediff)
+
+def PrintSummary(dict_new_packages, dict_del_packages, dict_upd_packages, file_output):
+    """Output summary.
+
+    Output summary: total numbers of new/removed/updated packages.
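+
+    For example, a run might print (numbers are illustrative):
+        Summary:
+         Total added packages: 5
+         Total removed packages: 2
+         Total updated packages: 30
+         Total downgraded packages: 1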
+ """ + file_output.write("Summary:\n") + length = len(dict_new_packages) + if length: + file_output.write(_(" Total added packages: ") + str(length) + '\n') + length = len(dict_del_packages) + if length: + file_output.write(_(" Total removed packages: ") + str(length) + '\n') + length = 0 + length_d = 0 + for packagename in dict_upd_packages: + if dict_upd_packages[packagename][2] == 0: + length = length + 1 + else: + length_d = length_d + 1 + if length: + file_output.write(_(" Total updated packages: ") + str(length) + '\n') + if length_d: + file_output.write(_(" Total downgraded packages: ") + str(length_d) + '\n') + +def HTML_ParsePackage(arg): + """Parse hdlist. + + HTML-specific ParsePackage(). Calls for ParsePackage + """ + ifchangelog = arg.changelog + ifnotquiet = arg.quiet + + html_old_dict_list = [] + html_new_dict_list = [] + + for directory in arg.temp_old: + tmp_dict = {} + RenameSynthFile(directory, arg) + UnpackFiles(directory, 0, ifnotquiet) + ParseSynthesis(directory + synthesis_file, tmp_dict, arg) + html_old_dict_list.append(tmp_dict) + for directory in arg.temp_new: + tmp_dict = {} + RenameSynthFile(directory, arg) + UnpackFiles(directory, 0, ifnotquiet) + ParseSynthesis(directory + synthesis_file, tmp_dict, arg) + html_new_dict_list.append(tmp_dict) + return html_old_dict_list, html_new_dict_list + +def HTML_UniteOld(list_dict_old): + """Union of dictionaries. + + HTML-specific. + """ + dict_old = list_dict_old[0] + i = 1 + while(i < len(list_dict_old)): + for name in list_dict_old[i]: + if name not in dict_old: + dict_old[name] = list_dict_old[i][name] + elif(compare_versions(dict_old[name][0], list_dict_old[i][name][0]) == -1): + dict_old[name] = list_dict_old[i][name] + i = i + 1 + return dict_old + +def HTML_CreateDicts(dict_old, list_dict_new): + """Create dictionary of packages. + + Dictionary of packages and types of changes. + """ + dict_packages = {} + i = 0 + for dict_new in list_dict_new: + (tmp_new, tmp_del, tmp_upd) = CreateDicts(dict_old, dict_new) + for packagename in tmp_new: + if packagename not in dict_packages: + dict_packages[packagename] = [] + dict_packages[packagename].append((tmp_new[packagename], i, 1)) + for packagename in tmp_del: + if packagename not in dict_packages: + dict_packages[packagename] = [] + dict_packages[packagename].append((tmp_del[packagename], i, 2)) + for packagename in tmp_upd: + if packagename not in dict_packages: + dict_packages[packagename] = [] + if tmp_upd[packagename][2] == 0: + dict_packages[packagename].append((tmp_upd[packagename][1], i, 3)) + elif tmp_upd[packagename][2] == 1: + dict_packages[packagename].append((tmp_upd[packagename][1], i, 4)) + i = i + 1 + return dict_packages + +def CssOutput(): + """Output style. + + Output contents of style tag or to .css file. + """ + csscontent = '\nbody {\nfont-size: 1em;\nmargin: 1em;\ncolor: black;\nbackground-color: white;\n}\n' +\ + 'th {\nborder-bottom-style: double;\n}\n' +\ + 'h1 {\nfont-size: 1.6em;\n}\n' +\ + 'h2 {\nfont-size: 1.4em;\n}\n' +\ + 'ul {\nfont-size: 1.2em;\n}\n' +\ + 'li {\nfont-size: 1em; list-style: disc;\n}\n' +\ + '.even {\nbackground-color: #CCCCCC;\n}\n' +\ + '.odd {\nbackground-color: #FFFFFF;\n}\n' +\ + '.new {\nbackground-color: #C6DEFF;\n}\n' +\ + '.removed {\nbackground-color: #FFC3CE;\n}\n' +\ + '.updated {\nbackground-color: #CCFFCC;\n}\n' +\ + '.downgraded {\nbackground-color: #F4F4AF;\n}\n' +\ + 'p.bold {\n font-weight: bold\n}\n' + return csscontent + +def JavaScriptOutput(): + """Output scripts. 
+ + Output javascript to script tag or to .js file. + """ + javacontent = """ +var tableBody; +var table2sort; +var imgUp; +var imgDown; +var suffix; +var lastSortCol; +var lastSortOrderAsc; +var index; +var rows; + +function TableSorter(table,suf) { + this.table2sort = table; + this.suffix = suf; + this.lastSortCol = -1; + this.lastSortOrderAsc = true; + this.tableBody = this.table2sort.getElementsByTagName("tbody")[0]; + + this.imgUp = document.createTextNode(String.fromCharCode(0x2193)); + this.imgDown = document.createTextNode(String.fromCharCode(0x2191)); +} + +TableSorter.prototype.sort = function (col, type) { + if (this.lastSortCol != -1) { + sortCell = document.getElementById("sortCell" + this.suffix + this.lastSortCol); + if (sortCell != null) { + if (this.lastSortOrderAsc == true) { + sortCell.removeChild(this.imgUp); + } else { + sortCell.removeChild(this.imgDown); + } + } + sortLink = document.getElementById("sortCellLink" + this.suffix + this.lastSortCol); + if(sortLink != null) { + sortLink.title = "Sort Ascending"; + } + }else{ + this.rows = this.tableBody.rows; + } + + if (this.lastSortCol == col) { + this.lastSortOrderAsc = !this.lastSortOrderAsc; + } else { + this.lastSortCol = col; + this.lastSortOrderAsc = true; + } + + var newRows = new Array(); + + var newRowsCount = 0; + for (i = 1; i < this.rows.length; i ++) { + newRows[newRowsCount++] = this.rows[i]; + } + + index = this.lastSortCol; + if (type == 'string') { + newRows.sort(sortFunction_string); + } + else { + newRows.sort(sortFunction_attr); + } + + if (this.lastSortOrderAsc == false) { + newRows.reverse(); + } + + var count = 0; + var newclass; + for (i = 0; i < newRows.length; i++) { + if (count++ % 2 == 0){ + newclass = "odd"; + }else{ + newclass = "even"; + } + newRows[i].className = newclass; + this.table2sort.tBodies[0].appendChild(newRows[i]); + } + + sortCell = document.getElementById("sortCell" + this.suffix + col); + if (sortCell == null) { + } else { + if (this.lastSortOrderAsc == true) { + sortCell.appendChild(this.imgUp); + } else { + sortCell.appendChild(this.imgDown); + } + } + + sortLink = document.getElementById("sortCellLink" + this.suffix + col); + if (sortLink == null) { + } else { + if (this.lastSortOrderAsc == true) { + sortLink.title = "Sort Descending"; + } else { + sortLink.title = "Sort Ascending"; + } + } +} + +function getCellContent(elem) { + if (typeof elem == "string") return elem; + if (typeof elem == "undefined") { return elem }; + if (elem.innerText) return elem.innerText; + var str = ""; + + var cs = elem.childNodes; + var l = cs.length; + for (var i = 0; i < l; i++) { + switch (cs[i].nodeType) { + case 1: // 'ELEMENT_NODE' + str += getCellContent(cs[i]); + break; + case 3: // 'TEXT_NODE' + str += cs[i].nodeValue; + break; + } + } + return str; +} + +function sortFunction_attr(a, b) { + elem1 = a.cells[index] ; + elem2 = b.cells[index] ; + str1 = elem1.className; + str2 = elem2.className; + sub1 = getCellContent(a.cells[0]).toLowerCase(); + sub2 = getCellContent(b.cells[0]).toLowerCase(); + + if (str1 == str2){ + if (sub1 == sub2) return 0; + if (sub1 < sub2) return -1; + return 1; + } + if (str1 < str2) return -1; + return 1; +} + +function sortFunction_string(a, b) { + str1 = getCellContent(a.cells[index]).toLowerCase(); + str2 = getCellContent(b.cells[index]).toLowerCase(); + + if (str1 == str2) return 0; + if (str1 < str2) return -1; + return 1; +} + +var diffTableSorter = null; + +function init_diff(){ + if( document.getElementById("table_diff") ) { + diffTableSorter = new 
TableSorter(document.getElementById("table_diff"), 'diff');
+    }
+}
+
+function sort_diff(col, type) {
+    if( diffTableSorter != null ) {
+        diffTableSorter.sort(col, type);
+    }
+}
+"""
+    return javacontent
+
+def HTML_OutputHead(file_output):
+    """Output beginning of the document.
+
+    Outputs static text.
+    """
+    file_output.write('<html>\n' +\
+        '<head>\n' +\
+        '<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n' +
+        '<title>\n' +\
+        'Differences between Mandriva / Rosa releases\n' +\
+        '</title>\n' +\
+        '<style type="text/css">\n' + CssOutput() + '</style>\n' +\
+        '<script type="text/javascript">\n' + JavaScriptOutput() + '</script>\n' +\
+        '</head>\n' +\
+        '<body>\n' +\
+        '\n\n')
+
+def GetRepoInfo(dict_packages, packagename, lenold, lennew, list_dict_old, list_dict_new):
+    """Generate package-specific information.
+
+    Generates class and name to be displayed in the table.
+    """
+    result1 = []
+    result2 = []
+    flag = 0
+    for i in range(lenold):
+        if packagename in list_dict_old[i]:
+            result1.append(list_dict_old[i][packagename][0][0] + '-' +\
+                list_dict_old[i][packagename][0][1])
+        else:
+            result1.append("N/A")
+        result2.append('')
+
+    tmplist = dict_packages[packagename]
+    tmpdict = {}
+    for (entry, reponum, entry_type) in dict_packages[packagename]:
+        tmpdict[reponum] = (entry[0][0] + '-' + entry[0][1], entry_type)
+
+    for i in range(lennew):
+        if(i not in tmpdict):
+            if(packagename not in list_dict_new[i]):
+                result1.append("N/A")
+                result2.append("")
+            else:
+                result1.append(list_dict_new[i][packagename][0][0] + '-' +\
+                    list_dict_new[i][packagename][0][1])
+                result2.append("")
+        else:
+            (name, entry_type) = tmpdict[i]
+            if entry_type == 1:
+                result1.append(name)
+                result2.append('class = "new"')
+            elif entry_type == 2:
+                result1.append("Removed")
+                result2.append('class = "removed"')
+                flag = 1
+            elif entry_type == 3:
+                result1.append(name)
+                result2.append('class = "updated"')
+            elif entry_type == 4:
+                result1.append(name)
+                result2.append('class = "downgraded"')
+
+    return (result1, result2, flag)
+
+def HTML_OutputBody(dict_packages, list_dict_old, list_dict_new, arg):
+    """Output table.
+
+    Outputs table in HTML format.
+    """
+    old = arg.old
+    new = arg.new
+    file_output = arg.output
+
+    file_output.write('<h1>\nDifference between repositories.\n</h1>\n' +\
+        '<p class="bold">\nThe use of color coding in tables:\n</p>\n' +\
+        '<table>\n' +\
+        '<tr>\n' +\
+        '<td class="new">New</td><td class="updated">Updated</td>\n' +\
+        '</tr>\n<tr>\n' +\
+        '<td class="downgraded">Downgraded</td><td class="removed">Removed</td>\n' +\
+        '</tr>\n' +\
+        '</table>\n\n')
+    repo_list = []
+
+    all_list = []
+    all_list.extend(old)
+    all_list.extend(new)
+    lenold = len(old)
+    lennew = len(new)
+    length = lenold + lennew
+
+    # Emit a bulleted list of the "old" repositories together with their
+    # URLs or paths; an analogous list for the "new" ones follows below.
+    reptext = 'repositories' if lenold > 1 else 'repository'
+    tmp_string = '<h2>\nOld ' + reptext + ':\n</h2>\n<ul>\n'
+    for i in range(lenold):
+        tmp_string = tmp_string + '<li>Repository ' + str(i) + ' : ' + old[i] + '</li>\n'
+    tmp_string = tmp_string + '</ul>\n'
+    file_output.write(tmp_string)
+
+    reptext = 'repositories' if lennew > 1 else 'repository'
+    tmp_string = '<h2>\nNew ' + reptext + ':\n</h2>\n<ul>\n'
+    for k in range(lennew):
+        i = i + 1
+        tmp_string = tmp_string + '<li>Repository ' + str(i) + ' : ' + new[k] + '</li>\n'
+    tmp_string = tmp_string + '</ul>\n'
+    file_output.write(tmp_string)
+
+    tmp_string = '<p class="bold">\nDifference between '
+    i = 0
+    while(i < length):
+        if(i < length - 2):
+            delimeter = " , "
+        elif(i == length - 2):
+            delimeter = " and "
+        else:
+            delimeter = ''
+        temp = '<a href="' + all_list[i] + '">' + \
+            'Repository ' + str(i) + '</a>'
+        if i < lenold:
+            repo_list.append('<th>Repository ' + str(i) + '</th>')
+        else:
+            ii = i + 1
+            repo_list.append('<th id="sortCelldiff' + str(ii) +\
+                '"><a href="javascript:sort_diff(' + str(ii) + ', \'attr\')"' +\
+                ' id="sortCellLinkdiff' + str(ii) + '" title="Sort Ascending">' +\
+                'Repository ' + str(i) + '</a></th>')
+        tmp_string = tmp_string + temp + delimeter
+        i = i + 1
+    tmp_string = tmp_string + ".\n</p>\n"
+    file_output.write(tmp_string)
+
+    tmp_string = '<table id="table_diff">\n<thead>\n<tr>\n' +\
+        '<th id="sortCelldiff0"><a href="javascript:sort_diff(0, \'string\')"' +\
+        ' id="sortCellLinkdiff0" title="Sort Ascending">Package name</a></th>\n'
+    for reponame in repo_list:
+        tmp_string = tmp_string + reponame
+    tmp_string = tmp_string + '</tr>\n</thead>\n<tbody>\n'
+
+    strnum = 1
+    resrange = []
+    for i in range(lennew):
+        resrange.append(lenold + i)
+
+    sorted_list = sorted(dict_packages, key=str.lower)
+    for packagename in sorted_list:
+        if strnum % 2:
+            strtype = "odd"
+        else:
+            strtype = "even"
+        tmp_string = tmp_string + '<tr class="' + strtype + '">'
+        tmp_string = tmp_string + '<td>' + packagename + '</td>'
+        (repo_name, repo_class, flag) = GetRepoInfo(dict_packages, packagename,
+            lenold, lennew, list_dict_old, list_dict_new)
+        if flag:
+            if(repo_name[lenold] == "Removed"):
+                res = 0
+                for k in resrange:
+                    if(repo_name[k] != "Removed"):
+                        res = 1
+                if res:
+                    for k in resrange:
+                        if(repo_name[k] == "Removed"):
+                            repo_name[k] = "N/A"
+                            repo_class[k] = ''
+            else:
+                for k in resrange:
+                    if(repo_name[k] == "Removed"):
+                        repo_name[k] = "N/A"
+                        repo_class[k] = ''
+
+        for i in range(length):
+            tmp_string = tmp_string + '<td ' + repo_class[i] + '>' +\
+                repo_name[i] + '</td>'
+        tmp_string = tmp_string + '</tr>\n'
+        strnum = strnum + 1
+    tmp_string = tmp_string + '</tbody>\n</table>\n'
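+
+    # Write the finished comparison table to the output file.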
+    file_output.write(tmp_string)
+
+def HTML_OutputTail(file_output):
+    """Output end of document.
+
+    Outputs static text.
+    """
+    file_output.write('''
+<script type="text/javascript">
+init_diff();
+</script>
+''')
+    file_output.write('</body>\n</html>\n\n')
+
+def HTML_Output(dict_packages, list_dict_old, list_dict_new, arg):
+    """Output HTML file.
+
+    Generates HTML file.
+    """
+    ifnotquiet = arg.quiet
+    file_output = arg.output
+
+    if ifnotquiet:
+        print _("Creating HTML file.")
+    HTML_OutputHead(file_output)
+    HTML_OutputBody(dict_packages, list_dict_old, list_dict_new, arg)
+    HTML_OutputTail(file_output)
+
+def main(args):
+    arg = ParseCommandLine()
+    arg.temp_dir = tempfile.mkdtemp() + '/'
+    head_old = arg.temp_dir + old_dir
+    head_new = arg.temp_dir + new_dir
+    arg.temp_old = []
+    arg.temp_new = []
+    if (arg.output):
+        tmp_output = arg.output[0]
+    else:
+        tmp_output = default_output
+    arg.output = None
+    for i in range(len(arg.old)):
+        arg.old[i] = CheckArgs(arg.old[i], arg)
+        arg.temp_old.append(head_old + str(i) + '/')
+    for i in range(len(arg.new)):
+        arg.new[i] = CheckArgs(arg.new[i], arg)
+        arg.temp_new.append(head_new + str(i) + '/')
+    arg.output = tmp_output
+    CheckOutput(arg)
+    CheckParam(arg)
+
+    ifsizes = arg.size
+    ifnotsimple = arg.simple
+    output_file = arg.output
+    ifnotquiet = arg.quiet
+    ifhtml = arg.html
+    ifchangelog = arg.changelog
+
+    GetFiles(arg)
+
+    if not ifhtml:
+        (dict_old, dict_new) = ParsePackage(arg)
+
+        (dict_new_packages, dict_del_packages, dict_upd_packages) = CreateDicts(
+            dict_old, dict_new)
+
+        dict_obsoleted = GenerateDictObsoleted(dict_new, ifnotquiet)
+        if(dict_upd_packages) and (ifnotsimple) and (ifchangelog):
+            dict_logfile_diff = GenerateLogfileDiff(dict_upd_packages, arg)
+        if not ifnotsimple or not ifchangelog:
+            dict_logfile_diff = {}
+
+        ProcessNewPackages(dict_new_packages, arg.output)
+        ProcessDelPackages(dict_del_packages, dict_obsoleted, arg.output)
+        if dict_upd_packages:
+            ProcessUpdPackages(dict_upd_packages, dict_logfile_diff, arg)
+        PrintSummary(dict_new_packages, dict_del_packages, dict_upd_packages, arg.output)
+    else:
+        (list_dict_old, list_dict_new) = HTML_ParsePackage(arg)
+        dict_old = HTML_UniteOld(list_dict_old)
+        dict_packages = HTML_CreateDicts(dict_old, list_dict_new)
+        HTML_Output(dict_packages, list_dict_old, list_dict_new, arg)
+
+    exit_proc(arg)
+
+if __name__ == "__main__":
+    main(sys.argv)
diff --git a/urpm-tools/urpm-repograph.py b/urpm-tools/urpm-repograph.py
new file mode 100755
index 0000000..bb58427
--- /dev/null
+++ b/urpm-tools/urpm-repograph.py
@@ -0,0 +1,1472 @@
+#!/usr/bin/python
+'''
+" Repograph utility for outputting graph of packages and their dependencies
+" on each other. Also checks for unprovided dependencies.
+"
+" The tool downloads, unpacks and parses synthesis.hdlist.cz and
+" (if necessary) files.xml.lzma to check for unprovided dependencies and
+" to output graph of packages and their dependencies in DOT language format.
+" The tool outputs data to standard output or to a file.
+"
+" REQUIREMENTS
+" ============
+" - urpmi
+" - python-2.7
+" - lzma
+" - gzip
+" - libxml2 python library
+" - rpm python library
+" - networkx python library
+"
+" Copyright (C) 2012 ROSA Laboratory.
+" Written by Vladimir Testov
+"
+" This program is free software: you can redistribute it and/or modify
+" it under the terms of the GNU General Public License or the GNU Lesser
+" General Public License as published by the Free Software Foundation,
+" either version 2 of the Licenses, or (at your option) any later version.
+"
+" This program is distributed in the hope that it will be useful,
+" but WITHOUT ANY WARRANTY; without even the implied warranty of
+" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+" GNU General Public License for more details.
+"
+" You should have received a copy of the GNU General Public License
+" and the GNU Lesser General Public License along with this program.
+" If not, see <http://www.gnu.org/licenses/>.
+'''
+import argparse
+import shutil
+import sys
+import os
+import urllib2
+import urllib
+import tempfile
+import subprocess
+import re
+import time
+
+import rpm
+import libxml2
+import urpmmisc
+
+import rpm5utils
+from rpm5utils.urpmgraphs.classes.digraph import DiGraph
+from rpm5utils.urpmgraphs.algorithms.cycles import simple_cycles
+import gettext
+
+gettext.install('urpm-tools')
+#import rpm5utils.urpmgraphs
+#from rpm5utils.urpmgraphs.algorithms import cycles
+#from rpm5utils.urpmgraphs.classes import digraph
+
+
+synthesis_arch = "synthesis.hdlist.cz"
+synthesis_arch_renamed = "synthesis.hdlist.gz"
+synthesis_file = "synthesis.hdlist"
+synthesis_search_field = ["info", "requires", "suggests", "provides"]
+fileslist_arch = "files.xml.lzma"
+fileslist_file = "files.xml"
+tmp_cross_path = "cross"
+loopdotfile = "loopgraph"
+altdotfile = "altgraph"
+default_output = "sys.stdout"
+timeout = 5
+
+re_search_unver = re.compile("([^\[\]]+)[\[\]]")
+re_search_verrel = re.compile("\[(== |> |< |>= |<= )([\{\}+=0-9a-zA-Z_\.]*:)?([[\{\}+=0-9a-zA-Z_\.]+)(-[[\{\}+=0-9a-zA-Z_\.]+)?([^\[\]]*)\]$")
+
+def ParseCommandLine():
+    """Parse arguments.
+
+    Parse arguments from command line.
+    Return these arguments.
+    """
+    parser = argparse.ArgumentParser(
+        description=_("Tool for generating dependency graph for REPOSITORY packages."))
+    parser.add_argument("repository", action="store", nargs=1,
+        metavar="REPOSITORY", help="URL or local PATH to repository.")
+    parser.add_argument("--cross", "-c", action="store", nargs='+', metavar="CROSS_REPO",
+        help=_("Search for cross-repository references in CROSS_REPO(s) repositories."))
+
+    parser.add_argument("--quiet", "-q", action="store_false",
+        help=_("Hide service messages. (About progress status etc.)"))
+    parser.add_argument("--verbose", "-v", action="store_true",
+        help=_("Show warnings. (About unprovided packages etc.)"))
+
+    parser.add_argument("--requires", "-r", action="store_true",
+        help=_("Process \"requires\" package dependencies. Used by default."))
+    parser.add_argument("--suggests", "-s", action="store_true",
+        help=_("Process \"suggests\" package dependencies. 
If used without \
+        --requires then only suggests dependencies are processed."))
+    parser.add_argument("--file", "-f", action="store_true",
+        help=_("Process file dependencies."))
+    parser.add_argument("--unprovided", "-u", action="store_true",
+        help=_("Show unprovided dependencies."))
+
+    pkgrequiresgroup = parser.add_mutually_exclusive_group()
+    pkgrequiresgroup.add_argument("--requires-recursive", action="store", nargs=1, default=None,
+        metavar="PKG", help=_("Search for packages which are required by package PKG (PKG is a file name or package name)"))
+    pkgrequiresgroup.add_argument("--whatrequires", action="store", nargs=1, default=None,
+        metavar="PKG", help=_("Search for packages which require package PKG (PKG is a file name or package name)"))
+
+    opactgroup = parser.add_mutually_exclusive_group()
+    opactgroup.add_argument("--loops", "-l", action="store_true",
+        help=_("Search for all simple loops of package dependencies."))
+    opactgroup.add_argument("--alternatives", "-a", action="store_true",
+        help=_("Search for alternative packages providing the same feature."))
+    opactgroup.add_argument("--broken", "-b", action="store_true",
+        help=_("Search for all broken packages and anything between them"))
+    parser.add_argument("--different", "-d", action="store_true",
+        help=_("Output each loop or each alternative in a different file. \
+            Ignored if --loops or --alternatives options are not present. \
+            OUTPUT_FILE (if present) is treated as a folder name for the new files in that case."))
+
+    graphgroup = parser.add_mutually_exclusive_group()
+    graphgroup.add_argument("--output", "-o", action="store", nargs=1, default='',
+        metavar="OUTPUT_FILE", help=_("Change graph output to \"OUTPUT_FILE\". STDOUT by default."))
+    graphgroup.add_argument("--nograph", "-n", action="store_true",
+        help=_("Do not output the graph. The tool will not start working if --quiet and --nograph are present \
+            and --verbose is not. (If there is nothing to output, then nothing has to be done.)"))
+    return parser.parse_args()
+
+def exit_proc(arg):
+    """
+    Remove trash.
+    """
+    err_tmp_dir = arg.tmp_dir
+    err_output = arg.output
+    err_loops = arg.loops
+    err_alternatives = arg.alternatives
+    err_different = arg.different
+
+    if (err_output != None) and not ((err_loops or err_alternatives) and (err_different)):
+        err_output.close()
+    if os.path.isdir(err_tmp_dir):
+        shutil.rmtree(err_tmp_dir)
+    exit(0)
+
+def CheckURL(url, arg):
+    """URL check.
+
+    Check that URL is gettable.
+    """
+    try:
+        urllib2.urlopen(url, None, timeout)
+    except:
+        print _("Error: URL to repository \"%s\" is incorrect") % url
+        exit_proc(arg)
+
+def CheckURLPATH(urlpath, arg):
+    """Argument checks.
+
+    Check that URL or path is correct.
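+    Accepted forms (paths below are illustrative):
+        http://host/repo/ or ftp://host/repo/ - checked remotely, with
+        "media_info/" appended;
+        /path/to/repo or file:///path/to/repo - checked as a local
+        directory;
+        any other string is treated as a repository name and resolved
+        through urpmmisc.GetUrlFromRepoName().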
+    """
+    if (urlpath.startswith("http://") or urlpath.startswith("ftp://")):
+        if not urlpath.endswith('/'):
+            urlpath = urlpath + '/'
+        urlpath = urlpath + "media_info/"
+        CheckURL(urlpath, arg)
+    elif (os.path.isdir(urlpath)) or urlpath.startswith("file://"):
+        if urlpath.startswith("file://./"):
+            urlpath = urlpath[7:]
+        else:
+            urlpath = urlpath[6:]
+        if not urlpath.endswith('/'):
+            urlpath = urlpath + '/'
+        urlpath = urlpath + "media_info/"
+        if not os.path.isdir(urlpath):
+            print _("Error: directory %s does not exist") % urlpath
+            exit_proc(arg)
+    else:
+        (e1,e2,urltmp) = urpmmisc.GetUrlFromRepoName(urlpath)
+        if (urltmp):
+            if not urltmp.endswith('/'):
+                urltmp = urltmp + '/'
+            urlpath = urltmp + "media_info/"
+            CheckURL(urlpath, arg)
+        else:
+            print _("Error: \"%s\" is not correct url, path or name of repository") % urlpath
+            exit_proc(arg)
+    return urlpath
+
+def CheckOptions(arg):
+    """Options check.
+
+    Make options understandable for the program.
+    """
+    if (arg.suggests == 0):
+        arg.requires = 1
+
+def CheckOutput(arg):
+    """Check output file.
+
+    Check if the file can be created and redirect standard output to this file.
+    """
+    file_output = arg.output
+    ifloops = arg.loops
+    ifalternatives = arg.alternatives
+    ifdifferent = arg.different
+
+    if (file_output == "sys.stdout") or (file_output == "stdout"):
+        arg.output = sys.stdout
+        return
+    if((ifloops or ifalternatives) and ifdifferent): # check for dir
+        if(os.path.isdir(file_output)):
+            print _("Error: directory %s already exists") % file_output
+            arg.output = None
+            exit_proc(arg)
+        else:
+            file_output = os.path.realpath(file_output)
+            if (os.path.isfile(file_output)):
+                print _("Error: File %s already exists") % file_output
+                arg.output = None
+                exit_proc(arg)
+
+            try:
+                os.makedirs(file_output)
+            except:
+                print _("Error: directory %s was not created") % file_output
+                arg.output = None
+                exit_proc(arg)
+            if not file_output.endswith('/'):
+                file_output = file_output + '/'
+            arg.output = file_output
+    else:
+        if(os.path.isfile(file_output)):
+            print _("Error: File %s already exists") % file_output
+            arg.output = None
+            exit_proc(arg)
+        else:
+            dirname = os.path.dirname(file_output)
+            if(dirname == '') or (os.path.exists(dirname)):
+                try:
+                    arg.output = open(file_output, "w")
+                except IOError:
+                    print _("Error: File %s cannot be created") % file_output
+                    arg.output = None
+                    exit_proc(arg)
+            else:
+                print _("Error: Path %s does not exist.") % dirname
+                arg.output = None
+                exit_proc(arg)
+
+def GetFile(urlpath, filename, localdir, arg):
+    """Download archive.
+    """
+    ifnotquiet = arg.quiet
+
+    if not os.path.isdir(localdir):
+        os.makedirs(os.path.realpath(localdir))
+    if ifnotquiet:
+        print (_("getting file %s from ") % filename) + "\n " + urlpath + filename
+    if os.path.isdir(urlpath):
+        try:
+            shutil.copyfile(urlpath + filename, localdir + filename)
+        except:
+            print _("Error: file %s was not copied") % filename
+            exit_proc(arg)
+    else:
+        try:
+            file_from = urllib2.urlopen(urllib2.Request(urlpath + filename), None, timeout)
+            file_to = open(localdir + filename, "w")
+            shutil.copyfileobj(file_from, file_to)
+        except:
+            print _("Error: file %(from)s was not downloaded to %(to)s") %{"from": urlpath + filename, "to": localdir + filename}
+            exit_proc(arg)
+        file_from.close()
+        file_to.close()
+
+def RenameSynthFile(localdir, arg):
+    """Rename.
+
+    Rename Synthesis file so gzip can understand the format.
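+    The archive is named synthesis.hdlist.cz in the repository although it
+    is a plain gzip stream, so it is renamed to synthesis.hdlist.gz before
+    "gzip -df" is run on it.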
+ """ + ifnotquiet = arg.quiet + + if not os.path.isfile(localdir + synthesis_arch): + print _("Error: file not found: ") + localdir + synthesis_arch + exit_proc(arg) + try: + os.rename(localdir + synthesis_arch, localdir + synthesis_arch_renamed) + except OSError: + print _("Error: cannot rename file %(from)s to %(to)s") % {"from": synthesis_arch, "to": synthesis_arch_renamed} + + exit_proc(arg) + if not os.path.isfile(localdir + synthesis_arch_renamed): + print _("Error: file %s is missing.") % (localdir + synthesis_arch_renamed) + exit_proc(arg) + else: + if ifnotquiet: + print _("file %(from)s was renamed to %(to)s") % {"from": synthesis_arch, "to": synthesis_arch_renamed} + +def UnpackSynthFile(localdir, arg): + """Unpack Synthesis file. + + Unpack renamed synthesis file using gzip. + """ + ifnotquiet = arg.quiet + + if ifnotquiet: + print _("unpacking file ") + synthesis_arch_renamed + if not os.path.isfile(localdir + synthesis_arch_renamed): + print _("Error: file %s is missing.") % (localdir + synthesis_arch_renamed) + exit_proc(arg) + subprocess.call(["gzip", "-df", localdir + synthesis_arch_renamed]) + +def PrepareSynthFile(localdir, arg): + """Prepare Synthesis file for parsing. + """ + RenameSynthFile(localdir, arg) + UnpackSynthFile(localdir, arg) + +def ParseVersion(names_list): + """Parse version info if present. + + Parse version information from the field. e.g. provided_name[>= 1.2.3-4.5.6] + is parsed to (provided_name, sign, (epoch, version, release)) + """ + new_names_list = [] + for name in names_list: + match = re_search_unver.match(name) + if match: + tmp_entry = match.group(1) + else: + tmp_entry = name + match = re_search_verrel.search(name) + if match: + sign = match.group(1)[:-1] + epoch = match.group(2) + if epoch: + epoch = epoch[:-1] + else: + epoch = '' + version = match.group(3) + release = match.group(4) + if release: + release = release[1:] + else: + release = '' + verrel = (epoch, version, release) + else: + sign = '' + verrel = ('','','') + new_names_list.append((tmp_entry, sign, verrel)) + return new_names_list + +def TagEpoch(i): + """Return disttagepoch value. + """ + if len(i) == 4: + return '-' + elif len(i) == 5: + disttag = i[4] + distepoch = '' + return disttag + distepoch + elif len(i) == 6: + disttag = i[4] + distepoch = i[5] + return disttag + distepoch + else: + print _("REPODIFF-Warning: strange : ") + str(i) + +def RPMNameFilter(rpmname, disttagepoch): + """Parse name and verrel. + + Function that parses name, version and release of a package. + """ + string = rpmname.split('-') + lastpart = string.pop() + tmp = lastpart.split('.') + tmp.pop() + lastpart = '.'.join(tmp) + if (lastpart[0].isdigit() or (not lastpart.startswith(disttagepoch))) and\ + (not lastpart.isdigit()): + name = '-'.join(string[:-1]) + else: + name = '-'.join(string[:-2]) + return name + +def ParseSynthFile(dict_provides, dict_asks, localdir, arg): + """Collect packages information. + + Parse synthesis.hdlist file. 
+
+    dict_provides[phrase]=[(name, sign, verrel)] contains the names of packages providing phrase
+    dict_asks[pkg_name]=[(name, sign, verrel)] contains everything
+    that the pkg_name package asks for
+    """
+    ifnotquiet = arg.quiet
+    ifrequires = arg.requires
+    ifsuggests = arg.suggests
+    ifverbose = arg.verbose
+    iftagepoch = arg.requires_recursive or arg.whatrequires
+    ifnothide = not iftagepoch
+
+    if not os.path.isfile(localdir + synthesis_file):
+        print _("Error: Synthesis file %s was not found.") % (localdir + synthesis_file)
+        exit_proc(arg)
+    if ifnotquiet:
+        print _("Parsing synthesis.")
+    try:
+        synth = open(localdir + synthesis_file)
+        tmp = ['', [], [], []]
+        for synthline in synth:
+            if synthline.endswith('\n'):
+                synthline = synthline[:-1]
+            tmpline = synthline.split('@')
+            tag = tmpline[1]
+            if(tag == synthesis_search_field[1]) and ifrequires:
+                tmp[1] = tmpline[2:]
+            elif(tag == synthesis_search_field[2]) and ifsuggests:
+                tmp[2] = tmpline[2:]
+            elif tag == synthesis_search_field[3]:
+                tmp[3] = tmpline[2:]
+            elif tag == synthesis_search_field[0]:
+                if (iftagepoch):
+                    tmp[0] = tmpline[2:]
+                    disttagepoch = TagEpoch(tmp[0])
+                    tmp[0] = tmp[0][0]
+                else:
+                    tmp[0] = tmpline[2]
+
+                parsed_tmp = ParseVersion(tmp[3])
+                for (phrase, sign, verrel) in parsed_tmp:
+                    if ((ifverbose and ifnothide) and (sign != '==') and (sign != '')):
+                        print _("Warning: Unexpected sign %(sign)s in 'provides' section of %(of)s") %\
+                            {"sign": sign, "of": tmp[0]}
+                    if (not phrase in dict_provides):
+                        dict_provides[phrase] = [(tmp[0], sign, verrel)]
+                    else:
+                        dict_provides[phrase].append((tmp[0], sign, verrel))
+                tmp_list = []
+                tmp_list.extend(tmp[1])
+                tmp_list.extend(tmp[2])
+                if (iftagepoch):
+                    dict_asks[tmp[0]] = (ParseVersion(tmp_list), RPMNameFilter(tmp[0], disttagepoch))
+                else:
+                    dict_asks[tmp[0]] = [ParseVersion(tmp_list)]
+                tmp = ['', [], [], []]
+        synth.close()
+    except IOError:
+        print _("Error: Failed to open synthesis file ") + localdir + synthesis_file
+        exit_proc(arg)
+    return (dict_provides, dict_asks)
+
+def compare_verrel(verrel1, sign, verrel2):
+    """Compare versions.
+
+    Compare versions with attention to sign.
+    """
+    (e1, v1, r1) = verrel1
+    (e2, v2, r2) = verrel2
+    # checks
+    if (v2 == '') or (v1 == ''):
+        return 1
+    if (e1 == '') or (e2 == ''):
+        e1 = '0'
+        e2 = '0'
+    if (r1 == '') or (r2 == ''):
+        r1 = '0'
+        r2 = '0'
+    # compare
+    compare = rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
+    if (sign == "=="):
+        if (compare == 0):
+            return 1
+    elif (sign == ">"):
+        if (compare == 1):
+            return 1
+    elif (sign == "<"):
+        if (compare == -1):
+            return 1
+    elif (sign == ">="):
+        if (compare > -1):
+            return 1
+    elif (sign == "<="):
+        if (compare < 1):
+            return 1
+    return 0
+
+def compare_2signs_verrel(provide_verrel, provide_sign, verrel, sign):
+    """Compare versions.
+
+    Compare versions with attention to two signs.
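+    For example (hand-checked against the rules implemented below):
+        compare_2signs_verrel(('', '2.0', ''), '>', ('', '3.0', ''), '<')
+        returns 1, since versions above 2.0 and below 3.0 can overlap;
+        compare_2signs_verrel(('', '3.0', ''), '>', ('', '2.0', ''), '<')
+        returns 0, since no version is both above 3.0 and below 2.0.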
+ """ + (e1, v1, r1) = provide_verrel + (e2, v2, r2) = verrel + if ((sign == '>') or (sign == '>=')) and ((provide_sign == '>') or (provide_sign == '>=')): + return 1 + if ((sign == '<') or (sign == '<=')) and ((provide_sign == '<') or (provide_sign == '<=')): + return 1 + if (v1 == '') or (v2 == ''): + return 1 + if (e1 == '') or (e2 == ''): + e1 = '0' + e2 = '0' + if (r1 == '') or (r2 == ''): + r1 = '0' + r2 = '0' + compare = rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) + if (compare == 0): + return 1 + if ((provide_sign == '<') or (provide_sign == '<=')) and (compare == 1): + return 1 + if ((provide_sign == '>') or (provide_sign == '>=')) and (compare == -1): + return 1 + return 0 + +def print_verrel(verrel): + """Output version info. + + Formatted output of version info. + """ + (e, v, r) = verrel + result = '' + if (e != ''): + result = e + ":" + if (v != ''): + result = result + v + if (r != ''): + result = result + '-' + r + return result + +def unpack_fileslist(localdir, arg): + """Unpack files.xml file. + + Unpack files.xml.lzma using lzma. + """ + ifnotquiet = arg.quiet + + if ifnotquiet: + print _("unpacking file ") + fileslist_arch + if not os.path.isfile(localdir + fileslist_arch): + print _("Error: file %s is missing.") % (localdir + fileslist_arch) + exit_proc(arg) + subprocess.call(["lzma", "-df", localdir + fileslist_arch]) + +def parse_fileslist(filename_check, filename_found, count_depend, dict_depend, localdir, ifcry, arg): + """Parse files.xml. + """ + ifnotquiet = arg.quiet + ifverbose = arg.verbose + ifnothide = (not arg.requires_recursive) and (not arg.whatrequires) + + if ifnotquiet: + print _("Reading fileslist") + if not os.path.isfile(localdir + fileslist_file): + print _("Error: Can't find fileslist ") + localdir + fileslist_file + exit_proc(arg) + doc = libxml2.parseFile(localdir + fileslist_file) + if (not doc): + print _("Error: Can't read fileslist ") + localdir + fileslist_file + exit_proc(arg) + root = doc.children + if root.name != "media_info": + print _("Error: Wrong fileslist.") + doc.freeDoc() + exit_proc(arg) + tag_package = root.children + while(tag_package): + if(tag_package.name != "files"): + tag_package = tag_package.next + continue + + tag_property = tag_package.properties + while(tag_property) and (tag_property.name != "fn"): + tag_property = tag_property.next + if not tag_property: + print _("Error: Corrupted fileslist") + doc.freeDoc() + exit_proc(arg) + name = tag_property.content + files = tag_package.content.split('\n') + for filename in files: + if filename in filename_check: + for packagename in filename_check[filename]: + if (packagename != name): + if (ifcry > 0): + if (filename_check[filename][packagename] == 1): + continue + else: + isdotted = 1 + else: + if (filename_check[filename][packagename] == 1): + isdotted = 1 + else: + isdotted = 0 + if packagename not in dict_depend: + dict_depend[packagename]={} + if name not in dict_depend[packagename]: + dict_depend[packagename][name] = isdotted + if packagename not in count_depend: + count_depend[packagename] = 1 + else: + count_depend[packagename] = count_depend[packagename] + 1 + if filename not in filename_found: + filename_found.append(filename) + if (ifverbose and ifnothide) and (ifcry == None): + print _("Warning: cross-repository dependency: ") + packagename +\ + "\n -> " + name + else: + if (ifverbose and ifnothide): + print _("Warning: package has self-dependecies: ") + packagename +\ + "\n <" + filename + ">" + tag_package = tag_package.next + doc.freeDoc() + #found!!! 
update count_depend dict_depend add to filename_found + +def process_fileslist(filename_check, filename_found, count_depend, dict_depend, localdir, ifcry, arg): + """Process files.xml. + + Make necessary steps to process files.xml. + """ + if (ifcry == None): + path = arg.repository + else: + path = arg.crossurl[ifcry] + if (not os.path.isfile(localdir + fileslist_file)): + GetFile(path, fileslist_arch, localdir, arg) + unpack_fileslist(localdir, arg) + parse_fileslist(filename_check, filename_found, count_depend, dict_depend, localdir, ifcry, arg) + +def remake_count_depend(count_depend): + """Build count_depend. + + Build count_depend in case of using --file option. + """ + result = {} + for packagename in count_depend: + length = count_depend[packagename] + if length not in result: + result[length] = 1 + else: + result[length] = result[length] + 1 + return result + +def AddDepend(provides, temp_dict, packagename, asked, mode, dict_cross_error, ifshow): + """Add dependency to temp dictionary. + + Used in FillDepend function. + """ + if (provides not in temp_dict) and (provides != packagename): + if mode == 0: + temp_dict[provides] = 0 + else: + temp_dict[provides] = 1 + dict_cross_error[packagename] = "" + if (ifshow): + print _("Warning: cross-repository dependency:\n package %(pkg)s is dependent from\n <- %(from)s located in another repository") %\ + {"pkg": packagename, "from": provides} + elif (provides == packagename): + if (ifshow): + print _("Warning: package has self-dependecies: ") + packagename +\ + "\n <" + asked + ">" + +def FillDepend(dict_tmp_provides, asked, temp_dict, packagename, sign, verrel, + dict_error, dict_cross_error, mode, ifshow, ifshowunprovided): + """Fill dependency dictionary. + + Used in FindDepend function. + """ + found = 0 + tmp = 0 + for (provides, provide_sign, provide_verrel) in dict_tmp_provides[asked]: + if (sign == '') or (provide_sign == ''): + AddDepend(provides, temp_dict, packagename, asked, mode, dict_cross_error, ifshow) + tmp = 1 + found = 1 + elif (provide_sign == '=='): + if compare_verrel(provide_verrel, sign, verrel): + AddDepend(provides, temp_dict, packagename, asked, mode, dict_cross_error, ifshow) + tmp = 2 + found = 1 + else: + if compare_2signs_verrel(provide_verrel, provide_sign, verrel, sign): + AddDepend(provides, temp_dict, packagename, asked, mode, dict_cross_error, ifshow) + tmp = 3 + found = 1 + if found == 0: + dict_error[packagename] = '' + if (ifshow): + print _("Warning: needed version is absent <%(ver)s> %(rel)s required by package") %\ + {"ver": asked, "rel": print_verrel(verrel)} + "\n <%s>" % packagename + if (ifshowunprovided): + if asked not in temp_dict: + temp_dict[asked] = 2 + +def generate_error_dict(filename_check, filename_found, dict_error, dict_depend, count_depend, ifshow, ifshowunprovided): + """Generate Warnings about unprovided packages. + + Used in FindDepend function. 
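+    filename_check[filename][packagename] is 0 when packagename comes from
+    the main repository and 1 when it comes from a cross-repository; the
+    cross-repository entries are skipped here.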
+ """ + for filename in filename_check: + if filename not in filename_found: + for packagename in filename_check[filename]: + if (filename_check[filename][packagename] == 1): + continue + if (ifshow): + print _("Warning: Package %(pkg)s unprovided by %(by)s") %{'pkg': packagename, 'by': filename} + if (ifshowunprovided): + if filename not in dict_depend[packagename]: + dict_depend[packagename][filename] = 2 + if packagename not in count_depend: + count_depend[packagename] = 1 + else: + count_depend[packagename] = count_depend[packagename] + 1 + if packagename not in dict_error: + dict_error[packagename] = '' + #if in filename_check but not in filename_found then update dict_error by contents of filename_check + +def FindDepend(dict_provides, dict_asks, dict_cross_provides, dict_cross_asks, arg): + """Find dependencies. + + Find dependencies and tell about unprovided packages. + """ + ifnotquiet = arg.quiet + ifcheckfiles = arg.file + ifcross = arg.cross + ifverbose = arg.verbose + ifnothide = (not arg.requires_recursive) and (not arg.whatrequires) + ifshow = ifverbose and ifnothide + ifshowunprovided = arg.unprovided or arg.broken + + dict_error = {} + dict_cross_error = {} + dict_depend = {} + count_depend = {} + filename_check = {} + filename_found = [] + if (ifnotquiet and ifnothide): + print _("Finding dependencies.") + for packagename in dict_asks: + temp_dict = {} + for (asked, sign, verrel) in dict_asks[packagename][0]: + if asked not in dict_provides: + if asked not in dict_cross_provides: + if not asked.startswith('/'): + dict_error[packagename] = '' + if (ifshow): + print _("Warning: can't find <%(ask)s> required by package\n <%(pkg)s>") %\ + {'ask': asked, 'pkg': packagename} + if (ifshowunprovided): + if asked not in temp_dict: + temp_dict[asked] = 2 + elif ifcheckfiles: + if asked not in filename_check: + filename_check[asked] = {} + filename_check[asked][packagename] = 0 # usual + else: + FillDepend(dict_cross_provides, asked, temp_dict, packagename, + sign, verrel, dict_error, dict_cross_error, 1, ifshow, ifshowunprovided) + else: + FillDepend(dict_provides, asked, temp_dict, packagename, + sign, verrel, dict_error, dict_cross_error, 0, ifshow, ifshowunprovided) + dict_depend[packagename] = temp_dict + if not ifcheckfiles: + length = len(temp_dict) + if length not in count_depend: + count_depend[length] = 1 + else: + count_depend[length] = count_depend[length] + 1 + else: + count_depend[packagename] = len(temp_dict) + + for packagename in dict_cross_asks: # cross-rep dependency + if packagename in dict_depend: + continue + temp_dict = {} + for (asked, sign, verrel) in dict_cross_asks[packagename][0]: + if asked in dict_provides: + FillDepend(dict_provides, asked, temp_dict, packagename, + sign, verrel, dict_error, dict_cross_error, 2, ifshow, ifshowunprovided) + else: + if (asked not in dict_cross_provides) and (asked.startswith('/')) and (ifcheckfiles): + if (asked not in filename_check): + filename_check[asked] = {} + filename_check[asked][packagename] = 1 # from cross-repo + + if packagename not in dict_depend: + dict_depend[packagename] = temp_dict + else: + temp_dict.update(dict_depend[packagename]) + dict_depend[packagename] = temp_dict + if not ifcheckfiles: + length = len(temp_dict) + if length not in count_depend: + count_depend[length] = 1 + else: + count_depend[length] = count_depend[length] + 1 + else: + count_depend[packagename] = len(temp_dict) + + if ifcheckfiles: + process_fileslist(filename_check, filename_found, count_depend, dict_depend, arg.tmp_dir, None, 
arg) + if ifcross: + for i in range(len(ifcross)): + process_fileslist(filename_check, filename_found, count_depend, dict_depend, get_temp(i, arg), i, arg) + generate_error_dict(filename_check, filename_found, dict_error, dict_depend, count_depend, ifshow, ifshowunprovided) + count_depend = remake_count_depend(count_depend) + if (ifshow): + if (ifcross): + sorted_tmp = sorted(dict_cross_error) + print "\n" + _("Total cross-referenced packages: ") + str(len(sorted_tmp)) + for tmp_ent in sorted_tmp: + print tmp_ent + sorted_tmp = sorted(dict_error) + print "\n" + _("Total unprovided packages: ") + str(len(sorted_tmp)) + for tmp_ent in sorted_tmp: + print tmp_ent + return dict_depend, count_depend + +def AssignColors(dict_depend, count_depend, arg): + """Assign colors. + + Assign colors for graph output. + """ + ifnotquiet = arg.quiet + ifchangecolors = arg.whatrequires + + dict_colors = {} + dict_count = {} + + if ifnotquiet: + print _("Calculating colors.") + sorted_count = sorted(count_depend) + length = len(count_depend) + normalized_count = {} + i = 0 + for number in sorted_count: + normalized_count[number] = float(i) / length + dict_count[number] = count_depend[number] + i = i + 1 + for package_name in dict_depend: + number = len(dict_depend[package_name]) + if (ifchangecolors): + h = float(dict_count[number]) / count_depend[number] + s = 0.6 + 0.4 * normalized_count[number] + else: + h = normalized_count[number] + s = 0.6 + (0.4 * dict_count[number]) / count_depend[number] + b = 1.0 + dict_colors[package_name] = (h, s, b) + dict_count[number] = dict_count[number] - 1 + return dict_colors + +def OutputGraphHead(file_output): + """Output Graph head. + + Static information about graph. + """ + file_output.write('\n\ndigraph packages {\nsize="20.69,25.52";\nratio="fill";\n' +\ + 'rankdir="TB";\nnode[style="filled"];\nnode[shape="box"];\n\n') + +def print_color(color_tuple): + """Format color. + + Format color for outputting. + """ + return str(color_tuple[0]) + ' ' + str(color_tuple[1]) + ' ' +\ + str(color_tuple[2]) + +def OutputGraphLoopBody(loop, loop_color, file_output): + """Output Graph body in --loop case. + """ + beg = 1 + for pkg in loop: + if (beg): + beg = 0 + tmp_string = '"' + pkg + '"' + else: + tmp_string = tmp_string + ' -> "' + pkg + '"' + file_output.write(tmp_string + ' [color="' + str(loop_color) + ' 1.0 1.0"];\n') + +def OutputGraphAltBody(phrase, alt, alt_color, file_output): + """Output Graph body in --alternative case. + """ + tmp_string = '"' + phrase + '" -> {\n' + sorted_list = sorted(alt) + for packagename in sorted_list: + tmp_string = tmp_string + '"' + packagename + '"\n' + tmp_string = tmp_string + '} [color="' + str(alt_color) + ' 1.0 1.0"];\n\n' + file_output.write(tmp_string) + +def OutputGraphBody(some_list, dict_color, file_output, packagename, node_type): + """Output Graph body. + + Output Graph. 
+ """ + tmp_string = '"' + packagename + '" -> {\n' + sorted_depend = sorted(some_list) + if (node_type == 1): + arrow_style = ', style="dotted"' + else: + arrow_style = '' + if (node_type == 2): + tmp_string = tmp_string + 'node[shape="ellipse", fillcolor="0.0 1.0 1.0"];\n' + for dependfrom in sorted_depend: + tmp_string = tmp_string + '"' + dependfrom + '"\n' + if (node_type == 0) or (node_type == 1): + tmp_string = tmp_string + '} [color="' +\ + print_color(dict_color[packagename]) +\ + '"' + arrow_style + '];\n\n' + elif (node_type == 2): + tmp_string = tmp_string + '};\n\n' + file_output.write(tmp_string) + + +def OutputGraphTail(file_output): + """Finish the graph. + """ + file_output.write('}\n') + +def OutputGraph(dict_depend, dict_color, arg): + """Output the graph. + """ + file_output = arg.output + if arg.whatrequires: + selected_node = arg.whatrequires[0] + elif arg.requires_recursive: + selected_node = arg.requires_recursive[0] + else: + selected_node = None + OutputGraphHead(file_output) + + if (selected_node): + file_output.write('"' + selected_node + '" [color="0.4 1.0 1.0"];\n') + sorted_list = sorted(dict_depend) + for packagename in sorted_list: + if not dict_depend[packagename]: + continue + usual_list = [] + cross_list = [] + missed_list = [] + for pkg in dict_depend[packagename]: + mode = dict_depend[packagename][pkg] + if (mode == 0): + usual_list.append(pkg) + elif (mode == 1): + cross_list.append(pkg) + elif (mode == 2): + missed_list.append(pkg) + + if (len(usual_list) > 0): + OutputGraphBody(usual_list, dict_color, file_output, packagename, 0) + if (len(cross_list) > 0): + OutputGraphBody(cross_list, dict_color, file_output, packagename, 1) + if (len(missed_list) > 0): + OutputGraphBody(missed_list, None, file_output, packagename, 2) + + OutputGraphTail(file_output) + +def CountPor(number): + tmp = number / 10 + por = 0 + while tmp: + tmp = tmp / 10 + por = por + 1 + return por + +def LeadingZeroes(number, por): + por2 = CountPor(number) + return (por-por2)*'0' + str(number) + +def OutputLoopGraph(loops, colors, arg): + """Output graph(s) of loops. + """ + ifdifferent = arg.different + if arg.whatrequires: + selected_node = arg.whatrequires[0] + elif arg.requires_recursive: + selected_node = arg.requires_recursive[0] + else: + selected_node = None + + output = arg.output + file_output = output + if not ifdifferent: + OutputGraphHead(file_output) + if (selected_node): + file_output.write('"' + selected_node + '" [color="0.4 1.0 1.0"];\n') + + length = len(colors) + por = CountPor(length) + for i in range(length): + if ifdifferent: + filename = output + loopdotfile + LeadingZeroes(i, por) + '.dot' + file_output = open(filename, 'w') + OutputGraphHead(file_output) + if (selected_node): + file_output.write('"' + selected_node + '" [color="0.4 1.0 1.0"];\n') + OutputGraphLoopBody(loops[i], colors[i], file_output) + if ifdifferent: + OutputGraphTail(file_output) + file_output.close() + + if not ifdifferent: + OutputGraphTail(file_output) + +def OutputAltGraph(alternatives, colors, arg): + """Output graph(s) of alternatives. 
+    """
+    ifdifferent = arg.different
+    if arg.whatrequires:
+        selected_node = arg.whatrequires[0]
+    elif arg.requires_recursive:
+        selected_node = arg.requires_recursive[0]
+    else:
+        selected_node = None
+
+    output = arg.output
+    file_output = output
+    if not ifdifferent:
+        OutputGraphHead(file_output)
+        if (selected_node):
+            file_output.write('"' + selected_node + '" [color="0.4 1.0 1.0"];\n')
+
+    i = 0
+    length = len(colors)
+    por = CountPor(length)
+    for phrase in alternatives:
+        if ifdifferent:
+            filename = output + altdotfile + LeadingZeroes(i, por) + '.dot'
+            file_output = open(filename, 'w')
+            OutputGraphHead(file_output)
+            if (selected_node):
+                file_output.write('"' + selected_node + '" [color="0.4 1.0 1.0"];\n')
+        OutputGraphAltBody(phrase, alternatives[phrase], colors[i], file_output)
+        if ifdifferent:
+            OutputGraphTail(file_output)
+            file_output.close()
+        i = i + 1
+
+    if not ifdifferent:
+        OutputGraphTail(file_output)
+
+def BuildGraph(dict_depend):
+    """Build additional structures.
+
+    Build structures used in the loop-finding algorithm and later for the
+    --requires-recursive/--whatrequires options.
+    """
+    dict_out = {}
+    dict_in = {}
+    for packagename in dict_depend:
+        for pkg2 in dict_depend[packagename]:
+            if pkg2 not in dict_out:
+                dict_out[pkg2] = []
+            if packagename not in dict_in:
+                dict_in[packagename] = []
+            dict_out[pkg2].append(packagename)
+            dict_in[packagename].append(pkg2)
+    return (dict_in, dict_out)
+
+def RemoveNonCycle(dict_in, dict_out, arg):
+    """Remove non-cycle nodes from graph.
+
+    Remove all nodes that are not present in any loop.
+    Linear algorithm. On each step it checks all marked nodes.
+    If a node has no nodes depending on it, or does not depend on any
+    node, then it cannot be part of any loop. So we exclude this node
+    and mark all nodes connected to it, because only for them has the
+    situation changed. All remaining nodes are part of some loop.
+    """
+    ifnotquiet = arg.quiet
+
+    check = [] #items for further checks
+    to_remove = [] #items for remove
+    for pkg in dict_in:
+        check.append(pkg)
+    for pkg in dict_out:
+        if pkg not in check:
+            check.append(pkg)
+
+    ischanged = 1
+    removed = 0
+    while(ischanged):
+        ischanged = 0
+        for pkg in check:
+            if (pkg not in dict_in) or (pkg not in dict_out):
+                to_remove.append(pkg)
+                removed = removed + 1
+                ischanged = 1
+        check = []
+        for pkg in to_remove:
+            if (pkg in dict_in):
+                for pkg2 in dict_in[pkg]:
+                    dict_out[pkg2].remove(pkg)
+                    if (len(dict_out[pkg2]) == 0):
+                        dict_out.pop(pkg2)
+                    if pkg2 not in check:
+                        check.append(pkg2)
+                dict_in.pop(pkg)
+            if (pkg in dict_out):
+                for pkg2 in dict_out[pkg]:
+                    dict_in[pkg2].remove(pkg)
+                    if (len(dict_in[pkg2]) == 0):
+                        dict_in.pop(pkg2)
+                    if pkg2 not in check:
+                        check.append(pkg2)
+                dict_out.pop(pkg)
+        to_remove = []
+    if ifnotquiet:
+        print _("Non-cycle nodes removed: ") + str(removed)
+        print _("Cyclic packages: ") + str(len(dict_in))
+
+def FindLoops(dict_depend, arg):
+    """Find all simple loops in oriented graph.
+
+    First, remove all nodes that are not present in any loop.
+    Then search for all loops in what has remained.
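+    For example (illustrative): if A requires B, B requires C and C
+    requires A, while D merely requires A, then D is pruned as non-cyclic
+    and the search reports the single simple loop A -> B -> C -> A.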
+    """
+    ifnotquiet = arg.quiet
+    ifverbose = arg.verbose
+    file_output = arg.output
+
+    benchtime = time.clock()
+    (dict_in, dict_out) = BuildGraph(dict_depend)
+    RemoveNonCycle(dict_in, dict_out, arg)
+    if ifnotquiet:
+        benchtime1 = time.clock() - benchtime
+        print _("Worktime: %s seconds") % str(benchtime1)
+    G = DiGraph()
+    for pkg1 in dict_in:
+        for pkg2 in dict_in[pkg1]:
+            G.add_edge(pkg1, pkg2)
+    if ifnotquiet:
+        print _("Searching loops.")
+    loops = simple_cycles(G)
+    if ifnotquiet:
+        benchtime2 = time.clock() - benchtime
+        print _("End of search.")
+        print _("Loops search: %s seconds") % str(benchtime2)
+
+    if ifverbose:
+        i = 1
+        print _("Total: %s loops.") % str(len(loops))
+        for loop in loops:
+            beg = 1
+            for pkg in loop:
+                if beg:
+                    beg = 0
+                    tmpstr = _("Loop ") + str(i) + ": " + pkg
+                else:
+                    tmpstr = tmpstr + " -> " + pkg
+            print tmpstr
+            i = i + 1
+
+    return loops
+
+def FindAlternatives(dict_provides, arg):
+    """Find Alternatives.
+
+    Select all phrases that are provided by more than one package.
+    """
+    ifverbose = arg.verbose
+    ifnotquiet = arg.quiet
+
+    if (ifnotquiet):
+        print _("Searching alternatives.")
+    altlist = {}
+    for phrase in dict_provides:
+        if len(dict_provides[phrase]) > 1:
+            altlist[phrase] = []
+            for (packagename, r1, r2) in dict_provides[phrase]:
+                altlist[phrase].append(packagename)
+
+    if ifverbose:
+        length = len(altlist)
+        i = 1
+        sorted_list = sorted(altlist)
+        print _("Total: %d alternatives.") % length
+        for phrase in sorted_list:
+            print _("Alternative ") + str(i) + ": " + phrase + _(" is provided by:")
+            for packagename in altlist[phrase]:
+                print " -> " + packagename
+            i = i + 1
+
+    if (ifnotquiet):
+        print _("End of search.")
+    return altlist
+
+def FindBroken(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg):
+    """Find Broken packages.
+
+    Select all broken packages: packages with unprovided dependencies,
+    and packages that depend on packages with unprovided dependencies.
+    """
+    startlist = []
+    for packagename in dict_depend:
+        for pkg in dict_depend[packagename]:
+            if dict_depend[packagename][pkg] == 2:
+                if packagename not in startlist:
+                    startlist.append(packagename)
+    return RemakeDicts(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg, startlist)
+
+def AssignDictColors(tmpdict):
+    """Assign color for every loop.
+    """
+    length = len(tmpdict)
+    colors = []
+    for i in range(length):
+        colors.append((i * 1.) / length)
+    return colors
+
+def get_temp(i, arg):
+    """Get numbered temporary directory name.
+    """
+    return arg.tmp_dir + tmp_cross_path + str(i) + '/'
+
+def PkgCheck(pkgname, dict_asks, dict_cross_asks):
+    """Check that PKG from --requires-recursive or --whatrequires exists in the repository.
+
+    Searches for PKG among file names and package names from the repository.
+    """
+    if pkgname in dict_asks:
+        return pkgname
+    else:
+        for filename in dict_asks:
+            if (pkgname == dict_asks[filename][1]):
+                return filename
+
+    if pkgname in dict_cross_asks:
+        return pkgname
+    else:
+        for filename in dict_cross_asks:
+            if (pkgname == dict_cross_asks[filename][1]):
+                return filename
+    return None
+
+def RemakeAsks(startlist, dict_asks, dict_depend, dict_cross_asks, arg, ifbroken):
+    """Select needed packages, so we can rebuild everything else.
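+    Starting from startlist, the dependency graph is walked level by
+    level, breadth-first: over dict_out for --broken and --whatrequires,
+    over dict_in for --requires-recursive.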
+ """ + ifwhatrequires = arg.whatrequires + ifrequires_recursive = arg.requires_recursive + ifnotquite = arg.quiet + ifverbose = arg.verbose + + (dict_in, dict_out) = BuildGraph(dict_depend) + if (ifbroken != None): + dict_tmp = dict_out + elif (ifwhatrequires): + dict_tmp = dict_out + elif (ifrequires_recursive): + dict_tmp = dict_in + + list_selected = [] + list_selected.extend(startlist) + list_append = [] + list_append.extend(startlist) + if (ifnotquite): + if (ifbroken != None): + print _("Searching for broken packages.") + if (ifverbose): + sorted_list = sorted(startlist) + for pkgname in sorted_list: + print " -> " + pkgname + elif (ifrequires_recursive): + print _("Searching for packages REQUIRED by ") + startlist[0] + elif (ifwhatrequires): + print _("Searching for packages that REQUIRE ") + startlist[0] + #select what we need, show what we have found (if --verbose option is used) + level_cnt = 0 + ischanged = 1 + while (ischanged == 1): + if (ifverbose): + if (level_cnt > 0): + if (ifnotquite): + print _("Level %d dependency.") % level_cnt + for tmppkg in list_append: + print " -> " + tmppkg + + ischanged = 0 + tmp_append = [] + #check for every filename in custody if it in list_selected. + for name in list_append: + if name in dict_tmp: + for tmpname in dict_tmp[name]: + #if we haven't met it yet - put it undet custody + if (tmpname not in list_selected) and (tmpname not in tmp_append): + tmp_append.append(tmpname) + ischanged = 1 + + list_selected.extend(list_append) + list_append = tmp_append + level_cnt = level_cnt + 1 + #remove what has remained unselected + new_dict_asks = {} + new_dict_cross_asks = {} + for filename in list_selected: + if filename in dict_asks: + new_dict_asks[filename] = dict_asks[filename] + else: + if not filename in dict_cross_asks: + new_dict_asks[filename] = [[], ""] + else: + new_dict_cross_asks[filename] = dict_cross_asks[filename] + return (new_dict_asks, new_dict_cross_asks) + +def RemoveExternal(dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, ifshow): + """Remove dependecies external to group. + """ + new_dict_asks = {} + new_dict_provides = {} + for filename in dict_asks: + new_dict_asks[filename] = ([], filename) + for asks in dict_asks[filename][0]: + if asks[0] in dict_provides: + found = 0 + for pkg in dict_provides[asks[0]]: + if pkg[0] in dict_asks: + found = 1 + if asks[0] not in new_dict_provides: + new_dict_provides[asks[0]] = [] + if not pkg in new_dict_provides[asks[0]]: + new_dict_provides[asks[0]].append(pkg) + if (found == 1): + new_dict_asks[filename][0].append(asks) + elif asks[0] in dict_cross_provides: + new_dict_asks[filename][0].append(asks) + elif ifshow: + new_dict_asks[filename][0].append(asks) + + for filename in dict_cross_asks: + for asks in dict_cross_asks[filename][0]: + if asks[0] in dict_provides: + for pkg in dict_provides[asks[0]]: + if pkg[0] in dict_asks: + if asks[0] not in new_dict_provides: + new_dict_provides[asks[0]] = [] + if not pkg in new_dict_provides[asks[0]]: + new_dict_provides[asks[0]].append(pkg) + + return (new_dict_asks, new_dict_provides) + +def RemakeDicts(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg, brokenlist=None): + """Procedure for rebuilding packages lists. 
+ + for --whatrequires and --requires-recursive options + and for --broken option + """ + ifnotquiet = arg.quiet + whatrequires = arg.whatrequires + requires_recursive = arg.requires_recursive + ifshow = arg.unprovided or arg.broken + + if (ifnotquiet): + print _("Remaking structures.") + if (brokenlist == None): + if (whatrequires): + pkgname = whatrequires[0] + else: + pkgname = requires_recursive[0] + filename = PkgCheck(pkgname, dict_asks, dict_cross_asks) + if (whatrequires): + arg.whatrequires[0] = filename + else: + arg.requires_recursive[0] = filename + if (not filename): + print _("Error: can't find package name or filename \"") + pkgname + "\"." + exit_proc(arg) + startlist = [filename] + else: + startlist = brokenlist + + (dict_asks, dict_cross_asks) = RemakeAsks(startlist, dict_asks, dict_depend, dict_cross_asks, arg, brokenlist) + (new_dict_asks, new_dict_provides) = RemoveExternal(dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, ifshow) + (new_dict_cross_asks, new_dict_cross_provides) = RemoveExternal(dict_cross_asks, dict_cross_provides, dict_asks, dict_provides, ifshow) + (dict_depend, count_depend) = FindDepend(new_dict_provides, new_dict_asks, new_dict_cross_provides, new_dict_cross_asks, arg) + return (dict_depend, count_depend, new_dict_asks, new_dict_provides, new_dict_cross_asks, new_dict_cross_provides) + +def main(args): + #define arguments namespace + arg = ParseCommandLine() + ifnotquiet = arg.quiet + ifverbose = arg.verbose + ifnograph = arg.nograph + ifrequires_recursive = arg.requires_recursive + ifwhatrequires = arg.whatrequires + ifloops = arg.loops + ifalternatives = arg.alternatives + ifbroken = arg.broken + ifoptact = ifloops or ifalternatives or ifbroken + ifunprovided = arg.unprovided + + arg.crossurl = [] + arg.tmp_dir = "" + if (arg.output): + file_output = arg.output[0] + else: + file_output = default_output + arg.output = None + if (not ifnotquiet) and (not ifverbose) and (ifnograph): + print _("Do not use -q/--quiet and -n/--nograph without -v/--verbose together.") + print _("That way there is no information to output anywhere. 
Nothing will be done.") + exit_proc(arg) + if (ifunprovided and ifbroken): + print _("Do not use -u/--unprovided and -b/--broken options together.") + print _("-b does everything that do -u and a little more.") + exit_proc(arg) + arg.repository = arg.repository[0] + arg.repository = CheckURLPATH(arg.repository, arg) + if (arg.cross): + crossrange = range(len(arg.cross)) + for i in crossrange: + arg.crossurl.append(CheckURLPATH(arg.cross[i], arg)) + CheckOptions(arg) + arg.tmp_dir = tempfile.mkdtemp() + '/' + #get all needed files + GetFile(arg.repository, synthesis_arch, arg.tmp_dir, arg) + PrepareSynthFile(arg.tmp_dir, arg) + if (arg.cross): + for i in crossrange: + temp_subdir = get_temp(i, arg) + GetFile(arg.crossurl[i], synthesis_arch, temp_subdir, arg) + PrepareSynthFile(temp_subdir, arg) + + #generate dictionaries + dict_provides = {} + dict_asks = {} + dict_cross_provides = {} + dict_cross_asks = {} + ParseSynthFile(dict_provides, dict_asks, arg.tmp_dir, arg) + if (arg.cross): + for i in crossrange: + temp_subdir = get_temp(i, arg) + ParseSynthFile(dict_cross_provides, dict_cross_asks, temp_subdir, arg) + (dict_depend, count_depend) = FindDepend(dict_provides, dict_asks, dict_cross_provides, dict_cross_asks, arg) + + if (ifrequires_recursive or ifwhatrequires): + answer = RemakeDicts(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg) + if (answer): + (dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides) = answer + + arg.output = file_output + CheckOutput(arg) + if (ifoptact): ##REMAKE (MUTUALLY EXCLUSIVE) + if (ifloops): + loops = FindLoops(dict_depend, arg) + if (ifnograph): + exit_proc(arg) + colors = AssignDictColors(loops) + OutputLoopGraph(loops, colors, arg) + elif (ifalternatives): + alternatives = FindAlternatives(dict_provides, arg) + if ifnograph: + exit_proc(arg) + colors = AssignDictColors(alternatives) + OutputAltGraph(alternatives, colors, arg) + elif (ifbroken): + brokengraph = FindBroken(dict_depend, count_depend, dict_asks, dict_provides, dict_cross_asks, dict_cross_provides, arg) + if ifnograph: + exit_proc(arg) + dict_color = AssignColors(brokengraph[0], brokengraph[1], arg) + OutputGraph(brokengraph[0], dict_color, arg) + else: + if ifnograph: + exit_proc(arg) + dict_color = AssignColors(dict_depend, count_depend, arg) + OutputGraph(dict_depend, dict_color, arg) + + exit_proc(arg) + +if __name__ == "__main__": + main(sys.argv) diff --git a/urpm-tools/urpm-repomanage.py b/urpm-tools/urpm-repomanage.py new file mode 100755 index 0000000..7437f3a --- /dev/null +++ b/urpm-tools/urpm-repomanage.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +''' +" Repomanage utility for distributions using urpm +" +" The tool traverses a directory, build a dict of +" foo[(name, arch)] = [/path/to/file/that/is/highest, /path/to/equalfile] +" and then reports newest/old packages +" +" Based on repomanage from yum-utils +" +" Copyright (C) 2011 ROSA Laboratory. +" Written by Denis Silakov +" +" This program is free software: you can redistribute it and/or modify +" it under the terms of the GNU General Public License or the GNU Lesser +" General Public License as published by the Free Software Foundation, +" either version 2 of the Licenses, or (at your option) any later version. +" +" This program is distributed in the hope that it will be useful, +" but WITHOUT ANY WARRANTY; without even the implied warranty of +" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +" GNU General Public License for more details. +" +" You should have received a copy of the GNU General Public License +" and the GNU Lesser General Public License along with this program. +" If not, see . +''' + +import os +import sys +import rpm +import fnmatch +import subprocess +import string +from rpm5utils import miscutils, arch, transaction, Rpm5UtilsError +import urpmmisc + +import argparse + +import gettext +gettext.install('urpm-tools') + + +def errorprint(stuff): + print >> sys.stderr, stuff + + +def getFileList(path, ext, filelist): + """Return all files in path matching ext, store them in filelist, recurse dirs + return list object""" + + extlen = len(ext) + try: + dir_list = os.listdir(path) + except OSError, e: + errorprint(_('Error accessing directory %(path)s, %(e)s') % {"path": path,"e": str(e)}) + return [] + + for d in dir_list: + if os.path.isdir(path + '/' + d): + filelist = getFileList(path + '/' + d, ext, filelist) + else: + if string.lower(d[-extlen:]) == '%s' % (ext): + newpath = os.path.normpath(path + '/' + d) + filelist.append(newpath) + + return filelist + + +def trimRpms(rpms, excludeGlobs): + badrpms = [] + for fn in rpms: + for glob in excludeGlobs: + if fnmatch.fnmatch(fn, glob): + #~ print 'excluded: %s' % fn + if fn not in badrpms: + badrpms.append(fn) + for fn in badrpms: + if fn in rpms: + rpms.remove(fn) + + return rpms + + +def parseargs(args): + parser = argparse.ArgumentParser(description=_('manage a directory of rpm packages and report newest or oldest packages')) + + # new is only used to make sure that the user is not trying to get both + # new and old, after this old and not old will be used. + # (default = not old = new) + parser.add_argument("path", metavar="path", + help=_('path to directory with rpm packages')) + group = parser.add_mutually_exclusive_group(); + group.add_argument("-o", "--old", default=False, action="store_true", + help=_('print the older packages')) + group.add_argument("-n", "--new", default=False, action="store_true", + help=_('print the newest packages (this is the default behavior)')) + parser.add_argument("-r", "--remove-old", default=False, action="store_true", + help=_('remove older packages')) + parser.add_argument("-s", "--space", default=False, action="store_true", + help=_('space separated output, not newline')) + parser.add_argument("-k", "--keep", default=1, dest='keep', action="store", + help=_('number of newest packages to keep - defaults to 1')) + parser.add_argument("-c", "--nocheck", default=0, action="store_true", + help=_('do not check package payload signatures/digests')) + group_log = parser.add_mutually_exclusive_group(); + group_log.add_argument("-q", "--quiet", default=0, action="store_true", + help=_('be completely quiet')) + group_log.add_argument("-V", "--verbose", default=False, action="store_true", + help=_('be verbose - say which packages are decided to be old and why \ + (this info is dumped to STDERR)')) + + opts = parser.parse_args() + + return opts + + +def main(args): + + options = parseargs(args) + mydir = options.path + + rpmList = [] + rpmList = getFileList(mydir, '.rpm', rpmList) + verfile = {} + pkgdict = {} # hold all of them - put them in (n,a) = [(e,v,r),(e1,v1,r1)] + + keepnum = int(options.keep)*(-1) # the number of items to keep + + if len(rpmList) == 0: + errorprint(_('No files to process')) + sys.exit(1) + + ts = rpm.TransactionSet() + if options.nocheck: + ts.setVSFlags(~(rpm._RPMVSF_NOPAYLOAD)) + else: + ts.setVSFlags(~(rpm.RPMVSF_NOMD5|rpm.RPMVSF_NEEDPAYLOAD)) + 
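+
+ # The loop below fills two structures: pkgdict maps (name, arch) to a
+ # list of (epoch, version, release, distepoch) tuples, and verfile
+ # maps each full package tuple back to the file path(s) it came from.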
+ for pkg in rpmList: + try: + hdr = miscutils.hdrFromPackage(ts, pkg) + except Rpm5UtilsError, e: + msg = _("Error opening pkg %(pkg)s: %(err)s") % {"pkg": pkg, "err": str(e)} + errorprint(msg) + continue + + pkgtuple = miscutils.pkgDistTupleFromHeader(hdr) + (n,a,e,v,r,d) = pkgtuple + del hdr + + if (n,a) not in pkgdict: + pkgdict[(n,a)] = [] + pkgdict[(n,a)].append((e,v,r,d)) + + if pkgtuple not in verfile: + verfile[pkgtuple] = [] + verfile[pkgtuple].append(pkg) + + for natup in pkgdict.keys(): + evrlist = pkgdict[natup] + if len(evrlist) > 1: + evrlist = urpmmisc.unique(evrlist) + evrlist.sort(miscutils.compareDEVR) + pkgdict[natup] = evrlist + + del ts + + # now we have our dicts - we can return whatever by iterating over them + + outputpackages = [] + + # a flag indicating that old packages were found + old_found = 0 + + #if new + if not options.old: + for (n,a) in pkgdict.keys(): + evrlist = pkgdict[(n,a)] + + if len(evrlist) < abs(keepnum): + newevrs = evrlist + else: + newevrs = evrlist[keepnum:] + if len(evrlist[:keepnum]) > 0: + old_found = 1 + if options.remove_old: + for dropped in evrlist[:keepnum]: + (e,v,r,d) = dropped + pkg = str(verfile[(n,a,e,v,r,d)]).replace("['","").replace("']","") + subprocess.call(["rm", pkg]) + if options.verbose: + for dropped in evrlist[:keepnum]: + (e,v,r,d) = dropped + print >> sys.stderr, _("Dropped ") + str(verfile[(n,a,e,v,r,d)]) + print >> sys.stderr, _(" superseded by: ") + for left in newevrs: + (e,v,r,d) = left + print >> sys.stderr, " " + str(verfile[(n,a,e,v,r,d)]) + + for (e,v,r,d) in newevrs: + for pkg in verfile[(n,a,e,v,r,d)]: + outputpackages.append(pkg) + + if options.old: + for (n,a) in pkgdict.keys(): + evrlist = pkgdict[(n,a)] + + if len(evrlist) < abs(keepnum): + continue + + oldevrs = evrlist[:keepnum] + if len(oldevrs) > 0: + old_found = 1 + for (e,v,r,d) in oldevrs: + for pkg in verfile[(n,a,e,v,r,d)]: + outputpackages.append(pkg) + if options.remove_old: + subprocess.call(["rm", "-f", pkg]) + if options.verbose: + print >> sys.stderr, _("Dropped ") + pkg + print >> sys.stderr, _(" superseded by: ") + for left in evrlist[keepnum:]: + (e,v,r,d) = left + print >> sys.stderr, " " + str(verfile[(n,a,e,v,r,d)]) + + if not options.quiet: + outputpackages.sort() + for pkg in outputpackages: + if options.space: + print '%s' % pkg, + else: + print pkg + + if old_found==1: + sys.exit(3) + +if __name__ == "__main__": + main(sys.argv) diff --git a/urpm-tools/urpm-reposync.py b/urpm-tools/urpm-reposync.py new file mode 100755 index 0000000..18d8cae --- /dev/null +++ b/urpm-tools/urpm-reposync.py @@ -0,0 +1,1223 @@ +#!/usr/bin/python2.7 +''' +Created on Jan 11, 2012 + +@author: flid +''' + +import rpm +import argparse +import sys +import subprocess +import re +import os +from urllib2 import urlopen, HTTPError, URLError +import zlib +import glob +import shutil +import platform +import copy +import unittest + +import gettext +gettext.install('urpm-tools') + + +ARCH = platform.machine() +downloaded_rpms_dir = '/tmp/urpm-reposync.rpms' +VERSION = "urpm-reposync 2.1" + +def vprint(text): + '''Print the message only if verbose mode is on''' + if(command_line.verbose): + print(text) + +def qprint(text): + '''Print the message only if quiet mode is off and 'printonly' is off''' + if command_line.printonly: + return + if(not command_line.quiet): + print(text) + + +def eprint(text, fatal=False, code=1): + '''Print the message to stderr. 
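+
+ Hypothetical usage:
+     eprint('could not parse synthesis list')            # warn, go on
+     eprint('no active media found', fatal=True, code=2) # warn, exit(2)
+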
Exit if fatal''' + print >> sys.stderr, text + if (fatal): + exit(code) + +def oprint(text): + '''Print the message only if quiet mode is off''' + if(not command_line.quiet): + print(text) + + +def get_command_output(command, fatal_fails=True): + '''Execute command using subprocess.Popen and return its stdout output string. If return code is not 0, print error message and exit''' + vprint("Executing command: " + str(command)) + res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output = list(res.communicate()) + if sys.stdout.encoding: + output[0] = output[0].decode(sys.stdout.encoding).encode("UTF-8") + output[1] = output[1].decode(sys.stdout.encoding).encode("UTF-8") + if(res.returncode != 0 and fatal_fails): + eprint(_("Error while calling command") + " '" + " ".join(command) + "'") + if(output[1] != None or output[0] != None): + eprint(_("Error message: \n")+ ((output[0].strip() + "\n") if output[0]!=None else "") + + (output[1].strip() if output[1]!=None else "") ) + exit(1) + return [output[0], output[1], res.returncode] + + +def parse_command_line(): + global command_line + arg_parser = argparse.ArgumentParser(description=_('reposync is used to synchronize a set of packages on the local computer with the remote repository.')) + + arg_parser.add_argument('--include-media', '--media', action='append',nargs = '+', help=_("Use only selected URPM media")) + arg_parser.add_argument('--exclude-media', action='append',nargs = '+', help=_("Do not use selected URPM media")) + #arg_parser.add_argument('-x', '--exclude-packages', action='store',nargs = '+', help="Exclude package(s) by regex") + arg_parser.add_argument('-v', '--verbose', action='store_true', help=_("Verbose (print additional info)")) + arg_parser.add_argument('-q', '--quiet', action='store_true', help=_("Quiet operation. Senseless without --auto.")) + arg_parser.add_argument('-a', '--auto', action='store_true', help=_("Do not ask questions, just do it!")) + arg_parser.add_argument('-p', '--printonly', action='store_true', help=_("Only print the list of actions to be done and do nothing more!")) + arg_parser.add_argument('-d', '--download', action='store_true', help=_("Only download the rpm files, but install or remove nothing.")) + #arg_parser.add_argument('-n', '--noremove', action='store_true', help=_("Do not remove packages at all. If some installed package prevent another package from beeing updated - do not update it.")) + arg_parser.add_argument('-r', '--remove', action='store_true', help=_("Remove all the packages which do not present in repository. 
By default, only some of them would be removed.")) + arg_parser.add_argument('-c', '--check', action='store_true', help=_("Download packages and check wether they can be installed to your system, but do not install them.")) + arg_parser.add_argument('-k', '--nokernel', action='store_true', help=_("Do nothing with kernels.")) + arg_parser.add_argument('--runselftests', action='store_true', help=_("Run self-tests end exit.")) + arg_parser.add_argument('--detailed', action='store_true', help=_("Show detailed information about packages are going to be removed or installed (why does it have to be done)")) + + command_line = arg_parser.parse_args(sys.argv[1:]) + if(command_line.quiet and not command_line.auto): + eprint(_("It's senseless to use --quiet without --auto!"), fatal=True, code=2) + + if command_line.verbose: + command_line.detailed = True + + + +cmd = ['urpmq'] + + +class MediaSet(object): + def __init__(self): + global cmd + self.urls = [] + self.media = {} + self.by_url = {} + vprint("Loading media urls...") + lines = get_command_output(cmd + ["--list-url", "--list-media", 'active'])[0].strip().split("\n") + + for line in lines: + parts = line.split(" ") + medium = ' '.join(parts[:-1]) + url = parts[-1] + if(url.endswith("/")): + url = url[:-1] + if(url.find('/') != -1): + self.media[medium] = url + self.by_url[parts[-1]] = medium + self.urls.append(url) + vprint("Media urls: " + str(self.urls)) + + +class NEVR: + EQUAL = rpm.RPMSENSE_EQUAL #8 + GREATER = rpm.RPMSENSE_GREATER #4 + LESS = rpm.RPMSENSE_LESS #2 + #re_ver = re.compile('^([\d\.]+:)?([\w\d\.\-\[\]]+)(:[\d\.]+)?$') + + re_dep_ver = re.compile('^([^ \[\]]+)\[([\>\<\=\!]*) ([^ ]+)\]$') + re_dep = re.compile('^([^ \[\]]+)$') + types = {None: 0, + '==' : EQUAL, + '' : EQUAL, + '=' : EQUAL, + '>=' : EQUAL|GREATER, + '<=' : EQUAL|LESS, + '>' : GREATER, + '<' : LESS, + '!=' : LESS|GREATER, + '<>' : LESS|GREATER} + + def __init__(self, N, EVR, DE=None, DT=None, FL=None, E=None): + self.N = N + self.EVR = EVR + self.DE = DE + self.DT = DT + self.FL = FL + self.E = E + self.VR = EVR + + if E and EVR.startswith(E + ':'): + self.VR = EVR[len(E)+1:] + if E and not EVR.startswith(E + ':'): + self.EVR = E + ':' + self.EVR + + #try to get E + if not self.E and self.EVR and self.EVR.find(':') != -1: + items = self.EVR.split(':') + if items[0].find('.') == -1 and items[0].find('-') == -1: + self.E = items[0] + if not self.E and self.EVR: + self.E = '0' + self.EVR = '0:' + self.EVR + + if self.DE == 'None': + self.DE = None + + def __str__(self): + if self.FL: + for t in NEVR.types: + if not t: + continue + if NEVR.types[t] == self.FL: + return "%s %s %s" % (self.N, t, self.EVR) + if self.EVR: + return "%s == %s" % (self.N, self.EVR) + + return "%s" % (self.N) + + def __repr__(self): + return self.__str__() + + def __eq__(self, val): + if not isinstance(val, NEVR): + raise Exception("Internal error: comparing between NEVR and " + str(type(val))) + return str(self) == str(val) + + def __ne__(self, val): + return not (self == val) + + @staticmethod + def from_depstring(s, DE_toremove=None): + s = s.replace('[*]', '') + + if DE_toremove: + res = NEVR.re_dep_ver.match(s) + if res: + (name, t, val) = res.groups() + + if val.endswith(':' + DE_toremove): + val = val[:-(len(DE_toremove) + 1)] + EVR = '%s[%s %s]' % (name, t, val) + + res = NEVR.re_dep.match(s) + if res: + return NEVR(res.group(1), None) + + res = NEVR.re_dep_ver.match(s) + + if not res: + raise Exception('Incorrect requirement string: ' + s) + (name, t, val) = res.groups() + + return 
NEVR(name, val, FL=NEVR.types[t]) + + + re_version = re.compile("(\.)?((alpha)|(cvs)|(svn)|(r))?\d+((mdv)|(mdk)|(mnb))") + @staticmethod + def from_filename(rpmname, E=None): + ''' Returns [name, version] for given rpm file or package name ''' + suffix = ['.x86_64', '.noarch'] + ['.i%s86' % i for i in range(3,6)] + for s in suffix: + if(rpmname.endswith(s)): + rpmname = rpmname[:-len(s)] + + sections = rpmname.split("-") + if(NEVR.re_version.search(sections[-1]) == None): + name = sections[:-3] + version = sections[-3:-1] + else: + name = sections[:-2] + version = sections[-2:] + return NEVR("-".join(name), "-".join(version), FL=NEVR.EQUAL, E=E) + + def satisfies(self, val): + if self.N != val.N: + return False + + if self.EVR == None or val.EVR == None: + return True + + (pname, pt, pval) = (self.N, self.FL, self.EVR) + (rname, rt, rval) = (val.N, val.FL, val.EVR) + + def cut_part(seperator, val1, val2): + if val1 and val2 and val1.count(seperator) != val2.count(seperator): + n = max(val1.count(seperator), val2.count(seperator)) + val1 = seperator.join(val1.split(seperator)[:n]) + val2 = seperator.join(val2.split(seperator)[:n]) + return (val1, val2) + + (rval, pval) = cut_part(':', rval, pval) + (rval, pval) = cut_part('-', rval, pval) + + res = rpm.evrCompare(rval, pval) + + if res == 1: # > + if pt & NEVR.GREATER: + return True + elif pt & NEVR.LESS: + if rt & NEVR.LESS: + return True + else: + return False + else: + if rt & NEVR.LESS: + return True + else: + return False + + elif res == 0: + if rt & NEVR.EQUAL and pt & NEVR.EQUAL: + return True + if rt & NEVR.LESS and pt & NEVR.LESS: + return True + if rt & NEVR.GREATER and pt & NEVR.GREATER: + return True + return False + + else: # < + if rt & NEVR.GREATER: + return True + elif rt & NEVR.LESS: + if pt & NEVR.LESS: + return True + else: + return False + else: + if pt & NEVR.LESS: + return True + else: + return False + + +class PackageSet: + tags = ['provides','requires','obsoletes','suggests', 'conflicts'] + alltags = tags + ['nevr', 'arch'] + def __init__(self): + self.what = {} + self.packages = {} + + def load_from_system(self): + qprint(_("Loading the list of installed packages...")) + ts = rpm.TransactionSet() + mi = ts.dbMatch() + + for tag in PackageSet.tags: + self.what[tag] = {} + + for h in mi: + name = h['name'] + if(name == 'gpg-pubkey'): + continue + if(name not in self.packages): + self.packages[h['name']] = {} + else: + qprint(_("Duplicating ") + name + '-' + h['version'] + '-' + h['release']) + qprint(_("Already found: ") + name + '-' + self.packages[name]["nevr"].EVR) + + E = str(h['epoch']) + V = h['version'] + R = h['release'] + DE = h['distepoch'] + DT = h['disttag'] + + if E == None or E == 'None': + E = '0' + + EVR = "%s:%s-%s" % (E, V, R) + + nevr = NEVR(name, EVR, FL=NEVR.EQUAL, DE=DE, DT=DT, E=E) + self.packages[name]['nevr'] = nevr + self.packages[name]['arch'] = h['arch'] + + for tag in PackageSet.tags: + if tag not in self.packages[name]: + self.packages[name][tag] = [] + dss = h.dsFromHeader(tag[:-1] + 'name') + for s in dss: + fl = s.Flags() + #undocumented flag for special dependencies + if fl & 16777216: + continue + fl = fl % 16 + + _evr = s.EVR() + + if _evr == '': + evr = NEVR(s.N(), None, FL=fl) + else: + evr = NEVR(s.N(), _evr, FL=fl) + + self.packages[name][tag].append(evr) + + if evr.N not in self.what[tag]: + self.what[tag][evr.N] = [] + self.what[tag][evr.N].append((name, evr)) + + def load_from_repository(self): + url_by_synthesis_url = {} + global fields + + def get_synthesis_by_url(url): + 
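+ # Local media (file:// URLs or plain local paths) reuse the synthesis
+ # list urpmi already keeps under /var/lib/urpmi/<medium>/, while for
+ # remote media media_info/synthesis.hdlist.cz is fetched from the
+ # mirror itself.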
if url.startswith('file://'): + url = url[6:] + if url.startswith('/'): + medium = ms.by_url[url] + return '/var/lib/urpmi/%s/synthesis.hdlist.cz' % medium + else: + return url + "/media_info/synthesis.hdlist.cz" + + medium_by_synth = {} + synthesis_lists = [] + for url in ms.urls: + synth = get_synthesis_by_url(url) + synthesis_lists.append(synth) + url_by_synthesis_url[synth] = url + medium_by_synth[synth] = ms.by_url[url] + + def clear_data(): + '''Clears the data of the current package from 'fields' dictionary''' + global fields + fields = {"provides":[], "requires":[], "obsoletes":[], "suggests":[], + "conflicts":[], "info":[], "summary":[]} + arches32 = ['i%d86' for i in range(3,6)] + for tag in PackageSet.tags: + self.what[tag] = {} + + #the following code is awful, I know. But it's easy-to-understand and clear. + # don't like it - write better and send me :) + for synthesis_list in synthesis_lists: + try: + #print synthesis_list + qprint(_("Processing medium ") + medium_by_synth[synthesis_list] + "...") + vprint(synthesis_list) + if(synthesis_list.startswith("http://") or synthesis_list.startswith("ftp://")): + r = urlopen(synthesis_list) + s = r.read() + r.close() + elif(synthesis_list.startswith("rsync://")): + tmppath = '/tmp/urpm-reposync.synthesis_lists' + if (not os.path.exists(tmppath)): + os.mkdir(tmppath) + filename = tmppath + '/' + os.path.basename(synthesis_list) + os.system("rsync --copy-links %s %s 1>/dev/null 2>&1" % (synthesis_list, filename)) + r = open(filename) + s = r.read() + r.close() + shutil.rmtree(tmppath) + elif(synthesis_list.startswith("/")): #local file + if not os.path.exists(synthesis_list): + eprint(_('Could not read synthesis file. (File %s not found)') % synthesis_list) + continue + r = open(synthesis_list) + s = r.read() + r.close() + res = subprocess.Popen(['gzip', '-d'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output = res.communicate(s) + clear_data() + for line in output[0].split('\n'): + if(line == ''): # there can be empty lines + continue + + items = line.split("@") + data = [x.strip() for x in items[2:]] + fields[items[1]] = data + + if(items[1] == "info"): + rpmname = items[2] + size = int(items[4]) + nevr = NEVR.from_filename(items[2], E=items[3]) + nevr.E = items[3] + + disttagepoch = '-' + if(len(items)>6): + disttagepoch = items[6] + nevr.DT = items[6] + if(len(items)>7): + disttagepoch += items[7] + nevr.DE = items[7] + + arch = items[2].split('.')[-1] + if arch in arches32 and ARCH in arches: + arch = ARCH + + in_repo = nevr.N in self.packages + new_arch_correct = arch == ARCH + if in_repo: + ver_newer = rpm.evrCompare(nevr.EVR, self.packages[nevr.N]['nevr'].EVR) == 1 + old_arch_correct = self.packages[nevr.N]['arch'] == ARCH + else: + ver_newer = None + old_arch_correct = None + + + toinst = not in_repo or (not old_arch_correct and new_arch_correct) or \ + (ver_newer and old_arch_correct == new_arch_correct) + + if(nevr.N not in self.packages): + self.packages[nevr.N] = {} + + if toinst: + self.packages[nevr.N]['nevr'] = nevr + self.packages[nevr.N]["arch"] = arch + self.packages[nevr.N]["synthesis_list"] = synthesis_list + self.packages[nevr.N]["filename"] = rpmname + self.packages[nevr.N]["size"] = size + for tag in PackageSet.tags: + self.packages[nevr.N][tag] = [] + for item in fields[tag]: + if item == '': + continue + dep = NEVR.from_depstring(item, DE_toremove=nevr.DE) + self.packages[nevr.N][tag].append(dep) + if dep.N not in self.what[tag]: + self.what[tag][dep.N] = [] + 
self.what[tag][dep.N].append((nevr.N, dep)) + + self.packages[nevr.N]['medium'] = medium_by_synth[synthesis_list] + clear_data() + except (HTTPError,URLError): + eprint(_("File can not be processed! Url: ") + synthesis_list) + + + def whattag(self, tag, val): + if val.N not in self.what[tag]: + return [] + found = [] + for (pkg, dep) in self.what[tag][val.N]: + if dep.satisfies(val): + found.append(pkg) + return found + + def whattag_revert(self, tag, val): + if val.N not in self.what[tag]: + return [] + found = [] + for (pkg, dep) in self.what[tag][val.N]: + if val.satisfies(dep): + found.append(pkg) + return found + + def whatprovides(self, val): + return self.whattag('provides', val) + + def whatobsoletes(self, val): + return self.whattag_revert('obsoletes', val) + + def whatrequires(self, val): + return self.whattag_revert('requires', val) + + def whatconflicts(self, val): + return self.whattag_revert('conflicts', val) + + def whatrequires_pkg(self, pkg): + found = [] + for req in self.packages[pkg]['provides']: + found += [(d, req) for d in self.whatrequires(req)] + return found + + +to_update = [] +to_downgrade = [] +to_remove = [] +to_remove_pre = [] +to_append = [] +unresolved = {} +to_append_bysource = {} +to_remove_problems = {} +to_remove_saved = [] +files_to_download = [] +#If one of package deps matches this regexp and this package is +#not in the repository - don't try to save this package. +to_remove_force_list = [ + NEVR.from_depstring("plymouth(system-theme)"), + NEVR.from_depstring("mandriva-theme-screensaver"), + ] + + +flags = {rpm.RPMCALLBACK_UNKNOWN:'RPMCALLBACK_UNKNOWN', + rpm.RPMCALLBACK_INST_PROGRESS:'RPMCALLBACK_INST_PROGRESS', + rpm.RPMCALLBACK_INST_START:'RPMCALLBACK_INST_START', + rpm.RPMCALLBACK_INST_OPEN_FILE:'RPMCALLBACK_INST_OPEN_FILE', + rpm.RPMCALLBACK_INST_CLOSE_FILE:'RPMCALLBACK_INST_CLOSE_FILE', + rpm.RPMCALLBACK_TRANS_PROGRESS:'RPMCALLBACK_TRANS_PROGRESS', + rpm.RPMCALLBACK_TRANS_START:'RPMCALLBACK_TRANS_START', + rpm.RPMCALLBACK_TRANS_STOP:'RPMCALLBACK_TRANS_STOP', + rpm.RPMCALLBACK_UNINST_PROGRESS:'RPMCALLBACK_UNINST_PROGRESS', + rpm.RPMCALLBACK_UNINST_START:'RPMCALLBACK_UNINST_START', + rpm.RPMCALLBACK_UNINST_STOP:'RPMCALLBACK_UNINST_STOP', + rpm.RPMCALLBACK_REPACKAGE_PROGRESS:'RPMCALLBACK_REPACKAGE_PROGRESS', + rpm.RPMCALLBACK_REPACKAGE_START:'RPMCALLBACK_REPACKAGE_START', + rpm.RPMCALLBACK_REPACKAGE_STOP:'RPMCALLBACK_REPACKAGE_STOP', + rpm.RPMCALLBACK_UNPACK_ERROR:'RPMCALLBACK_UNPACK_ERROR', + rpm.RPMCALLBACK_CPIO_ERROR:'RPMCALLBACK_CPIO_ERROR', + rpm.RPMCALLBACK_SCRIPT_ERROR:'RPMCALLBACK_SCRIPT_ERROR'} + +rpmtsCallback_fd = None +file_id = 0 +current_file = "NotSet" +def runCallback(reason, amount, total, key, client_data): + global i, file_id, rpmtsCallback_fd, current_file + + if reason in flags: + fl = flags[reason] + #if not fl.endswith('PROGRESS'): + vprint ("rpm_callback was called: %s, %s, %s, %s, %s" %(fl, str(amount), str(total), + str(key), str(client_data))) + if reason == rpm.RPMCALLBACK_INST_OPEN_FILE: + vprint ("Opening file: " + key) + current_file = key + file_id += 1 + qprint("[%d/%d] %s" % (file_id, len(files_to_download), os.path.basename(key))) + rpmtsCallback_fd = os.open(key, os.O_RDONLY) + return rpmtsCallback_fd + if reason == rpm.RPMCALLBACK_UNINST_START: + qprint(_("Removing %s") % os.path.basename(key)) + elif reason == rpm.RPMCALLBACK_INST_START: + vprint ("Closing file") + os.close(rpmtsCallback_fd) + elif reason == rpm.RPMCALLBACK_UNPACK_ERROR or \ + reason == rpm.RPMCALLBACK_CPIO_ERROR or \ + reason == 
rpm.RPMCALLBACK_SCRIPT_ERROR:
+ eprint(_('urpm-reposync: error in package %(cur_file)s. Data: %(data)s') % { 'cur_file': current_file, 'data': "%s; %s, %s, %s, %s" % (flags[reason], str(amount),
+ str(total), str(key), str(client_data))})
+
+
+def get_problem_dependencies(pkg):
+ ''' Get all the packages needed to satisfy dependencies that are provided neither by an installed package nor by a pending action '''
+ global actions
+
+ output = []
+ for req in repository.packages[pkg]['requires']: # for every package requirement
+ pkgs_inst = installed.whatprovides(req)
+ if pkgs_inst:
+ continue #dependency is satisfied by one of the installed packages
+
+ #look for dependency in 'actions'
+ pkgs_rep = repository.whatprovides(req)
+ for provider in pkgs_rep[:]:
+ if provider not in actions:
+ pkgs_rep.remove(provider)
+ if not pkgs_rep:
+ output.append(req)
+
+ vprint("Problem deps for %s: %s" %(pkg, str(output)))
+ return output
+
+
+def resolve_dependency(dep, pkg):
+
+ res = repository.whatprovides(dep)
+ if command_line.nokernel:
+ for provider in res[:]:
+ if provider.startswith('kernel'):
+ res.remove(provider)
+
+ if not res:
+ if pkg not in unresolved:
+ unresolved[pkg] = []
+ if str(dep) not in unresolved[pkg]:
+ unresolved[pkg].append(str(dep))
+ return None
+ res = sorted(res)
+ vprint("Resolved dependencies: " + str(res))
+
+ if pkg not in to_append_bysource:
+ to_append_bysource[pkg] = []
+
+ to_append_bysource[pkg].append(res[0])
+ return res[0]
+
+
+def emulate_install(pkg):
+ global actions
+ vprint('Emulating package installation: ' + pkg)
+
+ url = ms.media[repository.packages[pkg]['medium']]
+ url += '/' + repository.packages[pkg]['filename'] + '.rpm'
+ files_to_download.append(url)
+
+ if pkg not in to_update and pkg not in to_downgrade and pkg not in to_append:
+ to_append.append(pkg)
+
+ if pkg not in installed.packages:
+ installed.packages[pkg] = {}
+
+ for tag in PackageSet.alltags:
+ installed.packages[pkg][tag] = repository.packages[pkg][tag]
+
+ for tag in PackageSet.tags:
+ deps = installed.packages[pkg][tag]
+ for dep in deps:
+ if dep.N not in installed.what[tag]:
+ installed.what[tag][dep.N] = []
+ installed.what[tag][dep.N].append((pkg,dep))
+
+ actions.remove(pkg)
+
+
+def emulate_remove(pkg):
+ vprint("Emulating package removal: " + pkg)
+ if pkg not in installed.packages:
+ vprint("Nothing to remove")
+ return
+ for tag in PackageSet.tags:
+ deps = installed.packages[pkg][tag]
+ for dep in deps:
+ installed.what[tag][dep.N].remove((pkg,dep))
+
+ P = copy.deepcopy(installed.packages[pkg])
+ installed.packages[pkg] = {}
+ installed.packages[pkg]['old_package'] = P
+
+
+def have_to_be_removed(pkg):
+ to_remove_problems[pkg] = []
+ for dep in installed.packages[pkg]['requires']:
+ res = installed.whatprovides(dep)
+ if not res:
+ to_remove_problems[pkg].append(_("\tRequires %s, which will not be installed.") % str(dep))
+ continue
+
+ for dep in installed.packages[pkg]['provides']:
+ res = installed.whatconflicts(dep)
+ if res:
+ to_remove_problems[pkg].append(_("\t%s conflicts with it") % (', '.join(res)))
+
+ for dep in installed.packages[pkg]['conflicts']:
+ res = installed.whatprovides(dep)
+ if res:
+ to_remove_problems[pkg].append(_("\tIt conflicts with %s") % (', '.join(res)))
+ return to_remove_problems[pkg]
+
+
+def process_packages():
+ global actions, to_remove
+
+ qprint("Computing actions list...")
+
+ if command_line.remove:
+ for pkg in to_remove_pre:
+ emulate_remove(pkg)
+ to_remove.append(pkg)
+
+ actions = to_update + to_downgrade
+ actions_backup = actions[:]
+
+ problems = {}
+ changed = True
+ while 
changed: + i = 0 + l = len(actions) + changed = False + for act in actions[:]: + i = i + 1 + vprint('[%d/%d] %s' % (i, l, act)) + prob = get_problem_dependencies(act) + problems[act] = [] + for p in prob: + problems[act].append((p, resolve_dependency(p, act))) + + if problems[act]: + vprint ("\nPROBLEM: %s: %s" % (act, problems[act])) + if not problems[act]: + emulate_remove(act) + emulate_install(act) + changed = True + + vprint ('Problem dependencies found:') + for pr in problems: + if len(problems[pr])>0: + + for prob, resolved in problems[pr]: + if resolved: + vprint ("Package '%s' requires '%s' via dependency '%s'" % (pr, resolved, prob)) + changed = True + if resolved not in actions: + actions.append(resolved) + + if not command_line.remove: + changed = True + while changed: + changed = False + for pkg in to_remove_pre[:]: + vprint("Checking wether to remove " + pkg) + res = have_to_be_removed(pkg) + if res: + vprint("%s have to be removed because:" % (pkg)) + for item in res: + vprint(str(item)) + emulate_remove(pkg) + if not pkg in to_remove: + to_remove.append(pkg) + + if pkg in to_remove_saved: + to_remove_saved.remove(pkg) + changed = True + to_remove_pre.remove(pkg) + else: + if pkg not in to_remove_saved: + to_remove_saved.append(pkg) + + vprint ('Actions left: ' + str(actions)) + if actions: + eprint(_("Some packages can not be installed dew to unresolved dependencies: ")) + for pkg in unresolved: + eprint("%s requires %s" %(pkg, ', '.join(unresolved[pkg]))) + eprint(_("Contact repository maintaiers and send them this information, please."), fatal=True, code=4) + + +def download_packages(): + if not files_to_download: + return + qprint(_('Downloading files...')) + l = len(files_to_download) + i = 0 + for url in files_to_download: + i += 1 + qprint("[%d/%d] %s " %(i, l, os.path.basename(url))) + path = os.path.join(downloaded_rpms_dir, os.path.basename(url)) + if os.path.isfile(path): + continue + try: + if(url.startswith('/')): # local file + shutil.copyfile(url, path) + else: + fd = urlopen(url) + file = open(path, 'w') + file.write(fd.read()) + file.close() + fd.close() + except IOError, e: + eprint("Can not download file %s: %s" % (url, str(e)), fatal=True, code=5) + +def install_packages(): + + def readRpmHeader(ts, filename): + vprint("Reading header of " + filename) + fd = os.open(filename, os.O_RDONLY) + h = ts.hdrFromFdno(fd) + os.close(fd) + return h + + qprint(_("Generating transaction...")) + ts = rpm.TransactionSet() + + # turn all the checks off. They can cause segfault in RPM for now. + ts.setVSFlags(rpm.RPMVSF_NOHDRCHK|rpm.RPMVSF_NOSHA1HEADER|rpm.RPMVSF_NODSAHEADER|rpm.RPMVSF_NORSAHEADER|rpm.RPMVSF_NOMD5|rpm.RPMVSF_NODSA|rpm.RPMVSF_NORSA|rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES) + ts.setProbFilter(rpm.RPMPROB_FILTER_OLDPACKAGE) + + #flags for ts.run execution. 
We need it to speed the process up + ts.setFlags(rpm.RPMTRANS_FLAG_NOFDIGESTS) + + for file in files_to_download: + f = os.path.join(downloaded_rpms_dir, os.path.basename(file)) + h = readRpmHeader(ts, f) + ts.addInstall(h, f, 'u') + + for pkg in to_remove: + ts.addErase(pkg) + + qprint(_("Checking dependencies...")) + def format_dep(dep): + ((name, ver, rel), (namereq, verreq), needsFlags, suggestedPackage, sense) = dep + + vprint (dep) + t = _('requires') + if sense & 1: + t = _('conflicts with') + + s = '' + if needsFlags & rpm.RPMSENSE_LESS: #2 + s = '<' + if needsFlags & rpm.RPMSENSE_EQUAL: #8 + s = '=' + if needsFlags & rpm.RPMSENSE_GREATER: #4 + s = '>' + if needsFlags & rpm.RPMSENSE_NOTEQUAL: #6 + s = '!=' + + if(verreq): + verreq = '[%s %s]' % (s, verreq) + else: + verreq = '' + return _("Package %(name)s-%(ver)s-%(rel)s %(t)s %(namereq)s%(verreq)s") % \ + {'name': name,'ver': ver,'rel': rel,'namereq': namereq,'verreq': verreq, 't': t} + + + unresolved_dependencies = ts.check() + if(unresolved_dependencies): + eprint(_("There are some unresolved dependencies: ") ) + for dep in unresolved_dependencies: + eprint("\t" + format_dep(dep)) + eprint(_("Packages can not be installed. Please, contact urpm-tools developers and provide this output."), fatal=True, code=3) + else: + qprint(_("No errors found in transaction")) + ts.order() + + if command_line.check: + return + qprint(_("Running transaction...")) + ts.run(runCallback, 1) + + +def check_media_set(): + def try_solve_lib_arch(pkgname): + '''if you have lib64A installed, but there is only libA in repository, it have not to be removed. And vice versa''' + if not pkgname.startswith('lib'): + return None + if pkgname in repository.packages: + return None + + is64 = (pkgname[3:5] == '64') + is32 = not is64 + + if is32: + l32 = pkgname + l64 = 'lib64' + pkgname[3:] + else: + l32 = 'lib' + pkgname[5:] + l64 = pkgname + + e32 = (l32 in repository.packages) + e64 = (l64 in repository.packages) + + if(is32 and e64): # you have 32bit version installed, but there is only 64 bit version in repository + if(ARCH=="x86_64"): + return l64 + else: + return # 64bit library can not work in 32bit system + if(is64 and e32): + return l32 + + found = [] + for pkg in to_remove: + res = try_solve_lib_arch(pkg) + if res: + found.append((pkg, res)) + + vprint("The list of libs with incorrect arch in repository: " + str(found)) + if found: + qprint(_("WARNING: Some libraries are going to be removed because there are only the packages with the other architecture in the repository. 
Maybe you missed media with the correct architecture?")) + + +def print_actions(): + if(command_line.quiet): + return + + def count_total_size(): + sum = 0 + for pkg in to_append + to_update + to_downgrade: + sum += repository.packages[pkg]['size'] + return sum + + def bytes_to_human_readable(bytes): + bytes = float(bytes) + if bytes >= 1099511627776: + terabytes = bytes / 1099511627776 + size = '%.2fT' % terabytes + elif bytes >= 1073741824: + gigabytes = bytes / 1073741824 + size = '%.2fG' % gigabytes + elif bytes >= 1048576: + megabytes = bytes / 1048576 + size = '%.2fM' % megabytes + elif bytes >= 1024: + kilobytes = bytes / 1024 + size = '%.2fK' % kilobytes + else: + size = '%.2fb' % bytes + return size + + media = ms.media.keys() + def print_pkg_list(pkglist, tag): + media_contents = {} + for medium in media: + for pkg in pkglist: + if(repository.packages[pkg]['medium'] == medium): + if( medium not in media_contents): + media_contents[medium] = [] + media_contents[medium].append(pkg) + + qprint(" %-30s %-15s %-15s %-10s" %(_('Package Name'), _('Current Version'), _('New Version'), _('Arch'))) + for medium in media_contents: + qprint("(%s %s)" %( _("medium"), medium)) + for pkg in sorted(media_contents[medium]): + nevri = installed.packages[pkg]['nevr'] + nevrr = repository.packages[pkg]['nevr'] + + + if(nevri.E == nevrr.E): + veri = nevri.VR + verr = nevrr.VR + else: + veri = nevri.EVR + verr = nevrr.EVR + + if nevri.DE and nevrr.DE and nevri.DE != nevrr.DE: + veri += '(%s%s) ' % ( nevri.DT, nevri.DE) + verr += '(%s%s) ' % ( nevrr.DT, nevrr.DE) + + oprint("%s %-30s %-15s %-15s %-10s" %(prefix, pkg, veri, verr, installed.packages[pkg]['arch'])) + qprint('') + + prefix = '' + if to_update: + qprint(_("The following packages are going to be upgraded:")) + if command_line.printonly: + prefix = 'U' + print_pkg_list(to_update, 'U') + if to_downgrade: + qprint(_("The following packages are going to be downgraded:")) + if command_line.printonly: + prefix = 'D' + print_pkg_list(to_downgrade, 'D') + if to_append: + qprint(_("Additional packages are going to be installed:")) + qprint(" %-30s %-15s %-10s" %(_('Package Name'), _('Version'), _('Arch'))) + + if command_line.printonly: + prefix = 'A' + + def get_append_sources(pkg): + out = [] + for item in to_append_bysource: + if pkg in to_append_bysource[item]: + out.append(item) + return out + + for pkg in to_append: + nevr = repository.packages[pkg]['nevr'] + oprint("%s %-30s %-15s %-10s" %(prefix, pkg, nevr.VR, repository.packages[pkg]['arch'])) + if command_line.detailed: + qprint(_("\tRequired by %s") % (", ".join(get_append_sources(pkg)))) + + qprint('') + + if to_remove: + qprint(_("The following packages are going to be removed:")) + qprint(" %-30s %-15s %-10s" %(_('Package Name'), _('Current Version'), _('Arch'))) + if command_line.printonly: + prefix = 'R' + for pkg in sorted(to_remove): + nevr = installed.packages[pkg]['nevr'] + oprint("%s %-30s %-15s %-10s" %(prefix, pkg, nevr.VR, installed.packages[pkg]['arch'])) + if command_line.detailed and not command_line.remove: + for problem in sorted(to_remove_problems[pkg]): + qprint(problem) + qprint('') + + if to_remove_saved and command_line.detailed: + qprint(_("Packages which do not present in repositories, but do not have to be removed (will be saved):")) + qprint(" %-30s %-15s %-10s" %(_('Package Name'), _('Current Version'), _('Arch'))) + if command_line.printonly: + prefix = 'S' + for pkg in sorted(to_remove_saved): + oprint("%s %-30s %-15s %-10s" %(prefix, pkg, 
installed.packages[pkg]['nevr'].VR, installed.packages[pkg]['arch']))
+
+ qprint(_("%d packages are going to be downloaded and installed.") % len(files_to_download))
+ qprint(_("%d packages are going to be removed.") % len(to_remove))
+ qprint(_("%s will be downloaded.") % bytes_to_human_readable(count_total_size()))
+
+
+def have_to_be_forced(pkg):
+ for dep in installed.packages[pkg]['provides']:
+ for f in to_remove_force_list:
+ if dep.satisfies(f):
+ vprint("Package %s has been forced to be removed." % pkg)
+ return f
+ return None
+
+
+def Main():
+ global cmd, resolve_source, installed, repository, include_media, exclude_media, installed_backup, ms
+ resolve_source = False # flag that makes download_rpm download resolved build-deps
+ cmd = ['urpmq']
+ include_media = []
+ if(command_line.include_media != None):
+ media = ''
+ for i in command_line.include_media:
+ media = ",".join([media]+i)
+ for ii in i:
+ include_media.append(ii)
+ cmd = cmd + ['--media', media[1:]]
+
+ exclude_media = []
+ if(command_line.exclude_media != None):
+ media = ''
+ for i in command_line.exclude_media:
+ media = ",".join([media]+i)
+ for ii in i:
+ exclude_media.append(ii)
+ cmd = cmd + ['--excludemedia', media[1:]]
+
+ ms = MediaSet()
+ installed = PackageSet()
+ installed.load_from_system()
+
+ repository = PackageSet()
+ repository.load_from_repository()
+
+ installed_backup = copy.deepcopy(installed)
+
+ for inst in installed.packages:
+
+ if command_line.nokernel and inst.startswith('kernel'):
+ continue
+
+ if inst not in repository.packages:
+ if command_line.remove:
+ to_remove_pre.append(inst)
+ else:
+ res = have_to_be_forced(inst)
+ if res:
+ emulate_remove(inst)
+ to_remove.append(inst)
+ to_remove_problems[inst] = [_('\tForced to be removed dew to "%s" policy.') % str(res)]
+ else:
+ to_remove_pre.append(inst)
+ continue
+
+ #compare distepochs first
+ if installed.packages[inst]["nevr"].DE == None or repository.packages[inst]["nevr"].DE == None:
+ res_epoch = 0
+ else:
+ res_epoch = rpm.evrCompare(installed.packages[inst]["nevr"].DE, repository.packages[inst]["nevr"].DE)
+
+ if res_epoch == -1:
+ to_update.append(inst)
+ elif res_epoch == 1:
+ to_downgrade.append(inst)
+ else: # distepochs are the same
+ #now versions can be compared
+ res = rpm.evrCompare(installed.packages[inst]["nevr"].EVR, repository.packages[inst]["nevr"].EVR)
+ if(res == -1):
+ to_update.append(inst)
+ elif res == 1:
+ to_downgrade.append(inst)
+ else: # res == 0
+ pass # do nothing
+
+ process_packages()
+
+ if len(to_update + to_downgrade + to_remove) == 0:
+ qprint(_("Nothing to do"))
+ return
+ installed = installed_backup
+ print_actions()
+ if command_line.printonly:
+ return
+
+ vprint("Installed packages: " + str(len(installed.packages)))
+ vprint("Repository packages: " + str(len(repository.packages)))
+ vprint("Packages that need some actions: " + str(len(to_update) + len(to_downgrade) + len(to_remove) + len(to_append)))
+
+ check_media_set()
+ if(not command_line.auto):
+ sys.stdout.write(_("Do you want to proceed? 
(y/n): ")) + sys.stdout.flush() + while(True): + res = sys.stdin.readline() + res = res.strip() + if res in [_('y'), _('yes'), 'y', 'yes']: + break + if res in [_('n'), _('no'), 'n', 'no']: + exit(0) + + download_packages() + if command_line.download: + return + install_packages() + + +if not os.path.exists(downloaded_rpms_dir): + os.makedirs(downloaded_rpms_dir) + + +class Tests(unittest.TestCase): + def setUp(self): + self.p1 = NEVR.from_depstring('a[== 1.0]') + self.p2 = NEVR.from_depstring('a[> 1.0]') + self.p3 = NEVR.from_depstring('a[< 1.0]') + self.p4 = NEVR.from_depstring('a[>= 1.0]') + self.p5 = NEVR.from_depstring('b[== 1.0]') + + self.r1 = NEVR.from_depstring('a[== 1.0]') + self.r2 = NEVR.from_depstring('a[== 1.1]') + self.r3 = NEVR.from_depstring('a[<= 1.1]') + self.r4 = NEVR.from_depstring('a[>= 1.1]') + self.r5 = NEVR.from_depstring('a[< 0.9]') + self.r6 = NEVR.from_depstring('a[> 0.9]') + self.r7 = NEVR.from_depstring('a[< 1.0]') + self.r8 = NEVR.from_depstring('b[== 1.0]') + + self.pkg1 = NEVR.from_filename("s-c-t-0.0.1-0.20091218.2-rosa.lts2012.0.x86_64") + + def test_nevr_parse(self): + self.assertEqual(self.p1.N, 'a') + self.assertEqual(self.p1.VR, '1.0') + self.assertEqual(self.p1.EVR, '1.0') + self.assertEqual(self.p1.FL, NEVR.EQUAL) + self.assertEqual(self.p2.FL, NEVR.GREATER) + self.assertEqual(self.p3.FL, NEVR.LESS) + self.assertEqual(self.p4.FL, NEVR.EQUAL | NEVR.GREATER) + + self.assertEqual(self.pkg1.N, 's-c-t') + self.assertEqual(self.pkg1.EVR, '0.0.1-0.20091218.2') + self.assertEqual(self.pkg1.FL, NEVR.EQUAL) + + def test_version_compare(self): + self.assertTrue(self.p1.satisfies(self.r1)) + self.assertTrue(self.p1.satisfies(self.r3)) + self.assertTrue(self.p1.satisfies(self.r6)) + self.assertFalse(self.p1.satisfies(self.r4)) + self.assertFalse(self.p1.satisfies(self.r5)) + self.assertFalse(self.p1.satisfies(self.r7)) + self.assertFalse(self.p1.satisfies(self.r8)) + + self.assertTrue(self.p2.satisfies(self.r2)) + self.assertTrue(self.p2.satisfies(self.r2)) + self.assertTrue(self.p2.satisfies(self.r4)) + self.assertTrue(self.p2.satisfies(self.r6)) + self.assertFalse(self.p2.satisfies(self.r1)) + self.assertFalse(self.p2.satisfies(self.r5)) + self.assertFalse(self.p2.satisfies(self.r7)) + + self.assertTrue(self.p3.satisfies(self.r3)) + self.assertTrue(self.p3.satisfies(self.r5)) + self.assertTrue(self.p3.satisfies(self.r6)) + self.assertTrue(self.p3.satisfies(self.r7)) + self.assertFalse(self.p3.satisfies(self.r1)) + self.assertFalse(self.p3.satisfies(self.r2)) + self.assertFalse(self.p3.satisfies(self.r4)) + + self.assertTrue(self.p4.satisfies(self.r1)) + self.assertTrue(self.p4.satisfies(self.r6)) + self.assertFalse(self.p4.satisfies(self.r5)) + self.assertFalse(self.p4.satisfies(self.r7)) + + self.assertTrue(self.p5.satisfies(self.r8)) + + self.assertEqual(self.p1, self.r1) + self.assertNotEqual(self.p1, self.r2) + + self.assertRaises(Exception, NEVR.from_depstring, "a [== 1.0]") + self.assertRaises(Exception, NEVR.from_depstring, "a [== 1.0 ]") + self.assertRaises(Exception, NEVR.from_depstring, "a[! 
1.0]") + self.assertRaises(Exception, NEVR.from_depstring, "a == 1.0") + + self.assertRaises(Exception, self.p1.__eq__, "a [== 1.0]") + + +if __name__ == '__main__': + parse_command_line() + + if command_line.runselftests: + suite = unittest.TestLoader().loadTestsFromTestCase(Tests) + unittest.TextTestRunner(verbosity=2).run(suite) + else: + Main() diff --git a/urpm-tools/urpm-tools.pot b/urpm-tools/urpm-tools.pot new file mode 100644 index 0000000..58ddc58 --- /dev/null +++ b/urpm-tools/urpm-tools.pot @@ -0,0 +1,1101 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2012-08-21 16:34+0400\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. if not fatal_fails, do nothing. Caller have to deal with that himself +#. rpm return code is not 0 +#: urpm-reposync.py:64 urpm-downloader.py:156 urpm-downloader.py:546 +msgid "Error while calling command" +msgstr "" + +#: urpm-reposync.py:66 urpm-downloader.py:158 +msgid "Error message: \n" +msgstr "" + +#: urpm-reposync.py:74 +msgid "reposync is used to synchronize a set of packages on the local computer with the remote repository." +msgstr "" + +#: urpm-reposync.py:76 urpm-downloader.py:104 +msgid "Use only selected URPM media" +msgstr "" + +#: urpm-reposync.py:77 urpm-downloader.py:105 +msgid "Do not use selected URPM media" +msgstr "" + +#. arg_parser.add_argument('-x', '--exclude-packages', action='store',nargs = '+', help="Exclude package(s) by regex") +#: urpm-reposync.py:79 urpm-downloader.py:102 +msgid "Verbose (print additional info)" +msgstr "" + +#: urpm-reposync.py:80 +msgid "Quiet operation. Senseless without --auto." +msgstr "" + +#: urpm-reposync.py:81 +msgid "Do not ask questions, just do it!" +msgstr "" + +#: urpm-reposync.py:82 +msgid "Only print the list of actions to be done and do nothing more!" +msgstr "" + +#: urpm-reposync.py:83 +msgid "Only download the rpm files, but install or remove nothing." +msgstr "" + +#. arg_parser.add_argument('-n', '--noremove', action='store_true', help=_("Do not remove packages at all. If some installed package prevent another package from beeing updated - do not update it.")) +#: urpm-reposync.py:85 +msgid "Remove all the packages which do not present in repository. By default, only some of them would be removed." +msgstr "" + +#: urpm-reposync.py:86 +msgid "Download packages and check wether they can be installed to your system, but do not install them." +msgstr "" + +#: urpm-reposync.py:87 +msgid "Do nothing with kernels." +msgstr "" + +#: urpm-reposync.py:88 +msgid "Run self-tests end exit." +msgstr "" + +#: urpm-reposync.py:89 +msgid "Show detailed information about packages are going to be removed or installed (why does it have to be done)" +msgstr "" + +#: urpm-reposync.py:93 +msgid "It's senseless to use --quiet without --auto!" +msgstr "" + +#: urpm-reposync.py:305 +msgid "Loading the list of installed packages..." +msgstr "" + +#: urpm-reposync.py:319 +msgid "Duplicating " +msgstr "" + +#: urpm-reposync.py:320 +msgid "Already found: " +msgstr "" + +#. 
print synthesis_list +#: urpm-reposync.py:396 +msgid "Processing medium " +msgstr "" + +#: urpm-reposync.py:414 +#, python-format +msgid "Could not read synthesis file. (File %s not found)" +msgstr "" + +#: urpm-reposync.py:484 +msgid "File can not be processed! Url: " +msgstr "" + +#: urpm-reposync.py:579 +#, python-format +msgid "Removing %s" +msgstr "" + +#: urpm-reposync.py:586 +msgid "urpm-reposync: error in package %s. Data: %(data)s" +msgstr "" + +#: urpm-reposync.py:683 +#, python-format +msgid "\tRequires %s, which will not be installed." +msgstr "" + +#: urpm-reposync.py:689 +#, python-format +msgid "\t%s conflicts with it" +msgstr "" + +#: urpm-reposync.py:694 +#, python-format +msgid "\tIt conflicts with %s" +msgstr "" + +#: urpm-reposync.py:768 +msgid "Some packages can not be installed dew to unresolved dependencies: " +msgstr "" + +#: urpm-reposync.py:771 +msgid "Contact repository maintaiers and send them this information, please." +msgstr "" + +#: urpm-reposync.py:777 +msgid "Downloading files..." +msgstr "" + +#: urpm-reposync.py:807 +msgid "Generating transaction..." +msgstr "" + +#: urpm-reposync.py:825 +msgid "Checking dependencies..." +msgstr "" + +#: urpm-reposync.py:830 +msgid "requires" +msgstr "" + +#: urpm-reposync.py:832 +msgid "conflicts with" +msgstr "" + +#: urpm-reposync.py:848 +#, python-format +msgid "Package %(name)s-%(ver)s-%(rel)s %(t)s %(namereq)s%(verreq)s" +msgstr "" + +#: urpm-reposync.py:854 +msgid "There are some unresolved dependencies: " +msgstr "" + +#: urpm-reposync.py:857 +msgid "Packages can not be installed. Please, contact urpm-tools developers and provide this output." +msgstr "" + +#: urpm-reposync.py:859 +msgid "No errors found in transaction" +msgstr "" + +#: urpm-reposync.py:864 +msgid "Running transaction..." +msgstr "" + +#: urpm-reposync.py:905 +msgid "WARNING: Some libraries are going to be removed because there are only the packages with the other architecture in the repository. Maybe you missed media with the correct architecture?" +msgstr "" + +#: urpm-reposync.py:946 urpm-reposync.py:981 urpm-reposync.py:1003 +#: urpm-reposync.py:1016 +msgid "Package Name" +msgstr "" + +#: urpm-reposync.py:946 urpm-reposync.py:1003 urpm-reposync.py:1016 +msgid "Current Version" +msgstr "" + +#: urpm-reposync.py:946 +msgid "New Version" +msgstr "" + +#: urpm-reposync.py:946 urpm-reposync.py:981 urpm-reposync.py:1003 +#: urpm-reposync.py:1016 +msgid "Arch" +msgstr "" + +#: urpm-reposync.py:948 +msgid "medium" +msgstr "" + +#: urpm-reposync.py:970 +msgid "The following packages are going to be upgraded:" +msgstr "" + +#: urpm-reposync.py:975 +msgid "The following packages are going to be downgraded:" +msgstr "" + +#: urpm-reposync.py:980 +msgid "Additional packages are going to be installed:" +msgstr "" + +#: urpm-reposync.py:981 +msgid "Version" +msgstr "" + +#: urpm-reposync.py:997 +#, python-format +msgid "\tRequired by %s" +msgstr "" + +#: urpm-reposync.py:1002 +msgid "The following packages are going to be removed:" +msgstr "" + +#: urpm-reposync.py:1015 +msgid "Packages which do not present in repositories, but do not have to be removed (will be saved):" +msgstr "" + +#: urpm-reposync.py:1022 +#, python-format +msgid "%d packages are going to be downloaded and installed." +msgstr "" + +#: urpm-reposync.py:1023 +#, python-format +msgid "%d packages are going to be removed." +msgstr "" + +#: urpm-reposync.py:1024 +#, python-format +msgid "%s will be downloaded." 
+msgstr "" + +#: urpm-reposync.py:1080 +#, python-format +msgid "\tForced to be removed dew to \"%s\" policy." +msgstr "" + +#: urpm-reposync.py:1108 +msgid "Nothing to do" +msgstr "" + +#: urpm-reposync.py:1121 +msgid "Do you want to proceed? (y/n): " +msgstr "" + +#: urpm-reposync.py:1126 +msgid "y" +msgstr "" + +#: urpm-reposync.py:1126 +msgid "yes" +msgstr "" + +#: urpm-reposync.py:1128 +msgid "n" +msgstr "" + +#: urpm-reposync.py:1128 +msgid "no" +msgstr "" + +#: urpm-repograph.py:86 +msgid "Tool for generating dependency graph for REPOSITORY packages." +msgstr "" + +#: urpm-repograph.py:90 +msgid "Search for cross-repository references in CROSS_REPO(s) repositories." +msgstr "" + +#: urpm-repograph.py:93 +msgid "Hide service messages. (About progress status etc.)" +msgstr "" + +#: urpm-repograph.py:95 +msgid "Show warnings. (About unprovided packages etc.)" +msgstr "" + +#: urpm-repograph.py:98 +msgid "Process \"requires\" package dependencies. Used by default." +msgstr "" + +#: urpm-repograph.py:100 +msgid "Process \"suggests\" package dependencies. If used without --requires then only suggests dependencies are processed." +msgstr "" + +#: urpm-repograph.py:103 +msgid "Process file dependencies." +msgstr "" + +#: urpm-repograph.py:105 +msgid "Show unprovided dependencies." +msgstr "" + +#: urpm-repograph.py:109 +msgid "Search for packages, which are required by package PKG (PKG is a file name or package name)" +msgstr "" + +#: urpm-repograph.py:111 +msgid "Search for packages, which requires package PKG (PKG is a file name or package name)" +msgstr "" + +#: urpm-repograph.py:115 +msgid "Search for all simple loops of package dependecies." +msgstr "" + +#: urpm-repograph.py:117 +msgid "Search for alternative packages providing the same feature." +msgstr "" + +#: urpm-repograph.py:119 +msgid "Search for all broken packages and anything beetween them" +msgstr "" + +#: urpm-repograph.py:121 +msgid "Output each loop or each alternative in different file. Ignored if --loops or --alternatives options are not present. OUTPUT_FILE (if present) is tracted as folder name for new files in that case." +msgstr "" + +#: urpm-repograph.py:127 +msgid "Change graph output to \"OUTPUT_FILE\". STDOUT by default." +msgstr "" + +#: urpm-repograph.py:129 +msgid "Do not output graph. Tool will not start working if --quiet, --nograph are present and --verbose is not. (If there is nothing to output - then nothing has to be done.)" +msgstr "" + +#: urpm-repograph.py:157 urpm-repodiff.py:125 +#, python-format +msgid "Error: URL to repository \"%s\" is incorrect" +msgstr "" + +#: urpm-repograph.py:179 urpm-repodiff.py:147 +#, python-format +msgid "Error: directory %s does not exist" +msgstr "" + +#: urpm-repograph.py:189 urpm-repodiff.py:157 +#, python-format +msgid "Error: \"%s\" is not correct url, path or name of repository" +msgstr "" + +#: urpm-repograph.py:216 +#, python-format +msgid "Error: directory %s already exists" +msgstr "" + +#: urpm-repograph.py:222 urpm-repograph.py:237 urpm-repodiff.py:183 +#, python-format +msgid "Error: File %s already exists" +msgstr "" + +#: urpm-repograph.py:229 +#, python-format +msgid "Error: directory %s was not created" +msgstr "" + +#: urpm-repograph.py:246 urpm-repodiff.py:192 +#, python-format +msgid "Error: File %s cannot be created" +msgstr "" + +#: urpm-repograph.py:250 urpm-repodiff.py:196 +#, python-format +msgid "Error: Path %s does not exist." 
+msgstr "" + +#: urpm-repograph.py:262 urpm-repodiff.py:218 +#, python-format +msgid "getting file %s from " +msgstr "" + +#: urpm-repograph.py:267 urpm-repodiff.py:223 +#, python-format +msgid "Error: file %s was not copied" +msgstr "" + +#: urpm-repograph.py:275 urpm-repodiff.py:231 +#, python-format +msgid "Error: file %(from)s was not downloaded to %(to)s" +msgstr "" + +#: urpm-repograph.py:288 urpm-repodiff.py:272 +msgid "Error: file not found: " +msgstr "" + +#: urpm-repograph.py:293 urpm-repodiff.py:277 +#, python-format +msgid "Error: cannot rename file %(from)s to %(to)s" +msgstr "" + +#: urpm-repograph.py:297 urpm-repograph.py:313 urpm-repograph.py:543 +#: urpm-repodiff.py:281 +#, python-format +msgid "Error: file %s is missing." +msgstr "" + +#: urpm-repograph.py:301 urpm-repodiff.py:285 +#, python-format +msgid "file %(from)s was renamed to %(to)s" +msgstr "" + +#: urpm-repograph.py:311 urpm-repograph.py:541 urpm-repodiff.py:294 +#: urpm-repodiff.py:297 +msgid "unpacking file " +msgstr "" + +#: urpm-repograph.py:371 urpm-repodiff.py:410 +msgid "REPODIFF-Warning: strange : " +msgstr "" + +#: urpm-repograph.py:406 urpm-repodiff.py:351 +#, python-format +msgid "Error: Synthesis file %s was not found." +msgstr "" + +#: urpm-repograph.py:409 +msgid "Parsing synthesis." +msgstr "" + +#: urpm-repograph.py:435 +#, python-format +msgid "Warning: Unexpected sign %(sign)s in 'provides' section of %(of)s" +msgstr "" + +#: urpm-repograph.py:451 urpm-repodiff.py:380 +msgid "Error: Failed to open synthesis file " +msgstr "" + +#: urpm-repograph.py:555 +msgid "Reading fileslist" +msgstr "" + +#: urpm-repograph.py:557 +msgid "Error: Can't find fileslist " +msgstr "" + +#: urpm-repograph.py:561 +msgid "Error: Can't read fileslist " +msgstr "" + +#: urpm-repograph.py:565 +msgid "Error: Wrong fileslist." +msgstr "" + +#: urpm-repograph.py:578 +msgid "Error: Corrupted fileslist" +msgstr "" + +#: urpm-repograph.py:608 +msgid "Warning: cross-repository dependency: " +msgstr "" + +#: urpm-repograph.py:612 urpm-repograph.py:662 +msgid "Warning: package has self-dependecies: " +msgstr "" + +#: urpm-repograph.py:658 +#, python-format +msgid "" +"Warning: cross-repository dependency:\n" +" package %(pkg)s is dependent from\n" +" <- %(from)s located in another repository" +msgstr "" + +#: urpm-repograph.py:691 +#, python-format +msgid "Warning: needed version is absent <%(ver)s> %(rel)s required by package" +msgstr "" + +#: urpm-repograph.py:708 +#, python-format +msgid "Warning: Package %(pkg)s unprovided by %(by)s" +msgstr "" + +#: urpm-repograph.py:740 +msgid "Finding dependencies." +msgstr "" + +#: urpm-repograph.py:749 +#, python-format +msgid "" +"Warning: can't find <%(ask)s> required by package\n" +" <%(pkg)s>" +msgstr "" + +#: urpm-repograph.py:812 +msgid "Total cross-referenced packages: " +msgstr "" + +#: urpm-repograph.py:816 +msgid "Total unprovided packages: " +msgstr "" + +#: urpm-repograph.py:833 +msgid "Calculating colors." +msgstr "" + +#: urpm-repograph.py:1112 +msgid "Non-cycle nodes removed: " +msgstr "" + +#: urpm-repograph.py:1113 +msgid "Cyclic packages: " +msgstr "" + +#: urpm-repograph.py:1130 +#, python-format +msgid "Worktime: %s seconds" +msgstr "" + +#: urpm-repograph.py:1136 +msgid "Searching loops." +msgstr "" + +#: urpm-repograph.py:1140 urpm-repograph.py:1188 +msgid "End of search." +msgstr "" + +#: urpm-repograph.py:1141 +#, python-format +msgid "Loops search: %s seconds" +msgstr "" + +#: urpm-repograph.py:1145 +#, python-format +msgid "Total: %s loops." 
+msgstr "" + +#: urpm-repograph.py:1151 +msgid "Loop " +msgstr "" + +#: urpm-repograph.py:1168 +msgid "Searching alternatives." +msgstr "" + +#: urpm-repograph.py:1180 +#, python-format +msgid "Total: %d alternatives." +msgstr "" + +#: urpm-repograph.py:1182 +msgid "Alternative " +msgstr "" + +#: urpm-repograph.py:1182 +msgid " is provided by:" +msgstr "" + +#: urpm-repograph.py:1260 +msgid "Searching for broken packages." +msgstr "" + +#: urpm-repograph.py:1266 +msgid "Searching for packages REQUIRED by " +msgstr "" + +#: urpm-repograph.py:1268 +msgid "Searching for packages that REQUIRE " +msgstr "" + +#: urpm-repograph.py:1276 +#, python-format +msgid "Level %d dependency." +msgstr "" + +#: urpm-repograph.py:1355 +msgid "Remaking structures." +msgstr "" + +#: urpm-repograph.py:1367 +msgid "Error: can't find package name or filename \"" +msgstr "" + +#: urpm-repograph.py:1401 +msgid "Do not use -q/--quiet and -n/--nograph without -v/--verbose together." +msgstr "" + +#: urpm-repograph.py:1402 +msgid "That way there is no information to output anywhere. Nothing will be done." +msgstr "" + +#: urpm-repograph.py:1405 +msgid "Do not use -u/--unprovided and -b/--broken options together." +msgstr "" + +#: urpm-repograph.py:1406 +msgid "-b does everything that do -u and a little more." +msgstr "" + +#: urpm-downloader.py:91 +msgid "A tool for downloading RPMs and SRPMs from URPM-based linux repositories" +msgstr "" + +#: urpm-downloader.py:92 +msgid "If none of the options -b, -s, -d turned on, it will be treated as -b" +msgstr "" + +#: urpm-downloader.py:93 +msgid "Package name(s) to download. It can contain not only package names, but (S)RPM files too. In this case package name extracted from this file will be used" +msgstr "" + +#: urpm-downloader.py:94 +msgid "Instead of downloading files, list the URLs that would be processed" +msgstr "" + +#: urpm-downloader.py:95 +msgid "When downloading RPMs, resolve dependencies and also download the required packages, if they are not already installed" +msgstr "" + +#: urpm-downloader.py:96 +msgid "When downloading RPMs, resolve dependencies and also download the required packages, even if they are already installed" +msgstr "" + +#: urpm-downloader.py:97 +msgid "Download binary RPMs" +msgstr "" + +#: urpm-downloader.py:98 +msgid "Download the source RPMs (SRPMs)" +msgstr "" + +#: urpm-downloader.py:99 +msgid "Download debug RPMs" +msgstr "" + +#: urpm-downloader.py:100 +msgid "Download debug RPMs and install" +msgstr "" + +#: urpm-downloader.py:103 +msgid "Quiet operation." +msgstr "" + +#: urpm-downloader.py:106 +msgid "Exclude package(s) by regex" +msgstr "" + +#: urpm-downloader.py:107 +msgid "Try to continue when error occurs" +msgstr "" + +#: urpm-downloader.py:108 +msgid "If the file already exists, download it again and overwrite the old one" +msgstr "" + +#: urpm-downloader.py:109 +msgid "If package dependency can be satisfied by several packages, download all of them (by default, only the first one is downloaded)" +msgstr "" + +#: urpm-downloader.py:110 +msgid "If different versions of package present in repository, process them all" +msgstr "" + +#. arg_parser.add_argument('--self-test', action='store_true', help="Test urpm-downloader end exit") +#: urpm-downloader.py:113 +msgid "Specify a destination directory for the download" +msgstr "" + +#: urpm-downloader.py:130 +msgid "Use of --verbose with --quiet is senseless. Turning verbose mode off." 
+msgstr "" + +#: urpm-downloader.py:134 +msgid "Note that resolving of SRPM dependencies is not possible until SRPM downloaded. So, it will be done despite --urls" +msgstr "" + +#: urpm-downloader.py:375 +msgid "* Downloaded: " +msgstr "" + +#: urpm-downloader.py:377 +msgid "* File exists, skipping: " +msgstr "" + +#: urpm-downloader.py:476 +msgid "Can not download SRPM for package" +msgstr "" + +#: urpm-downloader.py:499 urpm-downloader.py:532 +msgid "Can not download RPM" +msgstr "" + +#: urpm-downloader.py:504 +msgid "Resolving debug-info packages..." +msgstr "" + +#. urpmq output. RU: Нет пакета с названием +#: urpm-downloader.py:509 +msgid "No package named " +msgstr "" + +#: urpm-downloader.py:533 +msgid "Maybe you need to update urpmi database (urpmi.update -a)?" +msgstr "" + +#: urpm-downloader.py:542 +msgid "Installing " +msgstr "" + +#. return code is not 0 +#: urpm-downloader.py:553 +#, python-format +msgid "Debug package for '%s' not found" +msgstr "" + +#: urpm-downloader.py:602 +msgid "Parameters that end with '.rpm' seem to be local files, but the folowing files do not exist: " +msgstr "" + +#: urpm-downloader.py:627 +msgid "Searching src.rpm file(s) in repository..." +msgstr "" + +#: urpm-downloader.py:629 +msgid "Downloading src.rpm file(s)..." +msgstr "" + +#: urpm-downloader.py:659 +msgid "Resolving build dependencies..." +msgstr "" + +#: urpm-downloader.py:661 +msgid "Resolving dependencies..." +msgstr "" + +#: urpm-downloader.py:663 +#, python-format +msgid "Resolved %d packages" +msgstr "" + +#: urpm-downloader.py:665 +msgid "Nothing to download" +msgstr "" + +#: urpm-repomanage.py:56 +#, python-format +msgid "Error accessing directory %(path)s, %(e)s" +msgstr "" + +#: urpm-repomanage.py:86 +msgid "manage a directory of rpm packages and report newest or oldest packages" +msgstr "" + +#: urpm-repomanage.py:92 +msgid "path to directory with rpm packages" +msgstr "" + +#: urpm-repomanage.py:95 +msgid "print the older packages" +msgstr "" + +#: urpm-repomanage.py:97 +msgid "print the newest packages (this is the default behavior)" +msgstr "" + +#: urpm-repomanage.py:99 +msgid "remove older packages" +msgstr "" + +#: urpm-repomanage.py:101 +msgid "space separated output, not newline" +msgstr "" + +#: urpm-repomanage.py:103 +msgid "number of newest packages to keep - defaults to 1" +msgstr "" + +#: urpm-repomanage.py:105 +msgid "do not check package payload signatures/digests" +msgstr "" + +#: urpm-repomanage.py:108 +msgid "be completely quiet" +msgstr "" + +#: urpm-repomanage.py:110 +msgid "be verbose - say which packages are decided to be old and why (this info is dumped to STDERR)" +msgstr "" + +#: urpm-repomanage.py:131 +msgid "No files to process" +msgstr "" + +#: urpm-repomanage.py:144 +#, python-format +msgid "Error opening pkg %(pkg)s: %(err)s" +msgstr "" + +#: urpm-repomanage.py:195 urpm-repomanage.py:221 +msgid "Dropped " +msgstr "" + +#: urpm-repomanage.py:196 urpm-repomanage.py:222 +msgid " superseded by: " +msgstr "" + +#: urpm-repodiff.py:83 +msgid "Tool for comparing sets of repositories." +msgstr "" + +#: urpm-repodiff.py:85 +msgid "URL or PATH to old repositories" +msgstr "" + +#: urpm-repodiff.py:87 +msgid "URL or PATH to new repositories" +msgstr "" + +#: urpm-repodiff.py:89 +msgid "Show differences in package sizes." +msgstr "" + +#: urpm-repodiff.py:91 +msgid "Simple output format." +msgstr "" + +#: urpm-repodiff.py:93 +msgid "Hide service messages." +msgstr "" + +#: urpm-repodiff.py:95 +msgid "Show changelog difference." 
+msgstr "" + +#: urpm-repodiff.py:97 +#, python-format +msgid "Output in HTML format, if --output is not present \"%s\" will be created in current directory. --size, --simple and --changelog options are ignored." +msgstr "" + +#: urpm-repodiff.py:101 +msgid "Change standart output to \"OUTPUT_FILE\"." +msgstr "" + +#: urpm-repodiff.py:174 +#, python-format +msgid "Error: Cannot open %s for writing." +msgstr "" + +#: urpm-repodiff.py:354 +msgid "Parsing synthesis" +msgstr "" + +#: urpm-repodiff.py:389 +msgid "REPODIFF-Warning: strange format of or : " +msgstr "" + +#: urpm-repodiff.py:527 +msgid "New package: " +msgstr "" + +#: urpm-repodiff.py:542 +msgid "Generating obsoleted list." +msgstr "" + +#: urpm-repodiff.py:601 +msgid "Removed package: " +msgstr "" + +#: urpm-repodiff.py:609 +msgid " Obsoleted by " +msgstr "" + +#: urpm-repodiff.py:630 +msgid "Reading changelog" +msgstr "" + +#: urpm-repodiff.py:632 +msgid "Error: Can't find changelog " +msgstr "" + +#: urpm-repodiff.py:636 +msgid "Error: Can't read changelog " +msgstr "" + +#: urpm-repodiff.py:640 +msgid "Error: Wrong changelog." +msgstr "" + +#: urpm-repodiff.py:662 +msgid "Error: Corrupted changelog" +msgstr "" + +#: urpm-repodiff.py:756 +msgid "Generating changes list." +msgstr "" + +#: urpm-repodiff.py:770 urpm-repodiff.py:773 +#, python-format +msgid "REPODIFF-Warning: Package %s was not described in changelogs.xml" +msgstr "" + +#: urpm-repodiff.py:771 +msgid "REPODIFF-Warning: Changelogs of a package are absent in \"new\" repository." +msgstr "" + +#: urpm-repodiff.py:774 +msgid "REPODIFF-Warning: Changelogs of a package are absent." +msgstr "" + +#: urpm-repodiff.py:800 +#, python-format +msgid "Package %s has no changelog info\n" +msgstr "" + +#: urpm-repodiff.py:818 +msgid "" +"\n" +"\n" +"Updated packages:\n" +"\n" +msgstr "" + +#: urpm-repodiff.py:825 +msgid " ***DOWNGRADED***\n" +msgstr "" + +#: urpm-repodiff.py:834 +#, python-format +msgid "" +"Size Change: %d bytes\n" +"\n" +msgstr "" + +#: urpm-repodiff.py:844 +msgid " Total added packages: " +msgstr "" + +#: urpm-repodiff.py:847 +msgid " Total removed packages: " +msgstr "" + +#: urpm-repodiff.py:856 +msgid " Total updated packages: " +msgstr "" + +#: urpm-repodiff.py:858 +msgid " Total downgraded packages: " +msgstr "" + +#: urpm-repodiff.py:1316 +msgid "Creating HTML file." +msgstr "" + +#: urpm-package-cleanup.py:58 +msgid "Find problems in the rpmdb of system and correct them" +msgstr "" + +#: urpm-package-cleanup.py:62 +msgid "Query format to use for output." +msgstr "" + +#: urpm-package-cleanup.py:65 +msgid "Use non-interactive mode" +msgstr "" + +#: urpm-package-cleanup.py:68 +msgid "Orphans Options" +msgstr "" + +#: urpm-package-cleanup.py:71 +msgid "List installed packages which are not available from currently configured repositories" +msgstr "" + +#: urpm-package-cleanup.py:75 +msgid "Use only update media. This means that urpmq will search and resolve dependencies only in media marked as containing updates (e.g. which have been created with \"urpmi.addmedia --update\")." +msgstr "" + +#: urpm-package-cleanup.py:80 +msgid "Select specific media to be used, instead of defaulting to all available media (or all update media if --update is used). No rpm will be found in other media." +msgstr "" + +#: urpm-package-cleanup.py:85 +msgid "Do not use the specified media." 
+msgstr "" + +#: urpm-package-cleanup.py:87 +msgid "Dependency Problems Options" +msgstr "" + +#: urpm-package-cleanup.py:90 +msgid "List dependency problems in the local RPM database" +msgstr "" + +#: urpm-package-cleanup.py:93 +msgid "List missing suggestions of installed packages" +msgstr "" + +#: urpm-package-cleanup.py:96 +msgid "Duplicate Package Options" +msgstr "" + +#: urpm-package-cleanup.py:99 +msgid "Scan for duplicates in your rpmdb" +msgstr "" + +#: urpm-package-cleanup.py:102 +msgid "Scan for duplicates in your rpmdb and remove older " +msgstr "" + +#: urpm-package-cleanup.py:105 +msgid "disable rpm scriptlets from running when cleaning duplicates" +msgstr "" + +#: urpm-package-cleanup.py:107 +msgid "Leaf Node Options" +msgstr "" + +#: urpm-package-cleanup.py:110 +msgid "List leaf nodes in the local RPM database" +msgstr "" + +#: urpm-package-cleanup.py:113 +msgid "list all packages leaf nodes that do not match leaf-regex" +msgstr "" + +#: urpm-package-cleanup.py:117 +msgid "A package name that matches this regular expression (case insensitively) is a leaf" +msgstr "" + +#: urpm-package-cleanup.py:121 +msgid "do not list development packages as leaf nodes" +msgstr "" + +#: urpm-package-cleanup.py:124 +msgid "do not list packages with files in a bin dirs as leaf nodes" +msgstr "" + +#: urpm-package-cleanup.py:127 +msgid "Old Kernel Options" +msgstr "" + +#: urpm-package-cleanup.py:130 +msgid "Remove old kernel and kernel-devel packages" +msgstr "" + +#: urpm-package-cleanup.py:133 +msgid "Number of kernel packages to keep on the system (default 2)" +msgstr "" + +#: urpm-package-cleanup.py:137 +msgid "Do not remove kernel-devel packages when removing kernels" +msgstr "" + +#: urpm-package-cleanup.py:306 +#, python-format +msgid "Warning: neither single nor multi lib arch: %s " +msgstr "" + +#: urpm-package-cleanup.py:417 +#, python-format +msgid "Not removing kernel %(kver)s-%(krel)s because it is the running kernel" +msgstr "" + +#: urpm-package-cleanup.py:447 +#, python-format +msgid "Package %(qf)s %(prob)s" +msgstr "" + +#: urpm-package-cleanup.py:450 +msgid "Missing suggests:" +msgstr "" + +#: urpm-package-cleanup.py:458 +msgid "No Problems Found" +msgstr "" + +#: urpm-package-cleanup.py:473 +msgid "Error: Cannot remove kernels as a user, must be root" +msgstr "" + +#: urpm-package-cleanup.py:476 +msgid "Error: should keep at least 1 kernel!" +msgstr "" + +#: urpm-package-cleanup.py:529 +msgid "Error: Cannot remove packages as a user, must be root" +msgstr "" diff --git a/urpm-tools/urpm-tools.spec b/urpm-tools/urpm-tools.spec new file mode 100644 index 0000000..081e529 --- /dev/null +++ b/urpm-tools/urpm-tools.spec @@ -0,0 +1,80 @@ +Name: urpm-tools +Version: 2.1 +Release: 1 +Summary: Utilities that help to work with URPM-based repositories +Group: System/Configuration/Packaging +License: GPLv2 +URL: http://wiki.rosalab.ru/index.php/Urpm-tools +Source0: %{name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{name}-%{version} + +Requires: urpmi >= 6.68 +Requires: python-rpm >= 5.3 +Requires: libxml2-python >= 2.7 +Requires: gzip +Requires: python-rpm5utils = %{version} + +%description +%{name} is a collection of utilities for URPM-based repositories. +They make URPM-based repositories easier and more powerful to use. 
+These tools include: urpm-downloader, urpm-package-cleanup,
+urpm-repoclosure, urpm-repodiff, urpm-repomanage, urpm-repograph
+and urpm-reposync.
+
+%package -n python-rpm5utils
+Group: Development/Python
+Summary: Auxiliary modules to work with rpm
+Provides: python-rpm5utils = %{version}-%{release}
+
+%description -n python-rpm5utils
+python-rpm5utils contains some useful modules that are used by %{name}.
+Mostly taken from yum.
+
+%prep
+%setup -q -n %{name}-%{version}
+
+%install
+rm -rf %{buildroot}
+make install DESTDIR=$RPM_BUILD_ROOT
+%find_lang %{name}
+
+%files -f %{name}.lang
+%defattr(-,root,root,-)
+
+%{_bindir}/urpm-downloader
+%{_bindir}/urpm-package-cleanup
+%{_bindir}/urpm-repoclosure
+%{_bindir}/urpm-repodiff
+%{_bindir}/urpm-repomanage
+%{_bindir}/urpm-repograph
+%{_bindir}/urpm-reposync
+%{_mandir}/man1/urpm-downloader.1.xz
+%{_mandir}/man1/urpm-package-cleanup.1.xz
+%{_mandir}/man1/urpm-repoclosure.1.xz
+%{_mandir}/man1/urpm-repodiff.1.xz
+%{_mandir}/man1/urpm-repomanage.1.xz
+%{_mandir}/man1/urpm-repograph.1.xz
+%{_mandir}/man1/urpm-reposync.1.xz
+
+%{_datadir}/locale/*/LC_MESSAGES/urpm-tools.mo
+%doc COPYING
+
+%files -n python-rpm5utils
+%defattr(-,root,root,-)
+%dir %{py_puresitedir}/rpm5utils
+%dir %{py_puresitedir}/rpm5utils/tests
+%dir %{py_puresitedir}/rpm5utils/urpmgraphs
+%dir %{py_puresitedir}/rpm5utils/urpmgraphs/algorithms
+%dir %{py_puresitedir}/rpm5utils/urpmgraphs/algorithms/components
+%dir %{py_puresitedir}/rpm5utils/urpmgraphs/classes
+
+%{py_puresitedir}/urpmmisc.py
+%{py_puresitedir}/rpm5utils/*.py*
+%{py_puresitedir}/rpm5utils/tests/*.py*
+%{py_puresitedir}/rpm5utils/urpmgraphs/*.py*
+%{py_puresitedir}/rpm5utils/urpmgraphs/algorithms/*.py*
+%{py_puresitedir}/rpm5utils/urpmgraphs/algorithms/components/*.py*
+%{py_puresitedir}/rpm5utils/urpmgraphs/classes/*.py*
+
+%doc rpm5utils/COPYING
diff --git a/urpm-tools/urpmmisc.py b/urpm-tools/urpmmisc.py
new file mode 100644
index 0000000..924fbae
--- /dev/null
+++ b/urpm-tools/urpmmisc.py
@@ -0,0 +1,182 @@
+'''
+" Miscellaneous routines used by urpm-tools
+"
+" Taken from yum's misc.py
+'''
+
+import types
+
+_share_data_store = {}
+_share_data_store_u = {}
+def share_data(value):
+    """ Take a value and use the same value from the store,
+        if the value isn't in the store this one becomes the shared version. """
+    # We don't want to change the types of strings, between str <=> unicode
+    # and hash('a') == hash(u'a') ... so use different stores.
+    # In theory eventually we'll have all of one type, but don't hold breath.
+    store = _share_data_store
+    if isinstance(value, unicode):
+        store = _share_data_store_u
+    # hahahah, of course the above means that:
+    #   hash(('a', 'b')) == hash((u'a', u'b'))
+    # ...which we have in deptuples, so just screw sharing those atm.
+    if type(value) == types.TupleType:
+        return value
+    return store.setdefault(value, value)
+
+def string_to_prco_tuple(prcoString):
+    """returns a prco tuple (name, flags, (e, v, r)) for a string"""
+
+    if type(prcoString) == types.TupleType:
+        (n, f, v) = prcoString
+    else:
+        n = prcoString
+        f = v = None
+
+    # We love GPG keys as packages, esp. awesome provides like:
+    #  gpg(Fedora (13) <fedora@fedoraproject.org>)
+    if n[0] != '/' and not n.startswith("gpg("):
+        # not a file dep - look at it for being versioned
+        prco_split = n.split()
+        if len(prco_split) == 3:
+            n, f, v = prco_split
+
+    # now we have 'n, f, v' where f and v could be None and None
+    if f is not None and f not in constants.LETTERFLAGS:
+        if f not in constants.SYMBOLFLAGS:
+            try:
+                f = flagToString(int(f))
+            except (ValueError, TypeError), e:
+                raise Errors.MiscError, 'Invalid version flag: %s' % f
+        else:
+            f = constants.SYMBOLFLAGS[f]
+
+    if type(v) in (types.StringType, types.NoneType, types.UnicodeType):
+        (prco_e, prco_v, prco_r) = stringToVersion(v)
+    elif type(v) in (types.TupleType, types.ListType):
+        (prco_e, prco_v, prco_r) = v
+
+    # now we have (n, f, (e, v, r)) for the thing specified
+    return (n, f, (prco_e, prco_v, prco_r))
+
+###########
+# Title: Remove duplicates from a sequence
+# Submitter: Tim Peters
+# From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
+def unique(s):
+    """Return a list of the elements in s, but without duplicates.
+
+    For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
+    unique("abcabc") some permutation of ["a", "b", "c"], and
+    unique(([1, 2], [2, 3], [1, 2])) some permutation of
+    [[2, 3], [1, 2]].
+
+    For best speed, all sequence elements should be hashable. Then
+    unique() will usually work in linear time.
+
+    If not possible, the sequence elements should enjoy a total
+    ordering, and if list(s).sort() doesn't raise TypeError it's
+    assumed that they do enjoy a total ordering. Then unique() will
+    usually work in O(N*log2(N)) time.
+
+    If that's not possible either, the sequence elements must support
+    equality-testing. Then unique() will usually work in quadratic
+    time.
+    """
+
+    n = len(s)
+    if n == 0:
+        return []
+
+    # Try using a set first, as that's the fastest and will usually
+    # work. If it doesn't work, it will usually fail quickly, so it
+    # usually doesn't cost much to *try* it. It requires that all the
+    # sequence elements be hashable, and support equality comparison.
+    try:
+        u = set(s)
+    except TypeError:
+        pass
+    else:
+        return list(u)
+
+    # We can't hash all the elements. Second fastest is to sort,
+    # which brings the equal elements together; then duplicates are
+    # easy to weed out in a single pass.
+    # NOTE: Python's list.sort() was designed to be efficient in the
+    # presence of many duplicate elements. This isn't true of all
+    # sort functions in all languages or libraries, so this approach
+    # is more effective in Python than it may be elsewhere.
+    try:
+        t = list(s)
+        t.sort()
+    except TypeError:
+        del t  # move on to the next method
+    else:
+        assert n > 0
+        last = t[0]
+        lasti = i = 1
+        while i < n:
+            if t[i] != last:
+                t[lasti] = last = t[i]
+                lasti += 1
+            i += 1
+        return t[:lasti]
+
+    # Brute force is all that's left.
+    u = []
+    for x in s:
+        if x not in u:
+            u.append(x)
+    return u
+
+def GetUrlFromRepoName(reponame):
+    try:
+        urpmi = open("/etc/urpmi/urpmi.cfg")
+    except IOError:
+        print "cannot open file urpmi.cfg"
+        return (None, None, None)
+    repo_dict = {}
+    name = ''
+    isignore = 0
+    isupdate = 0
+    mirrorlist = ''
+    withdir = ''
+    for line in urpmi:
+        line = line.strip()
+        if line.endswith('{'):
+            line = line[:-1].strip()
+            line = line.lower()
+            line = line.split("\ ")
+            line = ' '.join(line)
+            name = line
+        elif line.startswith("ignore"):
+            isignore = 1
+        elif line.startswith("update"):
+            isupdate = 1
+        elif line.startswith("mirrorlist: "):
+            line = line[12:]
+            if not line.startswith('$'):
+                if not line.endswith('/'):
+                    line = line + '/'
+            mirrorlist = line
+        elif line.startswith("with-dir: "):
+            line = line[10:]
+            withdir = line
+        elif line.startswith('}'):
+            if mirrorlist == '':
+                path = None
+            else:
+                path = mirrorlist + withdir
+            if (name) and (path):
+                repo_dict[name] = (isignore, isupdate, path)
+            name = ''
+            isignore = 0
+            isupdate = 0
+            mirrorlist = ''
+            withdir = ''
+    urpmi.close()
+    name2 = reponame.lower()
+    if name2 not in repo_dict:
+        return (None, None, None)
+    else:
+        return repo_dict[name2]
diff --git a/urpmmisc.py b/urpmmisc.py
new file mode 100644
index 0000000..924fbae
--- /dev/null
+++ b/urpmmisc.py
@@ -0,0 +1,182 @@
+'''
+" Miscellaneous routines used by urpm-tools
+"
+" Taken from yum's misc.py
+'''
+
+import types
+
+_share_data_store = {}
+_share_data_store_u = {}
+def share_data(value):
+    """ Take a value and use the same value from the store,
+        if the value isn't in the store this one becomes the shared version. """
+    # We don't want to change the types of strings, between str <=> unicode
+    # and hash('a') == hash(u'a') ... so use different stores.
+    # In theory eventually we'll have all of one type, but don't hold breath.
+    store = _share_data_store
+    if isinstance(value, unicode):
+        store = _share_data_store_u
+    # hahahah, of course the above means that:
+    #   hash(('a', 'b')) == hash((u'a', u'b'))
+    # ...which we have in deptuples, so just screw sharing those atm.
+    if type(value) == types.TupleType:
+        return value
+    return store.setdefault(value, value)
+
+def string_to_prco_tuple(prcoString):
+    """returns a prco tuple (name, flags, (e, v, r)) for a string"""
+
+    if type(prcoString) == types.TupleType:
+        (n, f, v) = prcoString
+    else:
+        n = prcoString
+        f = v = None
+
+    # We love GPG keys as packages, esp. awesome provides like:
+    #  gpg(Fedora (13) <fedora@fedoraproject.org>)
+    if n[0] != '/' and not n.startswith("gpg("):
+        # not a file dep - look at it for being versioned
+        prco_split = n.split()
+        if len(prco_split) == 3:
+            n, f, v = prco_split
+
+    # now we have 'n, f, v' where f and v could be None and None
+    if f is not None and f not in constants.LETTERFLAGS:
+        if f not in constants.SYMBOLFLAGS:
+            try:
+                f = flagToString(int(f))
+            except (ValueError, TypeError), e:
+                raise Errors.MiscError, 'Invalid version flag: %s' % f
+        else:
+            f = constants.SYMBOLFLAGS[f]
+
+    if type(v) in (types.StringType, types.NoneType, types.UnicodeType):
+        (prco_e, prco_v, prco_r) = stringToVersion(v)
+    elif type(v) in (types.TupleType, types.ListType):
+        (prco_e, prco_v, prco_r) = v
+
+    # now we have (n, f, (e, v, r)) for the thing specified
+    return (n, f, (prco_e, prco_v, prco_r))
+
+###########
+# Title: Remove duplicates from a sequence
+# Submitter: Tim Peters
+# From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
+def unique(s):
+    """Return a list of the elements in s, but without duplicates.
+
+    For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
+    unique("abcabc") some permutation of ["a", "b", "c"], and
+    unique(([1, 2], [2, 3], [1, 2])) some permutation of
+    [[2, 3], [1, 2]].
+
+    For best speed, all sequence elements should be hashable. Then
+    unique() will usually work in linear time.
+
+    If not possible, the sequence elements should enjoy a total
+    ordering, and if list(s).sort() doesn't raise TypeError it's
+    assumed that they do enjoy a total ordering. Then unique() will
+    usually work in O(N*log2(N)) time.
+
+    If that's not possible either, the sequence elements must support
+    equality-testing. Then unique() will usually work in quadratic
+    time.
+    """
+
+    n = len(s)
+    if n == 0:
+        return []
+
+    # Try using a set first, as that's the fastest and will usually
+    # work. If it doesn't work, it will usually fail quickly, so it
+    # usually doesn't cost much to *try* it. It requires that all the
+    # sequence elements be hashable, and support equality comparison.
+    try:
+        u = set(s)
+    except TypeError:
+        pass
+    else:
+        return list(u)
+
+    # We can't hash all the elements. Second fastest is to sort,
+    # which brings the equal elements together; then duplicates are
+    # easy to weed out in a single pass.
+    # NOTE: Python's list.sort() was designed to be efficient in the
+    # presence of many duplicate elements. This isn't true of all
+    # sort functions in all languages or libraries, so this approach
+    # is more effective in Python than it may be elsewhere.
+    try:
+        t = list(s)
+        t.sort()
+    except TypeError:
+        del t  # move on to the next method
+    else:
+        assert n > 0
+        last = t[0]
+        lasti = i = 1
+        while i < n:
+            if t[i] != last:
+                t[lasti] = last = t[i]
+                lasti += 1
+            i += 1
+        return t[:lasti]
+
+    # Brute force is all that's left.
+    u = []
+    for x in s:
+        if x not in u:
+            u.append(x)
+    return u
+
+def GetUrlFromRepoName(reponame):
+    try:
+        urpmi = open("/etc/urpmi/urpmi.cfg")
+    except IOError:
+        print "cannot open file urpmi.cfg"
+        return (None, None, None)
+    repo_dict = {}
+    name = ''
+    isignore = 0
+    isupdate = 0
+    mirrorlist = ''
+    withdir = ''
+    for line in urpmi:
+        line = line.strip()
+        if line.endswith('{'):
+            line = line[:-1].strip()
+            line = line.lower()
+            line = line.split("\ ")
+            line = ' '.join(line)
+            name = line
+        elif line.startswith("ignore"):
+            isignore = 1
+        elif line.startswith("update"):
+            isupdate = 1
+        elif line.startswith("mirrorlist: "):
+            line = line[12:]
+            if not line.startswith('$'):
+                if not line.endswith('/'):
+                    line = line + '/'
+            mirrorlist = line
+        elif line.startswith("with-dir: "):
+            line = line[10:]
+            withdir = line
+        elif line.startswith('}'):
+            if mirrorlist == '':
+                path = None
+            else:
+                path = mirrorlist + withdir
+            if (name) and (path):
+                repo_dict[name] = (isignore, isupdate, path)
+            name = ''
+            isignore = 0
+            isupdate = 0
+            mirrorlist = ''
+            withdir = ''
+    urpmi.close()
+    name2 = reponame.lower()
+    if name2 not in repo_dict:
+        return (None, None, None)
+    else:
+        return repo_dict[name2]
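
A usage note on urpmmisc above: GetUrlFromRepoName() returns the (isignore, isupdate, path) tuple recorded for a medium in /etc/urpmi/urpmi.cfg, or (None, None, None) when the medium is unknown or the file cannot be read, while unique() and share_data() are generic helpers taken from yum. string_to_prco_tuple() still references yum-side names (constants, flagToString, stringToVersion, Errors) that this module does not import, so only the self-contained helpers are exercised here. The following is a minimal Python 2 sketch, assuming urpmmisc.py is on the import path; the medium name "main" is a hypothetical example, not something this patch guarantees to exist:

    # Minimal usage sketch for urpmmisc (Python 2).
    from urpmmisc import GetUrlFromRepoName, share_data, unique

    # unique() tries set(), then sort-and-sweep, then brute force:
    print unique([1, 2, 3, 1, 2, 3])        # set path: permutation of [1, 2, 3]
    print unique(([1, 2], [2, 3], [1, 2]))  # lists are unhashable: sort path

    # share_data() interns equal values so repeated metadata strings
    # parsed from synthesis files can share one object:
    assert share_data("glibc") is share_data("glibc")

    # GetUrlFromRepoName() always yields a 3-tuple; (None, None, None)
    # means the medium was not found in /etc/urpmi/urpmi.cfg:
    isignore, isupdate, path = GetUrlFromRepoName("main")
    if path is None:
        print "medium 'main' not found in urpmi.cfg"
    else:
        print "ignore=%d update=%d url=%s" % (isignore, isupdate, path)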