Diffstat (limited to 'scripts/lib/mic/utils')
-rw-r--r--  scripts/lib/mic/utils/BmapCreate.py      298
-rw-r--r--  scripts/lib/mic/utils/Fiemap.py          252
-rw-r--r--  scripts/lib/mic/utils/__init__.py          0
-rw-r--r--  scripts/lib/mic/utils/cmdln.py          1586
-rw-r--r--  scripts/lib/mic/utils/errors.py           71
-rw-r--r--  scripts/lib/mic/utils/fs_related.py     1029
-rw-r--r--  scripts/lib/mic/utils/gpt_parser.py      331
-rw-r--r--  scripts/lib/mic/utils/grabber.py          97
-rw-r--r--  scripts/lib/mic/utils/misc.py           1067
-rw-r--r--  scripts/lib/mic/utils/partitionedfs.py   790
-rw-r--r--  scripts/lib/mic/utils/proxy.py           183
-rw-r--r--  scripts/lib/mic/utils/rpmmisc.py         600
-rw-r--r--  scripts/lib/mic/utils/runner.py          109
13 files changed, 6413 insertions, 0 deletions
diff --git a/scripts/lib/mic/utils/BmapCreate.py b/scripts/lib/mic/utils/BmapCreate.py
new file mode 100644
index 0000000000..65b19a5f46
--- /dev/null
+++ b/scripts/lib/mic/utils/BmapCreate.py
@@ -0,0 +1,298 @@
+""" This module implements the block map (bmap) creation functionality and
+provides the corresponding API in form of the 'BmapCreate' class.
+
+The idea is that while image files may generally be very large (e.g., 4GiB),
+they may nevertheless contain only a little real data, e.g., 512MiB. This data
+consists of files, directories, file-system meta-data, the partition table,
+etc. When copying the image to the target device, you do not have to copy all
+the 4GiB of data, you can copy only the 512MiB of it, which is 8 times less,
+so copying should presumably be about 8 times faster.
+
+The block map file is an XML file which contains a list of blocks which have to
+be copied to the target device. The other blocks are not used and there is no
+need to copy them. The XML file also contains some additional information like
+block size, image size, count of mapped blocks, etc. There are also many
+comments, so it is human-readable.
+
+The image has to be a sparse file. Generally, this means that when you generate
+this image file, you should start with a huge sparse file which contains a
+single hole spanning the entire file. Then you should partition it, write all
+the data (probably by means of loop-back mounting the image or parts of it),
+etc. The end result should be a sparse file where mapped areas represent useful
+parts of the image and holes represent useless parts of the image, which do not
+have to be copied when copying the image to the target device.
+
+This module uses the FIEMAP ioctl to detect holes. """
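
As an aside, creating such a sparse file is cheap: it is enough to set the
file size without writing any data. A minimal illustrative sketch (the file
name and size are hypothetical; on most Linux file systems this produces a
single hole):

    # Create a 4GiB image file that is one big hole: no blocks are
    # mapped until real data is written somewhere inside it.
    image = open("image.raw", "wb")
    image.truncate(4 * 1024 * 1024 * 1024)
    image.close()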
+
+# Disable the following pylint recommendations:
+# * Too many instance attributes - R0902
+# * Too few public methods - R0903
+# pylint: disable=R0902,R0903
+
+import hashlib
+from mic.utils.misc import human_size
+from mic.utils import Fiemap
+
+# The bmap format version we generate
+SUPPORTED_BMAP_VERSION = "1.3"
+
+_BMAP_START_TEMPLATE = \
+"""<?xml version="1.0" ?>
+<!-- This file contains the block map for an image file, which is basically
+ a list of useful (mapped) block numbers in the image file. In other words,
+ it lists only those blocks which contain data (boot sector, partition
+ table, file-system metadata, files, directories, extents, etc). These
+ blocks have to be copied to the target device. The other blocks do not
+ contain any useful data and do not have to be copied to the target
+ device.
+
+ The block map is an optimization which allows copying or flashing the
+ image to the target device quicker than copying or flashing the entire
+ image. This is
+ because with bmap less data is copied: <MappedBlocksCount> blocks instead
+ of <BlocksCount> blocks.
+
+ Besides the machine-readable data, this file contains useful comments
+ which hold human-readable information like image size, percentage of
+ mapped data, etc.
+
+ The 'version' attribute is the block map file format version in the
+ 'major.minor' format. The version major number is increased whenever an
+ incompatible block map format change is made. The minor number changes
+ in case of minor backward-compatible changes. -->
+
+<bmap version="%s">
+ <!-- Image size in bytes: %s -->
+ <ImageSize> %u </ImageSize>
+
+ <!-- Size of a block in bytes -->
+ <BlockSize> %u </BlockSize>
+
+ <!-- Count of blocks in the image file -->
+ <BlocksCount> %u </BlocksCount>
+
+"""
+
+class Error(Exception):
+ """ A class for exceptions generated by this module. We currently support
+ only one type of exception, and we basically throw a human-readable problem
+ description in case of errors. """
+ pass
+
+class BmapCreate:
+ """ This class implements the bmap creation functionality. To generate a
+ bmap for an image (which is supposedly a sparse file), you should first
+ create an instance of 'BmapCreate' and provide:
+
+ * full path or a file-like object of the image to create bmap for
+ * full path or a file object to use for writing the results to
+
+ Then you should invoke the 'generate()' method of this class. It will use
+ the FIEMAP ioctl to generate the bmap. """
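
A minimal usage sketch of the call sequence described above (the file names
are hypothetical):

    from mic.utils.BmapCreate import BmapCreate

    # Write "image.raw.bmap" for the sparse image "image.raw",
    # including per-range SHA1 checksums (the default).
    creator = BmapCreate("image.raw", "image.raw.bmap")
    creator.generate()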
+
+ def _open_image_file(self):
+ """ Open the image file. """
+
+ try:
+ self._f_image = open(self._image_path, 'rb')
+ except IOError as err:
+ raise Error("cannot open image file '%s': %s" \
+ % (self._image_path, err))
+
+ self._f_image_needs_close = True
+
+ def _open_bmap_file(self):
+ """ Open the bmap file. """
+
+ try:
+ self._f_bmap = open(self._bmap_path, 'w+')
+ except IOError as err:
+ raise Error("cannot open bmap file '%s': %s" \
+ % (self._bmap_path, err))
+
+ self._f_bmap_needs_close = True
+
+ def __init__(self, image, bmap):
+ """ Initialize a class instance:
+ * image - full path or a file-like object of the image to create bmap
+ for
+ * bmap - full path or a file object to use for writing the resulting
+ bmap to """
+
+ self.image_size = None
+ self.image_size_human = None
+ self.block_size = None
+ self.blocks_cnt = None
+ self.mapped_cnt = None
+ self.mapped_size = None
+ self.mapped_size_human = None
+ self.mapped_percent = None
+
+ self._mapped_count_pos1 = None
+ self._mapped_count_pos2 = None
+ self._sha1_pos = None
+
+ self._f_image_needs_close = False
+ self._f_bmap_needs_close = False
+
+ if hasattr(image, "read"):
+ self._f_image = image
+ self._image_path = image.name
+ else:
+ self._image_path = image
+ self._open_image_file()
+
+ if hasattr(bmap, "read"):
+ self._f_bmap = bmap
+ self._bmap_path = bmap.name
+ else:
+ self._bmap_path = bmap
+ self._open_bmap_file()
+
+ self.fiemap = Fiemap.Fiemap(self._f_image)
+
+ self.image_size = self.fiemap.image_size
+ self.image_size_human = human_size(self.image_size)
+ if self.image_size == 0:
+ raise Error("cannot generate bmap for zero-sized image file '%s'" \
+ % self._image_path)
+
+ self.block_size = self.fiemap.block_size
+ self.blocks_cnt = self.fiemap.blocks_cnt
+
+ def _bmap_file_start(self):
+ """ A helper function which generates the starting contents of the
+ block map file: the header comment, image size, block size, etc. """
+
+ # We do not know the amount of mapped blocks at the moment, so just put
+ # whitespaces instead of real numbers. Assume the longest possible
+ # numbers.
+ mapped_count = ' ' * len(str(self.image_size))
+ mapped_size_human = ' ' * len(self.image_size_human)
+
+ xml = _BMAP_START_TEMPLATE \
+ % (SUPPORTED_BMAP_VERSION, self.image_size_human,
+ self.image_size, self.block_size, self.blocks_cnt)
+ xml += " <!-- Count of mapped blocks: "
+
+ self._f_bmap.write(xml)
+ self._mapped_count_pos1 = self._f_bmap.tell()
+
+ # Reserve room for the mapped blocks information: white-space for the
+ # size and the widest possible percentage value ("100.0%"), so that the
+ # in-place overwrite in '_bmap_file_end()' always fits
+ xml = "%s or %.1f%% -->\n" % (mapped_size_human, 100.0)
+ xml += " <MappedBlocksCount> "
+
+ self._f_bmap.write(xml)
+ self._mapped_count_pos2 = self._f_bmap.tell()
+
+ xml = "%s </MappedBlocksCount>\n\n" % mapped_count
+
+ # pylint: disable=C0301
+ xml += " <!-- The checksum of this bmap file. When it is calculated, the value of\n"
+ xml += " the SHA1 checksum has be zeoro (40 ASCII \"0\" symbols). -->\n"
+ xml += " <BmapFileSHA1> "
+
+ self._f_bmap.write(xml)
+ self._sha1_pos = self._f_bmap.tell()
+
+ xml = "0" * 40 + " </BmapFileSHA1>\n\n"
+ xml += " <!-- The block map which consists of elements which may either be a\n"
+ xml += " range of blocks or a single block. The 'sha1' attribute (if present)\n"
+ xml += " is the SHA1 checksum of this blocks range. -->\n"
+ xml += " <BlockMap>\n"
+ # pylint: enable=C0301
+
+ self._f_bmap.write(xml)
+
+ def _bmap_file_end(self):
+ """ A helper function which generates the final parts of the block map
+ file: the ending tags and the information about the amount of mapped
+ blocks. """
+
+ xml = " </BlockMap>\n"
+ xml += "</bmap>\n"
+
+ self._f_bmap.write(xml)
+
+ self._f_bmap.seek(self._mapped_count_pos1)
+ self._f_bmap.write("%s or %.1f%%" % \
+ (self.mapped_size_human, self.mapped_percent))
+
+ self._f_bmap.seek(self._mapped_count_pos2)
+ self._f_bmap.write("%u" % self.mapped_cnt)
+
+ self._f_bmap.seek(0)
+ sha1 = hashlib.sha1(self._f_bmap.read()).hexdigest()
+ self._f_bmap.seek(self._sha1_pos)
+ self._f_bmap.write("%s" % sha1)
+
+ def _calculate_sha1(self, first, last):
+ """ A helper function which calculates SHA1 checksum for the range of
+ blocks of the image file: from block 'first' to block 'last'. """
+
+ start = first * self.block_size
+ end = (last + 1) * self.block_size
+
+ self._f_image.seek(start)
+ hash_obj = hashlib.new("sha1")
+
+ chunk_size = 1024*1024
+ to_read = end - start
+ read = 0
+
+ while read < to_read:
+ if read + chunk_size > to_read:
+ chunk_size = to_read - read
+ chunk = self._f_image.read(chunk_size)
+ hash_obj.update(chunk)
+ read += chunk_size
+
+ return hash_obj.hexdigest()
+
+ def generate(self, include_checksums = True):
+ """ Generate bmap for the image file. If 'include_checksums' is 'True',
+ also generate SHA1 checksums for block ranges. """
+
+ # Save image file position in order to restore it at the end
+ image_pos = self._f_image.tell()
+
+ self._bmap_file_start()
+
+ # Generate the block map and write it to the XML block map
+ # file as we go.
+ self.mapped_cnt = 0
+ for first, last in self.fiemap.get_mapped_ranges(0, self.blocks_cnt):
+ self.mapped_cnt += last - first + 1
+ if include_checksums:
+ sha1 = self._calculate_sha1(first, last)
+ sha1 = " sha1=\"%s\"" % sha1
+ else:
+ sha1 = ""
+
+ if first != last:
+ self._f_bmap.write(" <Range%s> %s-%s </Range>\n" \
+ % (sha1, first, last))
+ else:
+ self._f_bmap.write(" <Range%s> %s </Range>\n" \
+ % (sha1, first))
+
+ self.mapped_size = self.mapped_cnt * self.block_size
+ self.mapped_size_human = human_size(self.mapped_size)
+ self.mapped_percent = (self.mapped_cnt * 100.0) / self.blocks_cnt
+
+ self._bmap_file_end()
+
+ try:
+ self._f_bmap.flush()
+ except IOError as err:
+ raise Error("cannot flush the bmap file '%s': %s" \
+ % (self._bmap_path, err))
+
+ self._f_image.seek(image_pos)
+
+ def __del__(self):
+ """ The class destructor which closes the opened files. """
+
+ if self._f_image_needs_close:
+ self._f_image.close()
+ if self._f_bmap_needs_close:
+ self._f_bmap.close()
diff --git a/scripts/lib/mic/utils/Fiemap.py b/scripts/lib/mic/utils/Fiemap.py
new file mode 100644
index 0000000000..f2db6ff0b8
--- /dev/null
+++ b/scripts/lib/mic/utils/Fiemap.py
@@ -0,0 +1,252 @@
+""" This module implements python API for the FIEMAP ioctl. The FIEMAP ioctl
+allows finding holes and mapped areas in a file. """
+
+# Note, a lot of code in this module is not very readable, because it deals
+# with the rather complex FIEMAP ioctl. To understand the code, you need to
+# know the FIEMAP interface, which is documented in the
+# Documentation/filesystems/fiemap.txt file in the Linux kernel sources.
+
+# Disable the following pylint recommendations:
+# * Too many instance attributes (R0902)
+# pylint: disable=R0902
+
+import os
+import struct
+import array
+import fcntl
+from mic.utils.misc import get_block_size
+
+# Format string for 'struct fiemap'
+_FIEMAP_FORMAT = "=QQLLLL"
+# sizeof(struct fiemap)
+_FIEMAP_SIZE = struct.calcsize(_FIEMAP_FORMAT)
+# Format string for 'struct fiemap_extent'
+_FIEMAP_EXTENT_FORMAT = "=QQQQQLLLL"
+# sizeof(struct fiemap_extent)
+_FIEMAP_EXTENT_SIZE = struct.calcsize(_FIEMAP_EXTENT_FORMAT)
+# The FIEMAP ioctl number
+_FIEMAP_IOCTL = 0xC020660B
+
+# Minimum buffer which is required for 'class Fiemap' to operate
+MIN_BUFFER_SIZE = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE
+# The default buffer size for 'class Fiemap'
+DEFAULT_BUFFER_SIZE = 256 * 1024
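
For reference, the '=' prefix in these format strings selects native byte
order with standard type sizes, so 'Q' is 8 bytes and 'L' is 4 bytes. A small
sketch of the resulting arithmetic:

    import struct

    # sizeof(struct fiemap): 2 x 8-byte + 4 x 4-byte fields = 32 bytes
    assert struct.calcsize("=QQLLLL") == 32
    # sizeof(struct fiemap_extent): 5 x 8-byte + 4 x 4-byte fields = 56 bytes
    assert struct.calcsize("=QQQQQLLLL") == 56
    # The default 256KiB buffer thus holds (262144 - 32) / 56 = 4680
    # 'struct fiemap_extent' elements per FIEMAP ioctl invocation.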
+
+class Error(Exception):
+ """ A class for exceptions generated by this module. We currently support
+ only one type of exceptions, and we basically throw human-readable problem
+ description in case of errors. """
+ pass
+
+class Fiemap:
+ """ This class provides API to the FIEMAP ioctl. Namely, it allows to
+ iterate over all mapped blocks and over all holes. """
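
A minimal usage sketch (the image path is hypothetical), walking both the
mapped ranges and the holes of a file:

    from mic.utils.Fiemap import Fiemap

    fiemap = Fiemap("image.raw")
    print "%d blocks of %d bytes" % (fiemap.blocks_cnt, fiemap.block_size)
    for first, last in fiemap.get_mapped_ranges(0, fiemap.blocks_cnt):
        print "mapped: blocks %d-%d" % (first, last)
    for first, last in fiemap.get_unmapped_ranges(0, fiemap.blocks_cnt):
        print "hole:   blocks %d-%d" % (first, last)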
+
+ def _open_image_file(self):
+ """ Open the image file. """
+
+ try:
+ self._f_image = open(self._image_path, 'rb')
+ except IOError as err:
+ raise Error("cannot open image file '%s': %s" \
+ % (self._image_path, err))
+
+ self._f_image_needs_close = True
+
+ def __init__(self, image, buf_size = DEFAULT_BUFFER_SIZE):
+ """ Initialize a class instance. The 'image' argument is full path to
+ the file to operate on, or a file object to operate on.
+
+ The 'buf_size' argument is the size of the buffer for 'struct
+ fiemap_extent' elements which will be used when invoking the FIEMAP
+ ioctl. The larger the buffer, the fewer times the FIEMAP ioctl will
+ be invoked. """
+
+ self._f_image_needs_close = False
+
+ if hasattr(image, "fileno"):
+ self._f_image = image
+ self._image_path = image.name
+ else:
+ self._image_path = image
+ self._open_image_file()
+
+ # Validate 'buf_size'
+ if buf_size < MIN_BUFFER_SIZE:
+ raise Error("too small buffer (%d bytes), minimum is %d bytes" \
+ % (buf_size, MIN_BUFFER_SIZE))
+
+ # How many 'struct fiemap_extent' elements fit the buffer
+ buf_size -= _FIEMAP_SIZE
+ self._fiemap_extent_cnt = buf_size / _FIEMAP_EXTENT_SIZE
+ self._buf_size = self._fiemap_extent_cnt * _FIEMAP_EXTENT_SIZE
+ self._buf_size += _FIEMAP_SIZE
+
+ # Allocate a mutable buffer for the FIEMAP ioctl
+ self._buf = array.array('B', [0] * self._buf_size)
+
+ self.image_size = os.fstat(self._f_image.fileno()).st_size
+
+ try:
+ self.block_size = get_block_size(self._f_image)
+ except IOError as err:
+ raise Error("cannot get block size for '%s': %s" \
+ % (self._image_path, err))
+
+ self.blocks_cnt = self.image_size + self.block_size - 1
+ self.blocks_cnt /= self.block_size
+
+ # Synchronize the image file to make sure FIEMAP returns correct values
+ try:
+ self._f_image.flush()
+ except IOError as err:
+ raise Error("cannot flush image file '%s': %s" \
+ % (self._image_path, err))
+ try:
+ os.fsync(self._f_image.fileno())
+ except OSError as err:
+ raise Error("cannot synchronize image file '%s': %s " \
+ % (self._image_path, err.strerror))
+
+ # Check if the FIEMAP ioctl is supported
+ self.block_is_mapped(0)
+
+ def __del__(self):
+ """ The class destructor which closes the opened files. """
+
+ if self._f_image_needs_close:
+ self._f_image.close()
+
+ def _invoke_fiemap(self, block, count):
+ """ Invoke the FIEMAP ioctl for 'count' blocks of the file starting from
+ block number 'block'.
+
+ The full result of the operation is stored in 'self._buf' on exit.
+ Returns the unpacked 'struct fiemap' data structure in form of a python
+ list (just like 'struct.unpack()'). """
+
+ if block < 0 or block >= self.blocks_cnt:
+ raise Error("bad block number %d, should be within [0, %d]" \
+ % (block, self.blocks_cnt))
+
+ # Initialize the 'struct fiemap' part of the buffer
+ struct.pack_into(_FIEMAP_FORMAT, self._buf, 0, block * self.block_size,
+ count * self.block_size, 0, 0,
+ self._fiemap_extent_cnt, 0)
+
+ try:
+ fcntl.ioctl(self._f_image, _FIEMAP_IOCTL, self._buf, 1)
+ except IOError as err:
+ error_msg = "the FIEMAP ioctl failed for '%s': %s" \
+ % (self._image_path, err)
+ if err.errno == os.errno.EPERM or err.errno == os.errno.EACCES:
+ # The FIEMAP ioctl was added in kernel version 2.6.28 in 2008
+ error_msg += " (looks like your kernel does not support FIEMAP)"
+
+ raise Error(error_msg)
+
+ return struct.unpack(_FIEMAP_FORMAT, self._buf[:_FIEMAP_SIZE])
+
+ def block_is_mapped(self, block):
+ """ This function returns 'True' if block number 'block' of the image
+ file is mapped and 'False' otherwise. """
+
+ struct_fiemap = self._invoke_fiemap(block, 1)
+
+ # Element 3 of 'struct_fiemap' is the 'fm_mapped_extents' field.
+ # If it contains zero, the block is not mapped, otherwise it is
+ # mapped.
+ return bool(struct_fiemap[3])
+
+ def block_is_unmapped(self, block):
+ """ This function returns 'True' if block number 'block' of the image
+ file is not mapped (hole) and 'False' otherwise. """
+
+ return not self.block_is_mapped(block)
+
+ def _unpack_fiemap_extent(self, index):
+ """ Unpack a 'struct fiemap_extent' structure object number 'index'
+ from the internal 'self._buf' buffer. """
+
+ offset = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE * index
+ return struct.unpack(_FIEMAP_EXTENT_FORMAT,
+ self._buf[offset : offset + _FIEMAP_EXTENT_SIZE])
+
+ def _do_get_mapped_ranges(self, start, count):
+ """ Implements most the functionality for the 'get_mapped_ranges()'
+ generator: invokes the FIEMAP ioctl, walks through the mapped
+ extents and yields mapped block ranges. However, the ranges may be
+ consecutive (e.g., (1, 100), (100, 200)) and 'get_mapped_ranges()'
+ simply merges them. """
+
+ block = start
+ while block < start + count:
+ struct_fiemap = self._invoke_fiemap(block, count)
+
+ mapped_extents = struct_fiemap[3]
+ if mapped_extents == 0:
+ # No more mapped blocks
+ return
+
+ extent = 0
+ while extent < mapped_extents:
+ fiemap_extent = self._unpack_fiemap_extent(extent)
+
+ # Start of the extent
+ extent_start = fiemap_extent[0]
+ # Starting block number of the extent
+ extent_block = extent_start / self.block_size
+ # Length of the extent
+ extent_len = fiemap_extent[2]
+ # Count of blocks in the extent
+ extent_count = extent_len / self.block_size
+
+ # Extent length and offset have to be block-aligned
+ assert extent_start % self.block_size == 0
+ assert extent_len % self.block_size == 0
+
+ if extent_block > start + count - 1:
+ return
+
+ first = max(extent_block, block)
+ last = min(extent_block + extent_count, start + count) - 1
+ yield (first, last)
+
+ extent += 1
+
+ block = extent_block + extent_count
+
+ def get_mapped_ranges(self, start, count):
+ """ A generator which yields ranges of mapped blocks in the file. The
+ ranges are tuples of 2 elements: [first, last], where 'first' is the
+ first mapped block and 'last' is the last mapped block.
+
+ The ranges are yielded for the area of the file of size 'count' blocks,
+ starting from block 'start'. """
+
+ iterator = self._do_get_mapped_ranges(start, count)
+
+ first_prev, last_prev = iterator.next()
+
+ for first, last in iterator:
+ if last_prev == first - 1:
+ last_prev = last
+ else:
+ yield (first_prev, last_prev)
+ first_prev, last_prev = first, last
+
+ yield (first_prev, last_prev)
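
To make the merge rule concrete, here is a small standalone sketch of the
same coalescing logic applied to a plain list of ranges (illustrative only,
not part of the module):

    def merge_ranges(ranges):
        """ Coalesce consecutive (first, last) ranges, mirroring the
        merging done by 'get_mapped_ranges()'. """
        iterator = iter(ranges)
        first_prev, last_prev = iterator.next()
        for first, last in iterator:
            if last_prev == first - 1:
                last_prev = last     # consecutive: extend previous range
            else:
                yield (first_prev, last_prev)
                first_prev, last_prev = first, last
        yield (first_prev, last_prev)

    print list(merge_ranges([(0, 9), (10, 19), (30, 39)]))
    # prints [(0, 19), (30, 39)]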
+
+ def get_unmapped_ranges(self, start, count):
+ """ Just like 'get_mapped_ranges()', but yields unmapped block ranges
+ instead (holes). """
+
+ hole_first = start
+ for first, last in self._do_get_mapped_ranges(start, count):
+ if first > hole_first:
+ yield (hole_first, first - 1)
+
+ hole_first = last + 1
+
+ if hole_first < start + count:
+ yield (hole_first, start + count - 1)
diff --git a/scripts/lib/mic/utils/__init__.py b/scripts/lib/mic/utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/scripts/lib/mic/utils/__init__.py
diff --git a/scripts/lib/mic/utils/cmdln.py b/scripts/lib/mic/utils/cmdln.py
new file mode 100644
index 0000000000..b099473ee4
--- /dev/null
+++ b/scripts/lib/mic/utils/cmdln.py
@@ -0,0 +1,1586 @@
+#!/usr/bin/env python
+# Copyright (c) 2002-2007 ActiveState Software Inc.
+# License: MIT (see LICENSE.txt for license details)
+# Author: Trent Mick
+# Home: http://trentm.com/projects/cmdln/
+
+"""An improvement on Python's standard cmd.py module.
+
+As with cmd.py, this module provides "a simple framework for writing
+line-oriented command interpreters." This module provides a 'RawCmdln'
+class that fixes some design flaws in cmd.Cmd, making it more scalable
+and nicer to use for good 'cvs'- or 'svn'-style command line interfaces
+or simple shells. And it provides a 'Cmdln' class that adds
+optparse-based option processing. Basically you use it like this:
+
+ import cmdln
+
+ class MySVN(cmdln.Cmdln):
+ name = "svn"
+
+ @cmdln.alias('stat', 'st')
+ @cmdln.option('-v', '--verbose', action='store_true',
+ help='print verbose information')
+ def do_status(self, subcmd, opts, *paths):
+ print "handle 'svn status' command"
+
+ #...
+
+ if __name__ == "__main__":
+ shell = MySVN()
+ retval = shell.main()
+ sys.exit(retval)
+
+See the README.txt or <http://trentm.com/projects/cmdln/> for more
+details.
+"""
+
+__version_info__ = (1, 1, 2)
+__version__ = '.'.join(map(str, __version_info__))
+
+import os
+import sys
+import re
+import cmd
+import optparse
+from pprint import pprint
+
+
+
+
+#---- globals
+
+LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)
+
+# An unspecified optional argument when None is a meaningful value.
+_NOT_SPECIFIED = ("Not", "Specified")
+
+# Pattern to match a TypeError message from a call that
+# failed because of incorrect number of arguments (see
+# Python/getargs.c).
+_INCORRECT_NUM_ARGS_RE = re.compile(
+ r"(takes [\w ]+ )(\d+)( arguments? \()(\d+)( given\))")
+
+
+
+#---- exceptions
+
+class CmdlnError(Exception):
+ """A cmdln.py usage error."""
+ def __init__(self, msg):
+ self.msg = msg
+ def __str__(self):
+ return self.msg
+
+class CmdlnUserError(Exception):
+ """An error by a user of a cmdln-based tool/shell."""
+ pass
+
+
+
+#---- public methods and classes
+
+def alias(*aliases):
+ """Decorator to add aliases for Cmdln.do_* command handlers.
+
+ Example:
+ class MyShell(cmdln.Cmdln):
+ @cmdln.alias("!", "sh")
+ def do_shell(self, argv):
+ #...implement 'shell' command
+ """
+ def decorate(f):
+ if not hasattr(f, "aliases"):
+ f.aliases = []
+ f.aliases += aliases
+ return f
+ return decorate
+
+
+class RawCmdln(cmd.Cmd):
+ """An improved (on cmd.Cmd) framework for building multi-subcommand
+ scripts (think "svn" & "cvs") and simple shells (think "pdb" and
+ "gdb").
+
+ A simple example:
+
+ import cmdln
+
+ class MySVN(cmdln.RawCmdln):
+ name = "svn"
+
+ @cmdln.alias('stat', 'st')
+ def do_status(self, argv):
+ print "handle 'svn status' command"
+
+ if __name__ == "__main__":
+ shell = MySVN()
+ retval = shell.main()
+ sys.exit(retval)
+
+ See <http://trentm.com/projects/cmdln> for more information.
+ """
+ name = None # if unset, defaults to basename(sys.argv[0])
+ prompt = None # if unset, defaults to self.name+"> "
+ version = None # if set, default top-level options include --version
+
+ # Default messages for some 'help' command error cases.
+ # They are interpolated with one arg: the command.
+ nohelp = "no help on '%s'"
+ unknowncmd = "unknown command: '%s'"
+
+ helpindent = '' # string with which to indent help output
+
+ def __init__(self, completekey='tab',
+ stdin=None, stdout=None, stderr=None):
+ """Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None)
+
+ The optional argument 'completekey' is the readline name of a
+ completion key; it defaults to the Tab key. If completekey is
+ not None and the readline module is available, command completion
+ is done automatically.
+
+ The optional arguments 'stdin', 'stdout' and 'stderr' specify
+ alternate input, output and error output file objects; if not
+ specified, sys.* are used.
+
+ If 'stdout' but not 'stderr' is specified, stdout is used for
+ error output. This is to provide least surprise for users used
+ to only the 'stdin' and 'stdout' options with cmd.Cmd.
+ """
+ import sys
+ if self.name is None:
+ self.name = os.path.basename(sys.argv[0])
+ if self.prompt is None:
+ self.prompt = self.name+"> "
+ self._name_str = self._str(self.name)
+ self._prompt_str = self._str(self.prompt)
+ if stdin is not None:
+ self.stdin = stdin
+ else:
+ self.stdin = sys.stdin
+ if stdout is not None:
+ self.stdout = stdout
+ else:
+ self.stdout = sys.stdout
+ if stderr is not None:
+ self.stderr = stderr
+ elif stdout is not None:
+ self.stderr = stdout
+ else:
+ self.stderr = sys.stderr
+ self.cmdqueue = []
+ self.completekey = completekey
+ self.cmdlooping = False
+
+ def get_optparser(self):
+ """Hook for subclasses to set the option parser for the
+ top-level command/shell.
+
+ This option parser is retrieved and used by `.main()' to
+ handle top-level options.
+
+ The default implements a single '-h|--help' option. Sub-classes
+ can return None to have no options at the top-level. Typically
+ an instance of CmdlnOptionParser should be returned.
+ """
+ version = (self.version is not None
+ and "%s %s" % (self._name_str, self.version)
+ or None)
+ return CmdlnOptionParser(self, version=version)
+
+ def postoptparse(self):
+ """Hook method executed just after `.main()' parses top-level
+ options.
+
+ When called `self.options' holds the results of the option parse.
+ """
+ pass
+
+ def main(self, argv=None, loop=LOOP_NEVER):
+ """A possible mainline handler for a script, like so:
+
+ import cmdln
+ class MyCmd(cmdln.Cmdln):
+ name = "mycmd"
+ ...
+
+ if __name__ == "__main__":
+ MyCmd().main()
+
+ By default this will use sys.argv to issue a single command to
+ 'MyCmd', then exit. The 'loop' argument can be used to control
+ interactive shell behaviour.
+
+ Arguments:
+ "argv" (optional, default sys.argv) is the command to run.
+ It must be a sequence, where the first element is the
+ command name and subsequent elements the args for that
+ command.
+ "loop" (optional, default LOOP_NEVER) is a constant
+ indicating if a command loop should be started (i.e. an
+ interactive shell). Valid values (constants on this module):
+ LOOP_ALWAYS start loop and run "argv", if any
+ LOOP_NEVER run "argv" (or .emptyline()) and exit
+ LOOP_IF_EMPTY run "argv", if given, and exit;
+ otherwise, start loop
+ """
+ if argv is None:
+ import sys
+ argv = sys.argv
+ else:
+ argv = argv[:] # don't modify caller's list
+
+ self.optparser = self.get_optparser()
+ if self.optparser: # i.e. optparser=None means don't process for opts
+ try:
+ self.options, args = self.optparser.parse_args(argv[1:])
+ except CmdlnUserError, ex:
+ msg = "%s: %s\nTry '%s help' for info.\n"\
+ % (self.name, ex, self.name)
+ self.stderr.write(self._str(msg))
+ self.stderr.flush()
+ return 1
+ except StopOptionProcessing, ex:
+ return 0
+ else:
+ self.options, args = None, argv[1:]
+ self.postoptparse()
+
+ if loop == LOOP_ALWAYS:
+ if args:
+ self.cmdqueue.append(args)
+ return self.cmdloop()
+ elif loop == LOOP_NEVER:
+ if args:
+ return self.cmd(args)
+ else:
+ return self.emptyline()
+ elif loop == LOOP_IF_EMPTY:
+ if args:
+ return self.cmd(args)
+ else:
+ return self.cmdloop()
+
+ def cmd(self, argv):
+ """Run one command and exit.
+
+ "argv" is the arglist for the command to run. argv[0] is the
+ command to run. If argv is an empty list then the
+ 'emptyline' handler is run.
+
+ Returns the return value from the command handler.
+ """
+ assert isinstance(argv, (list, tuple)), \
+ "'argv' is not a sequence: %r" % argv
+ retval = None
+ try:
+ argv = self.precmd(argv)
+ retval = self.onecmd(argv)
+ self.postcmd(argv)
+ except:
+ if not self.cmdexc(argv):
+ raise
+ retval = 1
+ return retval
+
+ def _str(self, s):
+ """Safely convert the given str/unicode to a string for printing."""
+ try:
+ return str(s)
+ except UnicodeError:
+ #XXX What is the proper encoding to use here? 'utf-8' seems
+ # to work better than "getdefaultencoding" (usually
+ # 'ascii'), on OS X at least.
+ #import sys
+ #return s.encode(sys.getdefaultencoding(), "replace")
+ return s.encode("utf-8", "replace")
+
+ def cmdloop(self, intro=None):
+ """Repeatedly issue a prompt, accept input, parse into an argv, and
+ dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
+ the argv. In other words, start a shell.
+
+ "intro" (optional) is a introductory message to print when
+ starting the command loop. This overrides the class
+ "intro" attribute, if any.
+ """
+ self.cmdlooping = True
+ self.preloop()
+ if self.use_rawinput and self.completekey:
+ try:
+ import readline
+ self.old_completer = readline.get_completer()
+ readline.set_completer(self.complete)
+ readline.parse_and_bind(self.completekey+": complete")
+ except ImportError:
+ pass
+ try:
+ if intro is None:
+ intro = self.intro
+ if intro:
+ intro_str = self._str(intro)
+ self.stdout.write(intro_str+'\n')
+ self.stop = False
+ retval = None
+ while not self.stop:
+ if self.cmdqueue:
+ argv = self.cmdqueue.pop(0)
+ assert isinstance(argv, (list, tuple)), \
+ "item on 'cmdqueue' is not a sequence: %r" % argv
+ else:
+ if self.use_rawinput:
+ try:
+ line = raw_input(self._prompt_str)
+ except EOFError:
+ line = 'EOF'
+ else:
+ self.stdout.write(self._prompt_str)
+ self.stdout.flush()
+ line = self.stdin.readline()
+ if not len(line):
+ line = 'EOF'
+ else:
+ line = line[:-1] # chop '\n'
+ argv = line2argv(line)
+ try:
+ argv = self.precmd(argv)
+ retval = self.onecmd(argv)
+ self.postcmd(argv)
+ except:
+ if not self.cmdexc(argv):
+ raise
+ retval = 1
+ self.lastretval = retval
+ self.postloop()
+ finally:
+ if self.use_rawinput and self.completekey:
+ try:
+ import readline
+ readline.set_completer(self.old_completer)
+ except ImportError:
+ pass
+ self.cmdlooping = False
+ return retval
+
+ def precmd(self, argv):
+ """Hook method executed just before the command argv is
+ interpreted, but after the input prompt is generated and issued.
+
+ "argv" is the cmd to run.
+
+ Returns an argv to run (i.e. this method can modify the command
+ to run).
+ """
+ return argv
+
+ def postcmd(self, argv):
+ """Hook method executed just after a command dispatch is finished.
+
+ "argv" is the command that was run.
+ """
+ pass
+
+ def cmdexc(self, argv):
+ """Called if an exception is raised in any of precmd(), onecmd(),
+ or postcmd(). If True is returned, the exception is deemed to have
+ been dealt with. Otherwise, the exception is re-raised.
+
+ The default implementation handles CmdlnUserError's, which
+ typically correspond to user error in calling commands (as
+ opposed to programmer error in the design of the script using
+ cmdln.py).
+ """
+ import sys
+ type, exc, traceback = sys.exc_info()
+ if isinstance(exc, CmdlnUserError):
+ msg = "%s %s: %s\nTry '%s help %s' for info.\n"\
+ % (self.name, argv[0], exc, self.name, argv[0])
+ self.stderr.write(self._str(msg))
+ self.stderr.flush()
+ return True
+
+ def onecmd(self, argv):
+ if not argv:
+ return self.emptyline()
+ self.lastcmd = argv
+ cmdname = self._get_canonical_cmd_name(argv[0])
+ if cmdname:
+ handler = self._get_cmd_handler(cmdname)
+ if handler:
+ return self._dispatch_cmd(handler, argv)
+ return self.default(argv)
+
+ def _dispatch_cmd(self, handler, argv):
+ return handler(argv)
+
+ def default(self, argv):
+ """Hook called to handle a command for which there is no handler.
+
+ "argv" is the command and arguments to run.
+
+ The default implementation writes an error message to stderr
+ and returns an error exit status.
+
+ Returns a numeric command exit status.
+ """
+ errmsg = self._str(self.unknowncmd % (argv[0],))
+ if self.cmdlooping:
+ self.stderr.write(errmsg+"\n")
+ else:
+ self.stderr.write("%s: %s\nTry '%s help' for info.\n"
+ % (self._name_str, errmsg, self._name_str))
+ self.stderr.flush()
+ return 1
+
+ def parseline(self, line):
+ # This is used by Cmd.complete (readline completer function) to
+ # massage the current line buffer before completion processing.
+ # We override to drop special '!' handling.
+ line = line.strip()
+ if not line:
+ return None, None, line
+ elif line[0] == '?':
+ line = 'help ' + line[1:]
+ i, n = 0, len(line)
+ while i < n and line[i] in self.identchars: i = i+1
+ cmd, arg = line[:i], line[i:].strip()
+ return cmd, arg, line
+
+ def helpdefault(self, cmd, known):
+ """Hook called to handle help on a command for which there is no
+ help handler.
+
+ "cmd" is the command name on which help was requested.
+ "known" is a boolean indicating if this command is known
+ (i.e. if there is a handler for it).
+
+ Returns a return code.
+ """
+ if known:
+ msg = self._str(self.nohelp % (cmd,))
+ if self.cmdlooping:
+ self.stderr.write(msg + '\n')
+ else:
+ self.stderr.write("%s: %s\n" % (self.name, msg))
+ else:
+ msg = self.unknowncmd % (cmd,)
+ if self.cmdlooping:
+ self.stderr.write(msg + '\n')
+ else:
+ self.stderr.write("%s: %s\n"
+ "Try '%s help' for info.\n"
+ % (self.name, msg, self.name))
+ self.stderr.flush()
+ return 1
+
+ def do_help(self, argv):
+ """${cmd_name}: give detailed help on a specific sub-command
+
+ Usage:
+ ${name} help [COMMAND]
+ """
+ if len(argv) > 1: # asking for help on a particular command
+ doc = None
+ cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
+ if not cmdname:
+ return self.helpdefault(argv[1], False)
+ else:
+ helpfunc = getattr(self, "help_"+cmdname, None)
+ if helpfunc:
+ doc = helpfunc()
+ else:
+ handler = self._get_cmd_handler(cmdname)
+ if handler:
+ doc = handler.__doc__
+ if doc is None:
+ return self.helpdefault(argv[1], handler is not None)
+ else: # bare "help" command
+ doc = self.__class__.__doc__ # try class docstring
+ if doc is None:
+ # Try to provide some reasonable useful default help.
+ if self.cmdlooping: prefix = ""
+ else: prefix = self.name+' '
+ doc = """Usage:
+ %sCOMMAND [ARGS...]
+ %shelp [COMMAND]
+
+ ${option_list}
+ ${command_list}
+ ${help_list}
+ """ % (prefix, prefix)
+ cmdname = None
+
+ if doc: # *do* have help content, massage and print that
+ doc = self._help_reindent(doc)
+ doc = self._help_preprocess(doc, cmdname)
+ doc = doc.rstrip() + '\n' # trim down trailing space
+ self.stdout.write(self._str(doc))
+ self.stdout.flush()
+ do_help.aliases = ["?"]
+
+ def _help_reindent(self, help, indent=None):
+ """Hook to re-indent help strings before writing to stdout.
+
+ "help" is the help content to re-indent
+ "indent" is a string with which to indent each line of the
+ help content after normalizing. If unspecified or None
+ then the default is used: the 'self.helpindent' class
+ attribute. By default this is the empty string, i.e.
+ no indentation.
+
+ By default, all common leading whitespace is removed and then
+ the lot is indented by 'self.helpindent'. When calculating the
+ common leading whitespace the first line is ignored -- hence
+ help content for Conan can be written as follows and have the
+ expected indentation:
+
+ def do_crush(self, ...):
+ '''${cmd_name}: crush your enemies, see them driven before you...
+
+ c.f. Conan the Barbarian'''
+ """
+ if indent is None:
+ indent = self.helpindent
+ lines = help.splitlines(0)
+ _dedentlines(lines, skip_first_line=True)
+ lines = [(indent+line).rstrip() for line in lines]
+ return '\n'.join(lines)
+
+ def _help_preprocess(self, help, cmdname):
+ """Hook to preprocess a help string before writing to stdout.
+
+ "help" is the help string to process.
+ "cmdname" is the canonical sub-command name for which help
+ is being given, or None if the help is not specific to a
+ command.
+
+ By default the following template variables are interpolated in
+ help content. (Note: these are similar to Python 2.4's
+ string.Template interpolation but not quite.)
+
+ ${name}
+ The tool's/shell's name, i.e. 'self.name'.
+ ${option_list}
+ A formatted table of options for this shell/tool.
+ ${command_list}
+ A formatted table of available sub-commands.
+ ${help_list}
+ A formatted table of additional help topics (i.e. 'help_*'
+ methods with no matching 'do_*' method).
+ ${cmd_name}
+ The name (and aliases) for this sub-command formatted as:
+ "NAME (ALIAS1, ALIAS2, ...)".
+ ${cmd_usage}
+ A formatted usage block inferred from the command function
+ signature.
+ ${cmd_option_list}
+ A formatted table of options for this sub-command. (This is
+ only available for commands using the optparse integration,
+ i.e. using @cmdln.option decorators or manually setting the
+ 'optparser' attribute on the 'do_*' method.)
+
+ Returns the processed help.
+ """
+ preprocessors = {
+ "${name}": self._help_preprocess_name,
+ "${option_list}": self._help_preprocess_option_list,
+ "${command_list}": self._help_preprocess_command_list,
+ "${help_list}": self._help_preprocess_help_list,
+ "${cmd_name}": self._help_preprocess_cmd_name,
+ "${cmd_usage}": self._help_preprocess_cmd_usage,
+ "${cmd_option_list}": self._help_preprocess_cmd_option_list,
+ }
+
+ for marker, preprocessor in preprocessors.items():
+ if marker in help:
+ help = preprocessor(help, cmdname)
+ return help
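
For example, a sub-command handler can lean on these variables in its
docstring; a hedged sketch (the class and command names are illustrative):

    import cmdln

    class MyTool(cmdln.Cmdln):
        name = "mytool"

        @cmdln.option("-n", "--dry-run", action="store_true",
                      help="only print what would be done")
        def do_sync(self, subcmd, opts, *paths):
            """${cmd_name}: synchronize the given paths

            ${cmd_usage}
            ${cmd_option_list}
            """
            print "sync %r (dry-run: %r)" % (paths, opts.dry_run)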
+
+ def _help_preprocess_name(self, help, cmdname=None):
+ return help.replace("${name}", self.name)
+
+ def _help_preprocess_option_list(self, help, cmdname=None):
+ marker = "${option_list}"
+ indent, indent_width = _get_indent(marker, help)
+ suffix = _get_trailing_whitespace(marker, help)
+
+ if self.optparser:
+ # Setup formatting options and format.
+ # - Indentation of 4 is better than optparse default of 2.
+ # C.f. Damian Conway's discussion of this in Perl Best
+ # Practices.
+ self.optparser.formatter.indent_increment = 4
+ self.optparser.formatter.current_indent = indent_width
+ block = self.optparser.format_option_help() + '\n'
+ else:
+ block = ""
+
+ help = help.replace(indent+marker+suffix, block, 1)
+ return help
+
+
+ def _help_preprocess_command_list(self, help, cmdname=None):
+ marker = "${command_list}"
+ indent, indent_width = _get_indent(marker, help)
+ suffix = _get_trailing_whitespace(marker, help)
+
+ # Find any aliases for commands.
+ token2canonical = self._get_canonical_map()
+ aliases = {}
+ for token, cmdname in token2canonical.items():
+ if token == cmdname: continue
+ aliases.setdefault(cmdname, []).append(token)
+
+ # Get the list of (non-hidden) commands and their
+ # documentation, if any.
+ cmdnames = {} # use a dict to strip duplicates
+ for attr in self.get_names():
+ if attr.startswith("do_"):
+ cmdnames[attr[3:]] = True
+ cmdnames = cmdnames.keys()
+ cmdnames.sort()
+ linedata = []
+ for cmdname in cmdnames:
+ if aliases.get(cmdname):
+ a = aliases[cmdname]
+ a.sort()
+ cmdstr = "%s (%s)" % (cmdname, ", ".join(a))
+ else:
+ cmdstr = cmdname
+ doc = None
+ try:
+ helpfunc = getattr(self, 'help_'+cmdname)
+ except AttributeError:
+ handler = self._get_cmd_handler(cmdname)
+ if handler:
+ doc = handler.__doc__
+ else:
+ doc = helpfunc()
+
+ # Strip "${cmd_name}: " from the start of a command's doc. Best
+ # practice dictates that command help strings begin with this, but
+ # it isn't at all wanted for the command list.
+ to_strip = "${cmd_name}:"
+ if doc and doc.startswith(to_strip):
+ #log.debug("stripping %r from start of %s's help string",
+ # to_strip, cmdname)
+ doc = doc[len(to_strip):].lstrip()
+ linedata.append( (cmdstr, doc) )
+
+ if linedata:
+ subindent = indent + ' '*4
+ lines = _format_linedata(linedata, subindent, indent_width+4)
+ block = indent + "Commands:\n" \
+ + '\n'.join(lines) + "\n\n"
+ help = help.replace(indent+marker+suffix, block, 1)
+ return help
+
+ def _gen_names_and_attrs(self):
+ # Inheritance says we have to look in class and
+ # base classes; order is not important.
+ names = []
+ classes = [self.__class__]
+ while classes:
+ aclass = classes.pop(0)
+ if aclass.__bases__:
+ classes = classes + list(aclass.__bases__)
+ for name in dir(aclass):
+ yield (name, getattr(aclass, name))
+
+ def _help_preprocess_help_list(self, help, cmdname=None):
+ marker = "${help_list}"
+ indent, indent_width = _get_indent(marker, help)
+ suffix = _get_trailing_whitespace(marker, help)
+
+ # Determine the additional help topics, if any.
+ helpnames = {}
+ token2cmdname = self._get_canonical_map()
+ for attrname, attr in self._gen_names_and_attrs():
+ if not attrname.startswith("help_"): continue
+ helpname = attrname[5:]
+ if helpname not in token2cmdname:
+ helpnames[helpname] = attr
+
+ if helpnames:
+ linedata = [(n, a.__doc__ or "") for n, a in helpnames.items()]
+ linedata.sort()
+
+ subindent = indent + ' '*4
+ lines = _format_linedata(linedata, subindent, indent_width+4)
+ block = (indent
+ + "Additional help topics (run `%s help TOPIC'):\n" % self.name
+ + '\n'.join(lines)
+ + "\n\n")
+ else:
+ block = ''
+ help = help.replace(indent+marker+suffix, block, 1)
+ return help
+
+ def _help_preprocess_cmd_name(self, help, cmdname=None):
+ marker = "${cmd_name}"
+ handler = self._get_cmd_handler(cmdname)
+ if not handler:
+ raise CmdlnError("cannot preprocess '%s' into help string: "
+ "could not find command handler for %r"
+ % (marker, cmdname))
+ s = cmdname
+ if hasattr(handler, "aliases"):
+ s += " (%s)" % (", ".join(handler.aliases))
+ help = help.replace(marker, s)
+ return help
+
+ #TODO: this only makes sense as part of the Cmdln class.
+ # Add hooks to add help preprocessing template vars and put
+ # this one on that class.
+ def _help_preprocess_cmd_usage(self, help, cmdname=None):
+ marker = "${cmd_usage}"
+ handler = self._get_cmd_handler(cmdname)
+ if not handler:
+ raise CmdlnError("cannot preprocess '%s' into help string: "
+ "could not find command handler for %r"
+ % (marker, cmdname))
+ indent, indent_width = _get_indent(marker, help)
+ suffix = _get_trailing_whitespace(marker, help)
+
+ # Extract the introspection bits we need.
+ func = handler.im_func
+ if func.func_defaults:
+ func_defaults = list(func.func_defaults)
+ else:
+ func_defaults = []
+ co_argcount = func.func_code.co_argcount
+ co_varnames = func.func_code.co_varnames
+ co_flags = func.func_code.co_flags
+ CO_FLAGS_ARGS = 4
+ CO_FLAGS_KWARGS = 8
+
+ # Adjust argcount for possible *args and **kwargs arguments.
+ argcount = co_argcount
+ if co_flags & CO_FLAGS_ARGS: argcount += 1
+ if co_flags & CO_FLAGS_KWARGS: argcount += 1
+
+ # Determine the usage string.
+ usage = "%s %s" % (self.name, cmdname)
+ if argcount <= 2: # handler ::= do_FOO(self, argv)
+ usage += " [ARGS...]"
+ elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
+ argnames = list(co_varnames[3:argcount])
+ tail = ""
+ if co_flags & CO_FLAGS_KWARGS:
+ name = argnames.pop(-1)
+ import warnings
+ # There is no generally accepted mechanism for passing
+ # keyword arguments from the command line. Could
+ # *perhaps* consider: arg=value arg2=value2 ...
+ warnings.warn("argument '**%s' on '%s.%s' command "
+ "handler will never get values"
+ % (name, self.__class__.__name__,
+ func.func_name))
+ if co_flags & CO_FLAGS_ARGS:
+ name = argnames.pop(-1)
+ tail = "[%s...]" % name.upper()
+ while func_defaults:
+ func_defaults.pop(-1)
+ name = argnames.pop(-1)
+ tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
+ while argnames:
+ name = argnames.pop(-1)
+ tail = "%s %s" % (name.upper(), tail)
+ usage += ' ' + tail
+
+ block_lines = [
+ self.helpindent + "Usage:",
+ self.helpindent + ' '*4 + usage
+ ]
+ block = '\n'.join(block_lines) + '\n\n'
+
+ help = help.replace(indent+marker+suffix, block, 1)
+ return help
+
+ #TODO: this only makes sense as part of the Cmdln class.
+ # Add hooks to add help preprocessing template vars and put
+ # this one on that class.
+ def _help_preprocess_cmd_option_list(self, help, cmdname=None):
+ marker = "${cmd_option_list}"
+ handler = self._get_cmd_handler(cmdname)
+ if not handler:
+ raise CmdlnError("cannot preprocess '%s' into help string: "
+ "could not find command handler for %r"
+ % (marker, cmdname))
+ indent, indent_width = _get_indent(marker, help)
+ suffix = _get_trailing_whitespace(marker, help)
+ if hasattr(handler, "optparser"):
+ # Setup formatting options and format.
+ # - Indentation of 4 is better than optparse default of 2.
+ # C.f. Damian Conway's discussion of this in Perl Best
+ # Practices.
+ handler.optparser.formatter.indent_increment = 4
+ handler.optparser.formatter.current_indent = indent_width
+ block = handler.optparser.format_option_help() + '\n'
+ else:
+ block = ""
+
+ help = help.replace(indent+marker+suffix, block, 1)
+ return help
+
+ def _get_canonical_cmd_name(self, token):
+ map = self._get_canonical_map()
+ return map.get(token, None)
+
+ def _get_canonical_map(self):
+ """Return a mapping of available command names and aliases to
+ their canonical command name.
+ """
+ cacheattr = "_token2canonical"
+ if not hasattr(self, cacheattr):
+ # Get the list of commands and their aliases, if any.
+ token2canonical = {}
+ cmd2funcname = {} # use a dict to strip duplicates
+ for attr in self.get_names():
+ if attr.startswith("do_"): cmdname = attr[3:]
+ elif attr.startswith("_do_"): cmdname = attr[4:]
+ else:
+ continue
+ cmd2funcname[cmdname] = attr
+ token2canonical[cmdname] = cmdname
+ for cmdname, funcname in cmd2funcname.items(): # add aliases
+ func = getattr(self, funcname)
+ aliases = getattr(func, "aliases", [])
+ for alias in aliases:
+ if alias in cmd2funcname:
+ import warnings
+ warnings.warn("'%s' alias for '%s' command conflicts "
+ "with '%s' handler"
+ % (alias, cmdname, cmd2funcname[alias]))
+ continue
+ token2canonical[alias] = cmdname
+ setattr(self, cacheattr, token2canonical)
+ return getattr(self, cacheattr)
+
+ def _get_cmd_handler(self, cmdname):
+ handler = None
+ try:
+ handler = getattr(self, 'do_' + cmdname)
+ except AttributeError:
+ try:
+ # Private command handlers begin with "_do_".
+ handler = getattr(self, '_do_' + cmdname)
+ except AttributeError:
+ pass
+ return handler
+
+ def _do_EOF(self, argv):
+ # Default EOF handler
+ # Note: an actual EOF is redirected to this command.
+ #TODO: separate name for this. Currently it is available from
+ # command-line. Is that okay?
+ self.stdout.write('\n')
+ self.stdout.flush()
+ self.stop = True
+
+ def emptyline(self):
+ # Different from cmd.Cmd: don't repeat the last command for an
+ # emptyline.
+ if self.cmdlooping:
+ pass
+ else:
+ return self.do_help(["help"])
+
+
+#---- optparse.py extension to fix (IMO) some deficiencies
+#
+# See the class _OptionParserEx docstring for details.
+#
+
+class StopOptionProcessing(Exception):
+ """Indicate that option *and argument* processing should stop
+ cleanly. This is not an error condition. It is similar in spirit to
+ StopIteration. This is raised by _OptionParserEx's default "help"
+ and "version" option actions and can be raised by custom option
+ callbacks too.
+
+ Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
+ usage is:
+
+ parser = CmdlnOptionParser(mycmd)
+ parser.add_option("-f", "--force", dest="force")
+ ...
+ try:
+ opts, args = parser.parse_args()
+ except StopOptionProcessing:
+ # normal termination, "--help" was probably given
+ sys.exit(0)
+ """
+
+class _OptionParserEx(optparse.OptionParser):
+ """An optparse.OptionParser that uses exceptions instead of sys.exit.
+
+ This class is an extension of optparse.OptionParser that differs
+ as follows:
+ - Correct (IMO) the default OptionParser error handling to never
+ sys.exit(). Instead OptParseError exceptions are passed through.
+ - Add the StopOptionProcessing exception (a la StopIteration) to
+ indicate normal termination of option processing.
+ See StopOptionProcessing's docstring for details.
+
+ I'd also like to see the following in the core optparse.py, perhaps
+ as a RawOptionParser which would serve as a base class for the more
+ generally used OptionParser (that works as current):
+ - Remove the implicit addition of the -h|--help and --version
+ options. They can get in the way (e.g. if want '-?' and '-V' for
+ these as well) and it is not hard to do:
+ optparser.add_option("-h", "--help", action="help")
+ optparser.add_option("--version", action="version")
+ These are good practices, just not valid defaults if they can
+ get in the way.
+ """
+ def error(self, msg):
+ raise optparse.OptParseError(msg)
+
+ def exit(self, status=0, msg=None):
+ if status == 0:
+ raise StopOptionProcessing(msg)
+ else:
+ #TODO: don't lose status info here
+ raise optparse.OptParseError(msg)
+
+
+
+#---- optparse.py-based option processing support
+
+class CmdlnOptionParser(_OptionParserEx):
+ """An optparse.OptionParser class more appropriate for top-level
+ Cmdln options. For parsing of sub-command options, see
+ SubCmdOptionParser.
+
+ Changes:
+ - disable_interspersed_args() by default, because a Cmdln instance
+ has sub-commands which may themselves have options.
+ - Redirect print_help() to the Cmdln.do_help() which is better
+ equipped to handle the "help" action.
+ - error() will raise a CmdlnUserError: OptionParser.error() is meant
+ to be called for user errors. Raising a well-known error here can
+ make error handling clearer.
+ - Also see the changes in _OptionParserEx.
+ """
+ def __init__(self, cmdln, **kwargs):
+ self.cmdln = cmdln
+ kwargs["prog"] = self.cmdln.name
+ _OptionParserEx.__init__(self, **kwargs)
+ self.disable_interspersed_args()
+
+ def print_help(self, file=None):
+ self.cmdln.onecmd(["help"])
+
+ def error(self, msg):
+ raise CmdlnUserError(msg)
+
+
+class SubCmdOptionParser(_OptionParserEx):
+ def set_cmdln_info(self, cmdln, subcmd):
+ """Called by Cmdln to pass relevant info about itself needed
+ for print_help().
+ """
+ self.cmdln = cmdln
+ self.subcmd = subcmd
+
+ def print_help(self, file=None):
+ self.cmdln.onecmd(["help", self.subcmd])
+
+ def error(self, msg):
+ raise CmdlnUserError(msg)
+
+
+def option(*args, **kwargs):
+ """Decorator to add an option to the optparser argument of a Cmdln
+ subcommand.
+
+ Example:
+ class MyShell(cmdln.Cmdln):
+ @cmdln.option("-f", "--force", help="force removal")
+ def do_remove(self, subcmd, opts, *args):
+ #...
+ """
+ #XXX Is there a possible optimization for many options to not have a
+ # large stack depth here?
+ def decorate(f):
+ if not hasattr(f, "optparser"):
+ f.optparser = SubCmdOptionParser()
+ f.optparser.add_option(*args, **kwargs)
+ return f
+ return decorate
+
+
+class Cmdln(RawCmdln):
+ """An improved (on cmd.Cmd) framework for building multi-subcommand
+ scripts (think "svn" & "cvs") and simple shells (think "pdb" and
+ "gdb").
+
+ A simple example:
+
+ import cmdln
+
+ class MySVN(cmdln.Cmdln):
+ name = "svn"
+
+ @cmdln.alias('stat', 'st')
+ @cmdln.option('-v', '--verbose', action='store_true',
+ help='print verbose information')
+ def do_status(self, subcmd, opts, *paths):
+ print "handle 'svn status' command"
+
+ #...
+
+ if __name__ == "__main__":
+ shell = MySVN()
+ retval = shell.main()
+ sys.exit(retval)
+
+ 'Cmdln' extends 'RawCmdln' by providing optparse option processing
+ integration. See this class' _dispatch_cmd() docstring and
+ <http://trentm.com/projects/cmdln> for more information.
+ """
+ def _dispatch_cmd(self, handler, argv):
+ """Introspect sub-command handler signature to determine how to
+ dispatch the command. The raw handler provided by the base
+ 'RawCmdln' class is still supported:
+
+ def do_foo(self, argv):
+ # 'argv' is the vector of command line args, argv[0] is
+ # the command name itself (i.e. "foo" or an alias)
+ pass
+
+ In addition, if the handler has more than 2 arguments option
+ processing is automatically done (using optparse):
+
+ @cmdln.option('-v', '--verbose', action='store_true')
+ def do_bar(self, subcmd, opts, *args):
+ # subcmd = <"bar" or an alias>
+ # opts = <an optparse.Values instance>
+ if opts.verbose:
+ print "lots of debugging output..."
+ # args = <tuple of arguments>
+ for arg in args:
+ bar(arg)
+
+ TODO: explain that "*args" can be other signatures as well.
+
+ The `cmdln.option` decorator corresponds to an `add_option()`
+ method call on an `optparse.OptionParser` instance.
+
+ You can declare a specific number of arguments:
+
+ @cmdln.option('-v', '--verbose', action='store_true')
+ def do_bar2(self, subcmd, opts, bar_one, bar_two):
+ #...
+
+ and an appropriate error message will be raised/printed if the
+ command is called with a different number of args.
+ """
+ co_argcount = handler.im_func.func_code.co_argcount
+ if co_argcount == 2: # handler ::= do_foo(self, argv)
+ return handler(argv)
+ elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
+ try:
+ optparser = handler.optparser
+ except AttributeError:
+ optparser = handler.im_func.optparser = SubCmdOptionParser()
+ assert isinstance(optparser, SubCmdOptionParser)
+ optparser.set_cmdln_info(self, argv[0])
+ try:
+ opts, args = optparser.parse_args(argv[1:])
+ except StopOptionProcessing:
+ #TODO: this doesn't really fly for a replacement of
+ # optparse.py behaviour, does it?
+ return 0 # Normal command termination
+
+ try:
+ return handler(argv[0], opts, *args)
+ except TypeError, ex:
+ # Some TypeError's are user errors:
+ # do_foo() takes at least 4 arguments (3 given)
+ # do_foo() takes at most 5 arguments (6 given)
+ # do_foo() takes exactly 5 arguments (6 given)
+ # Raise CmdlnUserError for these with a suitably
+ # massaged error message.
+ import sys
+ tb = sys.exc_info()[2] # the traceback object
+ if tb.tb_next is not None:
+ # If the traceback is more than one level deep, then the
+ # TypeError did *not* happen on the "handler(...)" call
+ # above. In that case we don't want to handle it specially
+ # here: it would falsely mask deeper code errors.
+ raise
+ msg = ex.args[0]
+ match = _INCORRECT_NUM_ARGS_RE.search(msg)
+ if match:
+ msg = list(match.groups())
+ msg[1] = int(msg[1]) - 3
+ if msg[1] == 1:
+ msg[2] = msg[2].replace("arguments", "argument")
+ msg[3] = int(msg[3]) - 3
+ msg = ''.join(map(str, msg))
+ raise CmdlnUserError(msg)
+ else:
+ raise
+ else:
+ raise CmdlnError("incorrect argcount for %s(): takes %d, must "
+ "take 2 for 'argv' signature or 3+ for 'opts' "
+ "signature" % (handler.__name__, co_argcount))
+
+
+
+#---- internal support functions
+
+def _format_linedata(linedata, indent, indent_width):
+ """Format specific linedata into a pleasant layout.
+
+ "linedata" is a list of 2-tuples of the form:
+ (<item-display-string>, <item-docstring>)
+ "indent" is a string to use for one level of indentation
+ "indent_width" is a number of columns by which the
+ formatted data will be indented when printed.
+
+ The <item-display-string> column is held to between 13 and 16 columns.
+ """
+ lines = []
+ WIDTH = 78 - indent_width
+ SPACING = 2
+ NAME_WIDTH_LOWER_BOUND = 13
+ NAME_WIDTH_UPPER_BOUND = 16
+ NAME_WIDTH = max([len(s) for s,d in linedata])
+ if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
+ NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
+ else:
+ NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
+
+ DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
+ for namestr, doc in linedata:
+ line = indent + namestr
+ if len(namestr) <= NAME_WIDTH:
+ line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
+ else:
+ lines.append(line)
+ line = indent + ' ' * (NAME_WIDTH + SPACING)
+ line += _summarize_doc(doc, DOC_WIDTH)
+ lines.append(line.rstrip())
+ return lines
+
+def _summarize_doc(doc, length=60):
+ r"""Parse out a short one line summary from the given doclines.
+
+ "doc" is the doc string to summarize.
+ "length" is the max length for the summary
+
+ >>> _summarize_doc("this function does this")
+ 'this function does this'
+ >>> _summarize_doc("this function does this", 10)
+ 'this fu...'
+ >>> _summarize_doc("this function does this\nand that")
+ 'this function does this and that'
+ >>> _summarize_doc("this function does this\n\nand that")
+ 'this function does this'
+ """
+ import re
+ if doc is None:
+ return ""
+ assert length > 3, "length <= 3 is absurdly short for a doc summary"
+ doclines = doc.strip().splitlines(0)
+ if not doclines:
+ return ""
+
+ summlines = []
+ for i, line in enumerate(doclines):
+ stripped = line.strip()
+ if not stripped:
+ break
+ summlines.append(stripped)
+ if len(''.join(summlines)) >= length:
+ break
+
+ summary = ' '.join(summlines)
+ if len(summary) > length:
+ summary = summary[:length-3] + "..."
+ return summary
+
+
+def line2argv(line):
+ r"""Parse the given line into an argument vector.
+
+ "line" is the line of input to parse.
+
+ This may get niggly when dealing with quoting and escaping. The
+ current state of this parsing may not be completely thorough/correct
+ in this respect.
+
+ >>> from cmdln import line2argv
+ >>> line2argv("foo")
+ ['foo']
+ >>> line2argv("foo bar")
+ ['foo', 'bar']
+ >>> line2argv("foo bar ")
+ ['foo', 'bar']
+ >>> line2argv(" foo bar")
+ ['foo', 'bar']
+
+ Quote handling:
+
+ >>> line2argv("'foo bar'")
+ ['foo bar']
+ >>> line2argv('"foo bar"')
+ ['foo bar']
+ >>> line2argv(r'"foo\"bar"')
+ ['foo"bar']
+ >>> line2argv("'foo bar' spam")
+ ['foo bar', 'spam']
+ >>> line2argv("'foo 'bar spam")
+ ['foo bar', 'spam']
+
+ >>> line2argv('some\tsimple\ttests')
+ ['some', 'simple', 'tests']
+ >>> line2argv('a "more complex" test')
+ ['a', 'more complex', 'test']
+ >>> line2argv('a more="complex test of " quotes')
+ ['a', 'more=complex test of ', 'quotes']
+ >>> line2argv('a more" complex test of " quotes')
+ ['a', 'more complex test of ', 'quotes']
+ >>> line2argv('an "embedded \\"quote\\""')
+ ['an', 'embedded "quote"']
+
+ # Komodo bug 48027
+ >>> line2argv('foo bar C:\\')
+ ['foo', 'bar', 'C:\\']
+
+ # Komodo change 127581
+ >>> line2argv(r'"\test\slash" "foo bar" "foo\"bar"')
+ ['\\test\\slash', 'foo bar', 'foo"bar']
+
+ # Komodo change 127629
+ >>> if sys.platform == "win32":
+ ... line2argv(r'\foo\bar') == ['\\foo\\bar']
+ ... line2argv(r'\\foo\\bar') == ['\\\\foo\\\\bar']
+ ... line2argv('"foo') == ['foo']
+ ... else:
+ ... line2argv(r'\foo\bar') == ['foobar']
+ ... line2argv(r'\\foo\\bar') == ['\\foo\\bar']
+ ... try:
+ ... line2argv('"foo')
+ ... except ValueError, ex:
+ ... "not terminated" in str(ex)
+ True
+ True
+ True
+ """
+ import string
+ line = line.strip()
+ argv = []
+ state = "default"
+ arg = None # the current argument being parsed
+ i = -1
+ while 1:
+ i += 1
+ if i >= len(line): break
+ ch = line[i]
+
+ if ch == "\\" and i+1 < len(line):
+ # escaped char always added to arg, regardless of state
+ if arg is None: arg = ""
+ if (sys.platform == "win32"
+ or state in ("double-quoted", "single-quoted")
+ ) and line[i+1] not in tuple('"\''):
+ arg += ch
+ i += 1
+ arg += line[i]
+ continue
+
+ if state == "single-quoted":
+ if ch == "'":
+ state = "default"
+ else:
+ arg += ch
+ elif state == "double-quoted":
+ if ch == '"':
+ state = "default"
+ else:
+ arg += ch
+ elif state == "default":
+ if ch == '"':
+ if arg is None: arg = ""
+ state = "double-quoted"
+ elif ch == "'":
+ if arg is None: arg = ""
+ state = "single-quoted"
+ elif ch in string.whitespace:
+ if arg is not None:
+ argv.append(arg)
+ arg = None
+ else:
+ if arg is None: arg = ""
+ arg += ch
+ if arg is not None:
+ argv.append(arg)
+ if sys.platform != "win32" and state != "default":
+ raise ValueError("command line is not terminated: unfinished %s "
+ "segment" % state)
+ return argv
+
+
+def argv2line(argv):
+ r"""Put together the given argument vector into a command line.
+
+ "argv" is the argument vector to process.
+
+ >>> from cmdln import argv2line
+ >>> argv2line(['foo'])
+ 'foo'
+ >>> argv2line(['foo', 'bar'])
+ 'foo bar'
+ >>> argv2line(['foo', 'bar baz'])
+ 'foo "bar baz"'
+ >>> argv2line(['foo"bar'])
+ 'foo"bar'
+ >>> print argv2line(['foo" bar'])
+ 'foo" bar'
+ >>> print argv2line(["foo' bar"])
+ "foo' bar"
+ >>> argv2line(["foo'bar"])
+ "foo'bar"
+ """
+ escapedArgs = []
+ for arg in argv:
+ if ' ' in arg and '"' not in arg:
+ arg = '"'+arg+'"'
+ elif ' ' in arg and "'" not in arg:
+ arg = "'"+arg+"'"
+ elif ' ' in arg:
+ arg = arg.replace('"', r'\"')
+ arg = '"'+arg+'"'
+ escapedArgs.append(arg)
+ return ' '.join(escapedArgs)
+
+
+# Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook
+def _dedentlines(lines, tabsize=8, skip_first_line=False):
+ """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
+
+ "lines" is a list of lines to dedent.
+ "tabsize" is the tab width to use for indent width calculations.
+ "skip_first_line" is a boolean indicating if the first line should
+ be skipped for calculating the indent width and for dedenting.
+ This is sometimes useful for docstrings and similar.
+
+ Same as dedent() except operates on a sequence of lines. Note: the
+ lines list is modified **in-place**.
+ """
+ DEBUG = False
+ if DEBUG:
+ print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
+ % (tabsize, skip_first_line)
+ indents = []
+ margin = None
+ for i, line in enumerate(lines):
+ if i == 0 and skip_first_line: continue
+ indent = 0
+ for ch in line:
+ if ch == ' ':
+ indent += 1
+ elif ch == '\t':
+ indent += tabsize - (indent % tabsize)
+ elif ch in '\r\n':
+ continue # skip all-whitespace lines
+ else:
+ break
+ else:
+ continue # skip all-whitespace lines
+ if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
+ if margin is None:
+ margin = indent
+ else:
+ margin = min(margin, indent)
+ if DEBUG: print "dedent: margin=%r" % margin
+
+ if margin is not None and margin > 0:
+ for i, line in enumerate(lines):
+ if i == 0 and skip_first_line: continue
+ removed = 0
+ for j, ch in enumerate(line):
+ if ch == ' ':
+ removed += 1
+ elif ch == '\t':
+ removed += tabsize - (removed % tabsize)
+ elif ch in '\r\n':
+ if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
+ lines[i] = lines[i][j:]
+ break
+ else:
+ raise ValueError("unexpected non-whitespace char %r in "
+ "line %r while removing %d-space margin"
+ % (ch, line, margin))
+ if DEBUG:
+ print "dedent: %r: %r -> removed %d/%d"\
+ % (line, ch, removed, margin)
+ if removed == margin:
+ lines[i] = lines[i][j+1:]
+ break
+ elif removed > margin:
+ lines[i] = ' '*(removed-margin) + lines[i][j+1:]
+ break
+ return lines
+
+def _dedent(text, tabsize=8, skip_first_line=False):
+ """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
+
+ "text" is the text to dedent.
+ "tabsize" is the tab width to use for indent width calculations.
+ "skip_first_line" is a boolean indicating if the first line should
+ be skipped for calculating the indent width and for dedenting.
+ This is sometimes useful for docstrings and similar.
+
+ textwrap.dedent(s), but don't expand tabs to spaces
+ """
+ lines = text.splitlines(1)
+ _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
+ return ''.join(lines)
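+
+# Illustrative example (not in the original source): unlike
+# textwrap.dedent(), tabs survive dedenting:
+#
+# _dedent("\tfoo\n\t\tbar\n") == "foo\n\tbar\n"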
+
+
+def _get_indent(marker, s, tab_width=8):
+ """_get_indent(marker, s, tab_width=8) ->
+ (<indentation-of-'marker'>, <indentation-width>)"""
+ # Figure out how much the marker is indented.
+ INDENT_CHARS = tuple(' \t')
+ start = s.index(marker)
+ i = start
+ while i > 0:
+ if s[i-1] not in INDENT_CHARS:
+ break
+ i -= 1
+ indent = s[i:start]
+ indent_width = 0
+ for ch in indent:
+ if ch == ' ':
+ indent_width += 1
+ elif ch == '\t':
+ indent_width += tab_width - (indent_width % tab_width)
+ return indent, indent_width
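+
+# For instance (illustrative, not in the original source):
+# _get_indent("bar", "    bar") == ("    ", 4), and a leading tab would
+# count as tab_width columns instead of one.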
+
+def _get_trailing_whitespace(marker, s):
+ """Return the whitespace content trailing the given 'marker' in string 's',
+ up to and including a newline.
+ """
+ suffix = ''
+ start = s.index(marker) + len(marker)
+ i = start
+ while i < len(s):
+ if s[i] in ' \t':
+ suffix += s[i]
+ elif s[i] in '\r\n':
+ suffix += s[i]
+ if s[i] == '\r' and i+1 < len(s) and s[i+1] == '\n':
+ suffix += s[i+1]
+ break
+ else:
+ break
+ i += 1
+ return suffix
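+
+# For instance (illustrative, not in the original source):
+# _get_trailing_whitespace("foo", "foo  \nbar") == "  \n" -- the spaces
+# plus the newline (or CRLF pair) that immediately follow the marker.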
+
+
+
+#---- bash completion support
+# Note: This is still experimental. I expect to change this
+# significantly.
+#
+# To get Bash completion for a cmdln.Cmdln class, run the following
+# bash command:
+# $ complete -C 'python -m cmdln /path/to/script.py CmdlnClass' cmdname
+# For example:
+# $ complete -C 'python -m cmdln ~/bin/svn.py SVN' svn
+#
+#TODO: Simplify the above so don't have to given path to script (try to
+# find it on PATH, if possible). Could also make class name
+# optional if there is only one in the module (common case).
+
+if __name__ == "__main__" and len(sys.argv) == 6:
+ def _log(s):
+ return # no-op; remove this line to enable the debug log below
+ from os.path import expanduser
+ fout = open(expanduser("~/tmp/bashcpln.log"), 'a')
+ fout.write(str(s) + '\n')
+ fout.close()
+
+ # Recipe: module_from_path (1.0.1+)
+ def _module_from_path(path):
+ import imp, os, sys
+ path = os.path.expanduser(path)
+ dir = os.path.dirname(path) or os.curdir
+ name = os.path.splitext(os.path.basename(path))[0]
+ sys.path.insert(0, dir)
+ try:
+ iinfo = imp.find_module(name, [dir])
+ return imp.load_module(name, *iinfo)
+ finally:
+ sys.path.remove(dir)
+
+ def _get_bash_cplns(script_path, class_name, cmd_name,
+ token, preceding_token):
+ _log('--')
+ _log('get_cplns(%r, %r, %r, %r, %r)'
+ % (script_path, class_name, cmd_name, token, preceding_token))
+ comp_line = os.environ["COMP_LINE"]
+ comp_point = int(os.environ["COMP_POINT"])
+ _log("COMP_LINE: %r" % comp_line)
+ _log("COMP_POINT: %r" % comp_point)
+
+ try:
+ script = _module_from_path(script_path)
+ except ImportError, ex:
+ _log("error importing `%s': %s" % (script_path, ex))
+ return []
+ shell = getattr(script, class_name)()
+ cmd_map = shell._get_canonical_map()
+ del cmd_map["EOF"]
+
+ # Determine if completing the sub-command name.
+ parts = comp_line[:comp_point].split(None, 1)
+ _log(parts)
+ if len(parts) == 1 or not (' ' in parts[1] or '\t' in parts[1]):
+ #TODO: if parts[1].startswith('-'): handle top-level opts
+ _log("complete sub-command names")
+ matches = {}
+ for name, canon_name in cmd_map.items():
+ if name.startswith(token):
+ matches[name] = canon_name
+ if not matches:
+ return []
+ elif len(matches) == 1:
+ return matches.keys()
+ elif len(set(matches.values())) == 1:
+ return [matches.values()[0]]
+ else:
+ return matches.keys()
+
+ # Otherwise, complete options for the given sub-command.
+ #TODO: refine this so it does the right thing with option args
+ if token.startswith('-'):
+ cmd_name = comp_line.split(None, 2)[1]
+ try:
+ cmd_canon_name = cmd_map[cmd_name]
+ except KeyError:
+ return []
+ handler = shell._get_cmd_handler(cmd_canon_name)
+ optparser = getattr(handler, "optparser", None)
+ if optparser is None:
+ optparser = SubCmdOptionParser()
+ opt_strs = []
+ for option in optparser.option_list:
+ for opt_str in option._short_opts + option._long_opts:
+ if opt_str.startswith(token):
+ opt_strs.append(opt_str)
+ return opt_strs
+
+ return []
+
+ for cpln in _get_bash_cplns(*sys.argv[1:]):
+ print cpln
+
diff --git a/scripts/lib/mic/utils/errors.py b/scripts/lib/mic/utils/errors.py
new file mode 100644
index 0000000000..8d720f9080
--- /dev/null
+++ b/scripts/lib/mic/utils/errors.py
@@ -0,0 +1,71 @@
+#!/usr/bin/python -tt
+#
+# Copyright (c) 2007 Red Hat, Inc.
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+class CreatorError(Exception):
+ """An exception base class for all imgcreate errors."""
+ keyword = '<creator>'
+
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ if isinstance(self.msg, unicode):
+ self.msg = self.msg.encode('utf-8', 'ignore')
+ else:
+ self.msg = str(self.msg)
+ return self.keyword + self.msg
+
+class Usage(CreatorError):
+ keyword = '<usage>'
+
+ def __str__(self):
+ if isinstance(self.msg, unicode):
+ self.msg = self.msg.encode('utf-8', 'ignore')
+ else:
+ self.msg = str(self.msg)
+ return self.keyword + self.msg + ', please use "--help" for more info'
+
+class Abort(CreatorError):
+ keyword = ''
+
+class ConfigError(CreatorError):
+ keyword = '<config>'
+
+class KsError(CreatorError):
+ keyword = '<kickstart>'
+
+class RepoError(CreatorError):
+ keyword = '<repo>'
+
+class RpmError(CreatorError):
+ keyword = '<rpm>'
+
+class MountError(CreatorError):
+ keyword = '<mount>'
+
+class SnapshotError(CreatorError):
+ keyword = '<snapshot>'
+
+class SquashfsError(CreatorError):
+ keyword = '<squashfs>'
+
+class BootstrapError(CreatorError):
+ keyword = '<bootstrap>'
+
+class RuntimeError(CreatorError):
+ keyword = '<runtime>'
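+
+# Note: RuntimeError above shadows Python's builtin of the same name for
+# any code doing "from mic.utils.errors import *".
+#
+# Illustrative usage (not part of the original module): each subclass
+# prefixes its keyword to the message, so
+# str(MountError("mount failed")) == '<mount>mount failed'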
diff --git a/scripts/lib/mic/utils/fs_related.py b/scripts/lib/mic/utils/fs_related.py
new file mode 100644
index 0000000000..b9b9a97175
--- /dev/null
+++ b/scripts/lib/mic/utils/fs_related.py
@@ -0,0 +1,1029 @@
+#!/usr/bin/python -tt
+#
+# Copyright (c) 2007, Red Hat, Inc.
+# Copyright (c) 2009, 2010, 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+from __future__ import with_statement
+import os
+import sys
+import errno
+import stat
+import random
+import string
+import time
+import uuid
+
+from mic import msger
+from mic.utils import runner
+from mic.utils.errors import *
+
+
+def find_binary_inchroot(binary, chroot):
+ paths = ["/usr/sbin",
+ "/usr/bin",
+ "/sbin",
+ "/bin"
+ ]
+
+ for path in paths:
+ bin_path = "%s/%s" % (path, binary)
+ if os.path.exists("%s/%s" % (chroot, bin_path)):
+ return bin_path
+ return None
+
+def find_binary_path(binary):
+ if os.environ.has_key("PATH"):
+ paths = os.environ["PATH"].split(":")
+ else:
+ paths = []
+ if os.environ.has_key("HOME"):
+ paths += [os.environ["HOME"] + "/bin"]
+ paths += ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"]
+
+ for path in paths:
+ bin_path = "%s/%s" % (path, binary)
+ if os.path.exists(bin_path):
+ return bin_path
+ raise CreatorError("Command '%s' is not available." % binary)
+
+def makedirs(dirname):
+ """A version of os.makedirs() that doesn't throw an
+ exception if the leaf directory already exists.
+ """
+ try:
+ os.makedirs(dirname)
+ except OSError, err:
+ if err.errno != errno.EEXIST:
+ raise
+
+def mksquashfs(in_img, out_img):
+ fullpathmksquashfs = find_binary_path("mksquashfs")
+ args = [fullpathmksquashfs, in_img, out_img]
+
+ if not sys.stdout.isatty():
+ args.append("-no-progress")
+
+ ret = runner.show(args)
+ if ret != 0:
+ raise SquashfsError("'%s' exited with error (%d)" % (' '.join(args), ret))
+
+def resize2fs(fs, size):
+ resize2fs = find_binary_path("resize2fs")
+ if size == 0:
+ # a size of 0 means shrink the filesystem to its minimal size
+ return runner.show([resize2fs, '-M', fs])
+ else:
+ return runner.show([resize2fs, fs, "%sK" % (size / 1024,)])
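+
+# For example (illustrative): resize2fs("/tmp/img.ext4", 0) shrinks the
+# filesystem to its minimal size, while a non-zero size in bytes is
+# converted to kilobytes for the resize2fs command line.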
+
+def my_fuser(fp):
+ fuser = find_binary_path("fuser")
+ if not os.path.exists(fp):
+ return False
+
+ rc = runner.quiet([fuser, "-s", fp])
+ if rc == 0:
+ for pid in runner.outs([fuser, fp]).split():
+ fd = open("/proc/%s/cmdline" % pid, "r")
+ cmdline = fd.read()
+ fd.close()
+ if cmdline[:-1] == "/bin/bash":
+ return True
+
+ # not found
+ return False
+
+class BindChrootMount:
+ """Represents a bind mount of a directory into a chroot."""
+ def __init__(self, src, chroot, dest = None, option = None):
+ self.root = os.path.abspath(os.path.expanduser(chroot))
+ self.option = option
+
+ self.orig_src = self.src = src
+ if os.path.islink(src):
+ self.src = os.readlink(src)
+ if not self.src.startswith('/'):
+ self.src = os.path.abspath(os.path.join(os.path.dirname(src),
+ self.src))
+
+ if not dest:
+ dest = self.src
+ self.dest = os.path.join(self.root, dest.lstrip('/'))
+
+ self.mounted = False
+ self.mountcmd = find_binary_path("mount")
+ self.umountcmd = find_binary_path("umount")
+
+ def ismounted(self):
+ with open('/proc/mounts') as f:
+ for line in f:
+ if line.split()[1] == os.path.abspath(self.dest):
+ return True
+
+ return False
+
+ def has_chroot_instance(self):
+ lock = os.path.join(self.root, ".chroot.lock")
+ return my_fuser(lock)
+
+ def mount(self):
+ if self.mounted or self.ismounted():
+ return
+
+ makedirs(self.dest)
+ rc = runner.show([self.mountcmd, "--bind", self.src, self.dest])
+ if rc != 0:
+ raise MountError("Bind-mounting '%s' to '%s' failed" %
+ (self.src, self.dest))
+ if self.option:
+ rc = runner.show([self.mountcmd, "--bind", "-o", "remount,%s" % self.option, self.dest])
+ if rc != 0:
+ raise MountError("Bind-remounting '%s' failed" % self.dest)
+
+ self.mounted = True
+ if os.path.islink(self.orig_src):
+ dest = os.path.join(self.root, self.orig_src.lstrip('/'))
+ if not os.path.exists(dest):
+ os.symlink(self.src, dest)
+
+ def unmount(self):
+ if self.has_chroot_instance():
+ return
+
+ if self.ismounted():
+ runner.show([self.umountcmd, "-l", self.dest])
+ self.mounted = False
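+
+# Illustrative usage (hypothetical paths, not part of the original module):
+#
+# bind = BindChrootMount("/proc", "/var/tmp/mychroot")
+# bind.mount()   # bind-mounts /proc at /var/tmp/mychroot/proc
+# ...
+# bind.unmount() # lazily unmounts it unless a chroot session holds the lock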
+
+class LoopbackMount:
+ """LoopbackMount compatibility layer for old API"""
+ def __init__(self, lofile, mountdir, fstype = None):
+ self.diskmount = DiskMount(LoopbackDisk(lofile,size = 0),mountdir,fstype,rmmountdir = True)
+ self.lofile = lofile # stored for loopsetup() below
+ self.losetup = False
+ self.losetupcmd = find_binary_path("losetup")
+
+ def cleanup(self):
+ self.diskmount.cleanup()
+
+ def unmount(self):
+ self.diskmount.unmount()
+
+ def lounsetup(self):
+ if self.losetup:
+ runner.show([self.losetupcmd, "-d", self.loopdev])
+ self.losetup = False
+ self.loopdev = None
+
+ def loopsetup(self):
+ if self.losetup:
+ return
+
+ self.loopdev = get_loop_device(self.losetupcmd, self.lofile)
+ self.losetup = True
+
+ def mount(self):
+ self.diskmount.mount()
+
+class SparseLoopbackMount(LoopbackMount):
+ """SparseLoopbackMount compatibility layer for old API"""
+ def __init__(self, lofile, mountdir, size, fstype = None):
+ self.diskmount = DiskMount(SparseLoopbackDisk(lofile,size),mountdir,fstype,rmmountdir = True)
+
+ def expand(self, create = False, size = None):
+ self.diskmount.disk.expand(create, size)
+
+ def truncate(self, size = None):
+ self.diskmount.disk.truncate(size)
+
+ def create(self):
+ self.diskmount.disk.create()
+
+class SparseExtLoopbackMount(SparseLoopbackMount):
+ """SparseExtLoopbackMount compatibility layer for old API"""
+ def __init__(self, lofile, mountdir, size, fstype, blocksize, fslabel):
+ self.diskmount = ExtDiskMount(SparseLoopbackDisk(lofile,size), mountdir, fstype, blocksize, fslabel, rmmountdir = True)
+
+
+ def __format_filesystem(self):
+ self.diskmount.__format_filesystem()
+
+ def create(self):
+ self.diskmount.disk.create()
+
+ def resize(self, size = None):
+ return self.diskmount.__resize_filesystem(size)
+
+ def mount(self):
+ self.diskmount.mount()
+
+ def __fsck(self):
+ self.diskmount.__fsck()
+
+ def __get_size_from_filesystem(self):
+ return self.diskmount.__get_size_from_filesystem()
+
+ def __resize_to_minimal(self):
+ return self.diskmount.__resize_to_minimal()
+
+ def resparse(self, size = None):
+ return self.diskmount.resparse(size)
+
+class Disk:
+ """Generic base object for a disk
+
+ The 'create' method must make the disk visible as a block device - e.g.
+ by calling losetup. For RawDisk, this is obviously a no-op. The 'cleanup'
+ method must undo the 'create' operation.
+ """
+ def __init__(self, size, device = None):
+ self._device = device
+ self._size = size
+
+ def create(self):
+ pass
+
+ def cleanup(self):
+ pass
+
+ def get_device(self):
+ return self._device
+ def set_device(self, path):
+ self._device = path
+ device = property(get_device, set_device)
+
+ def get_size(self):
+ return self._size
+ size = property(get_size)
+
+
+class RawDisk(Disk):
+ """A Disk backed by a block device.
+ Note that create() is a no-op.
+ """
+ def __init__(self, size, device):
+ Disk.__init__(self, size, device)
+
+ def fixed(self):
+ return True
+
+ def exists(self):
+ return True
+
+class LoopbackDisk(Disk):
+ """A Disk backed by a file via the loop module."""
+ def __init__(self, lofile, size):
+ Disk.__init__(self, size)
+ self.lofile = lofile
+ self.losetupcmd = find_binary_path("losetup")
+
+ def fixed(self):
+ return False
+
+ def exists(self):
+ return os.path.exists(self.lofile)
+
+ def create(self):
+ if self.device is not None:
+ return
+
+ self.device = get_loop_device(self.losetupcmd, self.lofile)
+
+ def cleanup(self):
+ if self.device is None:
+ return
+ msger.debug("Losetup remove %s" % self.device)
+ rc = runner.show([self.losetupcmd, "-d", self.device])
+ self.device = None
+
+class SparseLoopbackDisk(LoopbackDisk):
+ """A Disk backed by a sparse file via the loop module."""
+ def __init__(self, lofile, size):
+ LoopbackDisk.__init__(self, lofile, size)
+
+ def expand(self, create = False, size = None):
+ flags = os.O_WRONLY
+ if create:
+ flags |= os.O_CREAT
+ if not os.path.exists(self.lofile):
+ makedirs(os.path.dirname(self.lofile))
+
+ if size is None:
+ size = self.size
+
+ msger.debug("Extending sparse file %s to %d" % (self.lofile, size))
+ if create:
+ fd = os.open(self.lofile, flags, 0644)
+ else:
+ fd = os.open(self.lofile, flags)
+
+ if size <= 0:
+ size = 1
+ try:
+ os.ftruncate(fd, size)
+ except:
+ # may be limited by 2G in 32bit env
+ os.ftruncate(fd, 2**31L)
+
+ os.close(fd)
+
+ def truncate(self, size = None):
+ if size is None:
+ size = self.size
+
+ msger.debug("Truncating sparse file %s to %d" % (self.lofile, size))
+ fd = os.open(self.lofile, os.O_WRONLY)
+ os.ftruncate(fd, size)
+ os.close(fd)
+
+ def create(self):
+ self.expand(create = True)
+ LoopbackDisk.create(self)
+
+class Mount:
+ """A generic base class to deal with mounting things."""
+ def __init__(self, mountdir):
+ self.mountdir = mountdir
+
+ def cleanup(self):
+ self.unmount()
+
+ def mount(self, options = None):
+ pass
+
+ def unmount(self):
+ pass
+
+class DiskMount(Mount):
+ """A Mount object that handles mounting of a Disk."""
+ def __init__(self, disk, mountdir, fstype = None, rmmountdir = True):
+ Mount.__init__(self, mountdir)
+
+ self.disk = disk
+ self.fstype = fstype
+ self.rmmountdir = rmmountdir
+
+ self.mounted = False
+ self.rmdir = False
+ if fstype:
+ self.mkfscmd = find_binary_path("mkfs." + self.fstype)
+ else:
+ self.mkfscmd = None
+ self.mountcmd = find_binary_path("mount")
+ self.umountcmd = find_binary_path("umount")
+
+ def cleanup(self):
+ Mount.cleanup(self)
+ self.disk.cleanup()
+
+ def unmount(self):
+ if self.mounted:
+ msger.debug("Unmounting directory %s" % self.mountdir)
+ runner.quiet('sync') # sync the data on this mount point
+ rc = runner.show([self.umountcmd, "-l", self.mountdir])
+ if rc == 0:
+ self.mounted = False
+ else:
+ raise MountError("Failed to umount %s" % self.mountdir)
+ if self.rmdir and not self.mounted:
+ try:
+ os.rmdir(self.mountdir)
+ except OSError, e:
+ pass
+ self.rmdir = False
+
+
+ def __create(self):
+ self.disk.create()
+
+
+ def mount(self, options = None):
+ if self.mounted:
+ return
+
+ if not os.path.isdir(self.mountdir):
+ msger.debug("Creating mount point %s" % self.mountdir)
+ os.makedirs(self.mountdir)
+ self.rmdir = self.rmmountdir
+
+ self.__create()
+
+ msger.debug("Mounting %s at %s" % (self.disk.device, self.mountdir))
+ if options:
+ args = [ self.mountcmd, "-o", options, self.disk.device, self.mountdir ]
+ else:
+ args = [ self.mountcmd, self.disk.device, self.mountdir ]
+ if self.fstype:
+ args.extend(["-t", self.fstype])
+
+ rc = runner.show(args)
+ if rc != 0:
+ raise MountError("Failed to mount '%s' to '%s' with command '%s'. Retval: %s" %
+ (self.disk.device, self.mountdir, " ".join(args), rc))
+
+ self.mounted = True
+
+class ExtDiskMount(DiskMount):
+ """A DiskMount object that is able to format/resize ext[23] filesystems."""
+ def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
+ DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
+ self.blocksize = blocksize
+ self.fslabel = fslabel.replace("/", "")
+ self.uuid = str(uuid.uuid4())
+ self.skipformat = skipformat
+ self.fsopts = fsopts
+ self.extopts = None
+ self.dumpe2fs = find_binary_path("dumpe2fs")
+ self.tune2fs = find_binary_path("tune2fs")
+
+ def __parse_field(self, output, field):
+ for line in output.split("\n"):
+ if line.startswith(field + ":"):
+ return line[len(field) + 1:].strip()
+
+ raise KeyError("Failed to find field '%s' in output" % field)
+
+ def __format_filesystem(self):
+ if self.skipformat:
+ msger.debug("Skip filesystem format.")
+ return
+
+ msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
+ cmdlist = [self.mkfscmd, "-F", "-L", self.fslabel, "-m", "1", "-b",
+ str(self.blocksize), "-U", self.uuid]
+ if self.extopts:
+ cmdlist.extend(self.extopts.split())
+ cmdlist.extend([self.disk.device])
+
+ rc, errout = runner.runtool(cmdlist, catch=2)
+ if rc != 0:
+ raise MountError("Error creating %s filesystem on disk %s:\n%s" %
+ (self.fstype, self.disk.device, errout))
+
+ if not self.extopts:
+ msger.debug("Tuning filesystem on %s" % self.disk.device)
+ runner.show([self.tune2fs, "-c0", "-i0", "-Odir_index", "-ouser_xattr,acl", self.disk.device])
+
+ def __resize_filesystem(self, size = None):
+ current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
+
+ if size is None:
+ size = self.disk.size
+
+ if size == current_size:
+ return
+
+ if size > current_size:
+ # expand() takes (create, size); pass size by keyword so the
+ # value does not land in the 'create' parameter
+ self.disk.expand(size = size)
+
+ self.__fsck()
+
+ resize2fs(self.disk.lofile, size)
+ return size
+
+ def __create(self):
+ resize = False
+ if not self.disk.fixed() and self.disk.exists():
+ resize = True
+
+ self.disk.create()
+
+ if resize:
+ self.__resize_filesystem()
+ else:
+ self.__format_filesystem()
+
+ def mount(self, options = None):
+ self.__create()
+ DiskMount.mount(self, options)
+
+ def __fsck(self):
+ msger.info("Checking filesystem %s" % self.disk.lofile)
+ runner.quiet(["/sbin/e2fsck", "-f", "-y", self.disk.lofile])
+
+ def __get_size_from_filesystem(self):
+ return int(self.__parse_field(runner.outs([self.dumpe2fs, '-h', self.disk.lofile]),
+ "Block count")) * self.blocksize
+
+ def __resize_to_minimal(self):
+ self.__fsck()
+
+ #
+ # Use a binary search to find the minimal size
+ # we can resize the image to
+ #
+ bot = 0
+ top = self.__get_size_from_filesystem()
+ while top != (bot + 1):
+ t = bot + ((top - bot) / 2)
+
+ if not resize2fs(self.disk.lofile, t):
+ top = t
+ else:
+ bot = t
+ return top
+
+ def resparse(self, size = None):
+ self.cleanup()
+ if size == 0:
+ minsize = 0
+ else:
+ minsize = self.__resize_to_minimal()
+ self.disk.truncate(minsize)
+
+ self.__resize_filesystem(size)
+ return minsize
+
+class VfatDiskMount(DiskMount):
+ """A DiskMount object that is able to format vfat/msdos filesystems."""
+ def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
+ DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
+ self.blocksize = blocksize
+ self.fslabel = fslabel.replace("/", "")
+ rand1 = random.randint(0, 2**16 - 1)
+ rand2 = random.randint(0, 2**16 - 1)
+ self.uuid = "%04X-%04X" % (rand1, rand2)
+ self.skipformat = skipformat
+ self.fsopts = fsopts
+ self.fsckcmd = find_binary_path("fsck." + self.fstype)
+
+ def __format_filesystem(self):
+ if self.skipformat:
+ msger.debug("Skip filesystem format.")
+ return
+
+ msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
+ rc = runner.show([self.mkfscmd, "-n", self.fslabel,
+ "-i", self.uuid.replace("-", ""), self.disk.device])
+ if rc != 0:
+ raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))
+
+ msger.verbose("Tuning filesystem on %s" % self.disk.device)
+
+ def __resize_filesystem(self, size = None):
+ current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
+
+ if size is None:
+ size = self.disk.size
+
+ if size == current_size:
+ return
+
+ if size > current_size:
+ self.disk.expand(size = size) # pass size by keyword (see ExtDiskMount)
+
+ self.__fsck()
+
+ #resize2fs(self.disk.lofile, size)
+ return size
+
+ def __create(self):
+ resize = False
+ if not self.disk.fixed() and self.disk.exists():
+ resize = True
+
+ self.disk.create()
+
+ if resize:
+ self.__resize_filesystem()
+ else:
+ self.__format_filesystem()
+
+ def mount(self, options = None):
+ self.__create()
+ DiskMount.mount(self, options)
+
+ def __fsck(self):
+ msger.debug("Checking filesystem %s" % self.disk.lofile)
+ runner.show([self.fsckcmd, "-y", self.disk.lofile])
+
+ def __get_size_from_filesystem(self):
+ return self.disk.size
+
+ def __resize_to_minimal(self):
+ self.__fsck()
+
+ # vfat filesystems are not shrunk here, so the minimal size is
+ # simply the current filesystem size
+ return self.__get_size_from_filesystem()
+
+ def resparse(self, size = None):
+ self.cleanup()
+ minsize = self.__resize_to_minimal()
+ self.disk.truncate(minsize)
+ self.__resize_filesystem(size)
+ return minsize
+
+class BtrfsDiskMount(DiskMount):
+ """A DiskMount object that is able to format/resize btrfs filesystems."""
+ def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
+ self.__check_btrfs()
+ DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
+ self.blocksize = blocksize
+ self.fslabel = fslabel.replace("/", "")
+ self.uuid = None
+ self.skipformat = skipformat
+ self.fsopts = fsopts
+ self.blkidcmd = find_binary_path("blkid")
+ self.btrfsckcmd = find_binary_path("btrfsck")
+
+ def __check_btrfs(self):
+ found = False
+ # the btrfs kernel module must be loaded before btrfs can be mounted
+ load_module("btrfs")
+ for line in open("/proc/filesystems").xreadlines():
+ if line.find("btrfs") > -1:
+ found = True
+ break
+ if not found:
+ raise MountError("Your system cannot mount btrfs filesystems; make sure your kernel has btrfs support and the btrfs.ko module is loaded.")
+
+ # disable SELinux, which would otherwise block writes
+ if os.path.exists("/usr/sbin/setenforce"):
+ runner.show(["/usr/sbin/setenforce", "0"])
+
+ def __parse_field(self, output, field):
+ for line in output.split(" "):
+ if line.startswith(field + "="):
+ return line[len(field) + 1:].strip().replace("\"", "")
+
+ raise KeyError("Failed to find field '%s' in output" % field)
+
+ def __format_filesystem(self):
+ if self.skipformat:
+ msger.debug("Skip filesystem format.")
+ return
+
+ msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
+ rc = runner.show([self.mkfscmd, "-L", self.fslabel, self.disk.device])
+ if rc != 0:
+ raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))
+
+ self.uuid = self.__parse_field(runner.outs([self.blkidcmd, self.disk.device]), "UUID")
+
+ def __resize_filesystem(self, size = None):
+ current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
+
+ if size is None:
+ size = self.disk.size
+
+ if size == current_size:
+ return
+
+ if size > current_size:
+ self.disk.expand(size = size) # pass size by keyword (see ExtDiskMount)
+
+ self.__fsck()
+ return size
+
+ def __create(self):
+ resize = False
+ if not self.disk.fixed() and self.disk.exists():
+ resize = True
+
+ self.disk.create()
+
+ if resize:
+ self.__resize_filesystem()
+ else:
+ self.__format_filesystem()
+
+ def mount(self, options = None):
+ self.__create()
+ DiskMount.mount(self, options)
+
+ def __fsck(self):
+ msger.debug("Checking filesystem %s" % self.disk.lofile)
+ runner.quiet([self.btrfsckcmd, self.disk.lofile])
+
+ def __get_size_from_filesystem(self):
+ return self.disk.size
+
+ def __resize_to_minimal(self):
+ self.__fsck()
+
+ return self.__get_size_from_filesystem()
+
+ def resparse(self, size = None):
+ self.cleanup()
+ minsize = self.__resize_to_minimal()
+ self.disk.truncate(minsize)
+ self.__resize_filesystem(size)
+ return minsize
+
+class DeviceMapperSnapshot(object):
+ def __init__(self, imgloop, cowloop):
+ self.imgloop = imgloop
+ self.cowloop = cowloop
+
+ self.__created = False
+ self.__name = None
+ self.dmsetupcmd = find_binary_path("dmsetup")
+
+ """Load dm_snapshot if it isn't loaded"""
+ load_module("dm_snapshot")
+
+ def get_path(self):
+ if self.__name is None:
+ return None
+ return os.path.join("/dev/mapper", self.__name)
+ path = property(get_path)
+
+ def create(self):
+ if self.__created:
+ return
+
+ self.imgloop.create()
+ self.cowloop.create()
+
+ self.__name = "imgcreate-%d-%d" % (os.getpid(),
+ random.randint(0, 2**16))
+
+ size = os.stat(self.imgloop.lofile)[stat.ST_SIZE]
+
+ table = "0 %d snapshot %s %s p 8" % (size / 512,
+ self.imgloop.device,
+ self.cowloop.device)
+
+ args = [self.dmsetupcmd, "create", self.__name, "--table", table]
+ if runner.show(args) != 0:
+ self.cowloop.cleanup()
+ self.imgloop.cleanup()
+ raise SnapshotError("Could not create snapshot device using: " + ' '.join(args))
+
+ self.__created = True
+
+ def remove(self, ignore_errors = False):
+ if not self.__created:
+ return
+
+ time.sleep(2)
+ rc = runner.show([self.dmsetupcmd, "remove", self.__name])
+ if not ignore_errors and rc != 0:
+ raise SnapshotError("Could not remove snapshot device")
+
+ self.__name = None
+ self.__created = False
+
+ self.cowloop.cleanup()
+ self.imgloop.cleanup()
+
+ def get_cow_used(self):
+ if not self.__created:
+ return 0
+
+ #
+ # dmsetup status on a snapshot returns e.g.
+ # "0 8388608 snapshot 416/1048576"
+ # or, more generally:
+ # "A B snapshot C/D"
+ # where C is the number of 512 byte sectors in use
+ #
+ out = runner.outs([self.dmsetupcmd, "status", self.__name])
+ try:
+ return int((out.split()[3]).split('/')[0]) * 512
+ except ValueError:
+ raise SnapshotError("Failed to parse dmsetup status: " + out)
+
+def create_image_minimizer(path, image, minimal_size):
+ """
+ Builds a copy-on-write image which can be used to
+ create a device-mapper snapshot of an image where
+ the image's filesystem is as small as possible
+
+ The steps taken are:
+ 1) Create a sparse COW
+ 2) Loopback mount the image and the COW
+ 3) Create a device-mapper snapshot of the image
+ using the COW
+ 4) Resize the filesystem to the minimal size
+ 5) Determine the amount of space used in the COW
+ 6) Destroy the device-mapper snapshot
+ 7) Truncate the COW, removing unused space
+ 8) Create a squashfs of the COW
+ """
+ imgloop = LoopbackDisk(image, None) # Passing bogus size - doesn't matter
+
+ cowloop = SparseLoopbackDisk(os.path.join(os.path.dirname(path), "osmin"),
+ 64L * 1024L * 1024L)
+
+ snapshot = DeviceMapperSnapshot(imgloop, cowloop)
+
+ try:
+ snapshot.create()
+
+ resize2fs(snapshot.path, minimal_size)
+
+ cow_used = snapshot.get_cow_used()
+ finally:
+ snapshot.remove(ignore_errors = (sys.exc_info()[0] is not None))
+
+ cowloop.truncate(cow_used)
+
+ mksquashfs(cowloop.lofile, path)
+
+ os.unlink(cowloop.lofile)
+
+def load_module(module):
+ found = False
+ for line in open('/proc/modules').xreadlines():
+ if line.startswith("%s " % module):
+ found = True
+ break
+ if not found:
+ msger.info("Loading %s..." % module)
+ runner.quiet(['modprobe', module])
+
+class LoopDevice(object):
+ def __init__(self, loopid=None):
+ self.device = None
+ self.loopid = loopid
+ self.created = False
+ self.kpartxcmd = find_binary_path("kpartx")
+ self.losetupcmd = find_binary_path("losetup")
+
+ def register(self, device):
+ self.device = device
+ self.loopid = None
+ self.created = True
+
+ def reg_atexit(self):
+ import atexit
+ atexit.register(self.close)
+
+ def _genloopid(self):
+ import glob
+ if not glob.glob("/dev/loop[0-9]*"):
+ return 10
+
+ # "/dev/loop" is 9 characters, so x[9:] is the numeric device id
+ fint = lambda x: x[9:].isdigit() and int(x[9:]) or 0
+ maxid = 1 + max(filter(lambda x: x < 100,
+ map(fint, glob.glob("/dev/loop[0-9]*"))))
+ if maxid < 10: maxid = 10
+ if maxid >= 100:
+ raise MountError("Exhausted loop device ids (only ids 10-99 are managed here)")
+ return maxid
+
+ def _kpseek(self, device):
+ rc, out = runner.runtool([self.kpartxcmd, '-l', '-v', device])
+ if rc != 0:
+ raise MountError("Can't query dm snapshot on %s" % device)
+ for line in out.splitlines():
+ if line and line.startswith("loop"):
+ return True
+ return False
+
+ def _loseek(self, device):
+ import re
+ rc, out = runner.runtool([self.losetupcmd, '-a'])
+ if rc != 0:
+ raise MountError("Failed to run 'losetup -a'")
+ for line in out.splitlines():
+ m = re.match("([^:]+): .*", line)
+ if m and m.group(1) == device:
+ return True
+ return False
+
+ def create(self):
+ if not self.created:
+ if not self.loopid:
+ self.loopid = self._genloopid()
+ self.device = "/dev/loop%d" % self.loopid
+ if os.path.exists(self.device):
+ if self._loseek(self.device):
+ raise MountError("Device busy: %s" % self.device)
+ else:
+ self.created = True
+ return
+
+ mknod = find_binary_path('mknod')
+ rc = runner.show([mknod, '-m664', self.device, 'b', '7', str(self.loopid)])
+ if rc != 0:
+ raise MountError("Failed to create device %s" % self.device)
+ else:
+ self.created = True
+
+ def close(self):
+ if self.created:
+ try:
+ self.cleanup()
+ self.device = None
+ except MountError, e:
+ msger.error("%s" % e)
+
+ def cleanup(self):
+ if self.device is None:
+ return
+
+ if self._kpseek(self.device):
+ if self.created:
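+ # close all inherited file descriptors (beyond the standard
+ # three) before invoking kpartx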
+ for i in range(3, os.sysconf("SC_OPEN_MAX")):
+ try:
+ os.close(i)
+ except:
+ pass
+ runner.quiet([self.kpartxcmd, "-d", self.device])
+ if self._loseek(self.device):
+ runner.quiet([self.losetupcmd, "-d", self.device])
+ # FIXME: should sleep a while between two loseek
+ if self._loseek(self.device):
+ msger.warning("Can't cleanup loop device %s" % self.device)
+ elif self.loopid:
+ os.unlink(self.device)
+
+DEVICE_PIDFILE_DIR = "/var/tmp/mic/device"
+DEVICE_LOCKFILE = "/var/lock/__mic_loopdev.lock"
+
+def get_loop_device(losetupcmd, lofile):
+ global DEVICE_PIDFILE_DIR
+ global DEVICE_LOCKFILE
+
+ import fcntl
+ makedirs(os.path.dirname(DEVICE_LOCKFILE))
+ fp = open(DEVICE_LOCKFILE, 'w')
+ fcntl.flock(fp, fcntl.LOCK_EX)
+ try:
+ loopdev = None
+ devinst = LoopDevice()
+
+ # clean up leftover loop devices first
+ clean_loop_devices()
+
+ # find an available loop device
+ rc, out = runner.runtool([losetupcmd, "--find"])
+ if rc == 0:
+ loopdev = out.split()[0]
+ devinst.register(loopdev)
+ if not loopdev or not os.path.exists(loopdev):
+ devinst.create()
+ loopdev = devinst.device
+
+ # setup a loop device for image file
+ rc = runner.show([losetupcmd, loopdev, lofile])
+ if rc != 0:
+ raise MountError("Failed to setup loop device for '%s'" % lofile)
+
+ devinst.reg_atexit()
+
+ # try to save device and pid
+ makedirs(DEVICE_PIDFILE_DIR)
+ pidfile = os.path.join(DEVICE_PIDFILE_DIR, os.path.basename(loopdev))
+ if os.path.exists(pidfile):
+ os.unlink(pidfile)
+ with open(pidfile, 'w') as wf:
+ wf.write(str(os.getpid()))
+
+ except MountError, err:
+ raise CreatorError("%s" % str(err))
+ finally:
+ try:
+ fcntl.flock(fp, fcntl.LOCK_UN)
+ fp.close()
+ os.unlink(DEVICE_LOCKFILE)
+ except:
+ pass
+
+ return loopdev
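+
+# Illustrative usage (hypothetical path, not part of the original module):
+# get_loop_device("/sbin/losetup", "/tmp/disk.img") attaches the image and
+# returns a device node such as "/dev/loop0"; the owning pid is recorded
+# under DEVICE_PIDFILE_DIR so clean_loop_devices() can reap stale devices.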
+
+def clean_loop_devices(piddir=DEVICE_PIDFILE_DIR):
+ if not os.path.exists(piddir) or not os.path.isdir(piddir):
+ return
+
+ for loopdev in os.listdir(piddir):
+ pidfile = os.path.join(piddir, loopdev)
+ try:
+ with open(pidfile, 'r') as rf:
+ devpid = int(rf.read())
+ except:
+ devpid = None
+
+ # if the process using this device is alive, skip it
+ if not devpid or os.path.exists(os.path.join('/proc', str(devpid))):
+ continue
+
+ # try to clean it up
+ try:
+ devinst = LoopDevice()
+ devinst.register(os.path.join('/dev', loopdev))
+ devinst.cleanup()
+ os.unlink(pidfile)
+ except:
+ pass
+
diff --git a/scripts/lib/mic/utils/gpt_parser.py b/scripts/lib/mic/utils/gpt_parser.py
new file mode 100644
index 0000000000..5d43b70778
--- /dev/null
+++ b/scripts/lib/mic/utils/gpt_parser.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python -tt
+#
+# Copyright (c) 2013 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+""" This module implements a simple GPT partitions parser which can read the
+GPT header and the GPT partition table. """
+
+import struct
+import uuid
+import binascii
+from mic.utils.errors import MountError
+
+_GPT_HEADER_FORMAT = "<8s4sIIIQQQQ16sQIII"
+_GPT_HEADER_SIZE = struct.calcsize(_GPT_HEADER_FORMAT)
+_GPT_ENTRY_FORMAT = "<16s16sQQQ72s"
+_GPT_ENTRY_SIZE = struct.calcsize(_GPT_ENTRY_FORMAT)
+_SUPPORTED_GPT_REVISION = '\x00\x00\x01\x00'
+
+def _stringify_uuid(binary_uuid):
+ """ A small helper function to transform a binary UUID into a string
+ format. """
+
+ uuid_str = str(uuid.UUID(bytes_le = binary_uuid))
+
+ return uuid_str.upper()
+
+def _calc_header_crc(raw_hdr):
+ """ Calculate GPT header CRC32 checksum. The 'raw_hdr' parameter has to
+ be a list or a tuple containing all the elements of the GPT header in a
+ "raw" form, meaning that it should simply contain "unpacked" disk data.
+ """
+
+ raw_hdr = list(raw_hdr)
+ raw_hdr[3] = 0
+ raw_hdr = struct.pack(_GPT_HEADER_FORMAT, *raw_hdr)
+
+ return binascii.crc32(raw_hdr) & 0xFFFFFFFF
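+
+# Note (illustrative): raw_hdr[3] is the header's own CRC32 field, which
+# is zeroed above because the UEFI spec requires checksumming the header
+# with that field set to zero.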
+
+def _validate_header(raw_hdr):
+ """ Validate the GPT header. The 'raw_hdr' parameter has to be a list or a
+ tuple containing all the elements of the GPT header in a "raw" form,
+ meaning that it should simply contain "unpacked" disk data. """
+
+ # Validate the signature
+ if raw_hdr[0] != 'EFI PART':
+ raise MountError("GPT partition table not found")
+
+ # Validate the revision
+ if raw_hdr[1] != _SUPPORTED_GPT_REVISION:
+ raise MountError("Unsupported GPT revision '%s', supported revision " \
+ "is '%s'" % \
+ (binascii.hexlify(raw_hdr[1]),
+ binascii.hexlify(_SUPPORTED_GPT_REVISION)))
+
+ # Validate header size
+ if raw_hdr[2] != _GPT_HEADER_SIZE:
+ raise MountError("Bad GPT header size: %d bytes, expected %d" % \
+ (raw_hdr[2], _GPT_HEADER_SIZE))
+
+ crc = _calc_header_crc(raw_hdr)
+ if raw_hdr[3] != crc:
+ raise MountError("GPT header crc mismatch: %#x, should be %#x" % \
+ (crc, raw_hdr[3]))
+
+class GptParser:
+ """ GPT partition table parser. Allows reading the GPT header and the
+ partition table, as well as modifying the partition table records. """
+
+ def __init__(self, disk_path, sector_size = 512):
+ """ The class constructor which accepts the following parameters:
+ * disk_path - full path to the disk image or device node
+ * sector_size - size of a disk sector in bytes """
+
+ self.sector_size = sector_size
+ self.disk_path = disk_path
+
+ try:
+ self._disk_obj = open(disk_path, 'r+b')
+ except IOError as err:
+ raise MountError("Cannot open file '%s' for reading GPT " \
+ "partitions: %s" % (disk_path, err))
+
+ def __del__(self):
+ """ The class destructor. """
+
+ self._disk_obj.close()
+
+ def _read_disk(self, offset, size):
+ """ A helper function which reads 'size' bytes from offset 'offset' of
+ the disk and checks all the error conditions. """
+
+ self._disk_obj.seek(offset)
+ try:
+ data = self._disk_obj.read(size)
+ except IOError as err:
+ raise MountError("cannot read from '%s': %s" % \
+ (self.disk_path, err))
+
+ if len(data) != size:
+ raise MountError("cannot read %d bytes from offset '%d' of '%s', " \
+ "read only %d bytes" % \
+ (size, offset, self.disk_path, len(data)))
+
+ return data
+
+ def _write_disk(self, offset, buf):
+ """ A helper function which writes buffer 'buf' to offset 'offset' of
+ the disk. This function takes care of unaligned writes and checks all
+ the error conditions. """
+
+ # Since we may be dealing with a block device, we can only write in
+ # 'self.sector_size' chunks. Find the aligned starting and ending
+ # disk offsets for the read-modify-write cycle.
+ start = (offset / self.sector_size) * self.sector_size
+ end = ((start + len(buf)) / self.sector_size + 1) * self.sector_size
+
+ data = self._read_disk(start, end - start)
+ off = offset - start
+ data = data[:off] + buf + data[off + len(buf):]
+
+ self._disk_obj.seek(start)
+ try:
+ self._disk_obj.write(data)
+ except IOError as err:
+ raise MountError("cannot write to '%s': %s" % (self.disk_path, err))
+
+ def read_header(self, primary = True):
+ """ Read and verify the GPT header and return a dictionary containing
+ the following elements:
+
+ 'signature' : header signature
+ 'revision' : header revision
+ 'hdr_size' : header size in bytes
+ 'hdr_crc' : header CRC32
+ 'hdr_lba' : LBA of this header
+ 'hdr_offs' : byte disk offset of this header
+ 'backup_lba' : backup header LBA
+ 'backup_offs' : byte disk offset of backup header
+ 'first_lba' : first usable LBA for partitions
+ 'first_offs' : first usable byte disk offset for partitions
+ 'last_lba' : last usable LBA for partitions
+ 'last_offs' : last usable byte disk offset for partitions
+ 'disk_uuid' : UUID of the disk
+ 'ptable_lba' : starting LBA of array of partition entries
+ 'ptable_offs' : disk byte offset of the start of the partition table
+ 'ptable_size' : partition table size in bytes
+ 'entries_cnt' : number of available partition table entries
+ 'entry_size' : size of a single partition entry
+ 'ptable_crc' : CRC32 of the partition table
+ 'primary' : a boolean, if 'True', this is the primary GPT header,
+ if 'False' - the secondary
+ 'primary_str' : contains string "primary" if this is the primary GPT
+ header, and "backup" otherwise
+
+ This dictionary corresponds to the GPT header format. Please see the
+ UEFI standard for the description of these fields.
+
+ If the 'primary' parameter is 'True', the primary GPT header is read,
+ otherwise the backup GPT header is read instead. """
+
+ # Read and validate the primary GPT header
+ raw_hdr = self._read_disk(self.sector_size, _GPT_HEADER_SIZE)
+ raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
+ _validate_header(raw_hdr)
+ primary_str = "primary"
+
+ if not primary:
+ # Read and validate the backup GPT header
+ raw_hdr = self._read_disk(raw_hdr[6] * self.sector_size, _GPT_HEADER_SIZE)
+ raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
+ _validate_header(raw_hdr)
+ primary_str = "backup"
+
+ return { 'signature' : raw_hdr[0],
+ 'revision' : raw_hdr[1],
+ 'hdr_size' : raw_hdr[2],
+ 'hdr_crc' : raw_hdr[3],
+ 'hdr_lba' : raw_hdr[5],
+ 'hdr_offs' : raw_hdr[5] * self.sector_size,
+ 'backup_lba' : raw_hdr[6],
+ 'backup_offs' : raw_hdr[6] * self.sector_size,
+ 'first_lba' : raw_hdr[7],
+ 'first_offs' : raw_hdr[7] * self.sector_size,
+ 'last_lba' : raw_hdr[8],
+ 'last_offs' : raw_hdr[8] * self.sector_size,
+ 'disk_uuid' :_stringify_uuid(raw_hdr[9]),
+ 'ptable_lba' : raw_hdr[10],
+ 'ptable_offs' : raw_hdr[10] * self.sector_size,
+ 'ptable_size' : raw_hdr[11] * raw_hdr[12],
+ 'entries_cnt' : raw_hdr[11],
+ 'entry_size' : raw_hdr[12],
+ 'ptable_crc' : raw_hdr[13],
+ 'primary' : primary,
+ 'primary_str' : primary_str }
+
+ def _read_raw_ptable(self, header):
+ """ Read and validate primary or backup partition table. The 'header'
+ argument is the GPT header. If it is the primary GPT header, then the
+ primary partition table is read and validated, otherwise - the backup
+ one. The 'header' argument is a dictionary which is returned by the
+ 'read_header()' method. """
+
+ raw_ptable = self._read_disk(header['ptable_offs'],
+ header['ptable_size'])
+
+ crc = binascii.crc32(raw_ptable) & 0xFFFFFFFF
+ if crc != header['ptable_crc']:
+ raise MountError("Partition table at LBA %d (%s) is corrupted" % \
+ (header['ptable_lba'], header['primary_str']))
+
+ return raw_ptable
+
+ def get_partitions(self, primary = True):
+ """ This is a generator which parses the GPT partition table and
+ generates the following dictionary for each partition:
+
+ 'index' : the index of the partition table entry
+ 'offs' : byte disk offset of the partition table entry
+ 'type_uuid' : partition type UUID
+ 'part_uuid' : partition UUID
+ 'first_lba' : the first LBA
+ 'last_lba' : the last LBA
+ 'flags' : attribute flags
+ 'name' : partition name
+ 'primary' : a boolean, if 'True', this is the primary partition
+ table, if 'False' - the secondary
+ 'primary_str' : contains string "primary" if this is the primary GPT
+ header, and "backup" otherwise
+
+ This dictionary corresponds to the GPT header format. Please see the
+ UEFI standard for the description of these fields.
+
+ If the 'primary' parameter is 'True', partitions from the primary GPT
+ partition table are generated, otherwise partitions from the backup GPT
+ partition table are generated. """
+
+ if primary:
+ primary_str = "primary"
+ else:
+ primary_str = "backup"
+
+ header = self.read_header(primary)
+ raw_ptable = self._read_raw_ptable(header)
+
+ for index in xrange(0, header['entries_cnt']):
+ start = header['entry_size'] * index
+ end = start + header['entry_size']
+ raw_entry = struct.unpack(_GPT_ENTRY_FORMAT, raw_ptable[start:end])
+
+ if raw_entry[2] == 0 or raw_entry[3] == 0:
+ continue
+
+ part_name = str(raw_entry[5].decode('UTF-16').split('\0', 1)[0])
+
+ yield { 'index' : index,
+ 'offs' : header['ptable_offs'] + start,
+ 'type_uuid' : _stringify_uuid(raw_entry[0]),
+ 'part_uuid' : _stringify_uuid(raw_entry[1]),
+ 'first_lba' : raw_entry[2],
+ 'last_lba' : raw_entry[3],
+ 'flags' : raw_entry[4],
+ 'name' : part_name,
+ 'primary' : primary,
+ 'primary_str' : primary_str }
+
+ def _change_partition(self, header, entry):
+ """ A helper function for 'change_partitions()' which changes a
+ a paricular instance of the partition table (primary or backup). """
+
+ if entry['index'] >= header['entries_cnt']:
+ raise MountError("Partition table at LBA %d has only %d " \
+ "records cannot change record number %d" % \
+ (header['entries_cnt'], entry['index']))
+ # Read raw GPT header
+ raw_hdr = self._read_disk(header['hdr_offs'], _GPT_HEADER_SIZE)
+ raw_hdr = list(struct.unpack(_GPT_HEADER_FORMAT, raw_hdr))
+ _validate_header(raw_hdr)
+
+ # Prepare the new partition table entry
+ raw_entry = struct.pack(_GPT_ENTRY_FORMAT,
+ uuid.UUID(entry['type_uuid']).bytes_le,
+ uuid.UUID(entry['part_uuid']).bytes_le,
+ entry['first_lba'],
+ entry['last_lba'],
+ entry['flags'],
+ entry['name'].encode('UTF-16'))
+
+ # Write the updated entry to the disk
+ entry_offs = header['ptable_offs'] + \
+ header['entry_size'] * entry['index']
+ self._write_disk(entry_offs, raw_entry)
+
+ # Calculate and update partition table CRC32
+ raw_ptable = self._read_disk(header['ptable_offs'],
+ header['ptable_size'])
+ raw_hdr[13] = binascii.crc32(raw_ptable) & 0xFFFFFFFF
+
+ # Calculate and update the GPT header CRC
+ raw_hdr[3] = _calc_header_crc(raw_hdr)
+
+ # Write the updated header to the disk
+ raw_hdr = struct.pack(_GPT_HEADER_FORMAT, *raw_hdr)
+ self._write_disk(header['hdr_offs'], raw_hdr)
+
+ def change_partition(self, entry):
+ """ Change a GPT partition. The 'entry' argument has the same format as
+ 'get_partitions()' returns. This function simply changes the partition
+ table record corresponding to 'entry' in both the primary and the
+ backup GPT partition tables. The partition table CRC is re-calculated
+ and the GPT headers are modified accordingly. """
+
+ # Change the primary partition table
+ header = self.read_header(True)
+ self._change_partition(header, entry)
+
+ # Change the backup partition table
+ header = self.read_header(False)
+ self._change_partition(header, entry)
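+
+# Illustrative usage (hypothetical image path, not part of the original
+# module):
+#
+# gpt = GptParser("/tmp/disk.img")
+# for part in gpt.get_partitions():
+#     print part['index'], part['name'], part['part_uuid']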
diff --git a/scripts/lib/mic/utils/grabber.py b/scripts/lib/mic/utils/grabber.py
new file mode 100644
index 0000000000..45e30b4fb0
--- /dev/null
+++ b/scripts/lib/mic/utils/grabber.py
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+
+import os
+import sys
+import rpm
+import fcntl
+import struct
+import termios
+
+from mic import msger
+from mic.utils import runner
+from mic.utils.errors import CreatorError
+
+from urlgrabber import grabber
+from urlgrabber import __version__ as grabber_version
+
+if rpm.labelCompare(grabber_version.split('.'), '3.9.0'.split('.')) == -1:
+ msger.warning("Version of python-urlgrabber is %s, lower than '3.9.0', "
+ "you may encounter some network issues" % grabber_version)
+
+def myurlgrab(url, filename, proxies, progress_obj = None):
+ g = grabber.URLGrabber()
+ if progress_obj is None:
+ progress_obj = TextProgress()
+
+ if url.startswith("file:/"):
+ filepath = "/%s" % url.replace("file:", "").lstrip('/')
+ if not os.path.exists(filepath):
+ raise CreatorError("URLGrabber error: can't find file %s" % url)
+ if url.endswith('.rpm'):
+ return filepath
+ else:
+ # leave the repo metadata in the source path untouched; copy it over
+ runner.show(['cp', '-f', filepath, filename])
+
+ else:
+ try:
+ filename = g.urlgrab(url=str(url),
+ filename=filename,
+ ssl_verify_host=False,
+ ssl_verify_peer=False,
+ proxies=proxies,
+ http_headers=(('Pragma', 'no-cache'),),
+ quote=0,
+ progress_obj=progress_obj)
+ except grabber.URLGrabError, err:
+ msg = str(err)
+ if msg.find(url) < 0:
+ msg += ' on %s' % url
+ raise CreatorError(msg)
+
+ return filename
+
+def terminal_width(fd=1):
+ """ Get the real terminal width """
+ try:
+ buf = 'abcdefgh'
+ buf = fcntl.ioctl(fd, termios.TIOCGWINSZ, buf)
+ return struct.unpack('hhhh', buf)[1]
+ except Exception: # typically IOError when fd is not a terminal
+ return 80
+
+def truncate_url(url, width):
+ return os.path.basename(url)[0:width]
+
+class TextProgress(object):
+ # make the class a singleton
+ _instance = None
+ def __new__(cls, *args, **kwargs):
+ if not cls._instance:
+ cls._instance = super(TextProgress, cls).__new__(cls, *args, **kwargs)
+
+ return cls._instance
+
+ def __init__(self, totalnum = None):
+ self.total = totalnum
+ self.counter = 1
+
+ def start(self, filename, url, *args, **kwargs):
+ self.url = url
+ self.termwidth = terminal_width()
+ msger.info("\r%-*s" % (self.termwidth, " "))
+ if self.total is None:
+ msger.info("\rRetrieving %s ..." % truncate_url(self.url, self.termwidth - 15))
+ else:
+ msger.info("\rRetrieving %s [%d/%d] ..." % (truncate_url(self.url, self.termwidth - 25), self.counter, self.total))
+
+ def update(self, *args):
+ pass
+
+ def end(self, *args):
+ if self.counter == self.total:
+ msger.raw("\n")
+
+ if self.total is not None:
+ self.counter += 1
+
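+# Illustrative usage (hypothetical URL and paths, not part of the original
+# module):
+#
+# url = "http://example.com/repo/foo.rpm"
+# local = myurlgrab(url, "/tmp/foo.rpm", None) # None = no proxies
+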
diff --git a/scripts/lib/mic/utils/misc.py b/scripts/lib/mic/utils/misc.py
new file mode 100644
index 0000000000..63024346a9
--- /dev/null
+++ b/scripts/lib/mic/utils/misc.py
@@ -0,0 +1,1067 @@
+#!/usr/bin/python -tt
+#
+# Copyright (c) 2010, 2011 Intel Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import sys
+import time
+import tempfile
+import re
+import shutil
+import glob
+import hashlib
+import subprocess
+import platform
+import traceback
+
+
+try:
+ import sqlite3 as sqlite
+except ImportError:
+ import sqlite
+
+try:
+ from xml.etree import cElementTree
+except ImportError:
+ import cElementTree
+xmlparse = cElementTree.parse
+
+from mic import msger
+from mic.utils.errors import CreatorError, SquashfsError
+from mic.utils.fs_related import find_binary_path, makedirs
+from mic.utils.grabber import myurlgrab
+from mic.utils.proxy import get_proxy_for
+from mic.utils import runner
+from mic.utils import rpmmisc
+
+
+RPM_RE = re.compile(r"(.*)\.(.*) (.*)-(.*)")
+RPM_FMT = "%(name)s.%(arch)s %(version)s-%(release)s"
+SRPM_RE = re.compile(r"(.*)-(\d+.*)-(\d+\.\d+)\.src\.rpm")
+
+
+def build_name(kscfg, release=None, prefix = None, suffix = None):
+ """Construct and return an image name string.
+
+ This is a utility function to help create sensible name and fslabel
+ strings. The name is constructed using the sans-prefix-and-extension
+ kickstart filename and the supplied prefix and suffix.
+
+ kscfg -- a path to a kickstart file
+ release -- a replacement to suffix for image release
+ prefix -- a prefix to prepend to the name; defaults to None, which causes
+ no prefix to be used
+ suffix -- a suffix to append to the name; defaults to None, which causes
+ a YYYYMMDDHHMM suffix to be used
+
+ """
+ name = os.path.basename(kscfg)
+ idx = name.rfind('.')
+ if idx >= 0:
+ name = name[:idx]
+
+ if release is not None:
+ suffix = ""
+ if prefix is None:
+ prefix = ""
+ if suffix is None:
+ suffix = time.strftime("%Y%m%d%H%M")
+
+ if name.startswith(prefix):
+ name = name[len(prefix):]
+
+ prefix = "%s-" % prefix if prefix else ""
+ suffix = "-%s" % suffix if suffix else ""
+
+ ret = prefix + name + suffix
+ return ret
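+
+# For example (illustrative): build_name("/tmp/meego.ks", suffix="rc1")
+# returns "meego-rc1"; with no suffix a YYYYMMDDHHMM timestamp is
+# appended instead, and passing release="..." suppresses the suffix.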
+
+def get_distro():
+ """Detect linux distribution, support "meego"
+ """
+
+ support_dists = ('SuSE',
+ 'debian',
+ 'fedora',
+ 'redhat',
+ 'centos',
+ 'meego',
+ 'moblin',
+ 'tizen')
+ try:
+ (dist, ver, id) = platform.linux_distribution( \
+ supported_dists = support_dists)
+ except:
+ (dist, ver, id) = platform.dist( \
+ supported_dists = support_dists)
+
+ return (dist, ver, id)
+
+def get_distro_str():
+ """Get composited string for current linux distribution
+ """
+ (dist, ver, id) = get_distro()
+
+ if not dist:
+ return 'Unknown Linux Distro'
+ else:
+ distro_str = ' '.join(map(str.strip, (dist, ver, id)))
+ return distro_str.strip()
+
+_LOOP_RULE_PTH = None
+
+def hide_loopdev_presentation():
+ udev_rules = "80-prevent-loop-present.rules"
+ udev_rules_dir = [
+ '/usr/lib/udev/rules.d/',
+ '/lib/udev/rules.d/',
+ '/etc/udev/rules.d/'
+ ]
+
+ global _LOOP_RULE_PTH
+
+ for rdir in udev_rules_dir:
+ if os.path.exists(rdir):
+ _LOOP_RULE_PTH = os.path.join(rdir, udev_rules)
+
+ if not _LOOP_RULE_PTH:
+ return
+
+ try:
+ with open(_LOOP_RULE_PTH, 'w') as wf:
+ wf.write('KERNEL=="loop*", ENV{UDISKS_PRESENTATION_HIDE}="1"')
+
+ runner.quiet('udevadm trigger')
+ except:
+ pass
+
+def unhide_loopdev_presentation():
+ global _LOOP_RULE_PTH
+
+ if not _LOOP_RULE_PTH:
+ return
+
+ try:
+ os.unlink(_LOOP_RULE_PTH)
+ runner.quiet('udevadm trigger')
+ except:
+ pass
+
+def extract_rpm(rpmfile, targetdir):
+ rpm2cpio = find_binary_path("rpm2cpio")
+ cpio = find_binary_path("cpio")
+
+ olddir = os.getcwd()
+ os.chdir(targetdir)
+
+ msger.verbose("Extract rpm file with cpio: %s" % rpmfile)
+ p1 = subprocess.Popen([rpm2cpio, rpmfile], stdout=subprocess.PIPE)
+ p2 = subprocess.Popen([cpio, "-idv"], stdin=p1.stdout,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (sout, serr) = p2.communicate()
+ msger.verbose(sout or serr)
+
+ os.chdir(olddir)
+
+def compressing(fpath, method):
+ comp_map = {
+ "gz": "gzip",
+ "bz2": "bzip2"
+ }
+ if method not in comp_map:
+ raise CreatorError("Unsupport compress format: %s, valid values: %s"
+ % (method, ','.join(comp_map.keys())))
+ cmd = find_binary_path(comp_map[method])
+ rc = runner.show([cmd, "-f", fpath])
+ if rc:
+ raise CreatorError("Failed to %s file: %s" % (comp_map[method], fpath))
+
+def taring(dstfile, target):
+ import tarfile
+ basen, ext = os.path.splitext(dstfile)
+ comp = {".tar": None,
+ ".gz": "gz", # for .tar.gz
+ ".bz2": "bz2", # for .tar.bz2
+ ".tgz": "gz",
+ ".tbz": "bz2"}[ext]
+
+ # specify tarball file path
+ if not comp:
+ tarpath = dstfile
+ elif basen.endswith(".tar"):
+ tarpath = basen
+ else:
+ tarpath = basen + ".tar"
+ wf = tarfile.open(tarpath, 'w')
+
+ if os.path.isdir(target):
+ for item in os.listdir(target):
+ wf.add(os.path.join(target, item), item)
+ else:
+ wf.add(target, os.path.basename(target))
+ wf.close()
+
+ if comp:
+ compressing(tarpath, comp)
+    # when dstfile ext is ".tgz" or ".tbz", the result has to be renamed
+ if not basen.endswith(".tar"):
+ shutil.move("%s.%s" % (tarpath, comp), dstfile)
+
+def ziping(dstfile, target):
+ import zipfile
+ wf = zipfile.ZipFile(dstfile, 'w', compression=zipfile.ZIP_DEFLATED)
+ if os.path.isdir(target):
+ for item in os.listdir(target):
+ fpath = os.path.join(target, item)
+ if not os.path.isfile(fpath):
+ continue
+ wf.write(fpath, item, zipfile.ZIP_DEFLATED)
+ else:
+ wf.write(target, os.path.basename(target), zipfile.ZIP_DEFLATED)
+ wf.close()
+
+pack_formats = {
+ ".tar": taring,
+ ".tar.gz": taring,
+ ".tar.bz2": taring,
+ ".tgz": taring,
+ ".tbz": taring,
+ ".zip": ziping,
+}
+
+def packing(dstfile, target):
+ (base, ext) = os.path.splitext(dstfile)
+ if ext in (".gz", ".bz2") and base.endswith(".tar"):
+ ext = ".tar" + ext
+ if ext not in pack_formats:
+ raise CreatorError("Unsupport pack format: %s, valid values: %s"
+ % (ext, ','.join(pack_formats.keys())))
+ func = pack_formats[ext]
+ # func should be callable
+ func(dstfile, target)
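+
+# Illustrative usage (not part of the original change):
+#   packing("/tmp/rootfs.tar.bz2", "/tmp/rootfs")   # dispatches to taring()
+#   packing("/tmp/rootfs.zip", "/tmp/rootfs")       # dispatches to ziping()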
+
+def human_size(size):
+ """Return human readable string for Bytes size
+ """
+
+ if size <= 0:
+ return "0M"
+ import math
+ measure = ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
+ expo = int(math.log(size, 1024))
+ mant = float(size/math.pow(1024, expo))
+ return "{0:.1f}{1:s}".format(mant, measure[expo])
+
+def get_block_size(file_obj):
+ """ Returns block size for file object 'file_obj'. Errors are indicated by
+ the 'IOError' exception. """
+
+ from fcntl import ioctl
+ import struct
+
+ # Get the block size of the host file-system for the image file by calling
+ # the FIGETBSZ ioctl (number 2).
+ binary_data = ioctl(file_obj, 2, struct.pack('I', 0))
+ return struct.unpack('I', binary_data)[0]
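+
+# Illustrative usage (not part of the original change):
+#   with open("image.raw", "rb") as f:
+#       bsize = get_block_size(f)   # typically 4096 on ext3/ext4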
+
+def check_space_pre_cp(src, dst):
+ """Check whether disk space is enough before 'cp' like
+ operations, else exception will be raised.
+ """
+
+ srcsize = get_file_size(src) * 1024 * 1024
+ freesize = get_filesystem_avail(dst)
+ if srcsize > freesize:
+ raise CreatorError("space on %s(%s) is not enough for about %s files"
+ % (dst, human_size(freesize), human_size(srcsize)))
+
+def calc_hashes(file_path, hash_names, start = 0, end = None):
+ """ Calculate hashes for a file. The 'file_path' argument is the file
+ to calculate hash functions for, 'start' and 'end' are the starting and
+ ending file offset to calculate the has functions for. The 'hash_names'
+ argument is a list of hash names to calculate. Returns the the list
+ of calculated hash values in the hexadecimal form in the same order
+ as 'hash_names'.
+ """
+ if end == None:
+ end = os.path.getsize(file_path)
+
+ chunk_size = 65536
+ to_read = end - start
+ read = 0
+
+ hashes = []
+ for hash_name in hash_names:
+ hashes.append(hashlib.new(hash_name))
+
+ with open(file_path, "rb") as f:
+ f.seek(start)
+
+ while read < to_read:
+ if read + chunk_size > to_read:
+ chunk_size = to_read - read
+ chunk = f.read(chunk_size)
+ for hash_obj in hashes:
+ hash_obj.update(chunk)
+ read += chunk_size
+
+ result = []
+ for hash_obj in hashes:
+ result.append(hash_obj.hexdigest())
+
+ return result
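+
+# Illustrative usage (not part of the original change):
+#   md5, sha1 = calc_hashes("/path/to/image.raw", ("md5", "sha1"))
+# computes both digests over the whole file in a single pass.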
+
+def get_md5sum(fpath):
+ return calc_hashes(fpath, ('md5', ))[0]
+
+
+def normalize_ksfile(ksconf, release, arch):
+ '''
+ Return the name of a normalized ks file in which macro variables
+    @BUILD_ID@ and @ARCH@ are replaced with real values.
+
+ The original ks file is returned if no special macro is used, otherwise
+ a temp file is created and returned, which will be deleted when program
+ exits normally.
+ '''
+
+ if not release:
+ release = "latest"
+ if not arch or re.match(r'i.86', arch):
+ arch = "ia32"
+
+ with open(ksconf) as f:
+ ksc = f.read()
+
+ if "@ARCH@" not in ksc and "@BUILD_ID@" not in ksc:
+ return ksconf
+
+ msger.info("Substitute macro variable @BUILD_ID@/@ARCH@ in ks: %s" % ksconf)
+ ksc = ksc.replace("@ARCH@", arch)
+ ksc = ksc.replace("@BUILD_ID@", release)
+
+ fd, ksconf = tempfile.mkstemp(prefix=os.path.basename(ksconf))
+ os.write(fd, ksc)
+ os.close(fd)
+
+ msger.debug('normalized ks file:%s' % ksconf)
+
+ def remove_temp_ks():
+ try:
+ os.unlink(ksconf)
+ except OSError, err:
+ msger.warning('Failed to remove temp ks file:%s:%s' % (ksconf, err))
+
+ import atexit
+ atexit.register(remove_temp_ks)
+
+ return ksconf
+
+
+def _check_mic_chroot(rootdir):
+ def _path(path):
+ return rootdir.rstrip('/') + path
+
+ release_files = map(_path, [ "/etc/moblin-release",
+ "/etc/meego-release",
+ "/etc/tizen-release"])
+
+ if not any(map(os.path.exists, release_files)):
+ msger.warning("Dir %s is not a MeeGo/Tizen chroot env" % rootdir)
+
+ if not glob.glob(rootdir + "/boot/vmlinuz-*"):
+ msger.warning("Failed to find kernel module under %s" % rootdir)
+
+ return
+
+def selinux_check(arch, fstypes):
+ try:
+ getenforce = find_binary_path('getenforce')
+ except CreatorError:
+ return
+
+ selinux_status = runner.outs([getenforce])
+ if arch and arch.startswith("arm") and selinux_status == "Enforcing":
+ raise CreatorError("Can't create arm image if selinux is enabled, "
+ "please run 'setenforce 0' to disable selinux")
+
+ use_btrfs = filter(lambda typ: typ == 'btrfs', fstypes)
+ if use_btrfs and selinux_status == "Enforcing":
+ raise CreatorError("Can't create btrfs image if selinux is enabled,"
+ " please run 'setenforce 0' to disable selinux")
+
+def get_image_type(path):
+ def _get_extension_name(path):
+ match = re.search("(?<=\.)\w+$", path)
+ if match:
+ return match.group(0)
+ else:
+ return None
+
+ if os.path.isdir(path):
+ _check_mic_chroot(path)
+ return "fs"
+
+ maptab = {
+ "tar": "loop",
+ "raw":"raw",
+ "vmdk":"vmdk",
+ "vdi":"vdi",
+ "iso":"livecd",
+ "usbimg":"liveusb",
+ }
+
+ extension = _get_extension_name(path)
+ if extension in maptab:
+ return maptab[extension]
+
+ fd = open(path, "rb")
+ file_header = fd.read(1024)
+ fd.close()
+ vdi_flag = "<<< Sun VirtualBox Disk Image >>>"
+ if file_header[0:len(vdi_flag)] == vdi_flag:
+ return maptab["vdi"]
+
+ output = runner.outs(['file', path])
+ isoptn = re.compile(r".*ISO 9660 CD-ROM filesystem.*(bootable).*")
+ usbimgptn = re.compile(r".*x86 boot sector.*active.*")
+ rawptn = re.compile(r".*x86 boot sector.*")
+ vmdkptn = re.compile(r".*VMware. disk image.*")
+ ext3fsimgptn = re.compile(r".*Linux.*ext3 filesystem data.*")
+ ext4fsimgptn = re.compile(r".*Linux.*ext4 filesystem data.*")
+ btrfsimgptn = re.compile(r".*BTRFS.*")
+ if isoptn.match(output):
+ return maptab["iso"]
+ elif usbimgptn.match(output):
+ return maptab["usbimg"]
+ elif rawptn.match(output):
+ return maptab["raw"]
+ elif vmdkptn.match(output):
+ return maptab["vmdk"]
+ elif ext3fsimgptn.match(output):
+ return "ext3fsimg"
+ elif ext4fsimgptn.match(output):
+ return "ext4fsimg"
+ elif btrfsimgptn.match(output):
+ return "btrfsimg"
+ else:
+ raise CreatorError("Cannot detect the type of image: %s" % path)
+
+
+def get_file_size(filename):
+ """ Return size in MB unit """
+ cmd = ['du', "-s", "-b", "-B", "1M", filename]
+ rc, duOutput = runner.runtool(cmd)
+ if rc != 0:
+ raise CreatorError("Failed to run: %s" % ' '.join(cmd))
+ size1 = int(duOutput.split()[0])
+
+ cmd = ['du', "-s", "-B", "1M", filename]
+ rc, duOutput = runner.runtool(cmd)
+ if rc != 0:
+ raise CreatorError("Failed to run: %s" % ' '.join(cmd))
+
+ size2 = int(duOutput.split()[0])
+ return max(size1, size2)
+
+
+def get_filesystem_avail(fs):
+ vfstat = os.statvfs(fs)
+ return vfstat.f_bavail * vfstat.f_bsize
+
+def convert_image(srcimg, srcfmt, dstimg, dstfmt):
+ #convert disk format
+ if dstfmt != "raw":
+ raise CreatorError("Invalid destination image format: %s" % dstfmt)
+ msger.debug("converting %s image to %s" % (srcimg, dstimg))
+ if srcfmt == "vmdk":
+ path = find_binary_path("qemu-img")
+ argv = [path, "convert", "-f", "vmdk", srcimg, "-O", dstfmt, dstimg]
+ elif srcfmt == "vdi":
+ path = find_binary_path("VBoxManage")
+ argv = [path, "internalcommands", "converttoraw", srcimg, dstimg]
+ else:
+ raise CreatorError("Invalid soure image format: %s" % srcfmt)
+
+ rc = runner.show(argv)
+ if rc == 0:
+ msger.debug("convert successful")
+ if rc != 0:
+ raise CreatorError("Unable to convert disk to %s" % dstfmt)
+
+def uncompress_squashfs(squashfsimg, outdir):
+ """Uncompress file system from squshfs image"""
+ unsquashfs = find_binary_path("unsquashfs")
+ args = [ unsquashfs, "-d", outdir, squashfsimg ]
+ rc = runner.show(args)
+ if (rc != 0):
+ raise SquashfsError("Failed to uncompress %s." % squashfsimg)
+
+def mkdtemp(dir = "/var/tmp", prefix = "mic-tmp-"):
+ """ FIXME: use the dir in mic.conf instead """
+
+ makedirs(dir)
+ return tempfile.mkdtemp(dir = dir, prefix = prefix)
+
+def get_repostrs_from_ks(ks):
+ def _get_temp_reponame(baseurl):
+ md5obj = hashlib.md5(baseurl)
+ tmpreponame = "%s" % md5obj.hexdigest()
+ return tmpreponame
+
+ kickstart_repos = []
+
+ for repodata in ks.handler.repo.repoList:
+ repo = {}
+ for attr in ('name',
+ 'baseurl',
+ 'mirrorlist',
+ 'includepkgs', # val is list
+ 'excludepkgs', # val is list
+ 'cost', # int
+ 'priority',# int
+ 'save',
+ 'proxy',
+ 'proxyuser',
+                     'proxypasswd',
+ 'debuginfo',
+ 'source',
+ 'gpgkey',
+ 'ssl_verify'):
+ if hasattr(repodata, attr) and getattr(repodata, attr):
+ repo[attr] = getattr(repodata, attr)
+
+ if 'name' not in repo:
+ repo['name'] = _get_temp_reponame(repodata.baseurl)
+
+ kickstart_repos.append(repo)
+
+ return kickstart_repos
+
+def _get_uncompressed_data_from_url(url, filename, proxies):
+ filename = myurlgrab(url, filename, proxies)
+ suffix = None
+ if filename.endswith(".gz"):
+ suffix = ".gz"
+ runner.quiet(['gunzip', "-f", filename])
+ elif filename.endswith(".bz2"):
+ suffix = ".bz2"
+ runner.quiet(['bunzip2', "-f", filename])
+ if suffix:
+ filename = filename.replace(suffix, "")
+ return filename
+
+def _get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename,
+ sumtype=None, checksum=None):
+ url = os.path.join(baseurl, filename)
+ filename_tmp = str("%s/%s/%s" % (cachedir, reponame, os.path.basename(filename)))
+ if os.path.splitext(filename_tmp)[1] in (".gz", ".bz2"):
+ filename = os.path.splitext(filename_tmp)[0]
+ else:
+ filename = filename_tmp
+ if sumtype and checksum and os.path.exists(filename):
+ try:
+ sumcmd = find_binary_path("%ssum" % sumtype)
+ except:
+ file_checksum = None
+ else:
+ file_checksum = runner.outs([sumcmd, filename]).split()[0]
+
+ if file_checksum and file_checksum == checksum:
+ return filename
+
+    return _get_uncompressed_data_from_url(url, filename_tmp, proxies)
+
+def get_metadata_from_repos(repos, cachedir):
+ my_repo_metadata = []
+ for repo in repos:
+ reponame = repo['name']
+ baseurl = repo['baseurl']
+
+ if 'proxy' in repo:
+ proxy = repo['proxy']
+ else:
+ proxy = get_proxy_for(baseurl)
+
+ proxies = None
+ if proxy:
+ proxies = {str(baseurl.split(":")[0]):str(proxy)}
+
+ makedirs(os.path.join(cachedir, reponame))
+ url = os.path.join(baseurl, "repodata/repomd.xml")
+ filename = os.path.join(cachedir, reponame, 'repomd.xml')
+ repomd = myurlgrab(url, filename, proxies)
+ try:
+ root = xmlparse(repomd)
+ except SyntaxError:
+ raise CreatorError("repomd.xml syntax error.")
+
+ ns = root.getroot().tag
+ ns = ns[0:ns.rindex("}")+1]
+
+ filepaths = {}
+ checksums = {}
+ sumtypes = {}
+
+ for elm in root.getiterator("%sdata" % ns):
+ if elm.attrib["type"] == "patterns":
+ filepaths['patterns'] = elm.find("%slocation" % ns).attrib['href']
+ checksums['patterns'] = elm.find("%sopen-checksum" % ns).text
+ sumtypes['patterns'] = elm.find("%sopen-checksum" % ns).attrib['type']
+ break
+
+ for elm in root.getiterator("%sdata" % ns):
+ if elm.attrib["type"] in ("group_gz", "group"):
+ filepaths['comps'] = elm.find("%slocation" % ns).attrib['href']
+ checksums['comps'] = elm.find("%sopen-checksum" % ns).text
+ sumtypes['comps'] = elm.find("%sopen-checksum" % ns).attrib['type']
+ break
+
+ primary_type = None
+ for elm in root.getiterator("%sdata" % ns):
+ if elm.attrib["type"] in ("primary_db", "primary"):
+ primary_type = elm.attrib["type"]
+ filepaths['primary'] = elm.find("%slocation" % ns).attrib['href']
+ checksums['primary'] = elm.find("%sopen-checksum" % ns).text
+ sumtypes['primary'] = elm.find("%sopen-checksum" % ns).attrib['type']
+ break
+
+ if not primary_type:
+ continue
+
+ for item in ("primary", "patterns", "comps"):
+ if item not in filepaths:
+ filepaths[item] = None
+ continue
+ if not filepaths[item]:
+ continue
+ filepaths[item] = _get_metadata_from_repo(baseurl,
+ proxies,
+ cachedir,
+ reponame,
+ filepaths[item],
+ sumtypes[item],
+ checksums[item])
+
+ """ Get repo key """
+ try:
+ repokey = _get_metadata_from_repo(baseurl,
+ proxies,
+ cachedir,
+ reponame,
+ "repodata/repomd.xml.key")
+ except CreatorError:
+ repokey = None
+ msger.debug("\ncan't get %s/%s" % (baseurl, "repodata/repomd.xml.key"))
+
+ my_repo_metadata.append({"name":reponame,
+ "baseurl":baseurl,
+ "repomd":repomd,
+ "primary":filepaths['primary'],
+ "cachedir":cachedir,
+ "proxies":proxies,
+ "patterns":filepaths['patterns'],
+ "comps":filepaths['comps'],
+ "repokey":repokey})
+
+ return my_repo_metadata
+
+def get_rpmver_in_repo(repometadata):
+ for repo in repometadata:
+ if repo["primary"].endswith(".xml"):
+ root = xmlparse(repo["primary"])
+ ns = root.getroot().tag
+ ns = ns[0:ns.rindex("}")+1]
+
+ versionlist = []
+ for elm in root.getiterator("%spackage" % ns):
+ if elm.find("%sname" % ns).text == 'rpm':
+ for node in elm.getchildren():
+ if node.tag == "%sversion" % ns:
+ versionlist.append(node.attrib['ver'])
+
+ if versionlist:
+ return reversed(
+ sorted(
+ versionlist,
+ key = lambda ver: map(int, ver.split('.')))).next()
+
+ elif repo["primary"].endswith(".sqlite"):
+ con = sqlite.connect(repo["primary"])
+ for row in con.execute("select version from packages where "
+ "name=\"rpm\" ORDER by version DESC"):
+ con.close()
+ return row[0]
+
+ return None
+
+def get_arch(repometadata):
+ archlist = []
+ for repo in repometadata:
+ if repo["primary"].endswith(".xml"):
+ root = xmlparse(repo["primary"])
+ ns = root.getroot().tag
+ ns = ns[0:ns.rindex("}")+1]
+ for elm in root.getiterator("%spackage" % ns):
+ if elm.find("%sarch" % ns).text not in ("noarch", "src"):
+ arch = elm.find("%sarch" % ns).text
+ if arch not in archlist:
+ archlist.append(arch)
+ elif repo["primary"].endswith(".sqlite"):
+ con = sqlite.connect(repo["primary"])
+ for row in con.execute("select arch from packages where arch not in (\"src\", \"noarch\")"):
+ if row[0] not in archlist:
+ archlist.append(row[0])
+
+ con.close()
+
+ uniq_arch = []
+ for i in range(len(archlist)):
+ if archlist[i] not in rpmmisc.archPolicies.keys():
+ continue
+ need_append = True
+ j = 0
+ while j < len(uniq_arch):
+ if archlist[i] in rpmmisc.archPolicies[uniq_arch[j]].split(':'):
+ need_append = False
+ break
+ if uniq_arch[j] in rpmmisc.archPolicies[archlist[i]].split(':'):
+ if need_append:
+ uniq_arch[j] = archlist[i]
+ need_append = False
+ else:
+ uniq_arch.remove(uniq_arch[j])
+ continue
+ j += 1
+ if need_append:
+ uniq_arch.append(archlist[i])
+
+ return uniq_arch, archlist
+
+def get_package(pkg, repometadata, arch = None):
+ ver = ""
+ target_repo = None
+ if not arch:
+ arches = []
+ elif arch not in rpmmisc.archPolicies:
+ arches = [arch]
+ else:
+ arches = rpmmisc.archPolicies[arch].split(':')
+ arches.append('noarch')
+
+ for repo in repometadata:
+ if repo["primary"].endswith(".xml"):
+ root = xmlparse(repo["primary"])
+ ns = root.getroot().tag
+ ns = ns[0:ns.rindex("}")+1]
+ for elm in root.getiterator("%spackage" % ns):
+ if elm.find("%sname" % ns).text == pkg:
+ if elm.find("%sarch" % ns).text in arches:
+ version = elm.find("%sversion" % ns)
+ tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
+ if tmpver > ver:
+ ver = tmpver
+ location = elm.find("%slocation" % ns)
+ pkgpath = "%s" % location.attrib['href']
+ target_repo = repo
+ break
+ if repo["primary"].endswith(".sqlite"):
+ con = sqlite.connect(repo["primary"])
+ if arch:
+ sql = 'select version, release, location_href from packages ' \
+ 'where name = "%s" and arch IN ("%s")' % \
+ (pkg, '","'.join(arches))
+ for row in con.execute(sql):
+ tmpver = "%s-%s" % (row[0], row[1])
+ if tmpver > ver:
+ ver = tmpver
+ pkgpath = "%s" % row[2]
+ target_repo = repo
+ break
+ else:
+ sql = 'select version, release, location_href from packages ' \
+ 'where name = "%s"' % pkg
+ for row in con.execute(sql):
+ tmpver = "%s-%s" % (row[0], row[1])
+ if tmpver > ver:
+ ver = tmpver
+ pkgpath = "%s" % row[2]
+ target_repo = repo
+ break
+ con.close()
+ if target_repo:
+ makedirs("%s/packages/%s" % (target_repo["cachedir"], target_repo["name"]))
+ url = os.path.join(target_repo["baseurl"], pkgpath)
+ filename = str("%s/packages/%s/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath)))
+ if os.path.exists(filename):
+ ret = rpmmisc.checkRpmIntegrity('rpm', filename)
+ if ret == 0:
+ return filename
+
+ msger.warning("package %s is damaged: %s" %
+ (os.path.basename(filename), filename))
+ os.unlink(filename)
+
+ pkg = myurlgrab(str(url), filename, target_repo["proxies"])
+ return pkg
+ else:
+ return None
+
+def get_source_name(pkg, repometadata):
+
+ def get_bin_name(pkg):
+ m = RPM_RE.match(pkg)
+ if m:
+ return m.group(1)
+ return None
+
+ def get_src_name(srpm):
+ m = SRPM_RE.match(srpm)
+ if m:
+ return m.group(1)
+ return None
+
+ ver = ""
+ target_repo = None
+
+ pkg_name = get_bin_name(pkg)
+ if not pkg_name:
+ return None
+
+ for repo in repometadata:
+ if repo["primary"].endswith(".xml"):
+ root = xmlparse(repo["primary"])
+ ns = root.getroot().tag
+ ns = ns[0:ns.rindex("}")+1]
+ for elm in root.getiterator("%spackage" % ns):
+ if elm.find("%sname" % ns).text == pkg_name:
+ if elm.find("%sarch" % ns).text != "src":
+ version = elm.find("%sversion" % ns)
+ tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
+ if tmpver > ver:
+ ver = tmpver
+ fmt = elm.find("%sformat" % ns)
+ if fmt:
+ fns = fmt.getchildren()[0].tag
+ fns = fns[0:fns.rindex("}")+1]
+ pkgpath = fmt.find("%ssourcerpm" % fns).text
+ target_repo = repo
+ break
+
+ if repo["primary"].endswith(".sqlite"):
+ con = sqlite.connect(repo["primary"])
+ for row in con.execute("select version, release, rpm_sourcerpm from packages where name = \"%s\" and arch != \"src\"" % pkg_name):
+ tmpver = "%s-%s" % (row[0], row[1])
+ if tmpver > ver:
+ pkgpath = "%s" % row[2]
+ target_repo = repo
+ break
+ con.close()
+ if target_repo:
+ return get_src_name(pkgpath)
+ else:
+ return None
+
+def get_pkglist_in_patterns(group, patterns):
+ found = False
+ pkglist = []
+ try:
+ root = xmlparse(patterns)
+ except SyntaxError:
+ raise SyntaxError("%s syntax error." % patterns)
+
+ for elm in list(root.getroot()):
+ ns = elm.tag
+ ns = ns[0:ns.rindex("}")+1]
+ name = elm.find("%sname" % ns)
+ summary = elm.find("%ssummary" % ns)
+ if name.text == group or summary.text == group:
+ found = True
+ break
+
+ if not found:
+ return pkglist
+
+ found = False
+ for requires in list(elm):
+ if requires.tag.endswith("requires"):
+ found = True
+ break
+
+ if not found:
+ return pkglist
+
+ for pkg in list(requires):
+ pkgname = pkg.attrib["name"]
+ if pkgname not in pkglist:
+ pkglist.append(pkgname)
+
+ return pkglist
+
+def get_pkglist_in_comps(group, comps):
+ found = False
+ pkglist = []
+ try:
+ root = xmlparse(comps)
+ except SyntaxError:
+ raise SyntaxError("%s syntax error." % comps)
+
+ for elm in root.getiterator("group"):
+ id = elm.find("id")
+ name = elm.find("name")
+ if id.text == group or name.text == group:
+ packagelist = elm.find("packagelist")
+ found = True
+ break
+
+ if not found:
+ return pkglist
+
+ for require in elm.getiterator("packagereq"):
+ if require.tag.endswith("packagereq"):
+ pkgname = require.text
+ if pkgname not in pkglist:
+ pkglist.append(pkgname)
+
+ return pkglist
+
+def is_statically_linked(binary):
+ return ", statically linked, " in runner.outs(['file', binary])
+
+def setup_qemu_emulator(rootdir, arch):
+ # mount binfmt_misc if it doesn't exist
+ if not os.path.exists("/proc/sys/fs/binfmt_misc"):
+ modprobecmd = find_binary_path("modprobe")
+ runner.show([modprobecmd, "binfmt_misc"])
+ if not os.path.exists("/proc/sys/fs/binfmt_misc/register"):
+ mountcmd = find_binary_path("mount")
+ runner.show([mountcmd, "-t", "binfmt_misc", "none", "/proc/sys/fs/binfmt_misc"])
+
+ # qemu_emulator is a special case, we can't use find_binary_path
+ # qemu emulator should be a statically-linked executable file
+ qemu_emulator = "/usr/bin/qemu-arm"
+ if not os.path.exists(qemu_emulator) or not is_statically_linked(qemu_emulator):
+ qemu_emulator = "/usr/bin/qemu-arm-static"
+ if not os.path.exists(qemu_emulator):
+ raise CreatorError("Please install a statically-linked qemu-arm")
+
+ # qemu emulator version check
+ armv7_list = [arch for arch in rpmmisc.archPolicies.keys() if arch.startswith('armv7')]
+ if arch in armv7_list: # need qemu (>=0.13.0)
+ qemuout = runner.outs([qemu_emulator, "-h"])
+ m = re.search("version\s*([.\d]+)", qemuout)
+ if m:
+ qemu_version = m.group(1)
+ if qemu_version < "0.13":
+ raise CreatorError("Requires %s version >=0.13 for %s" % (qemu_emulator, arch))
+ else:
+ msger.warning("Can't get version info of %s, please make sure it's higher than 0.13.0" % qemu_emulator)
+
+ if not os.path.exists(rootdir + "/usr/bin"):
+ makedirs(rootdir + "/usr/bin")
+ shutil.copy(qemu_emulator, rootdir + "/usr/bin/qemu-arm-static")
+ qemu_emulator = "/usr/bin/qemu-arm-static"
+
+ # disable selinux, selinux will block qemu emulator to run
+ if os.path.exists("/usr/sbin/setenforce"):
+ msger.info('Try to disable selinux')
+ runner.show(["/usr/sbin/setenforce", "0"])
+
+ # unregister it if it has been registered and is a dynamically-linked executable
+ node = "/proc/sys/fs/binfmt_misc/arm"
+ if os.path.exists(node):
+ qemu_unregister_string = "-1\n"
+ fd = open("/proc/sys/fs/binfmt_misc/arm", "w")
+ fd.write(qemu_unregister_string)
+ fd.close()
+
+ # register qemu emulator for interpreting other arch executable file
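+    # The binfmt_misc register string has the form
+    #   :name:type:offset:magic:mask:interpreter:flags
+    # Here: name "arm", type "M" (match by magic bytes), an empty offset,
+    # the ELF magic plus the EM_ARM machine field as the magic, a mask that
+    # ignores the variable header bytes, and qemu as the interpreter.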
+ if not os.path.exists(node):
+ qemu_arm_string = ":arm:M::\\x7fELF\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfa\\xff\\xff\\xff:%s:\n" % qemu_emulator
+ fd = open("/proc/sys/fs/binfmt_misc/register", "w")
+ fd.write(qemu_arm_string)
+ fd.close()
+
+ return qemu_emulator
+
+def SrcpkgsDownload(pkgs, repometadata, instroot, cachedir):
+ def get_source_repometadata(repometadata):
+ src_repometadata=[]
+ for repo in repometadata:
+ if repo["name"].endswith("-source"):
+ src_repometadata.append(repo)
+ if src_repometadata:
+ return src_repometadata
+ return None
+
+ def get_src_name(srpm):
+ m = SRPM_RE.match(srpm)
+ if m:
+ return m.group(1)
+ return None
+
+ src_repometadata = get_source_repometadata(repometadata)
+
+ if not src_repometadata:
+ msger.warning("No source repo found")
+ return None
+
+ src_pkgs = []
+ lpkgs_dict = {}
+ lpkgs_path = []
+ for repo in src_repometadata:
+ cachepath = "%s/%s/packages/*.src.rpm" %(cachedir, repo["name"])
+ lpkgs_path += glob.glob(cachepath)
+
+ for lpkg in lpkgs_path:
+ lpkg_name = get_src_name(os.path.basename(lpkg))
+ lpkgs_dict[lpkg_name] = lpkg
+ localpkgs = lpkgs_dict.keys()
+
+ cached_count = 0
+ destdir = instroot+'/usr/src/SRPMS'
+ if not os.path.exists(destdir):
+ os.makedirs(destdir)
+
+ srcpkgset = set()
+ for _pkg in pkgs:
+ srcpkg_name = get_source_name(_pkg, repometadata)
+ if not srcpkg_name:
+ continue
+ srcpkgset.add(srcpkg_name)
+
+ for pkg in list(srcpkgset):
+ if pkg in localpkgs:
+ cached_count += 1
+ shutil.copy(lpkgs_dict[pkg], destdir)
+ src_pkgs.append(os.path.basename(lpkgs_dict[pkg]))
+ else:
+ src_pkg = get_package(pkg, src_repometadata, 'src')
+ if src_pkg:
+ shutil.copy(src_pkg, destdir)
+ src_pkgs.append(src_pkg)
+ msger.info("%d source packages gotten from cache" % cached_count)
+
+ return src_pkgs
+
+def strip_end(text, suffix):
+ if not text.endswith(suffix):
+ return text
+ return text[:-len(suffix)]
diff --git a/scripts/lib/mic/utils/partitionedfs.py b/scripts/lib/mic/utils/partitionedfs.py
new file mode 100644
index 0000000000..04758440e1
--- /dev/null
+++ b/scripts/lib/mic/utils/partitionedfs.py
@@ -0,0 +1,790 @@
+#!/usr/bin/python -tt
+#
+# Copyright (c) 2009, 2010, 2011 Intel, Inc.
+# Copyright (c) 2007, 2008 Red Hat, Inc.
+# Copyright (c) 2008 Daniel P. Berrange
+# Copyright (c) 2008 David P. Huff
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+
+from mic import msger
+from mic.utils import runner
+from mic.utils.errors import MountError
+from mic.utils.fs_related import *
+from mic.utils.gpt_parser import GptParser
+
+# Overhead of the MBR partitioning scheme (just one sector)
+MBR_OVERHEAD = 1
+# Overhead of the GPT partitioning scheme
+GPT_OVERHEAD = 34
+
+# Size of a sector in bytes
+SECTOR_SIZE = 512
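+
+# Illustrative arithmetic: the 34-sector GPT overhead corresponds to the
+# protective MBR (1 sector), the GPT header (1 sector) and 128 partition
+# entries of 128 bytes each (32 sectors); the same amount is reserved again
+# for the backup header and entries at the end of the disk.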
+
+class PartitionedMount(Mount):
+ def __init__(self, mountdir, skipformat = False):
+ Mount.__init__(self, mountdir)
+ self.disks = {}
+ self.partitions = []
+ self.subvolumes = []
+ self.mapped = False
+ self.mountOrder = []
+ self.unmountOrder = []
+ self.parted = find_binary_path("parted")
+ self.kpartx = find_binary_path("kpartx")
+ self.mkswap = find_binary_path("mkswap")
+ self.btrfscmd=None
+ self.mountcmd = find_binary_path("mount")
+ self.umountcmd = find_binary_path("umount")
+ self.skipformat = skipformat
+ self.snapshot_created = self.skipformat
+ # Size of a sector used in calculations
+ self.sector_size = SECTOR_SIZE
+ self._partitions_layed_out = False
+
+ def __add_disk(self, disk_name):
+ """ Add a disk 'disk_name' to the internal list of disks. Note,
+ 'disk_name' is the name of the disk in the target system
+ (e.g., sdb). """
+
+ if disk_name in self.disks:
+ # We already have this disk
+ return
+
+ assert not self._partitions_layed_out
+
+ self.disks[disk_name] = \
+ { 'disk': None, # Disk object
+ 'mapped': False, # True if kpartx mapping exists
+              'numpart': 0,    # Number of allocated partitions
+ 'partitions': [], # Indexes to self.partitions
+ 'offset': 0, # Offset of next partition (in sectors)
+ # Minimum required disk size to fit all partitions (in bytes)
+ 'min_size': 0,
+ 'ptable_format': "msdos" } # Partition table format
+
+ def add_disk(self, disk_name, disk_obj):
+ """ Add a disk object which have to be partitioned. More than one disk
+ can be added. In case of multiple disks, disk partitions have to be
+ added for each disk separately with 'add_partition()". """
+
+ self.__add_disk(disk_name)
+ self.disks[disk_name]['disk'] = disk_obj
+
+ def __add_partition(self, part):
+ """ This is a helper function for 'add_partition()' which adds a
+ partition to the internal list of partitions. """
+
+ assert not self._partitions_layed_out
+
+ self.partitions.append(part)
+ self.__add_disk(part['disk_name'])
+
+ def add_partition(self, size, disk_name, mountpoint, fstype = None,
+ label=None, fsopts = None, boot = False, align = None,
+ part_type = None):
+ """ Add the next partition. Prtitions have to be added in the
+ first-to-last order. """
+
+ ks_pnum = len(self.partitions)
+
+ # Converting MB to sectors for parted
+ size = size * 1024 * 1024 / self.sector_size
+
+ # We need to handle subvolumes for btrfs
+ if fstype == "btrfs" and fsopts and fsopts.find("subvol=") != -1:
+ self.btrfscmd=find_binary_path("btrfs")
+ subvol = None
+ opts = fsopts.split(",")
+ for opt in opts:
+ if opt.find("subvol=") != -1:
+ subvol = opt.replace("subvol=", "").strip()
+ break
+ if not subvol:
+ raise MountError("No subvolume: %s" % fsopts)
+ self.subvolumes.append({'size': size, # In sectors
+ 'mountpoint': mountpoint, # Mount relative to chroot
+ 'fstype': fstype, # Filesystem type
+ 'fsopts': fsopts, # Filesystem mount options
+ 'disk_name': disk_name, # physical disk name holding partition
+ 'device': None, # kpartx device node for partition
+ 'mount': None, # Mount object
+ 'subvol': subvol, # Subvolume name
+ 'boot': boot, # Bootable flag
+ 'mounted': False # Mount flag
+ })
+
+ # We still need partition for "/" or non-subvolume
+ if mountpoint == "/" or not fsopts or fsopts.find("subvol=") == -1:
+ # Don't need subvolume for "/" because it will be set as default subvolume
+ if fsopts and fsopts.find("subvol=") != -1:
+ opts = fsopts.split(",")
+ for opt in opts:
+ if opt.strip().startswith("subvol="):
+ opts.remove(opt)
+ break
+ fsopts = ",".join(opts)
+
+ part = { 'ks_pnum' : ks_pnum, # Partition number in the KS file
+ 'size': size, # In sectors
+ 'mountpoint': mountpoint, # Mount relative to chroot
+ 'fstype': fstype, # Filesystem type
+ 'fsopts': fsopts, # Filesystem mount options
+ 'label': label, # Partition label
+ 'disk_name': disk_name, # physical disk name holding partition
+ 'device': None, # kpartx device node for partition
+ 'mount': None, # Mount object
+ 'num': None, # Partition number
+ 'boot': boot, # Bootable flag
+ 'align': align, # Partition alignment
+ 'part_type' : part_type, # Partition type
+ 'partuuid': None } # Partition UUID (GPT-only)
+
+ self.__add_partition(part)
+
+ def layout_partitions(self, ptable_format = "msdos"):
+ """ Layout the partitions, meaning calculate the position of every
+ partition on the disk. The 'ptable_format' parameter defines the
+ partition table format, and may be either "msdos" or "gpt". """
+
+ msger.debug("Assigning %s partitions to disks" % ptable_format)
+
+ if ptable_format not in ('msdos', 'gpt'):
+ raise MountError("Unknown partition table format '%s', supported " \
+ "formats are: 'msdos' and 'gpt'" % ptable_format)
+
+ if self._partitions_layed_out:
+ return
+
+ self._partitions_layed_out = True
+
+ # Go through partitions in the order they are added in .ks file
+ for n in range(len(self.partitions)):
+ p = self.partitions[n]
+
+ if not self.disks.has_key(p['disk_name']):
+ raise MountError("No disk %s for partition %s" \
+ % (p['disk_name'], p['mountpoint']))
+
+ if p['part_type'] and ptable_format != 'gpt':
+                # The --part-type can also be implemented for MBR
+                # partitions, in which case it would map to the 1-byte
+                # "partition type" field at offset 3 of the partition entry.
+                raise MountError("setting custom partition type is only " \
+                    "implemented for GPT partitions")
+
+ # Get the disk where the partition is located
+ d = self.disks[p['disk_name']]
+ d['numpart'] += 1
+ d['ptable_format'] = ptable_format
+
+ if d['numpart'] == 1:
+ if ptable_format == "msdos":
+ overhead = MBR_OVERHEAD
+ else:
+ overhead = GPT_OVERHEAD
+
+                # Skip the sectors required for the partitioning scheme overhead
+ d['offset'] += overhead
+                # Steal a few sectors from the first partition to account
+                # for the partitioning overhead
+ p['size'] -= overhead
+
+ if p['align']:
+                # If alignment is set, align the partition start.
+                # FIXME: This leaves empty space on the disk. To fill the
+                # gap we could enlarge the previous partition?
+
+ # Calc how much the alignment is off.
+ align_sectors = d['offset'] % (p['align'] * 1024 / self.sector_size)
+ # We need to move forward to the next alignment point
+ align_sectors = (p['align'] * 1024 / self.sector_size) - align_sectors
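+            # Example, with 'align' given in KiB: align=1024 (a 1 MiB
+            # boundary) and 512-byte sectors give a grain of 2048 sectors;
+            # an offset of 34 yields 2048 - (34 % 2048) = 2014 padding
+            # sectors, so the partition starts at sector 2048.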
+
+ msger.debug("Realignment for %s%s with %s sectors, original"
+ " offset %s, target alignment is %sK." %
+ (p['disk_name'], d['numpart'], align_sectors,
+ d['offset'], p['align']))
+
+                # Increase the offset so the partition actually starts on the right alignment
+ d['offset'] += align_sectors
+
+ p['start'] = d['offset']
+ d['offset'] += p['size']
+
+ p['type'] = 'primary'
+ p['num'] = d['numpart']
+
+ if d['ptable_format'] == "msdos":
+ if d['numpart'] > 2:
+ # Every logical partition requires an additional sector for
+ # the EBR, so steal the last sector from the end of each
+ # partition starting from the 3rd one for the EBR. This
+ # will make sure the logical partitions are aligned
+ # correctly.
+ p['size'] -= 1
+
+ if d['numpart'] > 3:
+ p['type'] = 'logical'
+ p['num'] = d['numpart'] + 1
+
+ d['partitions'].append(n)
+ msger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
+ "sectors (%d bytes)." \
+ % (p['mountpoint'], p['disk_name'], p['num'],
+ p['start'], p['start'] + p['size'] - 1,
+ p['size'], p['size'] * self.sector_size))
+
+        # Once all the partitions have been laid out, we can calculate the
+        # minimum disk sizes.
+ for disk_name, d in self.disks.items():
+ d['min_size'] = d['offset']
+ if d['ptable_format'] == 'gpt':
+ # Account for the backup partition table at the end of the disk
+ d['min_size'] += GPT_OVERHEAD
+
+ d['min_size'] *= self.sector_size
+
+ def __run_parted(self, args):
+ """ Run parted with arguments specified in the 'args' list. """
+
+ args.insert(0, self.parted)
+ msger.debug(args)
+
+ rc, out = runner.runtool(args, catch = 3)
+ out = out.strip()
+ if out:
+ msger.debug('"parted" output: %s' % out)
+
+ if rc != 0:
+ # We don't throw exception when return code is not 0, because
+ # parted always fails to reload part table with loop devices. This
+ # prevents us from distinguishing real errors based on return
+ # code.
+ msger.debug("WARNING: parted returned '%s' instead of 0" % rc)
+
+ def __create_partition(self, device, parttype, fstype, start, size):
+ """ Create a partition on an image described by the 'device' object. """
+
+        # The start sector is included in the size, so subtract one to get the end.
+ end = start + size - 1
+ msger.debug("Added '%s' partition, sectors %d-%d, size %d sectors" %
+ (parttype, start, end, size))
+
+ args = ["-s", device, "unit", "s", "mkpart", parttype]
+ if fstype:
+ args.extend([fstype])
+ args.extend(["%d" % start, "%d" % end])
+
+ return self.__run_parted(args)
+
+ def __format_disks(self):
+ self.layout_partitions()
+
+ if self.skipformat:
+ msger.debug("Skipping disk format, because skipformat flag is set.")
+ return
+
+ for dev in self.disks.keys():
+ d = self.disks[dev]
+ msger.debug("Initializing partition table for %s" % \
+ (d['disk'].device))
+ self.__run_parted(["-s", d['disk'].device, "mklabel",
+ d['ptable_format']])
+
+ msger.debug("Creating partitions")
+
+ for p in self.partitions:
+ d = self.disks[p['disk_name']]
+ if d['ptable_format'] == "msdos" and p['num'] == 5:
+ # The last sector of the 3rd partition was reserved for the EBR
+ # of the first _logical_ partition. This is why the extended
+ # partition should start one sector before the first logical
+ # partition.
+ self.__create_partition(d['disk'].device, "extended",
+ None, p['start'] - 1,
+ d['offset'] - p['start'])
+
+ if p['fstype'] == "swap":
+ parted_fs_type = "linux-swap"
+ elif p['fstype'] == "vfat":
+ parted_fs_type = "fat32"
+ elif p['fstype'] == "msdos":
+ parted_fs_type = "fat16"
+ else:
+ # Type for ext2/ext3/ext4/btrfs
+ parted_fs_type = "ext2"
+
+            # The boot ROM of OMAP boards requires the vfat boot partition
+            # to have an even number of sectors.
+ if p['mountpoint'] == "/boot" and p['fstype'] in ["vfat", "msdos"] \
+ and p['size'] % 2:
+ msger.debug("Substracting one sector from '%s' partition to " \
+ "get even number of sectors for the partition" % \
+ p['mountpoint'])
+ p['size'] -= 1
+
+ self.__create_partition(d['disk'].device, p['type'],
+ parted_fs_type, p['start'], p['size'])
+
+ if p['boot']:
+ if d['ptable_format'] == 'gpt':
+ flag_name = "legacy_boot"
+ else:
+ flag_name = "boot"
+ msger.debug("Set '%s' flag for partition '%s' on disk '%s'" % \
+ (flag_name, p['num'], d['disk'].device))
+ self.__run_parted(["-s", d['disk'].device, "set",
+ "%d" % p['num'], flag_name, "on"])
+
+ # If the partition table format is "gpt", find out PARTUUIDs for all
+ # the partitions. And if users specified custom parition type UUIDs,
+ # set them.
+ for disk_name, disk in self.disks.items():
+ if disk['ptable_format'] != 'gpt':
+ continue
+
+ pnum = 0
+ gpt_parser = GptParser(d['disk'].device, SECTOR_SIZE)
+ # Iterate over all GPT partitions on this disk
+ for entry in gpt_parser.get_partitions():
+ pnum += 1
+ # Find the matching partition in the 'self.partitions' list
+ for n in d['partitions']:
+ p = self.partitions[n]
+ if p['num'] == pnum:
+ # Found, fetch PARTUUID (partition's unique ID)
+ p['partuuid'] = entry['part_uuid']
+ msger.debug("PARTUUID for partition %d on disk '%s' " \
+ "(mount point '%s') is '%s'" % (pnum, \
+ disk_name, p['mountpoint'], p['partuuid']))
+ if p['part_type']:
+ entry['type_uuid'] = p['part_type']
+ msger.debug("Change type of partition %d on disk " \
+ "'%s' (mount point '%s') to '%s'" % \
+ (pnum, disk_name, p['mountpoint'],
+ p['part_type']))
+ gpt_parser.change_partition(entry)
+
+ del gpt_parser
+
+ def __map_partitions(self):
+ """Load it if dm_snapshot isn't loaded. """
+ load_module("dm_snapshot")
+
+ for dev in self.disks.keys():
+ d = self.disks[dev]
+ if d['mapped']:
+ continue
+
+ msger.debug("Running kpartx on %s" % d['disk'].device )
+ rc, kpartxOutput = runner.runtool([self.kpartx, "-l", "-v", d['disk'].device])
+ kpartxOutput = kpartxOutput.splitlines()
+
+ if rc != 0:
+ raise MountError("Failed to query partition mapping for '%s'" %
+ d['disk'].device)
+
+            # Skip any leading non-"loop" lines (verbose kpartx output)
+ i = 0
+ while i < len(kpartxOutput) and kpartxOutput[i][0:4] != "loop":
+ i = i + 1
+ kpartxOutput = kpartxOutput[i:]
+
+ # Make sure kpartx reported the right count of partitions
+ if len(kpartxOutput) != d['numpart']:
+                # If this disk has more than 3 partitions, then in case of
+                # MBR partitions there is an extended partition. Different
+                # versions of kpartx behave differently WRT the extended
+                # partition - some map it, some ignore it. This is why we do
+                # the below hack - if kpartx reported one more partition and
+                # the partition table type is "msdos" and the amount of
+                # partitions is more than 3, we just assume kpartx mapped
+                # the extended partition and we remove it.
+ if len(kpartxOutput) == d['numpart'] + 1 \
+ and d['ptable_format'] == 'msdos' and len(kpartxOutput) > 3:
+ kpartxOutput.pop(3)
+ else:
+ raise MountError("Unexpected number of partitions from " \
+ "kpartx: %d != %d" % \
+ (len(kpartxOutput), d['numpart']))
+
+ for i in range(len(kpartxOutput)):
+ line = kpartxOutput[i]
+ newdev = line.split()[0]
+ mapperdev = "/dev/mapper/" + newdev
+ loopdev = d['disk'].device + newdev[-1]
+
+ msger.debug("Dev %s: %s -> %s" % (newdev, loopdev, mapperdev))
+ pnum = d['partitions'][i]
+ self.partitions[pnum]['device'] = loopdev
+
+ # grub's install wants partitions to be named
+ # to match their parent device + partition num
+ # kpartx doesn't work like this, so we add compat
+ # symlinks to point to /dev/mapper
+ if os.path.lexists(loopdev):
+ os.unlink(loopdev)
+ os.symlink(mapperdev, loopdev)
+
+ msger.debug("Adding partx mapping for %s" % d['disk'].device)
+ rc = runner.show([self.kpartx, "-v", "-a", d['disk'].device])
+
+ if rc != 0:
+ # Make sure that the device maps are also removed on error case.
+ # The d['mapped'] isn't set to True if the kpartx fails so
+ # failed mapping will not be cleaned on cleanup either.
+ runner.quiet([self.kpartx, "-d", d['disk'].device])
+ raise MountError("Failed to map partitions for '%s'" %
+ d['disk'].device)
+
+            # FIXME: there is a delay before the multipath device nodes are
+            # set up; wait 10 seconds for the setup
+ import time
+ time.sleep(10)
+ d['mapped'] = True
+
+ def __unmap_partitions(self):
+ for dev in self.disks.keys():
+ d = self.disks[dev]
+ if not d['mapped']:
+ continue
+
+ msger.debug("Removing compat symlinks")
+ for pnum in d['partitions']:
+ if self.partitions[pnum]['device'] != None:
+ os.unlink(self.partitions[pnum]['device'])
+ self.partitions[pnum]['device'] = None
+
+ msger.debug("Unmapping %s" % d['disk'].device)
+ rc = runner.quiet([self.kpartx, "-d", d['disk'].device])
+ if rc != 0:
+ raise MountError("Failed to unmap partitions for '%s'" %
+ d['disk'].device)
+
+ d['mapped'] = False
+
+ def __calculate_mountorder(self):
+ msger.debug("Calculating mount order")
+ for p in self.partitions:
+ if p['mountpoint']:
+ self.mountOrder.append(p['mountpoint'])
+ self.unmountOrder.append(p['mountpoint'])
+
+ self.mountOrder.sort()
+ self.unmountOrder.sort()
+ self.unmountOrder.reverse()
+
+ def cleanup(self):
+ Mount.cleanup(self)
+ if self.disks:
+ self.__unmap_partitions()
+ for dev in self.disks.keys():
+ d = self.disks[dev]
+ try:
+ d['disk'].cleanup()
+ except:
+ pass
+
+ def unmount(self):
+ self.__unmount_subvolumes()
+ for mp in self.unmountOrder:
+ if mp == 'swap':
+ continue
+ p = None
+ for p1 in self.partitions:
+ if p1['mountpoint'] == mp:
+ p = p1
+ break
+
+ if p['mount'] != None:
+ try:
+ # Create subvolume snapshot here
+ if p['fstype'] == "btrfs" and p['mountpoint'] == "/" and not self.snapshot_created:
+ self.__create_subvolume_snapshots(p, p["mount"])
+ p['mount'].cleanup()
+ except:
+ pass
+ p['mount'] = None
+
+ # Only for btrfs
+ def __get_subvolume_id(self, rootpath, subvol):
+ if not self.btrfscmd:
+ self.btrfscmd=find_binary_path("btrfs")
+ argv = [ self.btrfscmd, "subvolume", "list", rootpath ]
+
+ rc, out = runner.runtool(argv)
+ msger.debug(out)
+
+ if rc != 0:
+ raise MountError("Failed to get subvolume id from %s', return code: %d." % (rootpath, rc))
+
+ subvolid = -1
+ for line in out.splitlines():
+ if line.endswith(" path %s" % subvol):
+ subvolid = line.split()[1]
+ if not subvolid.isdigit():
+ raise MountError("Invalid subvolume id: %s" % subvolid)
+ subvolid = int(subvolid)
+ break
+ return subvolid
+
+ def __create_subvolume_metadata(self, p, pdisk):
+ if len(self.subvolumes) == 0:
+ return
+
+ argv = [ self.btrfscmd, "subvolume", "list", pdisk.mountdir ]
+ rc, out = runner.runtool(argv)
+ msger.debug(out)
+
+ if rc != 0:
+ raise MountError("Failed to get subvolume id from %s', return code: %d." % (pdisk.mountdir, rc))
+
+ subvolid_items = out.splitlines()
+ subvolume_metadata = ""
+ for subvol in self.subvolumes:
+ for line in subvolid_items:
+ if line.endswith(" path %s" % subvol["subvol"]):
+ subvolid = line.split()[1]
+ if not subvolid.isdigit():
+ raise MountError("Invalid subvolume id: %s" % subvolid)
+
+ subvolid = int(subvolid)
+ opts = subvol["fsopts"].split(",")
+ for opt in opts:
+ if opt.strip().startswith("subvol="):
+ opts.remove(opt)
+ break
+ fsopts = ",".join(opts)
+ subvolume_metadata += "%d\t%s\t%s\t%s\n" % (subvolid, subvol["subvol"], subvol['mountpoint'], fsopts)
+
+ if subvolume_metadata:
+ fd = open("%s/.subvolume_metadata" % pdisk.mountdir, "w")
+ fd.write(subvolume_metadata)
+ fd.close()
+
+ def __get_subvolume_metadata(self, p, pdisk):
+ subvolume_metadata_file = "%s/.subvolume_metadata" % pdisk.mountdir
+ if not os.path.exists(subvolume_metadata_file):
+ return
+
+ fd = open(subvolume_metadata_file, "r")
+ content = fd.read()
+ fd.close()
+
+ for line in content.splitlines():
+ items = line.split("\t")
+ if items and len(items) == 4:
+ self.subvolumes.append({'size': 0, # In sectors
+ 'mountpoint': items[2], # Mount relative to chroot
+ 'fstype': "btrfs", # Filesystem type
+ 'fsopts': items[3] + ",subvol=%s" % items[1], # Filesystem mount options
+ 'disk_name': p['disk_name'], # physical disk name holding partition
+ 'device': None, # kpartx device node for partition
+ 'mount': None, # Mount object
+ 'subvol': items[1], # Subvolume name
+ 'boot': False, # Bootable flag
+ 'mounted': False # Mount flag
+ })
+
+ def __create_subvolumes(self, p, pdisk):
+ """ Create all the subvolumes. """
+
+ for subvol in self.subvolumes:
+ argv = [ self.btrfscmd, "subvolume", "create", pdisk.mountdir + "/" + subvol["subvol"]]
+
+ rc = runner.show(argv)
+ if rc != 0:
+ raise MountError("Failed to create subvolume '%s', return code: %d." % (subvol["subvol"], rc))
+
+ # Set default subvolume, subvolume for "/" is default
+ subvol = None
+ for subvolume in self.subvolumes:
+ if subvolume["mountpoint"] == "/" and p['disk_name'] == subvolume['disk_name']:
+ subvol = subvolume
+ break
+
+ if subvol:
+ # Get default subvolume id
+            subvolid = self.__get_subvolume_id(pdisk.mountdir, subvol["subvol"])
+ # Set default subvolume
+ if subvolid != -1:
+ rc = runner.show([ self.btrfscmd, "subvolume", "set-default", "%d" % subvolid, pdisk.mountdir])
+ if rc != 0:
+ raise MountError("Failed to set default subvolume id: %d', return code: %d." % (subvolid, rc))
+
+ self.__create_subvolume_metadata(p, pdisk)
+
+ def __mount_subvolumes(self, p, pdisk):
+ if self.skipformat:
+ # Get subvolume info
+ self.__get_subvolume_metadata(p, pdisk)
+ # Set default mount options
+ if len(self.subvolumes) != 0:
+ for subvol in self.subvolumes:
+ if subvol["mountpoint"] == p["mountpoint"] == "/":
+ opts = subvol["fsopts"].split(",")
+ for opt in opts:
+ if opt.strip().startswith("subvol="):
+ opts.remove(opt)
+ break
+ pdisk.fsopts = ",".join(opts)
+ break
+
+ if len(self.subvolumes) == 0:
+ # Return directly if no subvolumes
+ return
+
+ # Remount to make default subvolume mounted
+ rc = runner.show([self.umountcmd, pdisk.mountdir])
+ if rc != 0:
+ raise MountError("Failed to umount %s" % pdisk.mountdir)
+
+ rc = runner.show([self.mountcmd, "-o", pdisk.fsopts, pdisk.disk.device, pdisk.mountdir])
+ if rc != 0:
+ raise MountError("Failed to umount %s" % pdisk.mountdir)
+
+ for subvol in self.subvolumes:
+ if subvol["mountpoint"] == "/":
+ continue
+            subvolid = self.__get_subvolume_id(pdisk.mountdir, subvol["subvol"])
+ if subvolid == -1:
+ msger.debug("WARNING: invalid subvolume %s" % subvol["subvol"])
+ continue
+ # Replace subvolume name with subvolume ID
+ opts = subvol["fsopts"].split(",")
+ for opt in opts:
+ if opt.strip().startswith("subvol="):
+ opts.remove(opt)
+ break
+
+ opts.extend(["subvolrootid=0", "subvol=%s" % subvol["subvol"]])
+ fsopts = ",".join(opts)
+ subvol['fsopts'] = fsopts
+ mountpoint = self.mountdir + subvol['mountpoint']
+ makedirs(mountpoint)
+ rc = runner.show([self.mountcmd, "-o", fsopts, pdisk.disk.device, mountpoint])
+ if rc != 0:
+ raise MountError("Failed to mount subvolume %s to %s" % (subvol["subvol"], mountpoint))
+ subvol["mounted"] = True
+
+ def __unmount_subvolumes(self):
+ """ It may be called multiple times, so we need to chekc if it is still mounted. """
+ for subvol in self.subvolumes:
+ if subvol["mountpoint"] == "/":
+ continue
+ if not subvol["mounted"]:
+ continue
+ mountpoint = self.mountdir + subvol['mountpoint']
+ rc = runner.show([self.umountcmd, mountpoint])
+ if rc != 0:
+ raise MountError("Failed to unmount subvolume %s from %s" % (subvol["subvol"], mountpoint))
+ subvol["mounted"] = False
+
+ def __create_subvolume_snapshots(self, p, pdisk):
+ import time
+
+ if self.snapshot_created:
+ return
+
+ # Remount with subvolid=0
+ rc = runner.show([self.umountcmd, pdisk.mountdir])
+ if rc != 0:
+ raise MountError("Failed to umount %s" % pdisk.mountdir)
+ if pdisk.fsopts:
+ mountopts = pdisk.fsopts + ",subvolid=0"
+ else:
+ mountopts = "subvolid=0"
+ rc = runner.show([self.mountcmd, "-o", mountopts, pdisk.disk.device, pdisk.mountdir])
+ if rc != 0:
+ raise MountError("Failed to umount %s" % pdisk.mountdir)
+
+ # Create all the subvolume snapshots
+ snapshotts = time.strftime("%Y%m%d-%H%M")
+ for subvol in self.subvolumes:
+ subvolpath = pdisk.mountdir + "/" + subvol["subvol"]
+ snapshotpath = subvolpath + "_%s-1" % snapshotts
+ rc = runner.show([ self.btrfscmd, "subvolume", "snapshot", subvolpath, snapshotpath ])
+ if rc != 0:
+ raise MountError("Failed to create subvolume snapshot '%s' for '%s', return code: %d." % (snapshotpath, subvolpath, rc))
+
+ self.snapshot_created = True
+
+ def mount(self):
+ for dev in self.disks.keys():
+ d = self.disks[dev]
+ d['disk'].create()
+
+ self.__format_disks()
+ self.__map_partitions()
+ self.__calculate_mountorder()
+
+ for mp in self.mountOrder:
+ p = None
+ for p1 in self.partitions:
+ if p1['mountpoint'] == mp:
+ p = p1
+ break
+
+ if not p['label']:
+ if p['mountpoint'] == "/":
+ p['label'] = 'platform'
+ else:
+ p['label'] = mp.split('/')[-1]
+
+ if mp == 'swap':
+ import uuid
+ p['uuid'] = str(uuid.uuid1())
+ runner.show([self.mkswap,
+ '-L', p['label'],
+ '-U', p['uuid'],
+ p['device']])
+ continue
+
+ rmmountdir = False
+ if p['mountpoint'] == "/":
+ rmmountdir = True
+ if p['fstype'] == "vfat" or p['fstype'] == "msdos":
+ myDiskMount = VfatDiskMount
+ elif p['fstype'] in ("ext2", "ext3", "ext4"):
+ myDiskMount = ExtDiskMount
+ elif p['fstype'] == "btrfs":
+ myDiskMount = BtrfsDiskMount
+ else:
+ raise MountError("Fail to support file system " + p['fstype'])
+
+ if p['fstype'] == "btrfs" and not p['fsopts']:
+ p['fsopts'] = "subvolid=0"
+
+ pdisk = myDiskMount(RawDisk(p['size'] * self.sector_size, p['device']),
+ self.mountdir + p['mountpoint'],
+ p['fstype'],
+ 4096,
+ p['label'],
+ rmmountdir,
+ self.skipformat,
+ fsopts = p['fsopts'])
+ pdisk.mount(pdisk.fsopts)
+ if p['fstype'] == "btrfs" and p['mountpoint'] == "/":
+ if not self.skipformat:
+ self.__create_subvolumes(p, pdisk)
+ self.__mount_subvolumes(p, pdisk)
+ p['mount'] = pdisk
+ p['uuid'] = pdisk.uuid
+
+ def resparse(self, size = None):
+ # Can't re-sparse a disk image - too hard
+ pass
diff --git a/scripts/lib/mic/utils/proxy.py b/scripts/lib/mic/utils/proxy.py
new file mode 100644
index 0000000000..91451a2d01
--- /dev/null
+++ b/scripts/lib/mic/utils/proxy.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python -tt
+#
+# Copyright (c) 2010, 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import urlparse
+
+_my_proxies = {}
+_my_noproxy = None
+_my_noproxy_list = []
+
+def set_proxy_environ():
+ global _my_noproxy, _my_proxies
+ if not _my_proxies:
+ return
+ for key in _my_proxies.keys():
+ os.environ[key + "_proxy"] = _my_proxies[key]
+ if not _my_noproxy:
+ return
+ os.environ["no_proxy"] = _my_noproxy
+
+def unset_proxy_environ():
+ for env in ('http_proxy',
+ 'https_proxy',
+ 'ftp_proxy',
+ 'all_proxy'):
+ if env in os.environ:
+ del os.environ[env]
+
+ ENV=env.upper()
+ if ENV in os.environ:
+ del os.environ[ENV]
+
+def _set_proxies(proxy = None, no_proxy = None):
+ """Return a dictionary of scheme -> proxy server URL mappings.
+ """
+
+ global _my_noproxy, _my_proxies
+ _my_proxies = {}
+ _my_noproxy = None
+ proxies = []
+ if proxy:
+ proxies.append(("http_proxy", proxy))
+ if no_proxy:
+ proxies.append(("no_proxy", no_proxy))
+
+ # Get proxy settings from environment if not provided
+ if not proxy and not no_proxy:
+ proxies = os.environ.items()
+
+ # Remove proxy env variables, urllib2 can't handle them correctly
+ unset_proxy_environ()
+
+ for name, value in proxies:
+ name = name.lower()
+ if value and name[-6:] == '_proxy':
+ if name[0:2] != "no":
+ _my_proxies[name[:-6]] = value
+ else:
+ _my_noproxy = value
+
+def _ip_to_int(ip):
+ ipint=0
+ shift=24
+ for dec in ip.split("."):
+ ipint |= int(dec) << shift
+ shift -= 8
+ return ipint
+
+def _int_to_ip(val):
+ ipaddr=""
+ shift=0
+ for i in range(4):
+ dec = val >> shift
+ dec &= 0xff
+ ipaddr = ".%d%s" % (dec, ipaddr)
+ shift += 8
+ return ipaddr[1:]
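+
+# Illustrative round trip (not part of the original change):
+#   _ip_to_int("192.168.0.1") -> 3232235521 (0xc0a80001)
+#   _int_to_ip(3232235521)    -> "192.168.0.1"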
+
+def _isip(host):
+ if host.replace(".", "").isdigit():
+ return True
+ return False
+
+def _set_noproxy_list():
+ global _my_noproxy, _my_noproxy_list
+ _my_noproxy_list = []
+ if not _my_noproxy:
+ return
+ for item in _my_noproxy.split(","):
+ item = item.strip()
+ if not item:
+ continue
+
+ if item[0] != '.' and item.find("/") == -1:
+            # Plain host name, match it exactly
+ _my_noproxy_list.append({"match":0,"needle":item})
+
+ elif item[0] == '.':
+            # Domain suffix, match the tail of the host name
+ _my_noproxy_list.append({"match":1,"needle":item})
+
+ elif item.find("/") > 3:
+            # IP/MASK, match the network prefix
+ needle = item[0:item.find("/")].strip()
+ ip = _ip_to_int(needle)
+ netmask = 0
+ mask = item[item.find("/")+1:].strip()
+
+ if mask.isdigit():
+ netmask = int(mask)
+ netmask = ~((1<<(32-netmask)) - 1)
+ ip &= netmask
+ else:
+ shift=24
+ netmask=0
+ for dec in mask.split("."):
+ netmask |= int(dec) << shift
+ shift -= 8
+ ip &= netmask
+
+ _my_noproxy_list.append({"match":2,"needle":ip,"netmask":netmask})
+
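+# Illustrative mask arithmetic (not part of the original change): for the
+# no-proxy entry "10.0.0.0/8" the stored needle is 0x0a000000 and the mask
+# is 0xff000000 (as a 32-bit value), so a host like 10.1.2.3 matches since
+# _ip_to_int("10.1.2.3") & 0xff000000 == 0x0a000000.
+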
+def _isnoproxy(url):
+ (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
+
+ if '@' in host:
+ user_pass, host = host.split('@', 1)
+
+ if ':' in host:
+ host, port = host.split(':', 1)
+
+ hostisip = _isip(host)
+ for item in _my_noproxy_list:
+ if hostisip and item["match"] <= 1:
+ continue
+
+ if item["match"] == 2 and hostisip:
+ if (_ip_to_int(host) & item["netmask"]) == item["needle"]:
+ return True
+
+ if item["match"] == 0:
+ if host == item["needle"]:
+ return True
+
+ if item["match"] == 1:
+ if host.rfind(item["needle"]) > 0:
+ return True
+
+ return False
+
+def set_proxies(proxy = None, no_proxy = None):
+ _set_proxies(proxy, no_proxy)
+ _set_noproxy_list()
+ set_proxy_environ()
+
+def get_proxy_for(url):
+ if url.startswith('file:') or _isnoproxy(url):
+ return None
+
+ type = url[0:url.index(":")]
+ proxy = None
+ if _my_proxies.has_key(type):
+ proxy = _my_proxies[type]
+ elif _my_proxies.has_key("http"):
+ proxy = _my_proxies["http"]
+ else:
+ proxy = None
+
+ return proxy
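+
+# Illustrative usage with hypothetical hosts (not part of the original
+# change):
+#   set_proxies("http://proxy.example.com:8080", "localhost,.intranet")
+#   get_proxy_for("http://build.intranet/repo")  # None, no_proxy match
+#   get_proxy_for("http://example.org/repo")     # "http://proxy.example.com:8080"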
diff --git a/scripts/lib/mic/utils/rpmmisc.py b/scripts/lib/mic/utils/rpmmisc.py
new file mode 100644
index 0000000000..af15763e18
--- /dev/null
+++ b/scripts/lib/mic/utils/rpmmisc.py
@@ -0,0 +1,600 @@
+#!/usr/bin/python -tt
+#
+# Copyright (c) 2008, 2009, 2010, 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import sys
+import re
+import rpm
+
+from mic import msger
+from mic.utils.errors import CreatorError
+from mic.utils.proxy import get_proxy_for
+from mic.utils import runner
+
+
+class RPMInstallCallback:
+ """ Command line callback class for callbacks from the RPM library.
+ """
+
+ def __init__(self, ts, output=1):
+ self.output = output
+ self.callbackfilehandles = {}
+ self.total_actions = 0
+ self.total_installed = 0
+ self.installed_pkg_names = []
+ self.total_removed = 0
+ self.mark = "+"
+ self.marks = 40
+ self.lastmsg = None
+ self.tsInfo = None # this needs to be set for anything else to work
+ self.ts = ts
+ self.filelog = False
+ self.logString = []
+ self.headmsg = "Installing"
+
+ def _dopkgtup(self, hdr):
+ tmpepoch = hdr['epoch']
+ if tmpepoch is None: epoch = '0'
+ else: epoch = str(tmpepoch)
+
+ return (hdr['name'], hdr['arch'], epoch, hdr['version'], hdr['release'])
+
+ def _makeHandle(self, hdr):
+ handle = '%s:%s.%s-%s-%s' % (hdr['epoch'], hdr['name'], hdr['version'],
+ hdr['release'], hdr['arch'])
+
+ return handle
+
+ def _localprint(self, msg):
+ if self.output:
+ msger.info(msg)
+
+ def _makefmt(self, percent, progress = True):
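+        # (editor's note) builds the progress-line template, e.g.
+        # "\r %-10.10s: %-20.20s ++++++++++++         [ 3/10]"; callback()
+        # later fills the two %s slots with (action, package name)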
+ l = len(str(self.total_actions))
+ size = "%s.%s" % (l, l)
+ fmt_done = "[%" + size + "s/%" + size + "s]"
+ done = fmt_done % (self.total_installed + self.total_removed,
+ self.total_actions)
+ marks = self.marks - (2 * l)
+ width = "%s.%s" % (marks, marks)
+ fmt_bar = "%-" + width + "s"
+ if progress:
+ bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), )
+ fmt = "\r %-10.10s: %-20.20s " + bar + " " + done
+ else:
+ bar = fmt_bar % (self.mark * marks, )
+ fmt = " %-10.10s: %-20.20s " + bar + " " + done
+ return fmt
+
+ def _logPkgString(self, hdr):
+ """return nice representation of the package for the log"""
+ (n,a,e,v,r) = self._dopkgtup(hdr)
+ if e == '0':
+ pkg = '%s.%s %s-%s' % (n, a, v, r)
+ else:
+ pkg = '%s.%s %s:%s-%s' % (n, a, e, v, r)
+
+ return pkg
+
+ def callback(self, what, bytes, total, h, user):
+ if what == rpm.RPMCALLBACK_TRANS_START:
+ if bytes == 6:
+ self.total_actions = total
+
+ elif what == rpm.RPMCALLBACK_TRANS_PROGRESS:
+ pass
+
+ elif what == rpm.RPMCALLBACK_TRANS_STOP:
+ pass
+
+ elif what == rpm.RPMCALLBACK_INST_OPEN_FILE:
+ self.lastmsg = None
+ hdr = None
+ if h is not None:
+                try:
+                    hdr, rpmloc = h
+                except (TypeError, ValueError):
+                    # h may be just the package path rather than a
+                    # (header, path) tuple
+                    rpmloc = h
+                    hdr = readRpmHeader(self.ts, h)
+
+ handle = self._makeHandle(hdr)
+ fd = os.open(rpmloc, os.O_RDONLY)
+ self.callbackfilehandles[handle]=fd
+ if hdr['name'] not in self.installed_pkg_names:
+ self.installed_pkg_names.append(hdr['name'])
+ self.total_installed += 1
+ return fd
+ else:
+ self._localprint("No header - huh?")
+
+ elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
+ hdr = None
+ if h is not None:
+                try:
+                    hdr, rpmloc = h
+                except (TypeError, ValueError):
+                    rpmloc = h
+                    hdr = readRpmHeader(self.ts, h)
+
+ handle = self._makeHandle(hdr)
+ os.close(self.callbackfilehandles[handle])
+ fd = 0
+
+ # log stuff
+ #pkgtup = self._dopkgtup(hdr)
+ self.logString.append(self._logPkgString(hdr))
+
+ elif what == rpm.RPMCALLBACK_INST_PROGRESS:
+ if h is not None:
+ percent = (self.total_installed*100L)/self.total_actions
+ if total > 0:
+ try:
+ hdr, rpmloc = h
+ except:
+ rpmloc = h
+
+                    m = re.match(r"(.*)-(\d+.*)-(\d+\.\d+)\.(.+)\.rpm", os.path.basename(rpmloc))
+ if m:
+ pkgname = m.group(1)
+ else:
+ pkgname = os.path.basename(rpmloc)
+ if self.output:
+ fmt = self._makefmt(percent)
+ msg = fmt % (self.headmsg, pkgname)
+ if msg != self.lastmsg:
+ self.lastmsg = msg
+
+ msger.info(msg)
+
+ if self.total_installed == self.total_actions:
+ msger.raw('')
+ msger.verbose('\n'.join(self.logString))
+
+ elif what == rpm.RPMCALLBACK_UNINST_START:
+ pass
+
+ elif what == rpm.RPMCALLBACK_UNINST_PROGRESS:
+ pass
+
+ elif what == rpm.RPMCALLBACK_UNINST_STOP:
+ self.total_removed += 1
+
+ elif what == rpm.RPMCALLBACK_REPACKAGE_START:
+ pass
+
+ elif what == rpm.RPMCALLBACK_REPACKAGE_STOP:
+ pass
+
+ elif what == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
+ pass
+
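+# Example (editor's sketch, not part of the original module) of driving the
+# callback from rpm; assumes a transaction set already populated with
+# addInstall() entries:
+#
+#   ts = rpm.TransactionSet("/")
+#   cb = RPMInstallCallback(ts)
+#   ts.run(cb.callback, '')
+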
+def readRpmHeader(ts, filename):
+ """ Read an rpm header. """
+
+ fd = os.open(filename, os.O_RDONLY)
+ h = ts.hdrFromFdno(fd)
+ os.close(fd)
+ return h
+
+def splitFilename(filename):
+ """ Pass in a standard style rpm fullname
+
+ Return a name, version, release, epoch, arch, e.g.::
+        foo-1.0-1.i386.rpm returns foo, 1.0, 1, '', i386
+ 1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
+ """
+
+ if filename[-4:] == '.rpm':
+ filename = filename[:-4]
+
+ archIndex = filename.rfind('.')
+ arch = filename[archIndex+1:]
+
+ relIndex = filename[:archIndex].rfind('-')
+ rel = filename[relIndex+1:archIndex]
+
+ verIndex = filename[:relIndex].rfind('-')
+ ver = filename[verIndex+1:relIndex]
+
+ epochIndex = filename.find(':')
+ if epochIndex == -1:
+ epoch = ''
+ else:
+ epoch = filename[:epochIndex]
+
+ name = filename[epochIndex + 1:verIndex]
+ return name, ver, rel, epoch, arch
+
+def getCanonX86Arch(arch):
+    # Geode CPUs identify themselves as i586 but get their own arch name
+ if arch == "i586":
+ f = open("/proc/cpuinfo", "r")
+ lines = f.readlines()
+ f.close()
+ for line in lines:
+ if line.startswith("model name") and line.find("Geode(TM)") != -1:
+ return "geode"
+ return arch
+ # only athlon vs i686 isn't handled with uname currently
+ if arch != "i686":
+ return arch
+
+ # if we're i686 and AuthenticAMD, then we should be an athlon
+ f = open("/proc/cpuinfo", "r")
+ lines = f.readlines()
+ f.close()
+ for line in lines:
+ if line.startswith("vendor") and line.find("AuthenticAMD") != -1:
+ return "athlon"
+ # i686 doesn't guarantee cmov, but we depend on it
+ elif line.startswith("flags") and line.find("cmov") == -1:
+ return "i586"
+
+ return arch
+
+def getCanonX86_64Arch(arch):
+ if arch != "x86_64":
+ return arch
+
+ vendor = None
+ f = open("/proc/cpuinfo", "r")
+ lines = f.readlines()
+ f.close()
+ for line in lines:
+ if line.startswith("vendor_id"):
+ vendor = line.split(':')[1]
+ break
+ if vendor is None:
+ return arch
+
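+    # some CPUs report the vendor string with a space, so check both forms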
+ if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1:
+ return "amd64"
+ if vendor.find("GenuineIntel") != -1:
+ return "ia32e"
+ return arch
+
+def getCanonArch():
+ arch = os.uname()[4]
+
+ if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"):
+ return getCanonX86Arch(arch)
+
+ if arch == "x86_64":
+ return getCanonX86_64Arch(arch)
+
+ return arch
+
+# Copied from libsatsolver:poolarch.c, with cleanup
+archPolicies = {
+ "x86_64": "x86_64:i686:i586:i486:i386",
+ "i686": "i686:i586:i486:i386",
+ "i586": "i586:i486:i386",
+ "ia64": "ia64:i686:i586:i486:i386",
+ "armv7tnhl": "armv7tnhl:armv7thl:armv7nhl:armv7hl",
+ "armv7thl": "armv7thl:armv7hl",
+ "armv7nhl": "armv7nhl:armv7hl",
+ "armv7hl": "armv7hl",
+ "armv7l": "armv7l:armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
+ "armv6l": "armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
+ "armv5tejl": "armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
+ "armv5tel": "armv5tel:armv5l:armv4tl:armv4l:armv3l",
+ "armv5l": "armv5l:armv4tl:armv4l:armv3l",
+}
+
+# dict mapping arch -> ( multicompat, best personality, biarch personality )
+multilibArches = {
+ "x86_64": ( "athlon", "x86_64", "athlon" ),
+}
+
+# from yumUtils.py
+arches = {
+ # ia32
+ "athlon": "i686",
+ "i686": "i586",
+ "geode": "i586",
+ "i586": "i486",
+ "i486": "i386",
+ "i386": "noarch",
+
+ # amd64
+ "x86_64": "athlon",
+ "amd64": "x86_64",
+ "ia32e": "x86_64",
+
+ # arm
+ "armv7tnhl": "armv7nhl",
+ "armv7nhl": "armv7hl",
+ "armv7hl": "noarch",
+ "armv7l": "armv6l",
+ "armv6l": "armv5tejl",
+ "armv5tejl": "armv5tel",
+ "armv5tel": "noarch",
+
+ #itanium
+ "ia64": "noarch",
+}
+
+def isMultiLibArch(arch=None):
+    """returns True if arch is a multilib arch, False if not"""
+    if arch is None:
+        arch = getCanonArch()
+
+    if arch not in arches: # or we could check if it is noarch
+        return False
+
+    if arch in multilibArches:
+        return True
+
+    if arches[arch] in multilibArches:
+        return True
+
+    return False
+
+def getBaseArch():
+    myarch = getCanonArch()
+    if myarch not in arches:
+        return myarch
+
+    if isMultiLibArch(arch=myarch):
+        if myarch in multilibArches:
+            return myarch
+        else:
+            return arches[myarch]
+
+    # walk the compatibility chain down to the arch just above 'noarch'
+    basearch = myarch
+    value = arches[basearch]
+    while value != 'noarch':
+        basearch = value
+        value = arches[basearch]
+
+    return basearch
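+# (editor's note) e.g. on a plain i686 machine getBaseArch() walks the chain
+# i686 -> i586 -> i486 -> i386 and returns "i386"; on x86_64 the multilib
+# check returns "x86_64" directly.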
+
+def checkRpmIntegrity(bin_rpm, package):
+ return runner.quiet([bin_rpm, "-K", "--nosignature", package])
+
+def checkSig(ts, package):
+    """ Takes a transaction set and a package; checks the package's sigs.
+        return 0 if they are all fine
+        return 1 if the gpg key can't be found
+        return 2 if the header is in some way damaged
+        return 3 if the key is not trusted
+        return 4 if the pkg is not gpg or pgp signed
+    """
+
+ value = 0
+ currentflags = ts.setVSFlags(0)
+ fdno = os.open(package, os.O_RDONLY)
+ try:
+ hdr = ts.hdrFromFdno(fdno)
+
+    except rpm.error, e:
+        # some rpm versions misspell "available" in this message,
+        # so match both spellings
+        if str(e) == "public key not availaiable":
+            value = 1
+        if str(e) == "public key not available":
+            value = 1
+        if str(e) == "public key not trusted":
+            value = 3
+        if str(e) == "error reading package header":
+            value = 2
+ else:
+ error, siginfo = getSigInfo(hdr)
+ if error == 101:
+ os.close(fdno)
+ del hdr
+ value = 4
+ else:
+ del hdr
+
+ try:
+ os.close(fdno)
+ except OSError:
+ pass
+
+ ts.setVSFlags(currentflags) # put things back like they were before
+ return value
+
+def getSigInfo(hdr):
+    """ checks the signature from an hdr and hands back signature
+        information and/or an error code
+    """
+
+ import locale
+ locale.setlocale(locale.LC_ALL, 'C')
+
+ string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
+ siginfo = hdr.sprintf(string)
+ if siginfo != '(none)':
+ error = 0
+ sigtype, sigdate, sigid = siginfo.split(',')
+ else:
+ error = 101
+ sigtype = 'MD5'
+ sigdate = 'None'
+ sigid = 'None'
+
+ infotuple = (sigtype, sigdate, sigid)
+ return error, infotuple
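+# Example (editor's sketch; the package path is hypothetical):
+#
+#   ts = rpm.TransactionSet()
+#   rc = checkSig(ts, "/tmp/foo-1.0-1.i386.rpm")
+#   # rc: 0 = ok, 1 = missing gpg key, 2 = damaged header,
+#   #     3 = untrusted key, 4 = unsigned package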
+
+def checkRepositoryEULA(name, repo):
+    """ Check the repo's EULA file if one is provided.
+        return True: no EULA, or EULA accepted
+        return False: user declined the EULA
+    """
+
+ import tempfile
+ import shutil
+ import urlparse
+ import urllib2 as u2
+ import httplib
+ from mic.utils.errors import CreatorError
+
+ def _check_and_download_url(u2opener, url, savepath):
+ try:
+ if u2opener:
+ f = u2opener.open(url)
+ else:
+ f = u2.urlopen(url)
+ except u2.HTTPError, httperror:
+ if httperror.code in (404, 503):
+ return None
+ else:
+ raise CreatorError(httperror)
+ except OSError, oserr:
+ if oserr.errno == 2:
+ return None
+ else:
+ raise CreatorError(oserr)
+ except IOError, oserr:
+ if hasattr(oserr, "reason") and oserr.reason.errno == 2:
+ return None
+ else:
+ raise CreatorError(oserr)
+ except u2.URLError, err:
+ raise CreatorError(err)
+ except httplib.HTTPException, e:
+ raise CreatorError(e)
+
+ # save to file
+ licf = open(savepath, "w")
+ licf.write(f.read())
+ licf.close()
+ f.close()
+
+ return savepath
+
+ def _pager_file(savepath):
+
+ if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
+ pagers = ('w3m', 'links', 'lynx', 'less', 'more')
+ else:
+ pagers = ('less', 'more')
+
+ file_showed = False
+        for pager in pagers:
+            cmd = "%s %s" % (pager, savepath)
+            try:
+                # os.system returns non-zero if the pager is missing or fails
+                if os.system(cmd) == 0:
+                    file_showed = True
+                    break
+            except OSError:
+                continue
+
+ if not file_showed:
+ f = open(savepath)
+ msger.raw(f.read())
+ f.close()
+ msger.pause()
+
+ # when proxy needed, make urllib2 follow it
+ proxy = repo.proxy
+ proxy_username = repo.proxy_username
+ proxy_password = repo.proxy_password
+
+ if not proxy:
+ proxy = get_proxy_for(repo.baseurl[0])
+
+ handlers = []
+ auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
+ u2opener = None
+ if proxy:
+ if proxy_username:
+ proxy_netloc = urlparse.urlsplit(proxy).netloc
+ if proxy_password:
+ proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
+ else:
+ proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
+ else:
+ proxy_url = proxy
+
+ proxy_support = u2.ProxyHandler({'http': proxy_url,
+ 'https': proxy_url,
+ 'ftp': proxy_url})
+ handlers.append(proxy_support)
+
+ # download all remote files to one temp dir
+ baseurl = None
+ repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')
+
+ for url in repo.baseurl:
+ tmphandlers = handlers[:]
+
+ (scheme, host, path, parm, query, frag) = urlparse.urlparse(url.rstrip('/') + '/')
+ if scheme not in ("http", "https", "ftp", "ftps", "file"):
+ raise CreatorError("Error: invalid url %s" % url)
+
+        if '@' in host:
+            try:
+                user_pass, host = host.split('@', 1)
+                if ':' in user_pass:
+                    user, password = user_pass.split(':', 1)
+                else:
+                    # no password given in the URL
+                    user, password = user_pass, ''
+            except ValueError, e:
+                raise CreatorError('Bad URL: %s' % url)
+
+            msger.verbose("adding HTTP auth: %s, XXXXXXXX" % user)
+            auth_handler.add_password(None, host, user, password)
+ tmphandlers.append(auth_handler)
+        url = urlparse.urlunparse((scheme, host, path, parm, query, frag))
+
+ if tmphandlers:
+ u2opener = u2.build_opener(*tmphandlers)
+
+ # try to download
+ repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
+ repo_eula_path = _check_and_download_url(
+ u2opener,
+ repo_eula_url,
+ os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
+ if repo_eula_path:
+ # found
+ baseurl = url
+ break
+
+ if not baseurl:
+ shutil.rmtree(repo_lic_dir) #cleanup
+ return True
+
+ # show the license file
+ msger.info('For the software packages in this yum repo:')
+ msger.info(' %s: %s' % (name, baseurl))
+    msger.info('There is an "End User License Agreement" file that needs to be checked.')
+    msger.info('Please read the terms and conditions outlined in it and answer the questions that follow.')
+ msger.pause()
+
+ _pager_file(repo_eula_path)
+
+ # Asking for the "Accept/Decline"
+ if not msger.ask('Would you agree to the terms and conditions outlined in the above End User License Agreement?'):
+ msger.warning('Will not install pkgs from this repo.')
+ shutil.rmtree(repo_lic_dir) #cleanup
+ return False
+
+    # try to find support_info.html for extra information
+ repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
+ repo_info_path = _check_and_download_url(
+ u2opener,
+ repo_info_url,
+ os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
+ if repo_info_path:
+        msger.info('There is one more file in the repo with additional support information; please read it.')
+ msger.pause()
+ _pager_file(repo_info_path)
+
+ #cleanup
+ shutil.rmtree(repo_lic_dir)
+ return True
diff --git a/scripts/lib/mic/utils/runner.py b/scripts/lib/mic/utils/runner.py
new file mode 100644
index 0000000000..fded3c93fa
--- /dev/null
+++ b/scripts/lib/mic/utils/runner.py
@@ -0,0 +1,109 @@
+#!/usr/bin/python -tt
+#
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import subprocess
+
+from mic import msger
+
+def runtool(cmdln_or_args, catch=1):
+    """ wrapper for most of the subprocess calls
+    input:
+        cmdln_or_args: can be either an args list or a cmdln str (shell=True)
+        catch: 0, quietly run, discarding all output
+               1, capture only STDOUT
+               2, capture only STDERR
+               3, capture both STDOUT and STDERR
+    return:
+        (rc, output)
+        if catch==0, the output will always be None
+    """
+
+ if catch not in (0, 1, 2, 3):
+        # invalid catch selection; the None return will raise at the call
+        # site, which is the intended failure mode
+ return None
+
+ if isinstance(cmdln_or_args, list):
+ cmd = cmdln_or_args[0]
+ shell = False
+ else:
+ import shlex
+ cmd = shlex.split(cmdln_or_args)[0]
+ shell = True
+
+ if catch != 3:
+ dev_null = os.open("/dev/null", os.O_WRONLY)
+
+ if catch == 0:
+ sout = dev_null
+ serr = dev_null
+ elif catch == 1:
+ sout = subprocess.PIPE
+ serr = dev_null
+ elif catch == 2:
+ sout = dev_null
+ serr = subprocess.PIPE
+ elif catch == 3:
+ sout = subprocess.PIPE
+ serr = subprocess.STDOUT
+
+ try:
+ p = subprocess.Popen(cmdln_or_args, stdout=sout,
+ stderr=serr, shell=shell)
+ (sout, serr) = p.communicate()
+ # combine stdout and stderr, filter None out
+ out = ''.join(filter(None, [sout, serr]))
+ except OSError, e:
+ if e.errno == 2:
+ # [Errno 2] No such file or directory
+ msger.error('Cannot run command: %s, lost dependency?' % cmd)
+ else:
+ raise # relay
+ finally:
+ if catch != 3:
+ os.close(dev_null)
+
+ return (p.returncode, out)
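+# Example (editor's note) of the catch modes; list arguments run without a
+# shell, plain strings run with shell=True:
+#
+#   rc, out = runtool(['ls', '/tmp'], catch=1)  # capture stdout only
+#   rc, _   = runtool('ls /missing', catch=0)   # discard all output
+#   rc, out = runtool('ls /missing', catch=3)   # stdout + stderr merged
+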
+
+def show(cmdln_or_args):
+    # run the command and show its output through msger.verbose
+
+ rc, out = runtool(cmdln_or_args, catch=3)
+
+ if isinstance(cmdln_or_args, list):
+ cmd = ' '.join(cmdln_or_args)
+ else:
+ cmd = cmdln_or_args
+
+ msg = 'running command: "%s"' % cmd
+ if out: out = out.strip()
+ if out:
+ msg += ', with output::'
+ msg += '\n +----------------'
+ for line in out.splitlines():
+ msg += '\n | %s' % line
+ msg += '\n +----------------'
+
+ msger.verbose(msg)
+ return rc
+
+def outs(cmdln_or_args, catch=1):
+    # run the command and return its stripped output
+ return runtool(cmdln_or_args, catch)[1].strip()
+
+def quiet(cmdln_or_args):
+ return runtool(cmdln_or_args, catch=0)[0]
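+
+# (editor's note) typical use of the convenience wrappers; the command lines
+# are illustrative:
+#
+#   runner.show(['ls', '-l', '/tmp'])        # log command and output
+#   ver = runner.outs(['rpm', '--version'])  # e.g. "RPM version 4.x.y"
+#   rc = runner.quiet(['rpm', '-K', '--nosignature', '/tmp/foo.rpm'])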