aboutsummaryrefslogtreecommitdiffstats
path: root/meta/classes
diff options
context:
space:
mode:
authorRichard Purdie <richard@openedhand.com>2006-07-21 10:10:31 +0000
committerRichard Purdie <richard@openedhand.com>2006-07-21 10:10:31 +0000
commitb2f192faabe412adce79534e22efe9fb69ee40e2 (patch)
tree7076c49d4286f8a1733650bd8fbc7161af200d57 /meta/classes
parent2cf0eadf9f730027833af802d7e6c90b44248f80 (diff)
downloadopenembedded-core-b2f192faabe412adce79534e22efe9fb69ee40e2.tar.gz
Rename /openembedded/ -> /meta/
git-svn-id: https://svn.o-hand.com/repos/poky/trunk@530 311d38ba-8fff-0310-9ca6-ca027cbcb966
Diffstat (limited to 'meta/classes')
-rw-r--r--meta/classes/autotools.bbclass182
-rw-r--r--meta/classes/base.bbclass793
-rw-r--r--meta/classes/base_srpm.bbclass20
-rw-r--r--meta/classes/binconfig.bbclass36
-rw-r--r--meta/classes/ccache.inc11
-rw-r--r--meta/classes/ccdv.bbclass21
-rw-r--r--meta/classes/cml1.bbclass8
-rw-r--r--meta/classes/cpan.bbclass20
-rw-r--r--meta/classes/cross.bbclass55
-rw-r--r--meta/classes/debian.bbclass101
-rw-r--r--meta/classes/distutils-base.bbclass14
-rw-r--r--meta/classes/distutils.bbclass15
-rw-r--r--meta/classes/e.bbclass37
-rw-r--r--meta/classes/efl.bbclass49
-rw-r--r--meta/classes/flow-lossage.bbclass5
-rw-r--r--meta/classes/gconf.bbclass59
-rw-r--r--meta/classes/gettext.bbclass12
-rw-r--r--meta/classes/gnome.bbclass20
-rw-r--r--meta/classes/gpe.bbclass17
-rw-r--r--meta/classes/gtk-icon-cache.bbclass38
-rw-r--r--meta/classes/icecc.bbclass156
-rw-r--r--meta/classes/image_ipk.bbclass76
-rw-r--r--meta/classes/kernel-arch.bbclass26
-rw-r--r--meta/classes/kernel.bbclass435
-rw-r--r--meta/classes/lib_package.bbclass9
-rw-r--r--meta/classes/linux_modules.bbclass19
-rw-r--r--meta/classes/manifest.bbclass80
-rw-r--r--meta/classes/module-base.bbclass25
-rw-r--r--meta/classes/module.bbclass51
-rw-r--r--meta/classes/module_strip.bbclass18
-rw-r--r--meta/classes/mozilla.bbclass53
-rw-r--r--meta/classes/multimachine.bbclass22
-rw-r--r--meta/classes/native.bbclass95
-rw-r--r--meta/classes/nslu2-jffs2-image.bbclass18
-rw-r--r--meta/classes/nslu2-mirrors.bbclass4
-rw-r--r--meta/classes/nslu2-ramdisk-image.bbclass18
-rw-r--r--meta/classes/nylon-mirrors.bbclass6
-rw-r--r--meta/classes/oebuildstamp.bbclass16
-rw-r--r--meta/classes/oelint.bbclass174
-rw-r--r--meta/classes/opie.bbclass105
-rw-r--r--meta/classes/opie_i18n.bbclass163
-rw-r--r--meta/classes/package.bbclass729
-rw-r--r--meta/classes/package_ipk.bbclass234
-rw-r--r--meta/classes/package_rpm.bbclass133
-rw-r--r--meta/classes/package_tar.bbclass99
-rw-r--r--meta/classes/palmtop.bbclass20
-rw-r--r--meta/classes/patcher.bbclass7
-rw-r--r--meta/classes/pkg_distribute.bbclass29
-rw-r--r--meta/classes/pkg_metainfo.bbclass22
-rw-r--r--meta/classes/pkgconfig.bbclass28
-rw-r--r--meta/classes/poky.bbclass4
-rw-r--r--meta/classes/qmake-base.bbclass44
-rw-r--r--meta/classes/qmake.bbclass57
-rw-r--r--meta/classes/qpf.bbclass36
-rw-r--r--meta/classes/qt3e.bbclass11
-rw-r--r--meta/classes/qt3x11.bbclass15
-rw-r--r--meta/classes/qt4x11.bbclass17
-rw-r--r--meta/classes/rm_work.bbclass22
-rw-r--r--meta/classes/rootfs_ipk.bbclass145
-rw-r--r--meta/classes/rpm_core.bbclass16
-rw-r--r--meta/classes/sanity.bbclass112
-rw-r--r--meta/classes/scons.bbclass13
-rw-r--r--meta/classes/sdk.bbclass26
-rw-r--r--meta/classes/sdl.bbclass44
-rw-r--r--meta/classes/sip.bbclass58
-rw-r--r--meta/classes/sourcepkg.bbclass111
-rw-r--r--meta/classes/src_distribute.bbclass40
-rw-r--r--meta/classes/src_distribute_local.bbclass31
-rw-r--r--meta/classes/srec.bbclass28
-rw-r--r--meta/classes/tinderclient.bbclass332
-rw-r--r--meta/classes/tmake.bbclass77
-rw-r--r--meta/classes/update-alternatives.bbclass33
-rw-r--r--meta/classes/update-rc.d.bbclass69
-rw-r--r--meta/classes/wrt-image.bbclass33
-rw-r--r--meta/classes/xfce.bbclass19
-rw-r--r--meta/classes/xlibs.bbclass15
76 files changed, 5791 insertions, 0 deletions
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
new file mode 100644
index 0000000000..927e3432b7
--- /dev/null
+++ b/meta/classes/autotools.bbclass
@@ -0,0 +1,182 @@
+inherit base
+
+def autotools_dep_prepend(d):
+ import bb;
+
+ if bb.data.getVar('INHIBIT_AUTOTOOLS_DEPS', d, 1):
+ return ''
+
+ pn = bb.data.getVar('PN', d, 1)
+ deps = ''
+
+ if pn in ['autoconf-native', 'automake-native']:
+ return deps
+ deps += 'autoconf-native automake-native '
+
+ if not pn in ['libtool', 'libtool-native', 'libtool-cross']:
+ deps += 'libtool-native '
+
+ return deps + 'gnu-config-native '
+
+EXTRA_OEMAKE = ""
+DEPENDS_prepend = "${@autotools_dep_prepend(d)}"
+acpaths = "default"
+EXTRA_AUTORECONF = "--exclude=autopoint"
+
+def autotools_set_crosscompiling(d):
+ import bb
+ if not bb.data.inherits_class('native', d):
+ return " cross_compiling=yes"
+ return ""
+
+# EXTRA_OECONF_append = "${@autotools_set_crosscompiling(d)}"
+
+oe_runconf () {
+ if [ -x ${S}/configure ] ; then
+ cfgcmd="${S}/configure \
+ --build=${BUILD_SYS} \
+ --host=${HOST_SYS} \
+ --target=${TARGET_SYS} \
+ --prefix=${prefix} \
+ --exec_prefix=${exec_prefix} \
+ --bindir=${bindir} \
+ --sbindir=${sbindir} \
+ --libexecdir=${libexecdir} \
+ --datadir=${datadir} \
+ --sysconfdir=${sysconfdir} \
+ --sharedstatedir=${sharedstatedir} \
+ --localstatedir=${localstatedir} \
+ --libdir=${libdir} \
+ --includedir=${includedir} \
+ --oldincludedir=${oldincludedir} \
+ --infodir=${infodir} \
+ --mandir=${mandir} \
+ ${EXTRA_OECONF} \
+ $@"
+ oenote "Running $cfgcmd..."
+ $cfgcmd || oefatal "oe_runconf failed"
+ else
+ oefatal "no configure script found"
+ fi
+}
+
+autotools_do_configure() {
+ case ${PN} in
+ autoconf*)
+ ;;
+ automake*)
+ ;;
+ *)
+ # WARNING: gross hack follows:
+ # An autotools built package generally needs these scripts, however only
+ # automake or libtoolize actually install the current versions of them.
+ # This is a problem in builds that do not use libtool or automake, in the case
+ # where we -need- the latest version of these scripts. e.g. running a build
+ # for a package whose autotools are old, on an x86_64 machine, which the old
+ # config.sub does not support. Work around this by installing them manually
+ # regardless.
+ ( for ac in `find ${S} -name configure.in -o -name configure.ac`; do
+ rm -f `dirname $ac`/configure
+ done )
+ if [ -e ${S}/configure.in -o -e ${S}/configure.ac ]; then
+ olddir=`pwd`
+ cd ${S}
+ if [ x"${acpaths}" = xdefault ]; then
+ acpaths=
+ for i in `find ${S} -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
+ grep -v 'acinclude.m4' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
+ acpaths="$acpaths -I $i"
+ done
+ else
+ acpaths="${acpaths}"
+ fi
+ AUTOV=`automake --version |head -n 1 |sed "s/.* //;s/\.[0-9]\+$//"`
+ automake --version
+ echo "AUTOV is $AUTOV"
+ install -d ${STAGING_DIR}/${HOST_SYS}/share/aclocal
+ install -d ${STAGING_DIR}/${HOST_SYS}/share/aclocal-$AUTOV
+ acpaths="$acpaths -I ${STAGING_DIR}/${HOST_SYS}/share/aclocal-$AUTOV -I ${STAGING_DIR}/${HOST_SYS}/share/aclocal"
+ # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
+ # like it was auto-generated. Work around this by blowing it away
+ # by hand, unless the package specifically asked not to run aclocal.
+ if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
+ rm -f aclocal.m4
+ fi
+ if [ -e configure.in ]; then
+ CONFIGURE_AC=configure.in
+ else
+ CONFIGURE_AC=configure.ac
+ fi
+ if grep "^AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
+ if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
+ : do nothing -- we still have an old unmodified configure.ac
+ else
+ oenote Executing glib-gettextize --force --copy
+ echo "no" | glib-gettextize --force --copy
+ fi
+ fi
+ if grep "^AC_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
+ oenote Executing intltoolize --copy --force --automake
+ intltoolize --copy --force --automake
+ fi
+ oenote Executing autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
+ mkdir -p m4
+ autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || oefatal "autoreconf execution failed."
+ cd $olddir
+ fi
+ ;;
+ esac
+ if [ -e ${S}/configure ]; then
+ oe_runconf
+ else
+ oenote "nothing to configure"
+ fi
+}
+
+autotools_do_install() {
+ oe_runmake 'DESTDIR=${D}' install
+}
+
+STAGE_TEMP="${WORKDIR}/temp-staging"
+
+autotools_stage_includes() {
+ if [ "${INHIBIT_AUTO_STAGE_INCLUDES}" != "1" ]
+ then
+ rm -rf ${STAGE_TEMP}
+ mkdir -p ${STAGE_TEMP}
+ make DESTDIR="${STAGE_TEMP}" install
+ cp -pPR ${STAGE_TEMP}/${includedir}/* ${STAGING_INCDIR}
+ rm -rf ${STAGE_TEMP}
+ fi
+}
+
+autotools_stage_all() {
+ if [ "${INHIBIT_AUTO_STAGE}" = "1" ]
+ then
+ return
+ fi
+ rm -rf ${STAGE_TEMP}
+ mkdir -p ${STAGE_TEMP}
+ oe_runmake DESTDIR="${STAGE_TEMP}" install
+ if [ -d ${STAGE_TEMP}/${includedir} ]; then
+ cp -fpPR ${STAGE_TEMP}/${includedir}/* ${STAGING_INCDIR}
+ fi
+ if [ -d ${STAGE_TEMP}/${libdir} ]
+ then
+ for i in ${STAGE_TEMP}/${libdir}/*.la
+ do
+ if [ ! -f "$i" ]; then
+ cp -fpPR ${STAGE_TEMP}/${libdir}/* ${STAGING_LIBDIR}
+ break
+ fi
+ oe_libinstall -so $(basename $i .la) ${STAGING_LIBDIR}
+ done
+ fi
+ if [ -d ${STAGE_TEMP}/${datadir}/aclocal ]; then
+ install -d ${STAGING_DATADIR}/aclocal
+ cp -fpPR ${STAGE_TEMP}/${datadir}/aclocal/* ${STAGING_DATADIR}/aclocal
+ fi
+ rm -rf ${STAGE_TEMP}
+}
+
+EXPORT_FUNCTIONS do_configure do_install
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
new file mode 100644
index 0000000000..8467ebddc2
--- /dev/null
+++ b/meta/classes/base.bbclass
@@ -0,0 +1,793 @@
+PATCHES_DIR="${S}"
+
+def base_dep_prepend(d):
+ import bb;
+ #
+ # Ideally this will check a flag so we will operate properly in
+ # the case where host == build == target, for now we don't work in
+ # that case though.
+ #
+ deps = ""
+
+ # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
+ # we need that built is the responsibility of the patch function / class, not
+ # the application.
+ patchdeps = bb.data.getVar("PATCH_DEPENDS", d, 1)
+ if patchdeps and not patchdeps in bb.data.getVar("PROVIDES", d, 1):
+ deps = patchdeps
+
+ if not bb.data.getVar('INHIBIT_DEFAULT_DEPS', d):
+ if (bb.data.getVar('HOST_SYS', d, 1) !=
+ bb.data.getVar('BUILD_SYS', d, 1)):
+ deps += " virtual/${TARGET_PREFIX}gcc virtual/libc "
+ return deps
+
+def base_read_file(filename):
+ import bb
+ try:
+ f = file( filename, "r" )
+ except IOError, reason:
+ return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly. :M:
+ else:
+ return f.read().strip()
+ return None
+
+def base_conditional(variable, checkvalue, truevalue, falsevalue, d):
+ import bb
+ if bb.data.getVar(variable,d,1) == checkvalue:
+ return truevalue
+ else:
+ return falsevalue
+
+DEPENDS_prepend="${@base_dep_prepend(d)} "
+
+def base_set_filespath(path, d):
+ import os, bb
+ filespath = []
+ for p in path:
+ overrides = bb.data.getVar("OVERRIDES", d, 1) or ""
+ overrides = overrides + ":"
+ for o in overrides.split(":"):
+ filespath.append(os.path.join(p, o))
+ bb.data.setVar("FILESPATH", ":".join(filespath), d)
+
+FILESPATH = "${@base_set_filespath([ "${FILE_DIRNAME}/${PF}", "${FILE_DIRNAME}/${P}", "${FILE_DIRNAME}/${PN}", "${FILE_DIRNAME}/files", "${FILE_DIRNAME}" ], d)}"
+
+def oe_filter(f, str, d):
+ from re import match
+ return " ".join(filter(lambda x: match(f, x, 0), str.split()))
+
+def oe_filter_out(f, str, d):
+ from re import match
+ return " ".join(filter(lambda x: not match(f, x, 0), str.split()))
+
+die() {
+ oefatal "$*"
+}
+
+oenote() {
+ echo "NOTE:" "$*"
+}
+
+oewarn() {
+ echo "WARNING:" "$*"
+}
+
+oefatal() {
+ echo "FATAL:" "$*"
+ exit 1
+}
+
+oedebug() {
+ test $# -ge 2 || {
+ echo "Usage: oedebug level \"message\""
+ exit 1
+ }
+
+ test ${OEDEBUG:-0} -ge $1 && {
+ shift
+ echo "DEBUG:" $*
+ }
+}
+
+oe_runmake() {
+ if [ x"$MAKE" = x ]; then MAKE=make; fi
+ oenote ${MAKE} ${EXTRA_OEMAKE} "$@"
+ ${MAKE} ${EXTRA_OEMAKE} "$@" || die "oe_runmake failed"
+}
+
+oe_soinstall() {
+ # Purpose: Install shared library file and
+ # create the necessary links
+ # Example:
+ #
+ # oe_
+ #
+ #oenote installing shared library $1 to $2
+ #
+ libname=`basename $1`
+ install -m 755 $1 $2/$libname
+ sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
+ solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
+ ln -sf $libname $2/$sonamelink
+ ln -sf $libname $2/$solink
+}
+
+oe_libinstall() {
+ # Purpose: Install a library, in all its forms
+ # Example
+ #
+ # oe_libinstall libltdl ${STAGING_LIBDIR}/
+ # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
+ dir=""
+ libtool=""
+ silent=""
+ require_static=""
+ require_shared=""
+ staging_install=""
+ while [ "$#" -gt 0 ]; do
+ case "$1" in
+ -C)
+ shift
+ dir="$1"
+ ;;
+ -s)
+ silent=1
+ ;;
+ -a)
+ require_static=1
+ ;;
+ -so)
+ require_shared=1
+ ;;
+ -*)
+ oefatal "oe_libinstall: unknown option: $1"
+ ;;
+ *)
+ break;
+ ;;
+ esac
+ shift
+ done
+
+ libname="$1"
+ shift
+ destpath="$1"
+ if [ -z "$destpath" ]; then
+ oefatal "oe_libinstall: no destination path specified"
+ fi
+ if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
+ then
+ staging_install=1
+ fi
+
+ __runcmd () {
+ if [ -z "$silent" ]; then
+ echo >&2 "oe_libinstall: $*"
+ fi
+ $*
+ }
+
+ if [ -z "$dir" ]; then
+ dir=`pwd`
+ fi
+ dotlai=$libname.lai
+ dir=$dir`(cd $dir; find -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
+ olddir=`pwd`
+ __runcmd cd $dir
+
+ lafile=$libname.la
+ if [ -f "$lafile" ]; then
+ # libtool archive
+ eval `cat $lafile|grep "^library_names="`
+ libtool=1
+ else
+ library_names="$libname.so* $libname.dll.a"
+ fi
+
+ __runcmd install -d $destpath/
+ dota=$libname.a
+ if [ -f "$dota" -o -n "$require_static" ]; then
+ __runcmd install -m 0644 $dota $destpath/
+ fi
+ if [ -f "$dotlai" -a -n "$libtool" ]; then
+ if test -n "$staging_install"
+ then
+ # stop libtool using the final directory name for libraries
+ # in staging:
+ __runcmd rm -f $destpath/$libname.la
+ __runcmd sed -e 's/^installed=yes$/installed=no/' -e '/^dependency_libs=/s,${WORKDIR}[[:alnum:]/\._+-]*/\([[:alnum:]\._+-]*\),${STAGING_LIBDIR}/\1,g' $dotlai >$destpath/$libname.la
+ else
+ __runcmd install -m 0644 $dotlai $destpath/$libname.la
+ fi
+ fi
+
+ for name in $library_names; do
+ files=`eval echo $name`
+ for f in $files; do
+ if [ ! -e "$f" ]; then
+ if [ -n "$libtool" ]; then
+ oefatal "oe_libinstall: $dir/$f not found."
+ fi
+ elif [ -L "$f" ]; then
+ __runcmd cp -P "$f" $destpath/
+ elif [ ! -L "$f" ]; then
+ libfile="$f"
+ __runcmd install -m 0755 $libfile $destpath/
+ fi
+ done
+ done
+
+ if [ -z "$libfile" ]; then
+ if [ -n "$require_shared" ]; then
+ oefatal "oe_libinstall: unable to locate shared library"
+ fi
+ elif [ -z "$libtool" ]; then
+ # special case hack for non-libtool .so.#.#.# links
+ baselibfile=`basename "$libfile"`
+ if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
+ sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
+ solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
+ if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
+ __runcmd ln -sf $baselibfile $destpath/$sonamelink
+ fi
+ __runcmd ln -sf $baselibfile $destpath/$solink
+ fi
+ fi
+
+ __runcmd cd "$olddir"
+}
+
+oe_machinstall() {
+ # Purpose: Install machine dependent files, if available
+ # If not available, check if there is a default
+ # If no default, just touch the destination
+ # Example:
+ # $1 $2 $3 $4
+ # oe_machinstall -m 0644 fstab ${D}/etc/fstab
+ #
+ # TODO: Check argument number?
+ #
+ filename=`basename $3`
+ dirname=`dirname $3`
+
+ for o in `echo ${OVERRIDES} | tr ':' ' '`; do
+ if [ -e $dirname/$o/$filename ]; then
+ oenote $dirname/$o/$filename present, installing to $4
+ install $1 $2 $dirname/$o/$filename $4
+ return
+ fi
+ done
+# oenote overrides specific file NOT present, trying default=$3...
+ if [ -e $3 ]; then
+ oenote $3 present, installing to $4
+ install $1 $2 $3 $4
+ else
+ oenote $3 NOT present, touching empty $4
+ touch $4
+ fi
+}
+
+addtask showdata
+do_showdata[nostamp] = "1"
+python do_showdata() {
+ import sys
+ # emit variables and shell functions
+ bb.data.emit_env(sys.__stdout__, d, True)
+ # emit the metadata which isnt valid shell
+ for e in d.keys():
+ if bb.data.getVarFlag(e, 'python', d):
+ sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, 1)))
+}
+
+addtask listtasks
+do_listtasks[nostamp] = "1"
+python do_listtasks() {
+ import sys
+ # emit variables and shell functions
+ #bb.data.emit_env(sys.__stdout__, d)
+ # emit the metadata which isnt valid shell
+ for e in d.keys():
+ if bb.data.getVarFlag(e, 'task', d):
+ sys.__stdout__.write("%s\n" % e)
+}
+
+addtask clean
+do_clean[dirs] = "${TOPDIR}"
+do_clean[nostamp] = "1"
+do_clean[bbdepcmd] = ""
+python base_do_clean() {
+ """clear the build and temp directories"""
+ dir = bb.data.expand("${WORKDIR}", d)
+ if dir == '//': raise bb.build.FuncFailed("wrong DATADIR")
+ bb.note("removing " + dir)
+ os.system('rm -rf ' + dir)
+
+ dir = "%s.*" % bb.data.expand(bb.data.getVar('STAMP', d), d)
+ bb.note("removing " + dir)
+ os.system('rm -f '+ dir)
+}
+
+addtask mrproper
+do_mrproper[dirs] = "${TOPDIR}"
+do_mrproper[nostamp] = "1"
+do_mrproper[bbdepcmd] = ""
+python base_do_mrproper() {
+ """clear downloaded sources, build and temp directories"""
+ dir = bb.data.expand("${DL_DIR}", d)
+ if dir == '/': bb.build.FuncFailed("wrong DATADIR")
+ bb.debug(2, "removing " + dir)
+ os.system('rm -rf ' + dir)
+ bb.build.exec_task('do_clean', d)
+}
+
+addtask fetch
+do_fetch[dirs] = "${DL_DIR}"
+do_fetch[nostamp] = "1"
+python base_do_fetch() {
+ import sys
+
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+
+ src_uri = bb.data.getVar('SRC_URI', localdata, 1)
+ if not src_uri:
+ return 1
+
+ try:
+ bb.fetch.init(src_uri.split(),d)
+ except bb.fetch.NoMethodError:
+ (type, value, traceback) = sys.exc_info()
+ raise bb.build.FuncFailed("No method: %s" % value)
+
+ try:
+ bb.fetch.go(localdata)
+ except bb.fetch.MissingParameterError:
+ (type, value, traceback) = sys.exc_info()
+ raise bb.build.FuncFailed("Missing parameters: %s" % value)
+ except bb.fetch.FetchError:
+ (type, value, traceback) = sys.exc_info()
+ raise bb.build.FuncFailed("Fetch failed: %s" % value)
+}
+
+def oe_unpack_file(file, data, url = None):
+ import bb, os
+ if not url:
+ url = "file://%s" % file
+ dots = file.split(".")
+ if dots[-1] in ['gz', 'bz2', 'Z']:
+ efile = os.path.join(bb.data.getVar('WORKDIR', data, 1),os.path.basename('.'.join(dots[0:-1])))
+ else:
+ efile = file
+ cmd = None
+ if file.endswith('.tar'):
+ cmd = 'tar x --no-same-owner -f %s' % file
+ elif file.endswith('.tgz') or file.endswith('.tar.gz'):
+ cmd = 'tar xz --no-same-owner -f %s' % file
+ elif file.endswith('.tbz') or file.endswith('.tar.bz2'):
+ cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file
+ elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'):
+ cmd = 'gzip -dc %s > %s' % (file, efile)
+ elif file.endswith('.bz2'):
+ cmd = 'bzip2 -dc %s > %s' % (file, efile)
+ elif file.endswith('.zip'):
+ cmd = 'unzip -q'
+ (type, host, path, user, pswd, parm) = bb.decodeurl(url)
+ if 'dos' in parm:
+ cmd = '%s -a' % cmd
+ cmd = '%s %s' % (cmd, file)
+ elif os.path.isdir(file):
+ filesdir = os.path.realpath(bb.data.getVar("FILESDIR", data, 1))
+ destdir = "."
+ if file[0:len(filesdir)] == filesdir:
+ destdir = file[len(filesdir):file.rfind('/')]
+ destdir = destdir.strip('/')
+ if len(destdir) < 1:
+ destdir = "."
+ elif not os.access("%s/%s" % (os.getcwd(), destdir), os.F_OK):
+ os.makedirs("%s/%s" % (os.getcwd(), destdir))
+ cmd = 'cp -pPR %s %s/%s/' % (file, os.getcwd(), destdir)
+ else:
+ (type, host, path, user, pswd, parm) = bb.decodeurl(url)
+ if not 'patch' in parm:
+ # The "destdir" handling was specifically done for FILESPATH
+ # items. So, only do so for file:// entries.
+ if type == "file":
+ destdir = bb.decodeurl(url)[1] or "."
+ else:
+ destdir = "."
+ bb.mkdirhier("%s/%s" % (os.getcwd(), destdir))
+ cmd = 'cp %s %s/%s/' % (file, os.getcwd(), destdir)
+ if not cmd:
+ return True
+
+
+ dest = os.path.join(os.getcwd(), os.path.basename(file))
+ if os.path.exists(dest):
+ if os.path.samefile(file, dest):
+ return True
+
+ cmd = "PATH=\"%s\" %s" % (bb.data.getVar('PATH', data, 1), cmd)
+ bb.note("Unpacking %s to %s/" % (file, os.getcwd()))
+ ret = os.system(cmd)
+ return ret == 0
+
+addtask unpack after do_fetch
+do_unpack[dirs] = "${WORKDIR}"
+python base_do_unpack() {
+ import re, os
+
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+
+ src_uri = bb.data.getVar('SRC_URI', localdata)
+ if not src_uri:
+ return
+ src_uri = bb.data.expand(src_uri, localdata)
+ for url in src_uri.split():
+ try:
+ local = bb.data.expand(bb.fetch.localpath(url, localdata), localdata)
+ except bb.MalformedUrl, e:
+ raise FuncFailed('Unable to generate local path for malformed uri: %s' % e)
+ # dont need any parameters for extraction, strip them off
+ local = re.sub(';.*$', '', local)
+ local = os.path.realpath(local)
+ ret = oe_unpack_file(local, localdata, url)
+ if not ret:
+ raise bb.build.FuncFailed()
+}
+
+addtask patch after do_unpack
+do_patch[dirs] = "${WORKDIR}"
+python base_do_patch() {
+ import re
+ import bb.fetch
+
+ src_uri = (bb.data.getVar('SRC_URI', d, 1) or '').split()
+ if not src_uri:
+ return
+
+ patchcleancmd = bb.data.getVar('PATCHCLEANCMD', d, 1)
+ if patchcleancmd:
+ bb.data.setVar("do_patchcleancmd", patchcleancmd, d)
+ bb.data.setVarFlag("do_patchcleancmd", "func", 1, d)
+ bb.build.exec_func("do_patchcleancmd", d)
+
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+ for url in src_uri:
+
+ (type, host, path, user, pswd, parm) = bb.decodeurl(url)
+ if not "patch" in parm:
+ continue
+
+ bb.fetch.init([url],d)
+ url = bb.encodeurl((type, host, path, user, pswd, []))
+ local = os.path.join('/', bb.fetch.localpath(url, d))
+
+ # did it need to be unpacked?
+ dots = os.path.basename(local).split(".")
+ if dots[-1] in ['gz', 'bz2', 'Z']:
+ unpacked = os.path.join(bb.data.getVar('WORKDIR', d),'.'.join(dots[0:-1]))
+ else:
+ unpacked = local
+ unpacked = bb.data.expand(unpacked, d)
+
+ if "pnum" in parm:
+ pnum = parm["pnum"]
+ else:
+ pnum = "1"
+
+ if "pname" in parm:
+ pname = parm["pname"]
+ else:
+ pname = os.path.basename(unpacked)
+
+ if "mindate" in parm:
+ mindate = parm["mindate"]
+ else:
+ mindate = 0
+
+ if "maxdate" in parm:
+ maxdate = parm["maxdate"]
+ else:
+ maxdate = "20711226"
+
+ pn = bb.data.getVar('PN', d, 1)
+ srcdate = bb.data.getVar('SRCDATE_%s' % pn, d, 1)
+
+ if not srcdate:
+ srcdate = bb.data.getVar('SRCDATE', d, 1)
+
+ if srcdate == "now":
+ srcdate = bb.data.getVar('DATE', d, 1)
+
+ if (maxdate < srcdate) or (mindate > srcdate):
+ if (maxdate < srcdate):
+ bb.note("Patch '%s' is outdated" % pname)
+
+ if (mindate > srcdate):
+ bb.note("Patch '%s' is predated" % pname)
+
+ continue
+
+ bb.note("Applying patch '%s'" % pname)
+ bb.data.setVar("do_patchcmd", bb.data.getVar("PATCHCMD", d, 1) % (pnum, pname, unpacked), d)
+ bb.data.setVarFlag("do_patchcmd", "func", 1, d)
+ bb.data.setVarFlag("do_patchcmd", "dirs", "${WORKDIR} ${S}", d)
+ bb.build.exec_func("do_patchcmd", d)
+}
+
+
+addhandler base_eventhandler
+python base_eventhandler() {
+ from bb import note, error, data
+ from bb.event import Handled, NotHandled, getName
+ import os
+
+ messages = {}
+ messages["Completed"] = "completed"
+ messages["Succeeded"] = "completed"
+ messages["Started"] = "started"
+ messages["Failed"] = "failed"
+
+ name = getName(e)
+ msg = ""
+ if name.startswith("Pkg"):
+ msg += "package %s: " % data.getVar("P", e.data, 1)
+ msg += messages.get(name[3:]) or name[3:]
+ elif name.startswith("Task"):
+ msg += "package %s: task %s: " % (data.getVar("PF", e.data, 1), e.task)
+ msg += messages.get(name[4:]) or name[4:]
+ elif name.startswith("Build"):
+ msg += "build %s: " % e.name
+ msg += messages.get(name[5:]) or name[5:]
+ elif name == "UnsatisfiedDep":
+ msg += "package %s: dependency %s %s" % (e.pkg, e.dep, name[:-3].lower())
+ note(msg)
+
+ if name.startswith("BuildStarted"):
+ bb.data.setVar( 'BB_VERSION', bb.__version__, e.data )
+ path_to_bbfiles = bb.data.getVar( 'BBFILES', e.data, 1 )
+ path_to_packages = path_to_bbfiles[:path_to_bbfiles.rindex( "packages" )]
+ monotone_revision = "<unknown>"
+ try:
+ monotone_revision = file( "%s/MT/revision" % path_to_packages ).read().strip()
+ except IOError:
+ pass
+ bb.data.setVar( 'OE_REVISION', monotone_revision, e.data )
+ statusvars = ['BB_VERSION', 'OE_REVISION', 'TARGET_ARCH', 'TARGET_OS', 'MACHINE', 'DISTRO', 'DISTRO_VERSION','TARGET_FPU']
+ statuslines = ["%-14s = \"%s\"" % (i, bb.data.getVar(i, e.data, 1) or '') for i in statusvars]
+ statusmsg = "\nOE Build Configuration:\n%s\n" % '\n'.join(statuslines)
+ print statusmsg
+
+ needed_vars = [ "TARGET_ARCH", "TARGET_OS" ]
+ pesteruser = []
+ for v in needed_vars:
+ val = bb.data.getVar(v, e.data, 1)
+ if not val or val == 'INVALID':
+ pesteruser.append(v)
+ if pesteruser:
+ bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
+
+ if not data in e.__dict__:
+ return NotHandled
+
+ log = data.getVar("EVENTLOG", e.data, 1)
+ if log:
+ logfile = file(log, "a")
+ logfile.write("%s\n" % msg)
+ logfile.close()
+
+ return NotHandled
+}
+
+addtask configure after do_unpack do_patch
+do_configure[dirs] = "${S} ${B}"
+do_configure[bbdepcmd] = "do_populate_staging"
+base_do_configure() {
+ :
+}
+
+addtask compile after do_configure
+do_compile[dirs] = "${S} ${B}"
+do_compile[bbdepcmd] = "do_populate_staging"
+base_do_compile() {
+ if [ -e Makefile -o -e makefile ]; then
+ oe_runmake || die "make failed"
+ else
+ oenote "nothing to compile"
+ fi
+}
+
+
+addtask stage after do_compile
+base_do_stage () {
+ :
+}
+
+do_populate_staging[dirs] = "${STAGING_DIR}/${TARGET_SYS}/bin ${STAGING_DIR}/${TARGET_SYS}/lib \
+ ${STAGING_DIR}/${TARGET_SYS}/include \
+ ${STAGING_DIR}/${BUILD_SYS}/bin ${STAGING_DIR}/${BUILD_SYS}/lib \
+ ${STAGING_DIR}/${BUILD_SYS}/include \
+ ${STAGING_DATADIR} \
+ ${S} ${B}"
+
+addtask populate_staging after do_compile
+
+python do_populate_staging () {
+ bb.build.exec_func('do_stage', d)
+}
+
+addtask install after do_compile
+do_install[dirs] = "${S} ${B}"
+
+base_do_install() {
+ :
+}
+
+base_do_package() {
+ :
+}
+
+addtask build after do_populate_staging
+do_build = ""
+do_build[func] = "1"
+
+# Functions that update metadata based on files outputted
+# during the build process.
+
+SHLIBS = ""
+RDEPENDS_prepend = " ${SHLIBS}"
+
+def explode_deps(s):
+ r = []
+ l = s.split()
+ flag = False
+ for i in l:
+ if i[0] == '(':
+ flag = True
+ j = []
+ if flag:
+ j.append(i)
+ if i.endswith(')'):
+ flag = False
+ r[-1] += ' ' + ' '.join(j)
+ else:
+ r.append(i)
+ return r
+
+python read_shlibdeps () {
+ packages = (bb.data.getVar('PACKAGES', d, 1) or "").split()
+ for pkg in packages:
+ rdepends = explode_deps(bb.data.getVar('RDEPENDS_' + pkg, d, 0) or bb.data.getVar('RDEPENDS', d, 0) or "")
+ shlibsfile = bb.data.expand("${WORKDIR}/install/" + pkg + ".shlibdeps", d)
+ if os.access(shlibsfile, os.R_OK):
+ fd = file(shlibsfile)
+ lines = fd.readlines()
+ fd.close()
+ for l in lines:
+ rdepends.append(l.rstrip())
+ pcfile = bb.data.expand("${WORKDIR}/install/" + pkg + ".pcdeps", d)
+ if os.access(pcfile, os.R_OK):
+ fd = file(pcfile)
+ lines = fd.readlines()
+ fd.close()
+ for l in lines:
+ rdepends.append(l.rstrip())
+ bb.data.setVar('RDEPENDS_' + pkg, " " + " ".join(rdepends), d)
+}
+
+python read_subpackage_metadata () {
+ import re
+
+ def decode(str):
+ import codecs
+ c = codecs.getdecoder("string_escape")
+ return c(str)[0]
+
+ data_file = bb.data.expand("${WORKDIR}/install/${PN}.package", d)
+ if os.access(data_file, os.R_OK):
+ f = file(data_file, 'r')
+ lines = f.readlines()
+ f.close()
+ r = re.compile("([^:]+):\s*(.*)")
+ for l in lines:
+ m = r.match(l)
+ if m:
+ bb.data.setVar(m.group(1), decode(m.group(2)), d)
+}
+
+python __anonymous () {
+ import exceptions
+ need_host = bb.data.getVar('COMPATIBLE_HOST', d, 1)
+ if need_host:
+ import re
+ this_host = bb.data.getVar('HOST_SYS', d, 1)
+ if not re.match(need_host, this_host):
+ raise bb.parse.SkipPackage("incompatible with host %s" % this_host)
+
+ need_machine = bb.data.getVar('COMPATIBLE_MACHINE', d, 1)
+ if need_machine:
+ import re
+ this_machine = bb.data.getVar('MACHINE', d, 1)
+ if not re.match(need_machine, this_machine):
+ raise bb.parse.SkipPackage("incompatible with machine %s" % this_machine)
+
+ pn = bb.data.getVar('PN', d, 1)
+
+ srcdate = bb.data.getVar('SRCDATE_%s' % pn, d, 1)
+ if srcdate != None:
+ bb.data.setVar('SRCDATE', srcdate, d)
+
+ use_nls = bb.data.getVar('USE_NLS_%s' % pn, d, 1)
+ if use_nls != None:
+ bb.data.setVar('USE_NLS', use_nls, d)
+}
+
+python () {
+ import bb, os
+ mach_arch = bb.data.getVar('MACHINE_ARCH', d, 1)
+ old_arch = bb.data.getVar('PACKAGE_ARCH', d, 1)
+ if (old_arch == mach_arch):
+ # Nothing to do
+ return
+ if (bb.data.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', d, 1) == '0'):
+ return
+ paths = []
+ for p in [ "${FILE_DIRNAME}/${PF}", "${FILE_DIRNAME}/${P}", "${FILE_DIRNAME}/${PN}", "${FILE_DIRNAME}/files", "${FILE_DIRNAME}" ]:
+ paths.append(bb.data.expand(os.path.join(p, mach_arch), d))
+ for s in bb.data.getVar('SRC_URI', d, 1).split():
+ local = bb.data.expand(bb.fetch.localpath(s, d), d)
+ for mp in paths:
+ if local.startswith(mp):
+# bb.note("overriding PACKAGE_ARCH from %s to %s" % (old_arch, mach_arch))
+ bb.data.setVar('PACKAGE_ARCH', mach_arch, d)
+ return
+}
+
+EXPORT_FUNCTIONS do_clean do_mrproper do_fetch do_unpack do_configure do_compile do_install do_package do_patch do_populate_pkgs do_stage
+
+MIRRORS[func] = "0"
+MIRRORS () {
+${DEBIAN_MIRROR}/main http://snapshot.debian.net/archive/pool
+${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.ie.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool
+${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool
+${GNU_MIRROR} ftp://mirrors.kernel.org/gnu
+${GNU_MIRROR} ftp://ftp.matrix.com.br/pub/gnu
+${GNU_MIRROR} ftp://ftp.cs.ubc.ca/mirror2/gnu
+${GNU_MIRROR} ftp://sunsite.ust.hk/pub/gnu
+${GNU_MIRROR} ftp://ftp.ayamura.org/pub/gnu
+ftp://ftp.kernel.org/pub http://www.kernel.org/pub
+ftp://ftp.kernel.org/pub ftp://ftp.us.kernel.org/pub
+ftp://ftp.kernel.org/pub ftp://ftp.uk.kernel.org/pub
+ftp://ftp.kernel.org/pub ftp://ftp.hk.kernel.org/pub
+ftp://ftp.kernel.org/pub ftp://ftp.au.kernel.org/pub
+ftp://ftp.kernel.org/pub ftp://ftp.jp.kernel.org/pub
+ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt/
+ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.surfnet.nl/pub/security/gnupg/
+ftp://ftp.gnupg.org/gcrypt/ http://gulus.USherbrooke.ca/pub/appl/GnuPG/
+ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnutls.org/pub/gnutls/
+ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnupg.org/gcrypt/gnutls/
+ftp://ftp.gnutls.org/pub/gnutls http://www.mirrors.wiretapped.net/security/network-security/gnutls/
+ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.mirrors.wiretapped.net/pub/security/network-security/gnutls/
+ftp://ftp.gnutls.org/pub/gnutls http://josefsson.org/gnutls/releases/
+
+ftp://.*/.*/ http://www.oesources.org/source/current/
+http://.*/.*/ http://www.oesources.org/source/current/
+}
+
diff --git a/meta/classes/base_srpm.bbclass b/meta/classes/base_srpm.bbclass
new file mode 100644
index 0000000000..aea6335278
--- /dev/null
+++ b/meta/classes/base_srpm.bbclass
@@ -0,0 +1,20 @@
+inherit base package rpm_core
+
+SPECFILE="${RPMBUILDPATH}/SPECS/${PN}.spec"
+
+base_srpm_do_unpack() {
+ test -e ${SRPMFILE} || die "Source rpm \"${SRPMFILE}\"does not exist"
+ if ! test -e ${SPECFILE}; then
+ ${RPM} -i ${SRPMFILE}
+ fi
+ test -e ${SPECFILE} || die "Spec file \"${SPECFILE}\" does not exist"
+ ${RPMBUILD} -bp ${SPECFILE}
+}
+
+base_srpm_do_compile() {
+ ${RPMBUILD} -bc ${SPECFILE}
+}
+
+base_srpm_do_install() {
+ ${RPMBUILD} -bi ${SPECFILE}
+}
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
new file mode 100644
index 0000000000..bf15ebcdf9
--- /dev/null
+++ b/meta/classes/binconfig.bbclass
@@ -0,0 +1,36 @@
+inherit base
+
+# The namespaces can clash here hence the two step replace
+def get_binconfig_mangle(d):
+ import bb.data
+ s = "-e ''"
+ if not bb.data.inherits_class('native', d):
+ s += " -e 's:=${libdir}:=OELIBDIR:;'"
+ s += " -e 's:=${includedir}:=OEINCDIR:;'"
+ s += " -e 's:=${datadir}:=OEDATADIR:'"
+ s += " -e 's:=${prefix}:=OEPREFIX:'"
+ s += " -e 's:=${exec_prefix}:=OEEXECPREFIX:'"
+ s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
+ s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
+ s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
+ s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
+ s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
+ s += " -e 's:OEPREFIX:${STAGING_LIBDIR}/..:'"
+ s += " -e 's:OEEXECPREFIX:${STAGING_LIBDIR}/..:'"
+ return s
+
+# Native package configurations go in ${BINDIR}/<name>-config-native to prevent a collision with cross packages
+def is_native(d):
+ import bb.data
+ return ["","-native"][bb.data.inherits_class('native', d)]
+
+BINCONFIG_GLOB ?= "*-config"
+
+do_stage_append() {
+ for config in `find ${S} -name '${BINCONFIG_GLOB}'`; do
+ configname=`basename $config`${@is_native(d)}
+ install -d ${STAGING_BINDIR}
+ cat $config | sed ${@get_binconfig_mangle(d)} > ${STAGING_BINDIR}/$configname
+ chmod u+x ${STAGING_BINDIR}/$configname
+ done
+}
diff --git a/meta/classes/ccache.inc b/meta/classes/ccache.inc
new file mode 100644
index 0000000000..5e9356104b
--- /dev/null
+++ b/meta/classes/ccache.inc
@@ -0,0 +1,11 @@
+# Make ccache use a TMPDIR-specific ccache directory if using the cross-compiler,
+# since it isn't likely to be useful with any other toolchain than the one we just
+# built, and would otherwise push more useful things out of the default cache.
+
+CCACHE_DIR_TARGET = "${TMPDIR}/ccache"
+
+python () {
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ bb.data.setVar('CCACHE_DIR', '${CCACHE_DIR_TARGET}', d)
+ bb.data.setVarFlag('CCACHE_DIR', 'export', '1', d)
+}
diff --git a/meta/classes/ccdv.bbclass b/meta/classes/ccdv.bbclass
new file mode 100644
index 0000000000..edd151ef8c
--- /dev/null
+++ b/meta/classes/ccdv.bbclass
@@ -0,0 +1,21 @@
+python () {
+ if bb.data.getVar('PN', d, 1) in ['ccdv-native']:
+ if not bb.data.getVar('INHIBIT_DEFAULT_DEPS', d, 1):
+ bb.data.setVar("DEPENDS", '%s %s' % ("ccdv-native", bb.data.getVar("DEPENDS", d, 1) or ""), d)
+ bb.data.setVar("CC", '%s %s' % ("ccdv", bb.data.getVar("CC", d, 1) or ""), d)
+ bb.data.setVar("BUILD_CC", '%s %s' % ("ccdv", bb.data.getVar("BUILD_CC", d, 1) or ""), d)
+ bb.data.setVar("CCLD", '%s %s' % ("ccdv", bb.data.getVar("CCLD", d, 1) or ""), d)
+}
+
+def quiet_libtool(bb,d):
+ deps = (bb.data.getVar('DEPENDS', d, 1) or "").split()
+ if 'libtool-cross' in deps:
+ return "'LIBTOOL=${STAGING_BINDIR}/${HOST_SYS}-libtool --silent'"
+ elif 'libtool-native' in deps:
+ return "'LIBTOOL=${B}/${HOST_SYS}-libtool --silent'"
+ else:
+ return ""
+
+CCDV = "ccdv"
+EXTRA_OEMAKE_append = " ${@quiet_libtool(bb,d)}"
+MAKE += "-s"
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
new file mode 100644
index 0000000000..79218b4a12
--- /dev/null
+++ b/meta/classes/cml1.bbclass
@@ -0,0 +1,8 @@
+cml1_do_configure() {
+ set -e
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+ oe_runmake oldconfig
+}
+
+EXPORT_FUNCTIONS do_configure
+addtask configure after do_unpack do_patch before do_compile
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
new file mode 100644
index 0000000000..853abfd1b3
--- /dev/null
+++ b/meta/classes/cpan.bbclass
@@ -0,0 +1,20 @@
+FILES_${PN} += '${libdir}/perl5'
+
+cpan_do_configure () {
+ perl Makefile.PL
+ if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
+ . ${STAGING_DIR}/${TARGET_SYS}/perl/config.sh
+ sed -e "s:\(SITELIBEXP = \).*:\1${sitelibexp}:; s:\(SITEARCHEXP = \).*:\1${sitearchexp}:; s:\(INSTALLVENDORLIB = \).*:\1${D}${libdir}/perl5:; s:\(INSTALLVENDORARCH = \).*:\1${D}${libdir}/perl5:" < Makefile > Makefile.new
+ mv Makefile.new Makefile
+ fi
+}
+
+cpan_do_compile () {
+ oe_runmake PASTHRU_INC="${CFLAGS}"
+}
+
+cpan_do_install () {
+ oe_runmake install_vendor
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
new file mode 100644
index 0000000000..09357acbe8
--- /dev/null
+++ b/meta/classes/cross.bbclass
@@ -0,0 +1,55 @@
+# Cross packages are built indirectly via dependency,
+# no need for them to be a direct target of 'world'
+EXCLUDE_FROM_WORLD = "1"
+
+PACKAGES = ""
+
+HOST_ARCH = "${BUILD_ARCH}"
+HOST_VENDOR = "${BUILD_VENDOR}"
+HOST_OS = "${BUILD_OS}"
+HOST_PREFIX = "${BUILD_PREFIX}"
+HOST_CC_ARCH = "${BUILD_CC_ARCH}"
+
+CPPFLAGS = "${BUILD_CPPFLAGS}"
+CFLAGS = "${BUILD_CFLAGS}"
+CXXFLAGS = "${BUILD_CFLAGS}"
+LDFLAGS = "${BUILD_LDFLAGS}"
+LDFLAGS_build-darwin = "-L${STAGING_DIR}/${BUILD_SYS}/lib "
+
+# Overrides for paths
+
+# Path prefixes
+base_prefix = "${exec_prefix}"
+prefix = "${CROSS_DIR}"
+exec_prefix = "${prefix}"
+
+# Base paths
+base_bindir = "${base_prefix}/bin"
+base_sbindir = "${base_prefix}/bin"
+base_libdir = "${base_prefix}/lib"
+
+# Architecture independent paths
+datadir = "${prefix}/share"
+sysconfdir = "${prefix}/etc"
+sharedstatedir = "${prefix}/com"
+localstatedir = "${prefix}/var"
+infodir = "${datadir}/info"
+mandir = "${datadir}/man"
+docdir = "${datadir}/doc"
+servicedir = "${prefix}/srv"
+
+# Architecture dependent paths
+bindir = "${exec_prefix}/bin"
+sbindir = "${exec_prefix}/bin"
+libexecdir = "${exec_prefix}/libexec"
+libdir = "${exec_prefix}/lib"
+includedir = "${exec_prefix}/include"
+oldincludedir = "${exec_prefix}/include"
+
+do_stage () {
+ oe_runmake install
+}
+
+do_install () {
+ :
+}
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
new file mode 100644
index 0000000000..5688dad93b
--- /dev/null
+++ b/meta/classes/debian.bbclass
@@ -0,0 +1,101 @@
+STAGING_PKGMAPS_DIR = "${STAGING_DIR}/pkgmaps/debian"
+
+# Debian package renaming only occurs when a package is built.
+# We therefore have to make sure we build all runtime packages
+# before building the current package, to ensure the package's runtime
+# dependencies are correct
+BUILD_ALL_DEPS = "1"
+
+python debian_package_name_hook () {
+ import glob, copy, stat, errno, re
+
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+ packages = bb.data.getVar('PACKAGES', d, 1)
+
+ def socrunch(s):
+ s = s.lower().replace('_', '-')
+ m = re.match("^(.*)(.)\.so\.(.*)$", s)
+ if m is None:
+ return None
+ if m.group(2) in '0123456789':
+ bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
+ else:
+ bin = m.group(1) + m.group(2) + m.group(3)
+ dev = m.group(1) + m.group(2)
+ return (bin, dev)
+
+ def isexec(path):
+ try:
+ s = os.stat(path)
+ except (os.error, AttributeError):
+ return 0
+ return (s[stat.ST_MODE] & stat.S_IEXEC)
+
+ def auto_libname(packages, orig_pkg):
+ bin_re = re.compile(".*/s?bin$")
+ lib_re = re.compile(".*/lib$")
+ so_re = re.compile("lib.*\.so")
+ sonames = []
+ has_bins = 0
+ has_libs = 0
+ pkg_dir = os.path.join(workdir, "install", orig_pkg)
+ for root, dirs, files in os.walk(pkg_dir):
+ if bin_re.match(root) and files:
+ has_bins = 1
+ if lib_re.match(root) and files:
+ has_libs = 1
+ for f in files:
+ if so_re.match(f):
+ fp = os.path.join(root, f)
+ cmd = (bb.data.getVar('BUILD_PREFIX', d, 1) or "") + "objdump -p " + fp + " 2>/dev/null"
+ fd = os.popen(cmd)
+ lines = fd.readlines()
+ fd.close()
+ for l in lines:
+ m = re.match("\s+SONAME\s+([^\s]*)", l)
+ if m and not m.group(1) in sonames:
+ sonames.append(m.group(1))
+
+ bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
+ soname = None
+ if len(sonames) == 1:
+ soname = sonames[0]
+ elif len(sonames) > 1:
+ lead = bb.data.getVar('LEAD_SONAME', d, 1)
+ if lead:
+ r = re.compile(lead)
+ filtered = []
+ for s in sonames:
+ if r.match(s):
+ filtered.append(s)
+ if len(filtered) == 1:
+ soname = filtered[0]
+ elif len(filtered) > 1:
+ bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
+ else:
+ bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
+ else:
+ bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
+
+ if has_libs and not has_bins and soname:
+ soname_result = socrunch(soname)
+ if soname_result:
+ (pkgname, devname) = soname_result
+ for pkg in packages.split():
+ if (bb.data.getVar('PKG_' + pkg, d) or bb.data.getVar('DEBIAN_NOAUTONAME_' + pkg, d)):
+ continue
+ if pkg == orig_pkg:
+ newpkg = pkgname
+ else:
+ newpkg = pkg.replace(orig_pkg, devname, 1)
+ if newpkg != pkg:
+ bb.data.setVar('PKG_' + pkg, newpkg, d)
+
+ for pkg in (bb.data.getVar('AUTO_LIBNAME_PKGS', d, 1) or "").split():
+ auto_libname(packages, pkg)
+}
+
+EXPORT_FUNCTIONS package_name_hook
+
+DEBIAN_NAMES = 1
+
diff --git a/meta/classes/distutils-base.bbclass b/meta/classes/distutils-base.bbclass
new file mode 100644
index 0000000000..68d7112166
--- /dev/null
+++ b/meta/classes/distutils-base.bbclass
@@ -0,0 +1,14 @@
+EXTRA_OEMAKE = ""
+DEPENDS += "${@["python-native python", ""][(bb.data.getVar('PACKAGES', d, 1) == '')]}"
+RDEPENDS += "python-core"
+
+def python_dir(d):
+ import os, bb
+ staging_incdir = bb.data.getVar( "STAGING_INCDIR", d, 1 )
+ if os.path.exists( "%s/python2.3" % staging_incdir ): return "python2.3"
+ if os.path.exists( "%s/python2.4" % staging_incdir ): return "python2.4"
+ raise "No Python in STAGING_INCDIR. Forgot to build python-native ?"
+
+PYTHON_DIR = "${@python_dir(d)}"
+FILES_${PN} = "${bindir} ${libdir} ${libdir}/${PYTHON_DIR}"
+
diff --git a/meta/classes/distutils.bbclass b/meta/classes/distutils.bbclass
new file mode 100644
index 0000000000..a2b0e2b770
--- /dev/null
+++ b/meta/classes/distutils.bbclass
@@ -0,0 +1,15 @@
+inherit distutils-base
+
+distutils_do_compile() {
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
+ ${STAGING_BINDIR}/python setup.py build || \
+ oefatal "python setup.py build execution failed."
+}
+
+distutils_do_install() {
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
+ ${STAGING_BINDIR}/python setup.py install --prefix=${D}/${prefix} --install-data=${D}/${datadir} || \
+ oefatal "python setup.py install execution failed."
+}
+
+EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes/e.bbclass b/meta/classes/e.bbclass
new file mode 100644
index 0000000000..afd9b6d2b3
--- /dev/null
+++ b/meta/classes/e.bbclass
@@ -0,0 +1,37 @@
+MAINTAINER = "Justin Patrin <papercrane@reversefold.com>"
+HOMEPAGE = "http://www.enlightenment.org"
+SECTION = "e/apps"
+
+inherit autotools pkgconfig binconfig
+
+do_prepsources () {
+ make clean distclean || true
+}
+addtask prepsources after do_fetch before do_unpack
+
+def binconfig_suffix(d):
+ import bb
+ return ["","-native"][bb.data.inherits_class('native', d)]
+
+export CURL_CONFIG = "${STAGING_BINDIR}/curl-config${@binconfig_suffix(d)}"
+export EDB_CONFIG = "${STAGING_BINDIR}/edb-config${@binconfig_suffix(d)}"
+export EET_CONFIG = "${STAGING_BINDIR}/eet-config${@binconfig_suffix(d)}"
+export EVAS_CONFIG = "${STAGING_BINDIR}/evas-config${@binconfig_suffix(d)}"
+export ECORE_CONFIG = "${STAGING_BINDIR}/ecore-config${@binconfig_suffix(d)}"
+export EMBRYO_CONFIG = "${STAGING_BINDIR}/embryo-config${@binconfig_suffix(d)}"
+export ENGRAVE_CONFIG = "${STAGING_BINDIR}/engrave-config${@binconfig_suffix(d)}"
+export ENLIGHTENMENT_CONFIG = "${STAGING_BINDIR}/enlightenment-config${@binconfig_suffix(d)}"
+export EPSILON_CONFIG = "${STAGING_BINDIR}/epsilon-config${@binconfig_suffix(d)}"
+export EPEG_CONFIG = "${STAGING_BINDIR}/epeg-config${@binconfig_suffix(d)}"
+export ESMART_CONFIG = "${STAGING_BINDIR}/esmart-config${@binconfig_suffix(d)}"
+export FREETYPE_CONFIG = "${STAGING_BINDIR}/freetype-config${@binconfig_suffix(d)}"
+export IMLIB2_CONFIG = "${STAGING_BINDIR}/imlib2-config${@binconfig_suffix(d)}"
+
+do_compile_prepend() {
+ find ${S} -name Makefile | xargs sed -i 's:/usr/include:${STAGING_INCDIR}:'
+ find ${S} -name Makefile | xargs sed -i 's:/usr/X11R6/include:${STAGING_INCDIR}:'
+}
+
+PACKAGES = "${PN} ${PN}-themes"
+FILES_${PN} = "${libdir}/lib*.so*"
+FILES_${PN}-themes = "${datadir}/${PN}/themes ${datadir}/${PN}/data ${datadir}/${PN}/fonts ${datadir}/${PN}/pointers ${datadir}/${PN}/images ${datadir}/${PN}/users ${datadir}/${PN}/images ${datadir}/${PN}/styles"
diff --git a/meta/classes/efl.bbclass b/meta/classes/efl.bbclass
new file mode 100644
index 0000000000..9c490284c2
--- /dev/null
+++ b/meta/classes/efl.bbclass
@@ -0,0 +1,49 @@
+inherit e
+
+SECTION = "e/libs"
+
+SRCNAME = "${@bb.data.getVar('PN', d, 1).replace('-native', '')}"
+SRC_URI = "${E_URI}/${SRCNAME}-${PV}.tar.gz"
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+INHIBIT_AUTO_STAGE_INCLUDES = "1"
+INHIBIT_NATIVE_STAGE_INSTALL = "1"
+
+libdirectory = "src/lib"
+libraries = "lib${SRCNAME}"
+headers = "${@bb.data.getVar('SRCNAME',d,1).capitalize()}.h"
+
+do_stage_append () {
+ for i in ${libraries}
+ do
+ oe_libinstall -C ${libdirectory} $i ${STAGING_LIBDIR}
+ done
+ for i in ${headers}
+ do
+ install -m 0644 ${libdirectory}/$i ${STAGING_INCDIR}
+ done
+
+ # Install binaries automatically for native builds
+ if [ "${@binconfig_suffix(d)}" = "-native" ]
+ then
+
+ # Most EFL binaries start with the package name
+ for i in src/bin/${SRCNAME}*
+ do
+ if [ -x $i -a -f $i ]
+ then
+
+ # Don't install anything with an extension (.so, etc)
+ if echo $i | grep -v \\.
+ then
+ ${HOST_SYS}-libtool --mode=install install -m 0755 $i ${STAGING_BINDIR}
+ fi
+ fi
+ done
+ fi
+}
+
+PACKAGES = "${PN} ${PN}-themes ${PN}-dev ${PN}-examples"
+FILES_${PN}-dev = "${bindir}/${PN}-config ${libdir}/pkgconfig ${libdir}/lib*.?a ${libdir}/lib*.a"
+FILES_${PN}-examples = "${bindir} ${datadir}"
+
diff --git a/meta/classes/flow-lossage.bbclass b/meta/classes/flow-lossage.bbclass
new file mode 100644
index 0000000000..3e841e3cae
--- /dev/null
+++ b/meta/classes/flow-lossage.bbclass
@@ -0,0 +1,5 @@
+# gcc-3.4 blows up in gtktext with -frename-registers on arm-linux
+python () {
+ cflags = (bb.data.getVar('CFLAGS', d, 1) or '').replace('-frename-registers', '')
+ bb.data.setVar('CFLAGS', cflags, d)
+}
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
new file mode 100644
index 0000000000..686f8e6596
--- /dev/null
+++ b/meta/classes/gconf.bbclass
@@ -0,0 +1,59 @@
+DEPENDS += "gconf"
+
+gconf_postinst() {
+if [ "$1" = configure ]; then
+ if [ "x$D" != "x" ]; then
+ exit 1
+ fi
+ SCHEMA_LOCATION=/etc/gconf/schemas
+ for SCHEMA in ${SCHEMA_FILES}; do
+ if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
+ HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
+ gconftool-2 \
+ --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
+ fi
+ done
+fi
+}
+
+gconf_prerm() {
+if [ "$1" = remove ] || [ "$1" = upgrade ]; then
+ SCHEMA_LOCATION=/etc/gconf/schemas
+ for SCHEMA in ${SCHEMA_FILES}; do
+ if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
+ HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
+ gconftool-2 \
+ --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
+ fi
+ done
+fi
+}
+
+python populate_packages_append () {
+ import os.path, re
+ packages = bb.data.getVar('PACKAGES', d, 1).split()
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+
+ for pkg in packages:
+ schema_dir = '%s/install/%s/etc/gconf/schemas' % (workdir, pkg)
+ schemas = []
+ schema_re = re.compile(".*\.schemas$")
+ if os.path.exists(schema_dir):
+ for f in os.listdir(schema_dir):
+ if schema_re.match(f):
+ schemas.append(f)
+ if schemas != []:
+ bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
+ bb.data.setVar('SCHEMA_FILES', " ".join(schemas), d)
+ postinst = bb.data.getVar('pkg_postinst_%s' % pkg, d, 1) or bb.data.getVar('pkg_postinst', d, 1)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += bb.data.getVar('gconf_postinst', d, 1)
+ bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
+ prerm = bb.data.getVar('pkg_prerm_%s' % pkg, d, 1) or bb.data.getVar('pkg_prerm', d, 1)
+ if not prerm:
+ prerm = '#!/bin/sh\n'
+ prerm += bb.data.getVar('gconf_prerm', d, 1)
+ bb.data.setVar('pkg_prerm_%s' % pkg, prerm, d)
+
+}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
new file mode 100644
index 0000000000..3785f5acd3
--- /dev/null
+++ b/meta/classes/gettext.bbclass
@@ -0,0 +1,12 @@
+python () {
+ # Remove the NLS bits if USE_NLS is no.
+ if bb.data.getVar('USE_NLS', d, 1) == 'no':
+ cfg = oe_filter_out('^--(dis|en)able-nls$', bb.data.getVar('EXTRA_OECONF', d, 1) or "", d)
+ cfg += " --disable-nls"
+ depends = bb.data.getVar('DEPENDS', d, 1) or ""
+ bb.data.setVar('DEPENDS', oe_filter_out('^(virtual/libiconv|virtual/libintl)$', depends, d), d)
+ bb.data.setVar('EXTRA_OECONF', cfg, d)
+}
+
+DEPENDS =+ "gettext-native"
+EXTRA_OECONF += "--enable-nls"
diff --git a/meta/classes/gnome.bbclass b/meta/classes/gnome.bbclass
new file mode 100644
index 0000000000..8643989b73
--- /dev/null
+++ b/meta/classes/gnome.bbclass
@@ -0,0 +1,20 @@
+def gnome_verdir(v):
+ import re
+ m = re.match("([0-9]+)\.([0-9]+)\..*", v)
+ return "%s.%s" % (m.group(1), m.group(2))
+
+SECTION ?= "x11/gnome"
+SRC_URI = "${GNOME_MIRROR}/${PN}/${@gnome_verdir("${PV}")}/${PN}-${PV}.tar.bz2"
+
+DEPENDS += "gnome-common"
+
+FILES_${PN} += "${datadir}/application-registry ${datadir}/mime-info \
+ ${datadir}/gnome-2.0"
+
+inherit autotools pkgconfig gconf
+
+EXTRA_AUTORECONF += "-I ${STAGING_DIR}/${HOST_SYS}/share/aclocal/gnome2-macros"
+
+gnome_stage_includes() {
+ autotools_stage_includes
+}
diff --git a/meta/classes/gpe.bbclass b/meta/classes/gpe.bbclass
new file mode 100644
index 0000000000..861ec416a0
--- /dev/null
+++ b/meta/classes/gpe.bbclass
@@ -0,0 +1,17 @@
+DEPENDS_prepend = "coreutils-native virtual/libintl intltool-native "
+GPE_TARBALL_SUFFIX ?= "gz"
+SRC_URI = "${GPE_MIRROR}/${PN}-${PV}.tar.${GPE_TARBALL_SUFFIX}"
+FILES_${PN} += "${datadir}/gpe ${datadir}/application-registry"
+MAINTAINER ?= "GPE Team <gpe@handhelds.org>"
+
+inherit gettext
+
+gpe_do_compile() {
+ oe_runmake PREFIX=${prefix}
+}
+
+gpe_do_install() {
+ oe_runmake PREFIX=${prefix} DESTDIR=${D} install
+}
+
+EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
new file mode 100644
index 0000000000..0f68e6812b
--- /dev/null
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -0,0 +1,38 @@
+FILES_${PN} += "${datadir}/icons/hicolor"
+
+gtk-icon-cache_postinst() {
+if [ "x$D" != "x" ]; then
+ exit 1
+fi
+gtk-update-icon-cache -q /usr/share/icons/hicolor
+}
+
+gtk-icon-cache_postrm() {
+gtk-update-icon-cache -q /usr/share/icons/hicolor
+}
+
+python populate_packages_append () {
+ import os.path
+ packages = bb.data.getVar('PACKAGES', d, 1).split()
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+
+ for pkg in packages:
+ icon_dir = '%s/install/%s/%s/icons/hicolor' % (workdir, pkg, bb.data.getVar('datadir', d, 1))
+ if not os.path.exists(icon_dir):
+ continue
+
+ bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
+
+ postinst = bb.data.getVar('pkg_postinst_%s' % pkg, d, 1) or bb.data.getVar('pkg_postinst', d, 1)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += bb.data.getVar('gtk-icon-cache_postinst', d, 1)
+ bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
+
+ postrm = bb.data.getVar('pkg_postrm_%s' % pkg, d, 1) or bb.data.getVar('pkg_postrm', d, 1)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += bb.data.getVar('gtk-icon-cache_postrm', d, 1)
+ bb.data.setVar('pkg_postrm_%s' % pkg, postrm, d)
+}
+
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
new file mode 100644
index 0000000000..7dfcfc29a4
--- /dev/null
+++ b/meta/classes/icecc.bbclass
@@ -0,0 +1,156 @@
+# IceCream distributed compiling support
+#
+# We need to create a tar.bz2 of our toolchain and set
+# ICECC_VERSION, ICECC_CXX and ICEC_CC
+#
+
+def create_env(bb,d):
+ """
+ Create a tar.bz of the current toolchain
+ """
+
+    # In case of native-native compilation no environment is needed if
+    # the host prefix is empty (let us duplicate the query for ease)
+ prefix = bb.data.expand('${HOST_PREFIX}', d)
+ if len(prefix) == 0:
+ return ""
+
+ import tarfile
+ import socket
+ import time
+ import os
+ ice_dir = bb.data.expand('${CROSS_DIR}', d)
+ prefix = bb.data.expand('${HOST_PREFIX}' , d)
+ distro = bb.data.expand('${DISTRO}', d)
+ target_sys = bb.data.expand('${TARGET_SYS}', d)
+ #float = bb.data.getVar('${TARGET_FPU}', d)
+ float = "anyfloat"
+ name = socket.gethostname()
+
+ try:
+ os.stat(ice_dir + '/' + target_sys + '/lib/ld-linux.so.2')
+ os.stat(ice_dir + '/' + target_sys + '/bin/g++')
+ except:
+ return ""
+
+ VERSION = '3.4.3'
+ cross_name = prefix + distro + target_sys + float +VERSION+ name
+ tar_file = ice_dir + '/ice/' + cross_name + '.tar.bz2'
+
+ try:
+ os.stat(tar_file)
+ return tar_file
+ except:
+ try:
+ os.makedirs(ice_dir+'/ice')
+ except:
+ pass
+
+ # FIXME find out the version of the compiler
+ tar = tarfile.open(tar_file, 'w:bz2')
+ tar.add(ice_dir + '/' + target_sys + '/lib/ld-linux.so.2',
+ target_sys + 'cross/lib/ld-linux.so.2')
+ tar.add(ice_dir + '/' + target_sys + '/lib/ld-linux.so.2',
+ target_sys + 'cross/lib/ld-2.3.3.so')
+ tar.add(ice_dir + '/' + target_sys + '/lib/libc-2.3.3.so',
+ target_sys + 'cross/lib/libc-2.3.3.so')
+ tar.add(ice_dir + '/' + target_sys + '/lib/libc.so.6',
+ target_sys + 'cross/lib/libc.so.6')
+ tar.add(ice_dir + '/' + target_sys + '/bin/gcc',
+ target_sys + 'cross/usr/bin/gcc')
+ tar.add(ice_dir + '/' + target_sys + '/bin/g++',
+ target_sys + 'cross/usr/bin/g++')
+ tar.add(ice_dir + '/' + target_sys + '/bin/as',
+ target_sys + 'cross/usr/bin/as')
+ tar.add(ice_dir + '/lib/gcc/' + target_sys +'/'+ VERSION + '/specs',
+ target_sys+'cross/usr/lib/gcc/'+target_sys+'/'+VERSION+'/lib/specs')
+ tar.add(ice_dir + '/libexec/gcc/'+target_sys+'/' + VERSION + '/cc1',
+ target_sys + 'cross/usr/lib/gcc/'+target_sys+'/'+VERSION+'/lib/cc1')
+ tar.add(ice_dir + '/libexec/gcc/arm-linux/' + VERSION + '/cc1plus',
+ target_sys+'cross/usr/lib/gcc/'+target_sys+'/'+VERSION+'/lib/cc1plus')
+ tar.close()
+ return tar_file
+
+
+def create_path(compilers, type, bb, d):
+ """
+ Create Symlinks for the icecc in the staging directory
+ """
+ import os
+
+ staging = bb.data.expand('${STAGING_DIR}', d) + "/ice/" + type
+ icecc = bb.data.getVar('ICECC_PATH', d)
+
+ # Create the dir if necessary
+ try:
+ os.stat(staging)
+ except:
+ os.makedirs(staging)
+
+
+ for compiler in compilers:
+ gcc_path = staging + "/" + compiler
+ try:
+ os.stat(gcc_path)
+ except:
+ os.symlink(icecc, gcc_path)
+
+ return staging + ":"
+
+
+def use_icc_version(bb,d):
+    # In case of native-native compilation
+ prefix = bb.data.expand('${HOST_PREFIX}', d)
+ if len(prefix) == 0:
+ return "no"
+
+
+ native = bb.data.expand('${PN}', d)
+ blacklist = [ "-cross", "-native" ]
+
+ for black in blacklist:
+ if black in native:
+ return "no"
+
+ return "yes"
+
+def icc_path(bb,d,compile):
+ native = bb.data.expand('${PN}', d)
+ blacklist = [ "ulibc", "glibc", "ncurses" ]
+ for black in blacklist:
+ if black in native:
+ return ""
+
+ if "-native" in native:
+ compile = False
+ if "-cross" in native:
+ compile = False
+
+ prefix = bb.data.expand('${HOST_PREFIX}', d)
+ if compile and len(prefix) != 0:
+ return create_path( [prefix+"gcc", prefix+"g++"], "cross", bb, d )
+ elif not compile or len(prefix) == 0:
+ return create_path( ["gcc", "g++"], "native", bb, d)
+
+
+def icc_version(bb,d):
+ return create_env(bb,d)
+
+
+#
+# set the IceCream environment variables
+do_configure_prepend() {
+ export PATH=${@icc_path(bb,d,False)}$PATH
+ export ICECC_CC="gcc"
+ export ICECC_CXX="g++"
+}
+
+do_compile_prepend() {
+ export PATH=${@icc_path(bb,d,True)}$PATH
+ export ICECC_CC="${HOST_PREFIX}gcc"
+ export ICECC_CXX="${HOST_PREFIX}g++"
+
+ if [ "${@use_icc_version(bb,d)}" = "yes" ]; then
+ export ICECC_VERSION="${@icc_version(bb,d)}"
+ fi
+}
diff --git a/meta/classes/image_ipk.bbclass b/meta/classes/image_ipk.bbclass
new file mode 100644
index 0000000000..c2f1c8d682
--- /dev/null
+++ b/meta/classes/image_ipk.bbclass
@@ -0,0 +1,76 @@
+inherit rootfs_ipk
+
+# We need to follow RDEPENDS and RRECOMMENDS for images
+BUILD_ALL_DEPS = "1"
+
+# Images are generally built explicitly, do not need to be part of world.
+EXCLUDE_FROM_WORLD = "1"
+
+USE_DEVFS ?= "0"
+
+DEPENDS += "makedevs-native"
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+def get_image_deps(d):
+ import bb
+ str = ""
+ for type in (bb.data.getVar('IMAGE_FSTYPES', d, 1) or "").split():
+ deps = bb.data.getVar('IMAGE_DEPENDS_%s' % type, d) or ""
+ if deps:
+ str += " %s" % deps
+ return str
+
+DEPENDS += "${@get_image_deps(d)}"
+
+IMAGE_DEVICE_TABLE ?= "${@bb.which(bb.data.getVar('BBPATH', d, 1), 'files/device_table-minimal.txt')}"
+IMAGE_POSTPROCESS_COMMAND ?= ""
+
+# Must call real_do_rootfs() from inside here, rather than as a separate
+# task, so that we have a single fakeroot context for the whole process.
+fakeroot do_rootfs () {
+ set -x
+ rm -rf ${IMAGE_ROOTFS}
+
+ if [ "${USE_DEVFS}" != "1" ]; then
+ mkdir -p ${IMAGE_ROOTFS}/dev
+ makedevs -r ${IMAGE_ROOTFS} -D ${IMAGE_DEVICE_TABLE}
+ fi
+
+ real_do_rootfs
+
+ insert_feed_uris
+
+ rm -f ${IMAGE_ROOTFS}${libdir}/ipkg/lists/oe
+
+ ${IMAGE_PREPROCESS_COMMAND}
+
+ export TOPDIR=${TOPDIR}
+
+ for type in ${IMAGE_FSTYPES}; do
+ if test -z "$FAKEROOTKEY"; then
+ fakeroot -i ${TMPDIR}/fakedb.image bbimage -t $type -e ${FILE}
+ else
+ bbimage -n "${IMAGE_NAME}" -t "$type" -e "${FILE}"
+ fi
+ done
+
+ ${IMAGE_POSTPROCESS_COMMAND}
+}
+
+insert_feed_uris () {
+
+ echo "Building feeds for [${DISTRO}].."
+
+ for line in ${FEED_URIS}
+ do
+ # strip leading and trailing spaces/tabs, then split into name and uri
+ line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
+ feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
+ feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
+
+ echo "Added $feed_name feed with URL $feed_uri"
+
+ # insert new feed-sources
+ echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/ipkg/${feed_name}-feed.conf
+ done
+}
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
new file mode 100644
index 0000000000..92a6c982fb
--- /dev/null
+++ b/meta/classes/kernel-arch.bbclass
@@ -0,0 +1,26 @@
+#
+# set the ARCH environment variable for kernel compilation (including
+# modules). return value must match one of the architecture directories
+# in the kernel source "arch" directory
+#
+
+valid_archs = "alpha cris ia64 m68knommu ppc sh \
+ sparc64 x86_64 arm h8300 m32r mips \
+ ppc64 sh64 um arm26 i386 m68k \
+ parisc s390 sparc v850"
+
+def map_kernel_arch(a, d):
+ import bb, re
+
+ valid_archs = bb.data.getVar('valid_archs', d, 1).split()
+
+ if re.match('(i.86|athlon)$', a): return 'i386'
+ elif re.match('arm26$', a): return 'arm26'
+ elif re.match('armeb$', a): return 'arm'
+ elif re.match('powerpc$', a): return 'ppc'
+ elif re.match('mipsel$', a): return 'mips'
+ elif a in valid_archs: return a
+ else:
+ bb.error("cannot map '%s' to a linux kernel architecture" % a)
+
+export ARCH = "${@map_kernel_arch(bb.data.getVar('TARGET_ARCH', d, 1), d)}"
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
new file mode 100644
index 0000000000..ad51c4e035
--- /dev/null
+++ b/meta/classes/kernel.bbclass
@@ -0,0 +1,435 @@
+inherit module_strip
+
+PROVIDES += "virtual/kernel"
+DEPENDS += "virtual/${TARGET_PREFIX}depmod-${@get_kernelmajorversion('${PV}')} virtual/${TARGET_PREFIX}gcc${KERNEL_CCSUFFIX} update-modules"
+
+inherit kernel-arch
+
+PACKAGES_DYNAMIC += "kernel-module-*"
+PACKAGES_DYNAMIC += "kernel-image-*"
+
+export OS = "${TARGET_OS}"
+export CROSS_COMPILE = "${TARGET_PREFIX}"
+KERNEL_IMAGETYPE = "zImage"
+
+KERNEL_PRIORITY = "${@bb.data.getVar('PV',d,1).split('-')[0].split('.')[-1]}"
+
+# [jbowler 20051109] ${PV}${KERNEL_LOCALVERSION} is used throughout this
+# .bbclass to (apparently) find the full 'uname -r' kernel version, this
+# should be the same as UTS_RELEASE or (in this file) KERNEL_VERSION:
+# KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)$(LOCALVERSION)
+# but since this is not certain this overridable setting is used here:
+KERNEL_RELEASE ?= "${PV}${KERNEL_LOCALVERSION}"
+
+KERNEL_CCSUFFIX ?= ""
+KERNEL_LDSUFFIX ?= ""
+
+# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
+# specific options necessary for building the kernel and modules.
+#FIXME: should be this: TARGET_CC_KERNEL_ARCH ?= "${TARGET_CC_ARCH}"
+TARGET_CC_KERNEL_ARCH ?= ""
+HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
+TARGET_LD_KERNEL_ARCH ?= ""
+HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
+
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc${KERNEL_CCSUFFIX} ${HOST_CC_KERNEL_ARCH}"
+KERNEL_LD = "${LD}${KERNEL_LDSUFFIX} ${HOST_LD_KERNEL_ARCH}"
+
+KERNEL_OUTPUT = "arch/${ARCH}/boot/${KERNEL_IMAGETYPE}"
+KERNEL_IMAGEDEST = "boot"
+
+#
+# configuration
+#
+export CMDLINE_CONSOLE = "console=${@bb.data.getVar("KERNEL_CONSOLE",d,1) or "ttyS0"}"
+
+# parse kernel ABI version out of <linux/version.h>
+def get_kernelversion(p):
+	# Return the UTS_RELEASE string parsed from the version.h at path
+	# 'p', or None if the file is unreadable or has no UTS_RELEASE line.
+	import re
+	try:
+		f = open(p, 'r')
+	except IOError:
+		return None
+	l = f.readlines()
+	f.close()
+	r = re.compile("#define UTS_RELEASE \"(.*)\"")
+	for s in l:
+		m = r.match(s)
+		if m:
+			return m.group(1)
+	return None
+
+def get_kernelmajorversion(p):
+	# Return the leading "major.minor" portion of version string 'p'
+	# (e.g. "2.6.11" -> "2.6"), or None if 'p' does not match.
+	# NOTE(review): duplicated in linux_modules.bbclass — keep in sync.
+	import re
+	r = re.compile("([0-9]+\.[0-9]+).*")
+	m = r.match(p);
+	if m:
+		return m.group(1)
+	return None
+
+KERNEL_VERSION = "${@get_kernelversion('${S}/include/linux/version.h')}"
+KERNEL_MAJOR_VERSION = "${@get_kernelmajorversion('${KERNEL_VERSION}')}"
+
+KERNEL_LOCALVERSION ?= ""
+
+# kernels are generally machine specific
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+kernel_do_compile() {
+	# Userland toolchain flags must not leak into the kernel build; the
+	# kernel's own build system manages its flags.
+	unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+	oe_runmake include/linux/version.h CC="${KERNEL_CC}" LD="${KERNEL_LD}"
+	# pre-2.6 kernels need an explicit dependency-generation pass
+	if [ "${KERNEL_MAJOR_VERSION}" != "2.6" ]; then
+		oe_runmake dep CC="${KERNEL_CC}" LD="${KERNEL_LD}"
+	fi
+	oe_runmake ${KERNEL_IMAGETYPE} CC="${KERNEL_CC}" LD="${KERNEL_LD}"
+	# build modules only when the kernel configuration enables them
+	if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
+		oe_runmake modules CC="${KERNEL_CC}" LD="${KERNEL_LD}"
+	else
+		oenote "no modules to compile"
+	fi
+}
+
+kernel_do_stage() {
+	# Stage kernel headers, configuration and build scripts into
+	# STAGING_KERNEL_DIR so out-of-tree module recipes can build
+	# against this kernel.
+	ASMDIR=`readlink include/asm`
+
+	mkdir -p ${STAGING_KERNEL_DIR}/include/$ASMDIR
+	cp -fR include/$ASMDIR/* ${STAGING_KERNEL_DIR}/include/$ASMDIR/
+	# NOTE(review): the first operand '$ASMDIR' is a path relative to the
+	# source tree and 'rm -f' cannot remove a directory, so presumably
+	# only removing the staged 'asm' symlink matters here — confirm.
+	rm -f $ASMDIR ${STAGING_KERNEL_DIR}/include/asm
+	ln -sf $ASMDIR ${STAGING_KERNEL_DIR}/include/asm
+
+	mkdir -p ${STAGING_KERNEL_DIR}/include/asm-generic
+	cp -fR include/asm-generic/* ${STAGING_KERNEL_DIR}/include/asm-generic/
+
+	mkdir -p ${STAGING_KERNEL_DIR}/include/linux
+	cp -fR include/linux/* ${STAGING_KERNEL_DIR}/include/linux/
+
+	mkdir -p ${STAGING_KERNEL_DIR}/include/net
+	cp -fR include/net/* ${STAGING_KERNEL_DIR}/include/net/
+
+	mkdir -p ${STAGING_KERNEL_DIR}/include/pcmcia
+	cp -fR include/pcmcia/* ${STAGING_KERNEL_DIR}/include/pcmcia/
+
+	if [ -d include/sound ]; then
+		mkdir -p ${STAGING_KERNEL_DIR}/include/sound
+		cp -fR include/sound/* ${STAGING_KERNEL_DIR}/include/sound/
+	fi
+
+	if [ -d drivers/sound ]; then
+		# 2.4 alsa needs some headers from this directory
+		mkdir -p ${STAGING_KERNEL_DIR}/include/drivers/sound
+		cp -fR drivers/sound/*.h ${STAGING_KERNEL_DIR}/include/drivers/sound/
+	fi
+
+	# Stage the .config under the names module recipes expect, plus
+	# version/toolchain-suffix breadcrumb files read by module-base.
+	install -m 0644 .config ${STAGING_KERNEL_DIR}/config-${KERNEL_RELEASE}
+	ln -sf config-${KERNEL_RELEASE} ${STAGING_KERNEL_DIR}/.config
+	ln -sf config-${KERNEL_RELEASE} ${STAGING_KERNEL_DIR}/kernel-config
+	echo "${KERNEL_VERSION}" >${STAGING_KERNEL_DIR}/kernel-abiversion
+	echo "${S}" >${STAGING_KERNEL_DIR}/kernel-source
+	echo "${KERNEL_CCSUFFIX}" >${STAGING_KERNEL_DIR}/kernel-ccsuffix
+	echo "${KERNEL_LDSUFFIX}" >${STAGING_KERNEL_DIR}/kernel-ldsuffix
+	[ -e Rules.make ] && install -m 0644 Rules.make ${STAGING_KERNEL_DIR}/
+	[ -e Makefile ] && install -m 0644 Makefile ${STAGING_KERNEL_DIR}/
+
+	# Check if arch/${ARCH}/Makefile exists and install it
+	if [ -e arch/${ARCH}/Makefile ]; then
+		install -d ${STAGING_KERNEL_DIR}/arch/${ARCH}
+		install -m 0644 arch/${ARCH}/Makefile ${STAGING_KERNEL_DIR}/arch/${ARCH}
+	fi
+	cp -fR include/config* ${STAGING_KERNEL_DIR}/include/
+	install -m 0644 ${KERNEL_OUTPUT} ${STAGING_KERNEL_DIR}/${KERNEL_IMAGETYPE}
+	install -m 0644 System.map ${STAGING_KERNEL_DIR}/System.map-${KERNEL_RELEASE}
+	[ -e Module.symvers ] && install -m 0644 Module.symvers ${STAGING_KERNEL_DIR}/
+
+	cp -fR scripts ${STAGING_KERNEL_DIR}/
+}
+
+kernel_do_install() {
+	# Install modules (when configured), the kernel image, System.map
+	# and .config into ${D}; also refresh the staged 'scripts' dir.
+	unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+	if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
+		# DEPMOD=echo skips running depmod at install time; dependency
+		# files are generated later by pkg_postinst_kernel-image.
+		oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" modules_install
+	else
+		oenote "no modules to install"
+	fi
+
+	install -d ${D}/${KERNEL_IMAGEDEST}
+	install -d ${D}/boot
+	install -m 0644 ${KERNEL_OUTPUT} ${D}/${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_RELEASE}
+	install -m 0644 System.map ${D}/boot/System.map-${KERNEL_RELEASE}
+	install -m 0644 .config ${D}/boot/config-${KERNEL_RELEASE}
+	install -d ${D}/etc/modutils
+
+	# Check if scripts/genksyms exists and if so, build it
+	if [ -e scripts/genksyms/ ]; then
+		oe_runmake SUBDIRS="scripts/genksyms"
+	fi
+
+	install -d ${STAGING_KERNEL_DIR}
+	cp -fR scripts ${STAGING_KERNEL_DIR}/
+}
+
+kernel_do_configure() {
+	# Accept the default answer for any new configuration options.
+	yes '' | oe_runmake oldconfig
+}
+
+pkg_postinst_kernel () {
+ update-alternatives --install /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE} /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_RELEASE} ${KERNEL_PRIORITY} || true
+}
+
+pkg_postrm_kernel () {
+ update-alternatives --remove ${KERNEL_IMAGETYPE} /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_RELEASE} || true
+}
+
+inherit cml1
+
+EXPORT_FUNCTIONS do_compile do_install do_stage do_configure
+
+PACKAGES = "kernel kernel-image kernel-dev"
+FILES = ""
+FILES_kernel-image = "/boot/${KERNEL_IMAGETYPE}*"
+FILES_kernel-dev = "/boot/System.map* /boot/config*"
+RDEPENDS_kernel = "kernel-image-${KERNEL_VERSION}"
+PKG_kernel-image = "kernel-image-${KERNEL_VERSION}"
+ALLOW_EMPTY_kernel = "1"
+ALLOW_EMPTY_kernel-image = "1"
+
+pkg_postinst_kernel-image () {
+# Generate module dependency files.  During offline image construction
+# ($D set) run the cross depmod against the staged System.map; on the
+# device itself a plain 'depmod -A' suffices.
+if [ ! -e "$D/lib/modules/${KERNEL_RELEASE}" ]; then
+	mkdir -p $D/lib/modules/${KERNEL_RELEASE}
+fi
+if [ -n "$D" ]; then
+	${HOST_PREFIX}depmod-${KERNEL_MAJOR_VERSION} -A -b $D -F ${STAGING_KERNEL_DIR}/System.map-${KERNEL_RELEASE} ${KERNEL_VERSION}
+else
+	depmod -A
+fi
+}
+
+pkg_postinst_modules () {
+if [ -n "$D" ]; then
+ ${HOST_PREFIX}depmod-${KERNEL_MAJOR_VERSION} -A -b $D -F ${STAGING_KERNEL_DIR}/System.map-${KERNEL_RELEASE} ${KERNEL_VERSION}
+else
+ depmod -A
+ update-modules || true
+fi
+}
+
+pkg_postrm_modules () {
+update-modules || true
+}
+
+autoload_postinst_fragment() {
+if [ x"$D" = "x" ]; then
+ modprobe %s || true
+fi
+}
+
+# autoload defaults (alphabetically sorted)
+module_autoload_hidp = "hidp"
+module_autoload_ipv6 = "ipv6"
+module_autoload_ipsec = "ipsec"
+module_autoload_ircomm-tty = "ircomm-tty"
+module_autoload_rfcomm = "rfcomm"
+module_autoload_sa1100-rtc = "sa1100-rtc"
+
+# alias defaults (alphabetically sorted)
+module_conf_af_packet = "alias net-pf-17 af_packet"
+module_conf_bluez = "alias net-pf-31 bluez"
+module_conf_bnep = "alias bt-proto-4 bnep"
+module_conf_hci_uart = "alias tty-ldisc-15 hci_uart"
+module_conf_l2cap = "alias bt-proto-0 l2cap"
+module_conf_sco = "alias bt-proto-2 sco"
+module_conf_rfcomm = "alias bt-proto-3 rfcomm"
+
+python populate_packages_prepend () {
+	def extract_modinfo(file):
+		# Extract the .modinfo ELF section of a kernel module into a
+		# temp file via objcopy and parse its NUL-separated key=value
+		# records into a dict.
+		import os, re
+		# NOTE(review): os.tmpnam() is subject to a tmp-file race (and
+		# emits a RuntimeWarning); tempfile.mkstemp would be safer.
+		tmpfile = os.tmpnam()
+		cmd = "PATH=\"%s\" %sobjcopy -j .modinfo -O binary %s %s" % (bb.data.getVar("PATH", d, 1), bb.data.getVar("HOST_PREFIX", d, 1) or "", file, tmpfile)
+		os.system(cmd)
+		f = open(tmpfile)
+		l = f.read().split("\000")
+		f.close()
+		os.unlink(tmpfile)
+		exp = re.compile("([^=]+)=(.*)")
+		vals = {}
+		for i in l:
+			m = exp.match(i)
+			if not m:
+				continue
+			vals[m.group(1)] = m.group(2)
+		return vals
+
+ def parse_depmod():
+ import os, re
+
+ dvar = bb.data.getVar('D', d, 1)
+ if not dvar:
+ bb.error("D not defined")
+ return
+
+ kernelver = bb.data.getVar('KERNEL_RELEASE', d, 1)
+ kernelver_stripped = kernelver
+ m = re.match('^(.*-hh.*)[\.\+].*$', kernelver)
+ if m:
+ kernelver_stripped = m.group(1)
+ path = bb.data.getVar("PATH", d, 1)
+ host_prefix = bb.data.getVar("HOST_PREFIX", d, 1) or ""
+ major_version = bb.data.getVar('KERNEL_MAJOR_VERSION', d, 1)
+
+ cmd = "PATH=\"%s\" %sdepmod-%s -n -a -r -b %s -F %s/boot/System.map-%s %s" % (path, host_prefix, major_version, dvar, dvar, kernelver, kernelver_stripped)
+ f = os.popen(cmd, 'r')
+
+ deps = {}
+ pattern0 = "^(.*\.k?o):..*$"
+ pattern1 = "^(.*\.k?o):\s*(.*\.k?o)\s*$"
+ pattern2 = "^(.*\.k?o):\s*(.*\.k?o)\s*\\\$"
+ pattern3 = "^\t(.*\.k?o)\s*\\\$"
+ pattern4 = "^\t(.*\.k?o)\s*$"
+
+ line = f.readline()
+ while line:
+ if not re.match(pattern0, line):
+ line = f.readline()
+ continue
+ m1 = re.match(pattern1, line)
+ if m1:
+ deps[m1.group(1)] = m1.group(2).split()
+ else:
+ m2 = re.match(pattern2, line)
+ if m2:
+ deps[m2.group(1)] = m2.group(2).split()
+ line = f.readline()
+ m3 = re.match(pattern3, line)
+ while m3:
+ deps[m2.group(1)].extend(m3.group(1).split())
+ line = f.readline()
+ m3 = re.match(pattern3, line)
+ m4 = re.match(pattern4, line)
+ deps[m2.group(1)].extend(m4.group(1).split())
+ line = f.readline()
+ f.close()
+ return deps
+
+	def get_dependencies(file, pattern, format):
+		# Translate the depmod dependency list for 'file' into package
+		# names: 'pattern' extracts the module name from each dependency
+		# path, 'format' turns it into a package name; a kernel-major
+		# suffix is appended when PARALLEL_INSTALL_MODULES is enabled.
+		file = file.replace(bb.data.getVar('D', d, 1) or '', '', 1)
+
+		if module_deps.has_key(file):
+			import os.path, re
+			dependencies = []
+			for i in module_deps[file]:
+				m = re.match(pattern, os.path.basename(i))
+				if not m:
+					continue
+				on = legitimize_package_name(m.group(1))
+				dependency_pkg = format % on
+				v = bb.data.getVar("PARALLEL_INSTALL_MODULES", d, 1) or "0"
+				if v == "1":
+					kv = bb.data.getVar("KERNEL_MAJOR_VERSION", d, 1)
+					dependency_pkg = "%s-%s" % (dependency_pkg, kv)
+				dependencies.append(dependency_pkg)
+			return dependencies
+		return []
+
+	def frob_metadata(file, pkg, pattern, format, basename):
+		# Per-module hook for do_split_packages: emit autoload and
+		# modconf fragments under /etc/modutils, fold the modinfo
+		# description into DESCRIPTION, and add inter-module RDEPENDS.
+		import re
+		vals = extract_modinfo(file)
+
+		dvar = bb.data.getVar('D', d, 1)
+
+		# If autoloading is requested, output /etc/modutils/<name> and append
+		# appropriate modprobe commands to the postinst
+		autoload = bb.data.getVar('module_autoload_%s' % basename, d, 1)
+		if autoload:
+			name = '%s/etc/modutils/%s' % (dvar, basename)
+			f = open(name, 'w')
+			for m in autoload.split():
+				f.write('%s\n' % m)
+			f.close()
+			postinst = bb.data.getVar('pkg_postinst_%s' % pkg, d, 1)
+			if not postinst:
+				bb.fatal("pkg_postinst_%s not defined" % pkg)
+			# autoload_postinst_fragment carries a %s placeholder that
+			# receives the autoload module list here
+			postinst += bb.data.getVar('autoload_postinst_fragment', d, 1) % autoload
+			bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
+
+		# Write out any modconf fragment
+		modconf = bb.data.getVar('module_conf_%s' % basename, d, 1)
+		if modconf:
+			name = '%s/etc/modutils/%s.conf' % (dvar, basename)
+			f = open(name, 'w')
+			f.write("%s\n" % modconf)
+			f.close()
+
+		files = bb.data.getVar('FILES_%s' % pkg, d, 1)
+		files = "%s /etc/modutils/%s /etc/modutils/%s.conf" % (files, basename, basename)
+		bb.data.setVar('FILES_%s' % pkg, files, d)
+
+		if vals.has_key("description"):
+			old_desc = bb.data.getVar('DESCRIPTION_' + pkg, d, 1) or ""
+			bb.data.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"], d)
+
+		rdepends_str = bb.data.getVar('RDEPENDS_' + pkg, d, 1)
+		if rdepends_str:
+			rdepends = rdepends_str.split()
+		else:
+			rdepends = []
+		rdepends.extend(get_dependencies(file, pattern, format))
+		bb.data.setVar('RDEPENDS_' + pkg, ' '.join(rdepends), d)
+
+	module_deps = parse_depmod()
+	module_regex = '^(.*)\.k?o$'
+	module_pattern = 'kernel-module-%s'
+
+	postinst = bb.data.getVar('pkg_postinst_modules', d, 1)
+	postrm = bb.data.getVar('pkg_postrm_modules', d, 1)
+	do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='update-modules kernel-image-%s' % bb.data.getVar("KERNEL_VERSION", d, 1))
+
+	import re, os
+	# Create an empty "kernel-modules" meta package whose RDEPENDS pull
+	# in the generated packages, excluding the blacklisted base packages
+	# and the per-module packages collected from depmod output.
+	metapkg = "kernel-modules"
+	bb.data.setVar('ALLOW_EMPTY_' + metapkg, "1", d)
+	bb.data.setVar('FILES_' + metapkg, "", d)
+	blacklist = [ 'kernel-dev', 'kernel-image' ]
+	for l in module_deps.values():
+		for i in l:
+			pkg = module_pattern % legitimize_package_name(re.match(module_regex, os.path.basename(i)).group(1))
+			blacklist.append(pkg)
+	metapkg_rdepends = []
+	packages = bb.data.getVar('PACKAGES', d, 1).split()
+	# NOTE(review): packages[1:] presumably skips the main 'kernel'
+	# package (first in PACKAGES) — confirm ordering assumption.
+	for pkg in packages[1:]:
+		if not pkg in blacklist and not pkg in metapkg_rdepends:
+			metapkg_rdepends.append(pkg)
+	bb.data.setVar('RDEPENDS_' + metapkg, ' '.join(metapkg_rdepends), d)
+	bb.data.setVar('DESCRIPTION_' + metapkg, 'Kernel modules meta package', d)
+	packages.append(metapkg)
+	bb.data.setVar('PACKAGES', ' '.join(packages), d)
+
+	v = bb.data.getVar("PARALLEL_INSTALL_MODULES", d, 1) or "0"
+	if v == "1":
+		# Rename each kernel-module-* package to carry the kernel major
+		# version and add a versioned meta package, so modules for
+		# several kernels can coexist; the unversioned name survives as
+		# an RPROVIDES alias.
+		kv = bb.data.getVar("KERNEL_MAJOR_VERSION", d, 1)
+		packages = bb.data.getVar("PACKAGES", d, 1).split()
+		module_re = re.compile("^kernel-module-")
+
+		newmetapkg = "kernel-modules-%s" % kv
+		bb.data.setVar('ALLOW_EMPTY_' + newmetapkg, "1", d)
+		bb.data.setVar('FILES_' + newmetapkg, "", d)
+
+		newmetapkg_rdepends = []
+
+		for p in packages:
+			if not module_re.match(p):
+				continue
+			pkg = bb.data.getVar("PKG_%s" % p, d, 1) or p
+			newpkg = "%s-%s" % (pkg, kv)
+			bb.data.setVar("PKG_%s" % p, newpkg, d)
+			rprovides = bb.data.getVar("RPROVIDES_%s" % p, d, 1)
+			if rprovides:
+				rprovides = "%s %s" % (rprovides, pkg)
+			else:
+				rprovides = pkg
+			bb.data.setVar("RPROVIDES_%s" % p, rprovides, d)
+			newmetapkg_rdepends.append(newpkg)
+
+		bb.data.setVar('RDEPENDS_' + newmetapkg, ' '.join(newmetapkg_rdepends), d)
+		bb.data.setVar('DESCRIPTION_' + newmetapkg, 'Kernel modules meta package', d)
+		packages.append(newmetapkg)
+		bb.data.setVar('PACKAGES', ' '.join(packages), d)
+
+}
diff --git a/meta/classes/lib_package.bbclass b/meta/classes/lib_package.bbclass
new file mode 100644
index 0000000000..e29d2659b0
--- /dev/null
+++ b/meta/classes/lib_package.bbclass
@@ -0,0 +1,9 @@
+PACKAGES = "${PN} ${PN}-dev ${PN}-doc ${PN}-bin"
+
+FILES_${PN} = "${libexecdir} ${libdir}/lib*.so.* \
+ ${sysconfdir} ${sharedstatedir} ${localstatedir} \
+ /lib/*.so* ${datadir}/${PN} ${libdir}/${PN}"
+FILES_${PN}-dev = "${includedir} ${libdir}/lib*.so ${libdir}/*.la \
+ ${libdir}/*.a ${libdir}/pkgconfig /lib/*.a /lib/*.o \
+ ${datadir}/aclocal ${bindir}/*-config"
+FILES_${PN}-bin = "${bindir} ${sbindir} /bin /sbin"
diff --git a/meta/classes/linux_modules.bbclass b/meta/classes/linux_modules.bbclass
new file mode 100644
index 0000000000..d5c4e74ca1
--- /dev/null
+++ b/meta/classes/linux_modules.bbclass
@@ -0,0 +1,19 @@
+def get_kernelmajorversion(p):
+	# Return the leading "major.minor" portion of version string 'p',
+	# or None if it does not match.
+	# NOTE(review): duplicates the helper in kernel.bbclass — keep in sync.
+	import re
+	r = re.compile("([0-9]+\.[0-9]+).*")
+	m = r.match(p);
+	if m:
+		return m.group(1)
+	return None
+
+def linux_module_packages(s, d):
+	# Map a space-separated list of module names in 's' to their
+	# "kernel-module-<name>" package names (lowercased, '_'->'-',
+	# '@'->'+'), appending the staged kernel major version when
+	# PARALLEL_INSTALL_MODULES is enabled.
+	import bb, os.path
+	suffix = ""
+	if (bb.data.getVar("PARALLEL_INSTALL_MODULES", d, 1) == "1"):
+		file = bb.data.expand('${STAGING_KERNEL_DIR}/kernel-abiversion', d)
+		if (os.path.exists(file)):
+			suffix = "-%s" % (get_kernelmajorversion(base_read_file(file)))
+	return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
+
+# that's all
+
diff --git a/meta/classes/manifest.bbclass b/meta/classes/manifest.bbclass
new file mode 100644
index 0000000000..687f4b756e
--- /dev/null
+++ b/meta/classes/manifest.bbclass
@@ -0,0 +1,80 @@
+
+python read_manifest () {
+ import sys, bb.manifest
+ mfn = bb.data.getVar("MANIFEST", d, 1)
+ if os.access(mfn, os.R_OK):
+ # we have a manifest, so emit do_stage and do_populate_pkgs,
+ # and stuff some additional bits of data into the metadata store
+ mfile = file(mfn, "r")
+ manifest = bb.manifest.parse(mfile, d)
+ if not manifest:
+ return
+
+ bb.data.setVar('manifest', manifest, d)
+}
+
+python parse_manifest () {
+	# Turn the cached manifest into concrete task bodies
+	# (manifest_do_populate_staging / manifest_do_populate_pkgs) and
+	# derive PACKAGES from the manifest's 'pkg' entries.
+	manifest = bb.data.getVar("manifest", d)
+	if not manifest:
+		return
+	for func in ("do_populate_staging", "do_populate_pkgs"):
+		value = bb.manifest.emit(func, manifest, d)
+		if value:
+			bb.data.setVar("manifest_" + func, value, d)
+			# emitted bodies are shell, not python, and need no fakeroot
+			bb.data.delVarFlag("manifest_" + func, "python", d)
+			bb.data.delVarFlag("manifest_" + func, "fakeroot", d)
+			bb.data.setVarFlag("manifest_" + func, "func", 1, d)
+	packages = []
+	for l in manifest:
+		if "pkg" in l and l["pkg"] is not None:
+			packages.append(l["pkg"])
+	bb.data.setVar("PACKAGES", " ".join(packages), d)
+}
+
+python __anonymous () {
+ try:
+ bb.build.exec_func('read_manifest', d)
+ bb.build.exec_func('parse_manifest', d)
+ except exceptions.KeyboardInterrupt:
+ raise
+ except Exception, e:
+ bb.error("anonymous function: %s" % e)
+ pass
+}
+
+#python do_populate_staging () {
+# if not bb.data.getVar('manifest', d):
+# bb.build.exec_func('do_emit_manifest', d)
+# if bb.data.getVar('do_stage', d):
+# bb.build.exec_func('do_stage', d)
+# else:
+# bb.build.exec_func('manifest_do_populate_staging', d)
+#}
+
+#addtask populate_pkgs after do_compile
+#python do_populate_pkgs () {
+# if not bb.data.getVar('manifest', d):
+# bb.build.exec_func('do_emit_manifest', d)
+# bb.build.exec_func('manifest_do_populate_pkgs', d)
+# bb.build.exec_func('package_do_shlibs', d)
+#}
+
+addtask emit_manifest
+python do_emit_manifest () {
+# FIXME: emit a manifest here
+# 1) adjust PATH to hit the wrapper scripts
+ wrappers = bb.which(bb.data.getVar("BBPATH", d, 1), 'build/install', 0)
+ path = (bb.data.getVar('PATH', d, 1) or '').split(':')
+ path.insert(0, os.path.dirname(wrappers))
+ bb.data.setVar('PATH', ':'.join(path), d)
+# 2) exec_func("do_install", d)
+ bb.build.exec_func('do_install', d)
+# 3) read in data collected by the wrappers
+ print("Got here2 213")
+ bb.build.exec_func('read_manifest', d)
+# 4) mangle the manifest we just generated, get paths back into
+# our variable form
+# 5) write it back out
+# 6) re-parse it to ensure the generated functions are proper
+ bb.build.exec_func('parse_manifest', d)
+}
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
new file mode 100644
index 0000000000..da5bd01dae
--- /dev/null
+++ b/meta/classes/module-base.bbclass
@@ -0,0 +1,25 @@
+inherit module_strip
+
+inherit kernel-arch
+
+export OS = "${TARGET_OS}"
+export CROSS_COMPILE = "${TARGET_PREFIX}"
+
+export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion')}"
+export KERNEL_SOURCE = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-source')}"
+KERNEL_OBJECT_SUFFIX = "${@[".o", ".ko"][base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion') > "2.6.0"]}"
+KERNEL_CCSUFFIX = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-ccsuffix')}"
+KERNEL_LDSUFFIX = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-ldsuffix')}"
+
+# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
+# specific options necessary for building the kernel and modules.
+TARGET_CC_KERNEL_ARCH ?= ""
+HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
+TARGET_LD_KERNEL_ARCH ?= ""
+HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
+
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc${KERNEL_CCSUFFIX} ${HOST_CC_KERNEL_ARCH}"
+KERNEL_LD = "${LD}${KERNEL_LDSUFFIX} ${HOST_LD_KERNEL_ARCH}"
+
+# kernel modules are generally machine specific
+PACKAGE_ARCH = "${MACHINE_ARCH}"
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
new file mode 100644
index 0000000000..8a13f1f858
--- /dev/null
+++ b/meta/classes/module.bbclass
@@ -0,0 +1,51 @@
+RDEPENDS += "kernel (${KERNEL_VERSION})"
+DEPENDS += "virtual/kernel"
+
+inherit module-base
+
+python populate_packages_prepend() {
+	# When PARALLEL_INSTALL_MODULES is enabled, rename every package of
+	# this module recipe to <pkg>-<kernelversion> so builds for several
+	# kernels can be installed side by side; the unversioned name is
+	# kept as an RPROVIDES alias.
+	v = bb.data.getVar("PARALLEL_INSTALL_MODULES", d, 1) or "0"
+	if v == "1":
+		kv = bb.data.getVar("KERNEL_VERSION", d, 1)
+		packages = bb.data.getVar("PACKAGES", d, 1)
+		for p in packages.split():
+			pkg = bb.data.getVar("PKG_%s" % p, d, 1) or p
+			newpkg = "%s-%s" % (pkg, kv)
+			bb.data.setVar("PKG_%s" % p, newpkg, d)
+			rprovides = bb.data.getVar("RPROVIDES_%s" % p, d, 1)
+			if rprovides:
+				rprovides = "%s %s" % (rprovides, pkg)
+			else:
+				rprovides = pkg
+			bb.data.setVar("RPROVIDES_%s" % p, rprovides, d)
+}
+
+module_do_compile() {
+	# Out-of-tree module build against the staged kernel tree; userland
+	# toolchain flags must not leak into the kernel build.
+	unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+	oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
+		   KERNEL_SRC=${STAGING_KERNEL_DIR} \
+		   KERNEL_VERSION=${KERNEL_VERSION} \
+		   CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
+		   ${MAKE_TARGETS}
+}
+
+module_do_install() {
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+ oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" modules_install
+}
+
+pkg_postinst_append () {
+	# depmod/update-modules must run on the target; with offline install
+	# ($D set) fail the postinst instead.
+	# NOTE(review): presumably the package manager re-runs failed
+	# postinsts on first boot — confirm.
+	if [ -n "$D" ]; then
+		exit 1
+	fi
+	depmod -A
+	update-modules || true
+}
+
+pkg_postrm_append () {
+ update-modules || true
+}
+
+EXPORT_FUNCTIONS do_compile do_install
+
+FILES_${PN} = "/etc /lib/modules"
diff --git a/meta/classes/module_strip.bbclass b/meta/classes/module_strip.bbclass
new file mode 100644
index 0000000000..116e8b902f
--- /dev/null
+++ b/meta/classes/module_strip.bbclass
@@ -0,0 +1,18 @@
+#DEPENDS_append = " module-strip"
+
+do_strip_modules () {
+	# Strip debugging symbols (strip -g) from every kernel module found
+	# in each package's install tree.
+	for p in ${PACKAGES}; do
+		if test -e ${WORKDIR}/install/$p/lib/modules; then
+			modules="`find ${WORKDIR}/install/$p/lib/modules -name \*${KERNEL_OBJECT_SUFFIX}`"
+			if [ -n "$modules" ]; then
+				${STRIP} -v -g $modules
+# NM="${CROSS_DIR}/bin/${HOST_PREFIX}nm" OBJCOPY="${CROSS_DIR}/bin/${HOST_PREFIX}objcopy" strip_module $modules
+			fi
+		fi
+	done
+}
+
+python do_package_append () {
+	# Strip modules after packaging unless stripping is inhibited.
+	if (bb.data.getVar('INHIBIT_PACKAGE_STRIP', d, 1) != '1'):
+		bb.build.exec_func('do_strip_modules', d)
+}
diff --git a/meta/classes/mozilla.bbclass b/meta/classes/mozilla.bbclass
new file mode 100644
index 0000000000..629f2531da
--- /dev/null
+++ b/meta/classes/mozilla.bbclass
@@ -0,0 +1,53 @@
+SECTION = "x11/utils"
+DEPENDS += "gnu-config-native virtual/libintl xt libxi \
+ zip-native gtk+ orbit2 libidl-native"
+LICENSE = "MPL NPL"
+SRC_URI += "file://mozconfig"
+
+inherit gettext
+
+EXTRA_OECONF = "--target=${TARGET_SYS} --host=${BUILD_SYS} \
+ --build=${BUILD_SYS} --prefix=${prefix}"
+EXTRA_OEMAKE = "'HOST_LIBIDL_LIBS=${HOST_LIBIDL_LIBS}' \
+ 'HOST_LIBIDL_CFLAGS=${HOST_LIBIDL_CFLAGS}'"
+SELECTED_OPTIMIZATION = "-Os -fsigned-char -fno-strict-aliasing"
+
+export CROSS_COMPILE = "1"
+export MOZCONFIG = "${WORKDIR}/mozconfig"
+export MOZ_OBJDIR = "${S}"
+
+export CONFIGURE_ARGS = "${EXTRA_OECONF}"
+export HOST_LIBIDL_CFLAGS = "`${HOST_LIBIDL_CONFIG} --cflags`"
+export HOST_LIBIDL_LIBS = "`${HOST_LIBIDL_CONFIG} --libs`"
+export HOST_LIBIDL_CONFIG = "PKG_CONFIG_PATH=${STAGING_BINDIR}/../share/pkgconfig pkg-config libIDL-2.0"
+export HOST_CC = "${BUILD_CC}"
+export HOST_CXX = "${BUILD_CXX}"
+export HOST_CFLAGS = "${BUILD_CFLAGS}"
+export HOST_CXXFLAGS = "${BUILD_CXXFLAGS}"
+export HOST_LDFLAGS = "${BUILD_LDFLAGS}"
+export HOST_RANLIB = "${BUILD_RANLIB}"
+export HOST_AR = "${BUILD_AR}"
+
+mozilla_do_configure() {
+	# Replace every config.guess/config.sub in the tree with the staged
+	# gnu-config copies, then generate Makefile and config.status
+	# through Mozilla's client.mk driver.
+	(
+	set -e
+	for cg in `find ${S} -name config.guess`; do
+		install -m 0755 \
+		${STAGING_BINDIR}/../share/gnu-config/config.guess \
+		${STAGING_BINDIR}/../share/gnu-config/config.sub \
+		`dirname $cg`/
+	done
+	)
+	oe_runmake -f client.mk ${MOZ_OBJDIR}/Makefile \
+		${MOZ_OBJDIR}/config.status
+}
+
+mozilla_do_compile() {
+ oe_runmake -f client.mk build_all
+}
+
+mozilla_do_install() {
+ oe_runmake DESTDIR="${D}" destdir="${D}" install
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/multimachine.bbclass b/meta/classes/multimachine.bbclass
new file mode 100644
index 0000000000..2248f326cc
--- /dev/null
+++ b/meta/classes/multimachine.bbclass
@@ -0,0 +1,22 @@
+STAMP = "${TMPDIR}/stamps/${MULTIMACH_ARCH}-${HOST_OS}/${PF}"
+WORKDIR = "${TMPDIR}/work/${MULTIMACH_ARCH}-${HOST_OS}/${PF}"
+STAGING_KERNEL_DIR = "${STAGING_DIR}/${MULTIMACH_ARCH}-${HOST_OS}/kernel"
+
+# Find any machine specific sub packages and if present, mark the
+# whole package as machine specific for multimachine purposes.
+python __anonymous () {
+	# If any sub-package is tagged with the machine arch, promote the
+	# whole recipe's MULTIMACH_ARCH to MACHINE_ARCH so its stamps and
+	# workdir become machine specific.
+	packages = bb.data.getVar('PACKAGES', d, 1).split()
+	macharch = bb.data.getVar('MACHINE_ARCH', d, 1)
+	multiarch = bb.data.getVar('PACKAGE_ARCH', d, 1)
+
+	for pkg in packages:
+		pkgarch = bb.data.getVar("PACKAGE_ARCH_%s" % pkg, d, 1)
+
+		# We could look for != PACKAGE_ARCH here but how to choose
+		# if multiple differences are present?
+		# Look through IPKG_ARCHS for the priority order?
+		if pkgarch and pkgarch == macharch:
+			multiarch = macharch
+
+	bb.data.setVar('MULTIMACH_ARCH', multiarch, d)
+}
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
new file mode 100644
index 0000000000..04ff7d92d1
--- /dev/null
+++ b/meta/classes/native.bbclass
@@ -0,0 +1,95 @@
+inherit base
+
+# Native packages are built indirectly via dependency,
+# no need for them to be a direct target of 'world'
+EXCLUDE_FROM_WORLD = "1"
+
+PACKAGES = ""
+PACKAGE_ARCH = "${BUILD_ARCH}"
+
+# When this class has packaging enabled, setting
+# RPROVIDES becomes unnecessary.
+RPROVIDES = "${PN}"
+
+# Need to resolve package RDEPENDS as well as DEPENDS
+BUILD_ALL_DEPS = "1"
+
+# Break the circular dependency as a result of DEPENDS
+# in package.bbclass
+PACKAGE_DEPENDS = ""
+
+TARGET_ARCH = "${BUILD_ARCH}"
+TARGET_OS = "${BUILD_OS}"
+TARGET_VENDOR = "${BUILD_VENDOR}"
+TARGET_PREFIX = "${BUILD_PREFIX}"
+TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
+
+HOST_ARCH = "${BUILD_ARCH}"
+HOST_OS = "${BUILD_OS}"
+HOST_VENDOR = "${BUILD_VENDOR}"
+HOST_PREFIX = "${BUILD_PREFIX}"
+HOST_CC_ARCH = "${BUILD_CC_ARCH}"
+
+CPPFLAGS = "${BUILD_CPPFLAGS}"
+CFLAGS = "${BUILD_CFLAGS}"
+CXXFLAGS = "${BUILD_CFLAGS}"
+LDFLAGS = "${BUILD_LDFLAGS}"
+LDFLAGS_build-darwin = "-L${STAGING_DIR}/${BUILD_SYS}/lib "
+
+
+# set the compiler as well. It could have been set to something else
+export CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_ARCH}"
+export CXX = "${CCACHE}${HOST_PREFIX}g++ ${HOST_CC_ARCH}"
+export F77 = "${CCACHE}${HOST_PREFIX}g77 ${HOST_CC_ARCH}"
+export CPP = "${HOST_PREFIX}gcc -E"
+export LD = "${HOST_PREFIX}ld"
+export CCLD = "${CC}"
+export AR = "${HOST_PREFIX}ar"
+export AS = "${HOST_PREFIX}as"
+export RANLIB = "${HOST_PREFIX}ranlib"
+export STRIP = "${HOST_PREFIX}strip"
+
+
+# Path prefixes
+base_prefix = "${exec_prefix}"
+prefix = "${STAGING_DIR}"
+exec_prefix = "${STAGING_DIR}/${BUILD_ARCH}-${BUILD_OS}"
+
+# Base paths
+base_bindir = "${base_prefix}/bin"
+base_sbindir = "${base_prefix}/bin"
+base_libdir = "${base_prefix}/lib"
+
+# Architecture independent paths
+sysconfdir = "${prefix}/etc"
+sharedstatedir = "${prefix}/com"
+localstatedir = "${prefix}/var"
+infodir = "${datadir}/info"
+mandir = "${datadir}/man"
+docdir = "${datadir}/doc"
+servicedir = "${prefix}/srv"
+
+# Architecture dependent paths
+bindir = "${exec_prefix}/bin"
+sbindir = "${exec_prefix}/bin"
+libexecdir = "${exec_prefix}/libexec"
+libdir = "${exec_prefix}/lib"
+includedir = "${exec_prefix}/include"
+oldincludedir = "${exec_prefix}/include"
+
+# Datadir is made arch dependent here, primarily
+# for autoconf macros, and other things that
+# may be manipulated to handle crosscompilation
+# issues.
+datadir = "${exec_prefix}/share"
+
+do_stage () {
+	# Native packages install straight into the staging prefix unless
+	# the recipe explicitly inhibits it.
+	if [ "${INHIBIT_NATIVE_STAGE_INSTALL}" != "1" ]
+	then
+		oe_runmake install
+	fi
+}
+
+do_install () {
+ true
+}
diff --git a/meta/classes/nslu2-jffs2-image.bbclass b/meta/classes/nslu2-jffs2-image.bbclass
new file mode 100644
index 0000000000..56ad0f0659
--- /dev/null
+++ b/meta/classes/nslu2-jffs2-image.bbclass
@@ -0,0 +1,18 @@
+NSLU2_SLUGIMAGE_ARGS ?= ""
+
+# Assemble an NSLU2 flash image: stage RedBoot/Trailer/SysConf plus the
+# kernel and jffs2 rootfs into a scratch dir, run slugimage, clean up.
+# NOTE(review): a near-identical copy lives in nslu2-ramdisk-image.bbclass;
+# consider sharing.
+nslu2_pack_image () {
+	install -d ${DEPLOY_DIR_IMAGE}/slug
+	install -m 0644 ${STAGING_LIBDIR}/nslu2-binaries/RedBoot \
+		${STAGING_LIBDIR}/nslu2-binaries/Trailer \
+		${STAGING_LIBDIR}/nslu2-binaries/SysConf \
+		${DEPLOY_DIR_IMAGE}/slug/
+	install -m 0644 ${DEPLOY_DIR_IMAGE}/zImage-${IMAGE_BASENAME} ${DEPLOY_DIR_IMAGE}/slug/vmlinuz
+	install -m 0644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 ${DEPLOY_DIR_IMAGE}/slug/flashdisk.jffs2
+	cd ${DEPLOY_DIR_IMAGE}/slug
+	slugimage -p -b RedBoot -s SysConf -r Ramdisk:1,Flashdisk:flashdisk.jffs2 -t Trailer \
+		-o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.flashdisk.img ${NSLU2_SLUGIMAGE_ARGS}
+	rm -rf ${DEPLOY_DIR_IMAGE}/slug
+}
+
+EXTRA_IMAGEDEPENDS += 'slugimage-native nslu2-linksys-firmware'
+IMAGE_POSTPROCESS_COMMAND += "nslu2_pack_image; "
diff --git a/meta/classes/nslu2-mirrors.bbclass b/meta/classes/nslu2-mirrors.bbclass
new file mode 100644
index 0000000000..1181edc716
--- /dev/null
+++ b/meta/classes/nslu2-mirrors.bbclass
@@ -0,0 +1,4 @@
+MIRRORS_append () {
+ftp://.*/.*/ http://sources.nslu2-linux.org/sources/
+http://.*/.*/ http://sources.nslu2-linux.org/sources/
+}
diff --git a/meta/classes/nslu2-ramdisk-image.bbclass b/meta/classes/nslu2-ramdisk-image.bbclass
new file mode 100644
index 0000000000..0b545854fd
--- /dev/null
+++ b/meta/classes/nslu2-ramdisk-image.bbclass
@@ -0,0 +1,18 @@
+NSLU2_SLUGIMAGE_ARGS ?= ""
+
+nslu2_pack_image () {
+ install -d ${DEPLOY_DIR_IMAGE}/slug
+ install -m 0644 ${STAGING_LIBDIR}/nslu2-binaries/RedBoot \
+ ${STAGING_LIBDIR}/nslu2-binaries/Trailer \
+ ${STAGING_LIBDIR}/nslu2-binaries/SysConf \
+ ${DEPLOY_DIR_IMAGE}/slug/
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/zImage-${IMAGE_BASENAME} ${DEPLOY_DIR_IMAGE}/slug/vmlinuz
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext2.gz ${DEPLOY_DIR_IMAGE}/slug/ramdisk.ext2.gz
+ cd ${DEPLOY_DIR_IMAGE}/slug
+ slugimage -p -b RedBoot -s SysConf -r Ramdisk:ramdisk.ext2.gz -t Trailer \
+ -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.ramdisk.img ${NSLU2_SLUGIMAGE_ARGS}
+ rm -rf ${DEPLOY_DIR_IMAGE}/slug
+}
+
+EXTRA_IMAGEDEPENDS += 'slugimage-native nslu2-linksys-firmware'
+IMAGE_POSTPROCESS_COMMAND += "nslu2_pack_image; "
diff --git a/meta/classes/nylon-mirrors.bbclass b/meta/classes/nylon-mirrors.bbclass
new file mode 100644
index 0000000000..2986bd8f80
--- /dev/null
+++ b/meta/classes/nylon-mirrors.bbclass
@@ -0,0 +1,6 @@
+MIRRORS_append () {
+ftp://.*/.*/ http://meshcube.org/nylon/stable/sources/
+http://.*/.*/ http://meshcube.org/nylon/stable/sources/
+ftp://.*/.*/ http://meshcube.org/nylon/unstable/sources/
+http://.*/.*/ http://meshcube.org/nylon/unstable/sources/
+} \ No newline at end of file
diff --git a/meta/classes/oebuildstamp.bbclass b/meta/classes/oebuildstamp.bbclass
new file mode 100644
index 0000000000..1de1b95c2e
--- /dev/null
+++ b/meta/classes/oebuildstamp.bbclass
@@ -0,0 +1,16 @@
+#
+# Because base.oeclasses set the variable
+#
+# do_fetch[nostamp] = "1"
+# do_build[nostamp] = "1"
+#
+# for every build we're doing, oemake calls all of the phases to check if
+# something new is to be downloaded. This class unsets this nostamp flag,
+# which makes a package "finished" once it has been completely built.
+#
+# This means that the subsequent builds are faster, but when you change the
+# behaviour of the package, e.g. by adding INHERIT="package_ipk", you won't
+# get the ipk file except you delete the build stamp manually or all of them
+# with oebuild clean <oe-file>.
+
+do_build[nostamp] = ""
diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass
new file mode 100644
index 0000000000..baa1c630b4
--- /dev/null
+++ b/meta/classes/oelint.bbclass
@@ -0,0 +1,174 @@
+addtask lint before do_fetch
+do_lint[nostamp] = 1
+python do_lint() {
+ def testVar(var, explain=None):
+ try:
+ s = d[var]
+ return s["content"]
+ except KeyError:
+ bb.error("%s is not set" % var)
+ if explain: bb.note(explain)
+ return None
+
+
+ ##############################
+ # Test that DESCRIPTION exists
+ #
+ testVar("DESCRIPTION")
+
+
+ ##############################
+ # Test that HOMEPAGE exists
+ #
+ s = testVar("HOMEPAGE")
+ if s=="unknown":
+ bb.error("HOMEPAGE is not set")
+ elif not s.startswith("http://"):
+ bb.error("HOMEPAGE doesn't start with http://")
+
+
+
+ ##############################
+ # Test for valid LICENSE
+ #
+ valid_licenses = {
+ "GPL-2" : "GPLv2",
+ "GPL LGPL FDL" : True,
+ "GPL PSF" : True,
+ "GPL/QPL" : True,
+ "GPL" : True,
+ "GPLv2" : True,
+ "IBM" : True,
+ "LGPL GPL" : True,
+ "LGPL" : True,
+ "MIT" : True,
+ "OSL" : True,
+ "Perl" : True,
+ "Public Domain" : True,
+ "QPL" : "GPL/QPL",
+ }
+ s = testVar("LICENSE")
+ if s=="unknown":
+ bb.error("LICENSE is not set")
+ elif s.startswith("Vendor"):
+ pass
+ else:
+ try:
+ newlic = valid_licenses[s]
+ if newlic == False:
+ bb.note("LICENSE '%s' is not recommended" % s)
+ elif newlic != True:
+                bb.note("LICENSE '%s' is not recommended, better use '%s'" % (s, newlic))
+ except:
+ bb.note("LICENSE '%s' is not recommended" % s)
+
+
+ ##############################
+ # Test for valid MAINTAINER
+ #
+ s = testVar("MAINTAINER")
+ if s=="OpenEmbedded Team <oe@handhelds.org>":
+ bb.error("explicit MAINTAINER is missing, using default")
+ elif s and s.find("@") == -1:
+ bb.error("You forgot to put an e-mail address into MAINTAINER")
+
+
+ ##############################
+ # Test for valid SECTION
+ #
+ # if Correct section: True section name is valid
+ # False section name is invalid, no suggestion
+ # string section name is invalid, better name suggested
+ #
+ valid_sections = {
+ # Current Section Correct section
+ "apps" : True,
+ "audio" : True,
+ "base" : True,
+ "console/games" : True,
+ "console/net" : "console/network",
+ "console/network" : True,
+ "console/utils" : True,
+ "devel" : True,
+ "developing" : "devel",
+ "devel/python" : True,
+ "fonts" : True,
+ "games" : True,
+ "games/libs" : True,
+ "gnome/base" : True,
+ "gnome/libs" : True,
+ "gpe" : True,
+ "gpe/libs" : True,
+ "gui" : False,
+ "libc" : "libs",
+ "libs" : True,
+ "libs/net" : True,
+ "multimedia" : True,
+ "net" : "network",
+ "NET" : "network",
+ "network" : True,
+ "opie/applets" : True,
+ "opie/applications" : True,
+ "opie/base" : True,
+ "opie/codecs" : True,
+ "opie/decorations" : True,
+ "opie/fontfactories" : True,
+ "opie/fonts" : True,
+ "opie/games" : True,
+ "opie/help" : True,
+ "opie/inputmethods" : True,
+ "opie/libs" : True,
+ "opie/multimedia" : True,
+ "opie/pim" : True,
+ "opie/setting" : "opie/settings",
+ "opie/settings" : True,
+ "opie/Shell" : False,
+ "opie/styles" : True,
+ "opie/today" : True,
+ "scientific" : True,
+ "utils" : True,
+ "x11" : True,
+ "x11/libs" : True,
+ "x11/wm" : True,
+ }
+ s = testVar("SECTION")
+ if s:
+ try:
+ newsect = valid_sections[s]
+ if newsect == False:
+ bb.note("SECTION '%s' is not recommended" % s)
+ elif newsect != True:
+ bb.note("SECTION '%s' is not recommended, better use '%s'" % (s, newsect))
+ except:
+ bb.note("SECTION '%s' is not recommended" % s)
+
+ if not s.islower():
+ bb.error("SECTION should only use lower case")
+
+
+
+
+ ##############################
+ # Test for valid PRIORITY
+ #
+ valid_priorities = {
+ "standard" : True,
+ "required" : True,
+ "optional" : True,
+ "extra" : True,
+ }
+ s = testVar("PRIORITY")
+ if s:
+ try:
+ newprio = valid_priorities[s]
+ if newprio == False:
+ bb.note("PRIORITY '%s' is not recommended" % s)
+ elif newprio != True:
+ bb.note("PRIORITY '%s' is not recommended, better use '%s'" % (s, newprio))
+ except:
+ bb.note("PRIORITY '%s' is not recommended" % s)
+
+ if not s.islower():
+ bb.error("PRIORITY should only use lower case")
+
+}
diff --git a/meta/classes/opie.bbclass b/meta/classes/opie.bbclass
new file mode 100644
index 0000000000..47f364a644
--- /dev/null
+++ b/meta/classes/opie.bbclass
@@ -0,0 +1,105 @@
+#
+# This oeclass takes care about some of the itchy details of installing parts
+# of Opie applications. Depending on quicklaunch or not, plugin or not, the
+# TARGET is either a shared object, a shared object with a link to quicklauncher,
+# or a usual binary.
+#
+# You have to provide two things: 1.) A proper SECTION field, and 2.) a proper APPNAME
+# Then opie.oeclass will:
+# * create the directory for the binary and install the binary file(s)
+# * for applications: create the directory for the .desktop and install the .desktop file
+# * for quicklauncher applications: create the startup symlink to the quicklauncher
+# You can override the automatic detection of APPTYPE, valid values are 'quicklaunch', 'binary', 'plugin'
+# You can override the default location of APPDESKTOP (<workdir>/apps/<section>/)
+#
+
+inherit palmtop
+
+# Note that when CVS changes to 1.2.2, the dash
+# should be removed from OPIE_CVS_PV to convert
+# to the standardised version format
+OPIE_CVS_PV = "1.2.1+cvs-${SRCDATE}"
+
+DEPENDS_prepend = "${@["libopie2 ", ""][(bb.data.getVar('PN', d, 1) == 'libopie2')]}"
+
+# to be consistent, put all targets into workdir
+# NOTE: leave one space at the end, other files are expecting that
+EXTRA_QMAKEVARS_POST += "DESTDIR=${S} "
+
+# Opie standard TAG value
+TAG = "${@'v' + bb.data.getVar('PV',d,1).replace('.', '_')}"
+
+# plan for later:
+# add common scopes for opie applications, see qmake-native/common.pro
+# qmake should care about all the details then. qmake can do that, i know it :)
+#
+
+python opie_do_opie_install() {
+ import os, shutil
+ section = bb.data.getVar( "SECTION", d ).split( '/' )[1] or "Applications"
+ section = section.title()
+ if section in ( "Base", "Libs" ):
+ bb.note( "Section = Base or Libs. Target won't be installed automatically." )
+ return
+
+ # SECTION : BINDIR DESKTOPDIR
+ dirmap = { "Applets" : ( "/plugins/applets", None ),
+ "Applications" : ( "<BINDIR>", "/apps/Applications" ),
+ "Multimedia" : ( "<BINDIR>", "/apps/Applications" ),
+ "Games" : ( "<BINDIR>", "/apps/Games" ),
+ "Settings" : ( "<BINDIR>", "/apps/Settings" ),
+ "Pim" : ( "<BINDIR>", "/apps/1Pim" ),
+ "Examples" : ( "<BINDIR>", "/apps/Examples" ),
+ "Shell" : ( "/bin", "/apps/Opie-SH" ),
+ "Codecs" : ( "/plugins/codecs", None ),
+ "Decorations" : ( "/plugins/decorations", None ),
+ "Inputmethods" : ( "/plugins/inputmethods", None ),
+ "Fontfactories" : ( "/plugins/fontfactories", None ),
+ "Security" : ( "/plugins/security", None ),
+ "Styles" : ( "/plugins/styles", None ),
+ "Today" : ( "/plugins/today", None ),
+ "Datebook" : ( "/plugins/holidays", None ),
+ "Networksettings" : ( "/plugins/networksettings", None ) }
+
+ if section not in dirmap:
+ raise ValueError, "Unknown section '%s'. Valid sections are: %s" % ( section, dirmap.keys() )
+
+ bindir, desktopdir = dirmap[section]
+ APPNAME = bb.data.getVar( "APPNAME", d, True ) or bb.data.getVar( "PN", d, True )
+ APPTYPE = bb.data.getVar( "APPTYPE", d, True )
+ if not APPTYPE:
+ if bindir == "<BINDIR>":
+ APPTYPE = "quicklaunch"
+ else:
+ APPTYPE = "plugin"
+
+ appmap = { "binary":"/bin", "quicklaunch":"/plugins/application" }
+ if bindir == "<BINDIR>": bindir = appmap[APPTYPE]
+
+ bb.note( "Section='%s', bindir='%s', desktopdir='%s', name='%s', type='%s'" %
+ ( section, bindir, desktopdir, APPNAME, APPTYPE ) )
+
+ S = bb.data.getVar( "S", d, 1 )
+ D = "%s/image" % bb.data.getVar( "WORKDIR", d, True )
+ WORKDIR = bb.data.getVar( "WORKDIR", d, True )
+ palmtopdir = bb.data.getVar( "palmtopdir", d )
+ APPDESKTOP = bb.data.getVar( "APPDESKTOP", d, True ) or "%s/%s" % ( WORKDIR, desktopdir )
+
+ if desktopdir is not None:
+ os.system( "install -d %s%s%s/" % ( D, palmtopdir, desktopdir ) )
+ os.system( "install -m 0644 %s/%s.desktop %s%s%s/" % ( APPDESKTOP, APPNAME, D, palmtopdir, desktopdir ) )
+
+ os.system( "install -d %s%s%s/" % ( D, palmtopdir, bindir ) )
+
+ if APPTYPE == "binary":
+ os.system( "install -m 0755 %s/%s %s%s%s/" % ( S, APPNAME, D, palmtopdir, bindir ) )
+ elif APPTYPE == "quicklaunch":
+ os.system( "install -m 0755 %s/lib%s.so %s%s%s/" % ( S, APPNAME, D, palmtopdir, bindir ) )
+ os.system( "install -d %s%s/bin/" % ( D, palmtopdir ) )
+ os.system( "ln -sf %s/bin/quicklauncher %s%s/bin/%s" % ( palmtopdir, D, palmtopdir, APPNAME ) )
+ elif APPTYPE == "plugin":
+ os.system( "install -m 0755 %s/lib%s.so %s%s%s/" % ( S, APPNAME, D, palmtopdir, bindir ) )
+}
+
+EXPORT_FUNCTIONS do_opie_install
+addtask opie_install after do_compile before do_populate_staging
diff --git a/meta/classes/opie_i18n.bbclass b/meta/classes/opie_i18n.bbclass
new file mode 100644
index 0000000000..cb3d07de75
--- /dev/null
+++ b/meta/classes/opie_i18n.bbclass
@@ -0,0 +1,163 @@
+# classes/opie_i18n.oeclass Matthias 'CoreDump' Hentges 16-10-2004
+#
+# Automatically builds i18n ipks for opie packages. It downloads opie-i18n from opie CVS
+# and tries to guess the name of the .ts file based on the package name:
+# ${PN}.ts, lib${PN}.ts and opie-${PN}.ts are all valid. The .ts "guessing" can be
+# disabled by setting I18N_FILES in the .oe file.
+#
+# Todo:
+#
+
+I18N_STATS = "1"
+SRC_URI += "${HANDHELDS_CVS};module=opie/i18n"
+DEPENDS += "opie-i18n"
+
+die () {
+	echo -e "opie_i18n: ERROR: $1"
+ exit 1
+}
+
+python do_build_opie_i18n_data() {
+
+ import os, bb, re
+ workdir = bb.data.getVar("WORKDIR", d, 1)
+ packages = bb.data.getVar("PACKAGES", d, 1)
+ files = bb.data.getVar("FILES", d, 1)
+ section = bb.data.getVar("SECTION", d, 1)
+ pn = bb.data.getVar("PN", d, 1)
+ rdepends = bb.data.getVar("RDEPENDS", d, 1)
+
+ if os.path.exists(workdir + "/PACKAGES.tmp"):
+ fd = open(workdir + "/PACKAGES.tmp", 'r')
+ lines = fd.readlines()
+ fd.close()
+
+ bb.data.setVar('PACKAGES', " ".join(lines).lower() + " " + packages, d)
+
+ fd = open(workdir + "/FILES.tmp", 'r')
+ lines = fd.readlines()
+ fd.close()
+
+ for l in lines:
+ x = re.split("\#", l)
+ bb.data.setVar('FILES_%s' % x[0].lower(), " " + x[1].strip('\n'), d)
+ bb.data.setVar('SECTION_%s' % x[0].lower(), "opie/translations", d)
+ bb.data.setVar('RDEPENDS_%s' % x[0].lower(), pn, d)
+
+ bb.data.setVar('SECTION_%s' % pn, section, d)
+ bb.data.setVar('RDEPENDS', rdepends, d)
+ else:
+ bb.note("No translations found for package " + pn)
+}
+
+do_build_opie_i18n () {
+
+ cd "${WORKDIR}/i18n" || die "ERROR:\nCouldn't find Opies i18n sources in ${PN}/i18n\nMake sure that <inherit opie_i18n> or <inherit opie> is *below* <SRC_URIS =>!"
+
+ if test -z "${I18N_FILES}"
+ then
+ package_name="`echo "${PN}"| sed "s/^opie\-//"`"
+ package_name2="`echo "${PN}"| sed "s/^opie\-//;s/\-//"`"
+ test "$package_name" != "$package_name2" && I18N_FILES="${package_name}.ts lib${package_name}.ts opie-${package_name}.ts ${package_name2}.ts lib${package_name2}.ts opie-${package_name2}.ts"
+ test "$package_name" = "$package_name2" && I18N_FILES="${package_name}.ts lib${package_name}.ts opie-${package_name}.ts"
+		echo -e "I18N Datafiles: ${I18N_FILES} (auto-detected)\nYou can override the auto-detection by setting I18N_FILES in your .oe file"
+ else
+ echo "I18N Datafiles: ${I18N_FILES} (provided by .bb)"
+ fi
+
+ rm -f "${WORKDIR}/FILES.tmp" "${WORKDIR}/PACKAGES.tmp"
+
+ echo -e "\nFILES is set to [${FILES}]\n"
+
+ for file in ${I18N_FILES}
+ do
+ echo "Working on [$file]"
+ for ts_file in `ls -1 */*.ts | egrep "/$file"`
+ do
+ echo -e "\tCompiling [$ts_file]"
+ cd "${WORKDIR}/i18n/`dirname $ts_file`" || die "[${WORKDIR}/i18n/`dirname $ts_file`] not found"
+ opie-lrelease "`basename $ts_file`" || die "lrelease failed! Make sure that <inherit opie_i18n> or <inherit opie> is *below* <DEPENDS =>!"
+
+ # $lang is the language as in de_DE, $lang_sane replaces "_" with "-"
+ # to allow packaging as "_" is not allowed in a package name
+ lang="`echo "$ts_file" | sed -n "s#\(.*\)/\(.*\)#\1#p"`"
+ lang_sane="`echo "$ts_file" | sed -n "s#\(.*\)/\(.*\)#\1#p"|sed s/\_/\-/`"
+ echo -e "\tPackaging [`basename $ts_file`] for language [$lang]"
+
+ install -d ${D}${palmtopdir}/i18n/$lang
+ install -m 0644 ${WORKDIR}/i18n/$lang/.directory ${D}${palmtopdir}/i18n/$lang/
+ install -m 0644 ${WORKDIR}/i18n/$lang/*.qm "${D}${palmtopdir}/i18n/$lang/"
+
+ # As it is not possible to modify OE vars from within a _shell_ function,
+ # some major hacking was needed. These two files will be read by the python
+ # function do_build_opie_i18n_data() which sets the variables FILES_* and
+ # PACKAGES as needed.
+ echo -n "${PN}-${lang_sane} " >> "${WORKDIR}/PACKAGES.tmp"
+ echo -e "${PN}-${lang_sane}#${palmtopdir}/i18n/$lang" >> "${WORKDIR}/FILES.tmp"
+
+ ts_found_something=1
+ done
+
+ if test "$ts_found_something" != 1
+ then
+ echo -e "\tNo translations found"
+ else
+ ts_found_something=""
+ ts_found="$ts_found $file"
+ fi
+
+ # Only used for debugging purposes
+ test "${I18N_STATS}" = 1 && cd "${WORKDIR}/i18n"
+
+ echo -e "Completed [$file]\n\n"
+ done
+
+ qt_dirs="apps bin etc lib pics plugins share sounds"
+
+ for dir in $qt_dirs
+ do
+ dir_="$dir_ ${palmtopdir}/$dir "
+ done
+
+
+ # If we don't adjust FILES to exclude the i18n directory, we will end up with
+ # _lots_ of empty i18n/$lang directories in the original .ipk.
+ if (echo "${FILES}" | egrep "${palmtopdir}/? |${palmtopdir}/?$") &>/dev/null
+ then
+ echo "NOTE: FILES was set to ${palmtopdir} which would include the i18n directory"
+ echo -e "\n\nI'll remove ${palmtopdir} from FILES and replace it with all directories"
+ echo "below QtPalmtop, except i18n ($qt_dirs). See classes/opie_i18n.oeclass for details"
+
+ # Removes /opt/QtPalmtop from FILES but keeps /opt/QtPalmtop/$some_dir
+ FILES="`echo "$FILES"| sed "s#${palmtopdir}[/]\?\$\|${palmtopdir}[/]\? ##"`"
+
+ echo "${PN}#$FILES $dir_" >> "${WORKDIR}/FILES.tmp"
+ fi
+
+ # This is the common case for OPIE apps which are installed by opie.oeclass magic
+ if test -z "${FILES}"
+ then
+ echo "NOTE:"
+ echo -e "Since FILES is empty, i'll add all directories below ${palmtopdir} to it,\nexcluding i18n: ( $qt_dirs )"
+ echo "${PN}#$FILES $dir_" >> "${WORKDIR}/FILES.tmp"
+ fi
+
+ if ! test -e "${WORKDIR}/PACKAGES.tmp" -a "${I18N_STATS}" = 1
+ then
+ echo "No translations for package [${PN}]" >> /tmp/oe-i18n-missing.log
+ else
+ echo "Using [$ts_found ] for package [${PN}]" >> /tmp/oe-i18n.log
+ fi
+
+	# While this might not be very elegant, it saves a _ton_ of space (~30Mb) for
+ # each opie package.
+ for file in $(ls */*.ts | egrep -v "`echo "$ts_found"| sed "s/^\ //;s/\ /\|/"`")
+ do
+ rm "$file"
+ done
+
+ return 0
+}
+
+addtask build_opie_i18n before do_compile
+addtask build_opie_i18n_data after do_build_opie_i18n before do_compile
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
new file mode 100644
index 0000000000..9edcc1e5ed
--- /dev/null
+++ b/meta/classes/package.bbclass
@@ -0,0 +1,729 @@
+def legitimize_package_name(s):
+ return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
+
+STAGING_PKGMAPS_DIR ?= "${STAGING_DIR}/pkgmaps"
+
+def add_package_mapping (pkg, new_name, d):
+ import bb, os
+
+ def encode(str):
+ import codecs
+ c = codecs.getencoder("string_escape")
+ return c(str)[0]
+
+ pmap_dir = bb.data.getVar('STAGING_PKGMAPS_DIR', d, 1)
+
+ bb.mkdirhier(pmap_dir)
+
+ data_file = os.path.join(pmap_dir, pkg)
+
+ f = open(data_file, 'w')
+ f.write("%s\n" % encode(new_name))
+ f.close()
+
+def get_package_mapping (pkg, d):
+ import bb, os
+
+ def decode(str):
+ import codecs
+ c = codecs.getdecoder("string_escape")
+ return c(str)[0]
+
+ data_file = bb.data.expand("${STAGING_PKGMAPS_DIR}/%s" % pkg, d)
+
+ if os.access(data_file, os.R_OK):
+ f = file(data_file, 'r')
+ lines = f.readlines()
+ f.close()
+ for l in lines:
+ return decode(l).strip()
+ return pkg
+
+def runtime_mapping_rename (varname, d):
+ import bb, os
+
+ #bb.note("%s before: %s" % (varname, bb.data.getVar(varname, d, 1)))
+
+ new_depends = []
+ for depend in explode_deps(bb.data.getVar(varname, d, 1) or ""):
+ # Have to be careful with any version component of the depend
+ split_depend = depend.split(' (')
+ new_depend = get_package_mapping(split_depend[0].strip(), d)
+ if len(split_depend) > 1:
+ new_depends.append("%s (%s" % (new_depend, split_depend[1]))
+ else:
+ new_depends.append(new_depend)
+
+ bb.data.setVar(varname, " ".join(new_depends) or None, d)
+
+ #bb.note("%s after: %s" % (varname, bb.data.getVar(varname, d, 1)))
+
+python package_mapping_rename_hook () {
+ runtime_mapping_rename("RDEPENDS", d)
+ runtime_mapping_rename("RRECOMMENDS", d)
+ runtime_mapping_rename("RSUGGESTS", d)
+ runtime_mapping_rename("RPROVIDES", d)
+ runtime_mapping_rename("RREPLACES", d)
+ runtime_mapping_rename("RCONFLICTS", d)
+}
+
+
+def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None):
+ import os, os.path, bb
+
+ dvar = bb.data.getVar('D', d, 1)
+ if not dvar:
+ bb.error("D not defined")
+ return
+
+ packages = bb.data.getVar('PACKAGES', d, 1).split()
+ if not packages:
+ # nothing to do
+ return
+
+ if postinst:
+ postinst = '#!/bin/sh\n' + postinst + '\n'
+ if postrm:
+ postrm = '#!/bin/sh\n' + postrm + '\n'
+ if not recursive:
+ objs = os.listdir(dvar + root)
+ else:
+ objs = []
+ for walkroot, dirs, files in os.walk(dvar + root):
+ for file in files:
+ relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
+ if relpath:
+ objs.append(relpath)
+
+ if extra_depends == None:
+ extra_depends = bb.data.getVar('PKG_' + packages[0], d, 1) or packages[0]
+
+ for o in objs:
+ import re, stat
+ if match_path:
+ m = re.match(file_regex, o)
+ else:
+ m = re.match(file_regex, os.path.basename(o))
+
+ if not m:
+ continue
+ f = os.path.join(dvar + root, o)
+ mode = os.lstat(f).st_mode
+ if not (stat.S_ISREG(mode) or (allow_dirs and stat.S_ISDIR(mode))):
+ continue
+ on = legitimize_package_name(m.group(1))
+ pkg = output_pattern % on
+ if not pkg in packages:
+ if prepend:
+ packages = [pkg] + packages
+ else:
+ packages.append(pkg)
+ the_files = [os.path.join(root, o)]
+ if aux_files_pattern:
+ if type(aux_files_pattern) is list:
+ for fp in aux_files_pattern:
+ the_files.append(fp % on)
+ else:
+ the_files.append(aux_files_pattern % on)
+ if aux_files_pattern_verbatim:
+ if type(aux_files_pattern_verbatim) is list:
+ for fp in aux_files_pattern_verbatim:
+ the_files.append(fp % m.group(1))
+ else:
+ the_files.append(aux_files_pattern_verbatim % m.group(1))
+ bb.data.setVar('FILES_' + pkg, " ".join(the_files), d)
+ if extra_depends != '':
+ the_depends = bb.data.getVar('RDEPENDS_' + pkg, d, 1)
+ if the_depends:
+ the_depends = '%s %s' % (the_depends, extra_depends)
+ else:
+ the_depends = extra_depends
+ bb.data.setVar('RDEPENDS_' + pkg, the_depends, d)
+ bb.data.setVar('DESCRIPTION_' + pkg, description % on, d)
+ if postinst:
+ bb.data.setVar('pkg_postinst_' + pkg, postinst, d)
+ if postrm:
+ bb.data.setVar('pkg_postrm_' + pkg, postrm, d)
+ else:
+ oldfiles = bb.data.getVar('FILES_' + pkg, d, 1)
+ if not oldfiles:
+ bb.fatal("Package '%s' exists but has no files" % pkg)
+ bb.data.setVar('FILES_' + pkg, oldfiles + " " + os.path.join(root, o), d)
+ if callable(hook):
+ hook(f, pkg, file_regex, output_pattern, m.group(1))
+
+ bb.data.setVar('PACKAGES', ' '.join(packages), d)
+
+# Function to strip a single file, called from RUNSTRIP below
+# A working 'file' (one which works on the target architecture)
+# is necessary for this stuff to work.
+#PACKAGE_DEPENDS ?= "file-native"
+#DEPENDS_prepend =+ "${PACKAGE_DEPENDS} "
+#FIXME: this should be "" when any errors are gone!
+IGNORE_STRIP_ERRORS ?= "1"
+
+runstrip() {
+ local ro st
+ st=0
+ if { file "$1" || {
+ oewarn "file $1: failed (forced strip)" >&2
+ echo 'not stripped'
+ }
+ } | grep -q 'not stripped'
+ then
+ oenote "${STRIP} $1"
+ ro=
+ test -w "$1" || {
+ ro=1
+ chmod +w "$1"
+ }
+ '${STRIP}' "$1"
+ st=$?
+ test -n "$ro" && chmod -w "$1"
+ if test $st -ne 0
+ then
+ oewarn "runstrip: ${STRIP} $1: strip failed" >&2
+ if [ x${IGNORE_STRIP_ERRORS} == x1 ]
+ then
+ #FIXME: remove this, it's for error detection
+ if file "$1" 2>/dev/null >&2
+ then
+ (oefatal "${STRIP} $1: command failed" >/dev/tty)
+ else
+ (oefatal "file $1: command failed" >/dev/tty)
+ fi
+ st=0
+ fi
+ fi
+ else
+ oenote "runstrip: skip $1"
+ fi
+ return $st
+}
+
+python populate_packages () {
+ import glob, stat, errno, re
+
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+ if not workdir:
+ bb.error("WORKDIR not defined, unable to package")
+ return
+
+ import os # path manipulations
+ outdir = bb.data.getVar('DEPLOY_DIR', d, 1)
+ if not outdir:
+ bb.error("DEPLOY_DIR not defined, unable to package")
+ return
+ bb.mkdirhier(outdir)
+
+ dvar = bb.data.getVar('D', d, 1)
+ if not dvar:
+ bb.error("D not defined, unable to package")
+ return
+ bb.mkdirhier(dvar)
+
+ packages = bb.data.getVar('PACKAGES', d, 1)
+ if not packages:
+ bb.debug(1, "PACKAGES not defined, nothing to package")
+ return
+
+ pn = bb.data.getVar('PN', d, 1)
+ if not pn:
+ bb.error("PN not defined")
+ return
+
+ os.chdir(dvar)
+
+ def isexec(path):
+ try:
+ s = os.stat(path)
+ except (os.error, AttributeError):
+ return 0
+ return (s[stat.ST_MODE] & stat.S_IEXEC)
+
+ # Sanity check PACKAGES for duplicates - should be moved to
+    # sanity.bbclass once we have the infrastructure
+ pkgs = []
+ for pkg in packages.split():
+ if pkg in pkgs:
+            bb.error("%s is listed in PACKAGES multiple times. Undefined behaviour will result." % pkg)
+ pkgs += pkg
+
+ for pkg in packages.split():
+ localdata = bb.data.createCopy(d)
+ root = os.path.join(workdir, "install", pkg)
+
+ os.system('rm -rf %s' % root)
+
+ bb.data.setVar('ROOT', '', localdata)
+ bb.data.setVar('ROOT_%s' % pkg, root, localdata)
+ pkgname = bb.data.getVar('PKG_%s' % pkg, localdata, 1)
+ if not pkgname:
+ pkgname = pkg
+ bb.data.setVar('PKG', pkgname, localdata)
+
+ overrides = bb.data.getVar('OVERRIDES', localdata, 1)
+ if not overrides:
+ raise bb.build.FuncFailed('OVERRIDES not defined')
+ bb.data.setVar('OVERRIDES', overrides+':'+pkg, localdata)
+
+ bb.data.update_data(localdata)
+
+ root = bb.data.getVar('ROOT', localdata, 1)
+ bb.mkdirhier(root)
+ filesvar = bb.data.getVar('FILES', localdata, 1) or ""
+ files = filesvar.split()
+ stripfunc = ""
+ for file in files:
+ if os.path.isabs(file):
+ file = '.' + file
+ if not os.path.islink(file):
+ if os.path.isdir(file):
+ newfiles = [ os.path.join(file,x) for x in os.listdir(file) ]
+ if newfiles:
+ files += newfiles
+ continue
+ globbed = glob.glob(file)
+ if globbed:
+ if [ file ] != globbed:
+ files += globbed
+ continue
+ if (not os.path.islink(file)) and (not os.path.exists(file)):
+ continue
+ fpath = os.path.join(root,file)
+ dpath = os.path.dirname(fpath)
+ bb.mkdirhier(dpath)
+ if (bb.data.getVar('INHIBIT_PACKAGE_STRIP', d, 1) != '1') and not os.path.islink(file) and isexec(file):
+ stripfunc += "\trunstrip %s || st=1\n" % fpath
+ ret = bb.movefile(file,fpath)
+ if ret is None or ret == 0:
+ raise bb.build.FuncFailed("File population failed")
+ if not stripfunc == "":
+ from bb import build
+ # strip
+ bb.data.setVar('RUNSTRIP', '\tlocal st\n\tst=0\n%s\treturn $st' % stripfunc, localdata)
+ bb.data.setVarFlag('RUNSTRIP', 'func', 1, localdata)
+ bb.build.exec_func('RUNSTRIP', localdata)
+ del localdata
+ os.chdir(workdir)
+
+ unshipped = []
+ for root, dirs, files in os.walk(dvar):
+ for f in files:
+ path = os.path.join(root[len(dvar):], f)
+ unshipped.append(path)
+
+ if unshipped != []:
+ bb.note("the following files were installed but not shipped in any package:")
+ for f in unshipped:
+ bb.note(" " + f)
+
+ bb.build.exec_func("package_name_hook", d)
+
+ for pkg in packages.split():
+ pkgname = bb.data.getVar('PKG_%s' % pkg, d, 1)
+ if pkgname is None:
+ bb.data.setVar('PKG_%s' % pkg, pkg, d)
+ else:
+ add_package_mapping(pkg, pkgname, d)
+
+ dangling_links = {}
+ pkg_files = {}
+ for pkg in packages.split():
+ dangling_links[pkg] = []
+ pkg_files[pkg] = []
+ inst_root = os.path.join(workdir, "install", pkg)
+ for root, dirs, files in os.walk(inst_root):
+ for f in files:
+ path = os.path.join(root, f)
+ rpath = path[len(inst_root):]
+ pkg_files[pkg].append(rpath)
+ try:
+ s = os.stat(path)
+ except OSError, (err, strerror):
+ if err != errno.ENOENT:
+ raise
+ target = os.readlink(path)
+ if target[0] != '/':
+ target = os.path.join(root[len(inst_root):], target)
+ dangling_links[pkg].append(os.path.normpath(target))
+
+ for pkg in packages.split():
+ rdepends = explode_deps(bb.data.getVar('RDEPENDS_' + pkg, d, 1) or bb.data.getVar('RDEPENDS', d, 1) or "")
+ for l in dangling_links[pkg]:
+ found = False
+ bb.debug(1, "%s contains dangling link %s" % (pkg, l))
+ for p in packages.split():
+ for f in pkg_files[p]:
+ if f == l:
+ found = True
+ bb.debug(1, "target found in %s" % p)
+ if p == pkg:
+ break
+ dp = bb.data.getVar('PKG_' + p, d, 1) or p
+ if not dp in rdepends:
+ rdepends.append(dp)
+ break
+ if found == False:
+ bb.note("%s contains dangling symlink to %s" % (pkg, l))
+ bb.data.setVar('RDEPENDS_' + pkg, " " + " ".join(rdepends), d)
+
+ def write_if_exists(f, pkg, var):
+ def encode(str):
+ import codecs
+ c = codecs.getencoder("string_escape")
+ return c(str)[0]
+
+ val = bb.data.getVar('%s_%s' % (var, pkg), d, 1)
+ if val:
+ f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
+
+ data_file = os.path.join(workdir, "install", pn + ".package")
+ f = open(data_file, 'w')
+ f.write("PACKAGES: %s\n" % packages)
+ for pkg in packages.split():
+ write_if_exists(f, pkg, 'DESCRIPTION')
+ write_if_exists(f, pkg, 'RDEPENDS')
+ write_if_exists(f, pkg, 'RPROVIDES')
+ write_if_exists(f, pkg, 'PKG')
+ write_if_exists(f, pkg, 'ALLOW_EMPTY')
+ write_if_exists(f, pkg, 'FILES')
+ write_if_exists(f, pkg, 'pkg_postinst')
+ write_if_exists(f, pkg, 'pkg_postrm')
+ write_if_exists(f, pkg, 'pkg_preinst')
+ write_if_exists(f, pkg, 'pkg_prerm')
+ f.close()
+ bb.build.exec_func("read_subpackage_metadata", d)
+}
+
+ldconfig_postinst_fragment() {
+if [ x"$D" = "x" ]; then
+ ldconfig
+fi
+}
+
+python package_do_shlibs() {
+ import os, re, os.path
+
+ exclude_shlibs = bb.data.getVar('EXCLUDE_FROM_SHLIBS', d, 0)
+ if exclude_shlibs:
+ bb.note("not generating shlibs")
+ return
+
+ lib_re = re.compile("^lib.*\.so")
+ libdir_re = re.compile(".*/lib$")
+
+ packages = bb.data.getVar('PACKAGES', d, 1)
+ if not packages:
+ bb.debug(1, "no packages to build; not calculating shlibs")
+ return
+
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+ if not workdir:
+ bb.error("WORKDIR not defined")
+ return
+
+ staging = bb.data.getVar('STAGING_DIR', d, 1)
+ if not staging:
+ bb.error("STAGING_DIR not defined")
+ return
+
+ ver = bb.data.getVar('PV', d, 1)
+ if not ver:
+ bb.error("PV not defined")
+ return
+
+ target_sys = bb.data.getVar('TARGET_SYS', d, 1)
+ if not target_sys:
+ bb.error("TARGET_SYS not defined")
+ return
+
+ shlibs_dir = os.path.join(staging, target_sys, "shlibs")
+ old_shlibs_dir = os.path.join(staging, "shlibs")
+ bb.mkdirhier(shlibs_dir)
+
+ needed = {}
+ for pkg in packages.split():
+ needs_ldconfig = False
+ bb.debug(2, "calculating shlib provides for %s" % pkg)
+
+ pkgname = bb.data.getVar('PKG_%s' % pkg, d, 1)
+ if not pkgname:
+ pkgname = pkg
+
+ needed[pkg] = []
+ sonames = list()
+ top = os.path.join(workdir, "install", pkg)
+ for root, dirs, files in os.walk(top):
+ for file in files:
+ soname = None
+ path = os.path.join(root, file)
+ if os.access(path, os.X_OK) or lib_re.match(file):
+ cmd = (bb.data.getVar('BUILD_PREFIX', d, 1) or "") + "objdump -p " + path + " 2>/dev/null"
+ fd = os.popen(cmd)
+ lines = fd.readlines()
+ fd.close()
+ for l in lines:
+ m = re.match("\s+NEEDED\s+([^\s]*)", l)
+ if m:
+ needed[pkg].append(m.group(1))
+ m = re.match("\s+SONAME\s+([^\s]*)", l)
+ if m and not m.group(1) in sonames:
+ sonames.append(m.group(1))
+ if m and libdir_re.match(root):
+ needs_ldconfig = True
+ shlibs_file = os.path.join(shlibs_dir, pkgname + ".list")
+ if os.path.exists(shlibs_file):
+ os.remove(shlibs_file)
+ shver_file = os.path.join(shlibs_dir, pkgname + ".ver")
+ if os.path.exists(shver_file):
+ os.remove(shver_file)
+ if len(sonames):
+ fd = open(shlibs_file, 'w')
+ for s in sonames:
+ fd.write(s + '\n')
+ fd.close()
+ fd = open(shver_file, 'w')
+ fd.write(ver + '\n')
+ fd.close()
+ if needs_ldconfig:
+ bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
+ postinst = bb.data.getVar('pkg_postinst_%s' % pkg, d, 1) or bb.data.getVar('pkg_postinst', d, 1)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += bb.data.getVar('ldconfig_postinst_fragment', d, 1)
+ bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
+
+ shlib_provider = {}
+ list_re = re.compile('^(.*)\.list$')
+ for dir in [old_shlibs_dir, shlibs_dir]:
+ if not os.path.exists(dir):
+ continue
+ for file in os.listdir(dir):
+ m = list_re.match(file)
+ if m:
+ dep_pkg = m.group(1)
+ fd = open(os.path.join(dir, file))
+ lines = fd.readlines()
+ fd.close()
+ ver_file = os.path.join(dir, dep_pkg + '.ver')
+ lib_ver = None
+ if os.path.exists(ver_file):
+ fd = open(ver_file)
+ lib_ver = fd.readline().rstrip()
+ fd.close()
+ for l in lines:
+ shlib_provider[l.rstrip()] = (dep_pkg, lib_ver)
+
+
+ for pkg in packages.split():
+ bb.debug(2, "calculating shlib requirements for %s" % pkg)
+
+ p_pkg = bb.data.getVar("PKG_%s" % pkg, d, 1) or pkg
+
+ deps = list()
+ for n in needed[pkg]:
+ if n in shlib_provider.keys():
+ (dep_pkg, ver_needed) = shlib_provider[n]
+
+ if dep_pkg == p_pkg:
+ continue
+
+ if ver_needed:
+ dep = "%s (>= %s)" % (dep_pkg, ver_needed)
+ else:
+ dep = dep_pkg
+ if not dep in deps:
+ deps.append(dep)
+ else:
+ bb.note("Couldn't find shared library provider for %s" % n)
+
+
+ deps_file = os.path.join(workdir, "install", pkg + ".shlibdeps")
+ if os.path.exists(deps_file):
+ os.remove(deps_file)
+ if len(deps):
+ fd = open(deps_file, 'w')
+ for dep in deps:
+ fd.write(dep + '\n')
+ fd.close()
+}
+
+python package_do_pkgconfig () {
+ import re, os
+
+ packages = bb.data.getVar('PACKAGES', d, 1)
+ if not packages:
+ bb.debug(1, "no packages to build; not calculating pkgconfig dependencies")
+ return
+
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+ if not workdir:
+ bb.error("WORKDIR not defined")
+ return
+
+ staging = bb.data.getVar('STAGING_DIR', d, 1)
+ if not staging:
+ bb.error("STAGING_DIR not defined")
+ return
+
+ target_sys = bb.data.getVar('TARGET_SYS', d, 1)
+ if not target_sys:
+ bb.error("TARGET_SYS not defined")
+ return
+
+ shlibs_dir = os.path.join(staging, target_sys, "shlibs")
+ old_shlibs_dir = os.path.join(staging, "shlibs")
+ bb.mkdirhier(shlibs_dir)
+
+ pc_re = re.compile('(.*)\.pc$')
+ var_re = re.compile('(.*)=(.*)')
+ field_re = re.compile('(.*): (.*)')
+
+ pkgconfig_provided = {}
+ pkgconfig_needed = {}
+ for pkg in packages.split():
+ pkgconfig_provided[pkg] = []
+ pkgconfig_needed[pkg] = []
+ top = os.path.join(workdir, "install", pkg)
+ for root, dirs, files in os.walk(top):
+ for file in files:
+ m = pc_re.match(file)
+ if m:
+ pd = bb.data.init()
+ name = m.group(1)
+ pkgconfig_provided[pkg].append(name)
+ path = os.path.join(root, file)
+ if not os.access(path, os.R_OK):
+ continue
+ f = open(path, 'r')
+ lines = f.readlines()
+ f.close()
+ for l in lines:
+ m = var_re.match(l)
+ if m:
+ name = m.group(1)
+ val = m.group(2)
+ bb.data.setVar(name, bb.data.expand(val, pd), pd)
+ continue
+ m = field_re.match(l)
+ if m:
+ hdr = m.group(1)
+ exp = bb.data.expand(m.group(2), pd)
+ if hdr == 'Requires':
+ pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
+
+ for pkg in packages.split():
+ ppkg = bb.data.getVar("PKG_" + pkg, d, 1) or pkg
+ pkgs_file = os.path.join(shlibs_dir, ppkg + ".pclist")
+ if os.path.exists(pkgs_file):
+ os.remove(pkgs_file)
+ if pkgconfig_provided[pkg] != []:
+ f = open(pkgs_file, 'w')
+ for p in pkgconfig_provided[pkg]:
+ f.write('%s\n' % p)
+ f.close()
+
+ for dir in [old_shlibs_dir, shlibs_dir]:
+ if not os.path.exists(dir):
+ continue
+ for file in os.listdir(dir):
+ m = re.match('^(.*)\.pclist$', file)
+ if m:
+ pkg = m.group(1)
+ fd = open(os.path.join(dir, file))
+ lines = fd.readlines()
+ fd.close()
+ pkgconfig_provided[pkg] = []
+ for l in lines:
+ pkgconfig_provided[pkg].append(l.rstrip())
+
+ for pkg in packages.split():
+ deps = []
+ for n in pkgconfig_needed[pkg]:
+ found = False
+ for k in pkgconfig_provided.keys():
+ if n in pkgconfig_provided[k]:
+ if k != pkg and not (k in deps):
+ deps.append(k)
+ found = True
+ if found == False:
+ bb.note("couldn't find pkgconfig module '%s' in any package" % n)
+ deps_file = os.path.join(workdir, "install", pkg + ".pcdeps")
+ if os.path.exists(deps_file):
+ os.remove(deps_file)
+ if len(deps):
+ fd = open(deps_file, 'w')
+ for dep in deps:
+ fd.write(dep + '\n')
+ fd.close()
+}
+
+python package_do_split_locales() {
+ import os
+
+ if (bb.data.getVar('PACKAGE_NO_LOCALE', d, 1) == '1'):
+ bb.debug(1, "package requested not splitting locales")
+ return
+
+ packages = (bb.data.getVar('PACKAGES', d, 1) or "").split()
+ if not packages:
+ bb.debug(1, "no packages to build; not splitting locales")
+ return
+
+ datadir = bb.data.getVar('datadir', d, 1)
+ if not datadir:
+ bb.note("datadir not defined")
+ return
+
+ dvar = bb.data.getVar('D', d, 1)
+ if not dvar:
+ bb.error("D not defined")
+ return
+
+ pn = bb.data.getVar('PN', d, 1)
+ if not pn:
+ bb.error("PN not defined")
+ return
+
+ if pn + '-locale' in packages:
+ packages.remove(pn + '-locale')
+
+ localedir = os.path.join(dvar + datadir, 'locale')
+
+ if not os.path.isdir(localedir):
+ bb.debug(1, "No locale files in this package")
+ return
+
+ locales = os.listdir(localedir)
+
+ mainpkg = packages[0]
+
+ for l in locales:
+ ln = legitimize_package_name(l)
+ pkg = pn + '-locale-' + ln
+ packages.append(pkg)
+ bb.data.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l), d)
+ bb.data.setVar('RDEPENDS_' + pkg, '${PKG_%s} virtual-locale-%s' % (mainpkg, ln), d)
+ bb.data.setVar('RPROVIDES_' + pkg, '%s-locale %s-translation' % (pn, ln), d)
+ bb.data.setVar('DESCRIPTION_' + pkg, '%s translation for %s' % (l, pn), d)
+
+ bb.data.setVar('PACKAGES', ' '.join(packages), d)
+
+ rdep = (bb.data.getVar('RDEPENDS_%s' % mainpkg, d, 1) or bb.data.getVar('RDEPENDS', d, 1) or "").split()
+ rdep.append('%s-locale*' % pn)
+ bb.data.setVar('RDEPENDS_%s' % mainpkg, ' '.join(rdep), d)
+}
+
+PACKAGEFUNCS = "do_install package_do_split_locales \
+ populate_packages package_do_shlibs \
+ package_do_pkgconfig read_shlibdeps"
+python package_do_package () {
+ for f in (bb.data.getVar('PACKAGEFUNCS', d, 1) or '').split():
+ bb.build.exec_func(f, d)
+}
+
+do_package[dirs] = "${D}"
+populate_packages[dirs] = "${D}"
+EXPORT_FUNCTIONS do_package do_shlibs do_split_locales mapping_rename_hook
+addtask package before do_build after do_populate_staging
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
new file mode 100644
index 0000000000..9ae526bb3b
--- /dev/null
+++ b/meta/classes/package_ipk.bbclass
@@ -0,0 +1,234 @@
+inherit package
+DEPENDS_prepend="${@["ipkg-utils-native ", ""][(bb.data.getVar('PACKAGES', d, 1) == '')]}"
+BOOTSTRAP_EXTRA_RDEPENDS += "ipkg-collateral ipkg ipkg-link"
+PACKAGEFUNCS += "do_package_ipk"
+
+python package_ipk_fn () {
+ from bb import data
+ bb.data.setVar('PKGFN', bb.data.getVar('PKG',d), d)
+}
+
+python package_ipk_install () {
+ import os, sys
+ pkg = bb.data.getVar('PKG', d, 1)
+ pkgfn = bb.data.getVar('PKGFN', d, 1)
+ rootfs = bb.data.getVar('IMAGE_ROOTFS', d, 1)
+ ipkdir = bb.data.getVar('DEPLOY_DIR_IPK', d, 1)
+ stagingdir = bb.data.getVar('STAGING_DIR', d, 1)
+ tmpdir = bb.data.getVar('TMPDIR', d, 1)
+
+ if None in (pkg,pkgfn,rootfs):
+ raise bb.build.FuncFailed("missing variables (one or more of PKG, PKGFN, IMAGEROOTFS)")
+ try:
+ bb.mkdirhier(rootfs)
+ os.chdir(rootfs)
+ except OSError:
+ (type, value, traceback) = sys.exc_info()
+ print value
+ raise bb.build.FuncFailed
+
+ # Generate ipk.conf if it or the stamp doesnt exist
+ conffile = os.path.join(stagingdir,"ipkg.conf")
+ if not os.access(conffile, os.R_OK):
+ ipkg_archs = bb.data.getVar('IPKG_ARCHS',d)
+ if ipkg_archs is None:
+ bb.error("IPKG_ARCHS missing")
+ raise FuncFailed
+ ipkg_archs = ipkg_archs.split()
+ arch_priority = 1
+
+ f = open(conffile,"w")
+ for arch in ipkg_archs:
+ f.write("arch %s %s\n" % ( arch, arch_priority ))
+ arch_priority += 1
+ f.write("src local file:%s" % ipkdir)
+ f.close()
+
+
+ if (not os.access(os.path.join(ipkdir,"Packages"), os.R_OK) or
+ not os.access(os.path.join(os.path.join(tmpdir, "stamps"),"do_packages"),os.R_OK)):
+ ret = os.system('ipkg-make-index -p %s %s ' % (os.path.join(ipkdir, "Packages"), ipkdir))
+ if (ret != 0 ):
+ raise bb.build.FuncFailed
+ f=open(os.path.join(os.path.join(tmpdir, "stamps"),"do_packages"),"w")
+ f.close()
+
+ ret = os.system('ipkg-cl -o %s -f %s update' % (rootfs, conffile))
+ ret = os.system('ipkg-cl -o %s -f %s install %s' % (rootfs, conffile, pkgfn))
+ if (ret != 0 ):
+ raise bb.build.FuncFailed
+}
+
+python do_package_ipk () {
+	# For every package in PACKAGES, write the CONTROL metadata (control,
+	# maintainer scripts, conffiles) under its install root and invoke
+	# IPKGBUILDCMD to produce the .ipk in DEPLOY_DIR_IPK.
+	import copy # to back up env data
+	import sys
+	import re
+
+	workdir = bb.data.getVar('WORKDIR', d, 1)
+	if not workdir:
+		bb.error("WORKDIR not defined, unable to package")
+		return
+
+	import os # path manipulations
+	outdir = bb.data.getVar('DEPLOY_DIR_IPK', d, 1)
+	if not outdir:
+		bb.error("DEPLOY_DIR_IPK not defined, unable to package")
+		return
+	bb.mkdirhier(outdir)
+
+	dvar = bb.data.getVar('D', d, 1)
+	if not dvar:
+		bb.error("D not defined, unable to package")
+		return
+	bb.mkdirhier(dvar)
+
+	packages = bb.data.getVar('PACKAGES', d, 1)
+	if not packages:
+		bb.debug(1, "PACKAGES not defined, nothing to package")
+		return
+
+	tmpdir = bb.data.getVar('TMPDIR', d, 1)
+	# Invalidate the packages file so package_ipk_install re-indexes the feed.
+	if os.access(os.path.join(os.path.join(tmpdir, "stamps"),"do_packages"),os.R_OK):
+		os.unlink(os.path.join(os.path.join(tmpdir, "stamps"),"do_packages"))
+
+	# NOTE(review): 'packages' is a string here, so this comparison against
+	# a list can never be true (dead code); the empty case is already
+	# handled by the 'if not packages' check above.
+	if packages == []:
+		bb.debug(1, "No packages; nothing to do")
+		return
+
+	for pkg in packages.split():
+		# Work on a per-package copy of the datastore with the package name
+		# appended to OVERRIDES, so PKG_<pkg>/FILES_<pkg> style overrides apply.
+		localdata = bb.data.createCopy(d)
+		root = "%s/install/%s" % (workdir, pkg)
+
+		bb.data.setVar('ROOT', '', localdata)
+		bb.data.setVar('ROOT_%s' % pkg, root, localdata)
+		pkgname = bb.data.getVar('PKG_%s' % pkg, localdata, 1)
+		if not pkgname:
+			pkgname = pkg
+		bb.data.setVar('PKG', pkgname, localdata)
+
+		overrides = bb.data.getVar('OVERRIDES', localdata)
+		if not overrides:
+			raise bb.build.FuncFailed('OVERRIDES not defined')
+		overrides = bb.data.expand(overrides, localdata)
+		bb.data.setVar('OVERRIDES', overrides + ':' + pkg, localdata)
+
+		bb.data.update_data(localdata)
+		basedir = os.path.join(os.path.dirname(root))
+		pkgoutdir = outdir
+		bb.mkdirhier(pkgoutdir)
+		os.chdir(root)
+		# Payload file list, excluding the CONTROL metadata directory.
+		from glob import glob
+		g = glob('*')
+		try:
+			del g[g.index('CONTROL')]
+			del g[g.index('./CONTROL')]
+		except ValueError:
+			pass
+		if not g and not bb.data.getVar('ALLOW_EMPTY', localdata):
+			from bb import note
+			note("Not creating empty archive for %s-%s-%s" % (pkg, bb.data.getVar('PV', localdata, 1), bb.data.getVar('PR', localdata, 1)))
+			continue
+		controldir = os.path.join(root, 'CONTROL')
+		bb.mkdirhier(controldir)
+		try:
+			# NOTE(review): in Python 2 file() raises IOError, not OSError,
+			# so this handler likely never fires -- confirm intent.
+			ctrlfile = file(os.path.join(controldir, 'control'), 'w')
+		except OSError:
+			raise bb.build.FuncFailed("unable to open control file for writing.")
+
+		# Mandatory control fields: (format string, [variables]) pairs.
+		fields = []
+		fields.append(["Version: %s-%s\n", ['PV', 'PR']])
+		fields.append(["Description: %s\n", ['DESCRIPTION']])
+		fields.append(["Section: %s\n", ['SECTION']])
+		fields.append(["Priority: %s\n", ['PRIORITY']])
+		fields.append(["Maintainer: %s\n", ['MAINTAINER']])
+		fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
+		fields.append(["OE: %s\n", ['P']])
+		fields.append(["Homepage: %s\n", ['HOMEPAGE']])
+
+		# Fetch the (expanded) values of a list of variables, in order.
+		def pullData(l, d):
+			l2 = []
+			for i in l:
+				l2.append(bb.data.getVar(i, d, 1))
+			return l2
+
+		ctrlfile.write("Package: %s\n" % pkgname)
+		# check for required fields
+		try:
+			for (c, fs) in fields:
+				for f in fs:
+					if bb.data.getVar(f, localdata) is None:
+						raise KeyError(f)
+				ctrlfile.write(c % tuple(pullData(fs, localdata)))
+		except KeyError:
+			(type, value, traceback) = sys.exc_info()
+			ctrlfile.close()
+			raise bb.build.FuncFailed("Missing field for ipk generation: %s" % value)
+		# more fields
+
+		# Give recipes a chance to rewrite dependency names before emission.
+		bb.build.exec_func("mapping_rename_hook", localdata)
+
+		rdepends = explode_deps(bb.data.getVar("RDEPENDS", localdata, 1) or "")
+		rrecommends = explode_deps(bb.data.getVar("RRECOMMENDS", localdata, 1) or "")
+		rsuggests = (bb.data.getVar("RSUGGESTS", localdata, 1) or "").split()
+		rprovides = (bb.data.getVar("RPROVIDES", localdata, 1) or "").split()
+		rreplaces = (bb.data.getVar("RREPLACES", localdata, 1) or "").split()
+		rconflicts = (bb.data.getVar("RCONFLICTS", localdata, 1) or "").split()
+		if rdepends:
+			ctrlfile.write("Depends: %s\n" % ", ".join(rdepends))
+		if rsuggests:
+			ctrlfile.write("Suggests: %s\n" % ", ".join(rsuggests))
+		if rrecommends:
+			ctrlfile.write("Recommends: %s\n" % ", ".join(rrecommends))
+		if rprovides:
+			ctrlfile.write("Provides: %s\n" % ", ".join(rprovides))
+		if rreplaces:
+			ctrlfile.write("Replaces: %s\n" % ", ".join(rreplaces))
+		if rconflicts:
+			ctrlfile.write("Conflicts: %s\n" % ", ".join(rconflicts))
+		src_uri = bb.data.getVar("SRC_URI", localdata, 1)
+		if src_uri:
+			src_uri = re.sub("\s+", " ", src_uri)
+			ctrlfile.write("Source: %s\n" % " ".join(src_uri.split()))
+		ctrlfile.close()
+
+		# Emit any defined maintainer scripts, executable as required by ipkg.
+		for script in ["preinst", "postinst", "prerm", "postrm"]:
+			scriptvar = bb.data.getVar('pkg_%s' % script, localdata, 1)
+			if not scriptvar:
+				continue
+			try:
+				scriptfile = file(os.path.join(controldir, script), 'w')
+			except OSError:
+				raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
+			scriptfile.write(scriptvar)
+			scriptfile.close()
+			os.chmod(os.path.join(controldir, script), 0755)
+
+		conffiles_str = bb.data.getVar("CONFFILES", localdata, 1)
+		if conffiles_str:
+			try:
+				conffiles = file(os.path.join(controldir, 'conffiles'), 'w')
+			except OSError:
+				raise bb.build.FuncFailed("unable to open conffiles for writing.")
+			for f in conffiles_str.split():
+				conffiles.write('%s\n' % f)
+			conffiles.close()
+
+		# Build the .ipk from the parent dir so ipkg-build sees <pkg>/ as root.
+		os.chdir(basedir)
+		ret = os.system("PATH=\"%s\" %s %s %s" % (bb.data.getVar("PATH", localdata, 1),
+			bb.data.getVar("IPKGBUILDCMD",d,1), pkg, pkgoutdir))
+		if ret != 0:
+			raise bb.build.FuncFailed("ipkg-build execution failed")
+
+		# Clean the CONTROL metadata back out of the install tree.
+		for script in ["preinst", "postinst", "prerm", "postrm", "control" ]:
+			scriptfile = os.path.join(controldir, script)
+			try:
+				os.remove(scriptfile)
+			except OSError:
+				pass
+		try:
+			os.rmdir(controldir)
+		except OSError:
+			pass
+		del localdata
+}
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
new file mode 100644
index 0000000000..c29ab5f423
--- /dev/null
+++ b/meta/classes/package_rpm.bbclass
@@ -0,0 +1,133 @@
+inherit package
+inherit rpm_core
+
+RPMBUILD="rpmbuild --short-circuit ${RPMOPTS}"
+PACKAGEFUNCS += "do_package_rpm"
+
+python write_specfile() {
+	# Generate a minimal RPM .spec (OUTSPECFILE) from datastore variables,
+	# run rpmbuild -bb on it, and move the resulting rpm to DEPLOY_DIR_RPM.
+	from bb import data, build
+	import sys
+	# Datastore variable -> spec tag; entries starting with '%' become
+	# spec sections rather than header tags.
+	out_vartranslate = {
+		"PKG": "Name",
+		"PV": "Version",
+		"PR": "Release",
+		"DESCRIPTION": "%description",
+		"ROOT": "BuildRoot",
+		"LICENSE": "License",
+		"SECTION": "Group",
+	}
+
+	root = bb.data.getVar('ROOT', d)
+
+	# get %files: keep only FILES entries that actually exist under ROOT.
+	filesvar = bb.data.expand(bb.data.getVar('FILES', d), d) or ""
+	from glob import glob
+	files = filesvar.split()
+	todelete = []
+	# NOTE(review): this loop rewrites 'files' entries while iterating over
+	# the same list; it appears to work because only in-place replacement
+	# happens here, but confirm before touching.
+	for file in files:
+		if file[0] == '.':
+			newfile = file[1:]
+			files[files.index(file)] = newfile
+			file = newfile
+		else:
+			newfile = file
+		realfile = os.path.join(root, './'+file)
+		if not glob(realfile):
+			todelete.append(files[files.index(newfile)])
+	for r in todelete:
+		try:
+			del files[files.index(r)]
+		except ValueError:
+			pass
+	if not files:
+		from bb import note
+		note("Not creating empty archive for %s-%s-%s" % (bb.data.getVar('PKG',d, 1), bb.data.getVar('PV', d, 1), bb.data.getVar('PR', d, 1)))
+		return
+
+	# output .spec using this metadata store
+	try:
+		# 'file' was shadowed by the loop variable above; re-import the
+		# builtin. The dummy OSError funnels a missing OUTSPECFILE into
+		# the same failure path as an unwritable file.
+		from __builtin__ import file
+		if not bb.data.getVar('OUTSPECFILE', d):
+			raise OSError('eek!')
+		specfile = file(bb.data.getVar('OUTSPECFILE', d), 'w')
+	except OSError:
+		raise bb.build.FuncFailed("unable to open spec file for writing.")
+
+# fd = sys.__stdout__
+	fd = specfile
+	# First the plain header tags...
+	for var in out_vartranslate.keys():
+		if out_vartranslate[var][0] == "%":
+			continue
+		fd.write("%s\t: %s\n" % (out_vartranslate[var], bb.data.getVar(var, d)))
+	fd.write("Summary\t: .\n")
+
+	# ...then the %-sections (e.g. %description).
+	for var in out_vartranslate.keys():
+		if out_vartranslate[var][0] != "%":
+			continue
+		fd.write(out_vartranslate[var] + "\n")
+		fd.write(bb.data.getVar(var, d) + "\n\n")
+
+	fd.write("%files\n")
+	for file in files:
+		fd.write("%s\n" % file)
+
+	fd.close()
+
+	# call out rpm -bb on the .spec, thereby creating an rpm
+
+	bb.data.setVar('BUILDSPEC', "${RPMBUILD} -bb ${OUTSPECFILE}\n", d)
+	bb.data.setVarFlag('BUILDSPEC', 'func', '1', d)
+	bb.build.exec_func('BUILDSPEC', d)
+
+	# move the rpm into the pkgoutdir
+	rpm = bb.data.expand('${RPMBUILDPATH}/RPMS/${TARGET_ARCH}/${PKG}-${PV}-${PR}.${TARGET_ARCH}.rpm', d)
+	outrpm = bb.data.expand('${DEPLOY_DIR_RPM}/${PKG}-${PV}-${PR}.${TARGET_ARCH}.rpm', d)
+	bb.movefile(rpm, outrpm)
+}
+
+python do_package_rpm () {
+ workdir = bb.data.getVar('WORKDIR', d)
+ if not workdir:
+ raise bb.build.FuncFailed("WORKDIR not defined")
+ workdir = bb.data.expand(workdir, d)
+
+ import os # path manipulations
+ outdir = bb.data.getVar('DEPLOY_DIR_RPM', d)
+ if not outdir:
+ raise bb.build.FuncFailed("DEPLOY_DIR_RPM not defined")
+ outdir = bb.data.expand(outdir, d)
+ bb.mkdirhier(outdir)
+
+ packages = bb.data.getVar('PACKAGES', d)
+ if not packages:
+ packages = "${PN}"
+ bb.data.setVar('FILES', '', d)
+ ddir = bb.data.expand(bb.data.getVar('D', d), d)
+ bb.mkdirhier(ddir)
+ bb.data.setVar(bb.data.expand('FILES_${PN}', d), ''.join([ "./%s" % x for x in os.listdir(ddir)]), d)
+ packages = bb.data.expand(packages, d)
+
+ for pkg in packages.split():
+ localdata = bb.data.createCopy(d)
+ root = "%s/install/%s" % (workdir, pkg)
+
+ bb.data.setVar('ROOT', '', localdata)
+ bb.data.setVar('ROOT_%s' % pkg, root, localdata)
+ bb.data.setVar('PKG', pkg, localdata)
+
+ overrides = bb.data.getVar('OVERRIDES', localdata)
+ if not overrides:
+ raise bb.build.FuncFailed('OVERRIDES not defined')
+ overrides = bb.data.expand(overrides, localdata)
+ bb.data.setVar('OVERRIDES', '%s:%s' % (overrides, pkg), localdata)
+
+ bb.data.update_data(localdata)
+# stuff
+ root = bb.data.getVar('ROOT', localdata)
+ basedir = os.path.dirname(root)
+ pkgoutdir = outdir
+ bb.mkdirhier(pkgoutdir)
+ bb.data.setVar('OUTSPECFILE', os.path.join(workdir, "%s.spec" % pkg), localdata)
+ bb.build.exec_func('write_specfile', localdata)
+ del localdata
+}
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
new file mode 100644
index 0000000000..359e35f113
--- /dev/null
+++ b/meta/classes/package_tar.bbclass
@@ -0,0 +1,99 @@
+inherit package
+
+PACKAGEFUNCS += "do_package_tar"
+
+python package_tar_fn () {
+ import os
+ from bb import data
+ fn = os.path.join(bb.data.getVar('DEPLOY_DIR_TAR', d), "%s-%s-%s.tar.gz" % (bb.data.getVar('PKG', d), bb.data.getVar('PV', d), bb.data.getVar('PR', d)))
+ fn = bb.data.expand(fn, d)
+ bb.data.setVar('PKGFN', fn, d)
+}
+
+python package_tar_install () {
+	# Unpack this package's tarball (PKGFN) into IMAGE_ROOTFS.
+	import os, sys
+	pkg = bb.data.getVar('PKG', d, 1)
+	pkgfn = bb.data.getVar('PKGFN', d, 1)
+	rootfs = bb.data.getVar('IMAGE_ROOTFS', d, 1)
+
+	if None in (pkg,pkgfn,rootfs):
+		bb.error("missing variables (one or more of PKG, PKGFN, IMAGEROOTFS)")
+		raise bb.build.FuncFailed
+	try:
+		bb.mkdirhier(rootfs)
+		os.chdir(rootfs)
+	except OSError:
+		(type, value, traceback) = sys.exc_info()
+		print value
+		raise bb.build.FuncFailed
+
+	if not os.access(pkgfn, os.R_OK):
+		# NOTE(review): the message says "skipping" but this raises and
+		# aborts the task -- confirm which behaviour is intended.
+		bb.debug(1, "%s does not exist, skipping" % pkgfn)
+		raise bb.build.FuncFailed
+
+	ret = os.system('zcat %s | tar -xf -' % pkgfn)
+	if ret != 0:
+		raise bb.build.FuncFailed
+}
+
+python do_package_tar () {
+	# Create one gzipped tarball per package in PACKAGES from its install
+	# root (${WORKDIR}/install/<pkg>) into DEPLOY_DIR_TAR.
+	workdir = bb.data.getVar('WORKDIR', d, 1)
+	if not workdir:
+		bb.error("WORKDIR not defined, unable to package")
+		return
+
+	import os # path manipulations
+	outdir = bb.data.getVar('DEPLOY_DIR_TAR', d, 1)
+	if not outdir:
+		bb.error("DEPLOY_DIR_TAR not defined, unable to package")
+		return
+	bb.mkdirhier(outdir)
+
+	dvar = bb.data.getVar('D', d, 1)
+	if not dvar:
+		bb.error("D not defined, unable to package")
+		return
+	bb.mkdirhier(dvar)
+
+	packages = bb.data.getVar('PACKAGES', d, 1)
+	if not packages:
+		bb.debug(1, "PACKAGES not defined, nothing to package")
+		return
+
+	for pkg in packages.split():
+		# Per-package datastore copy with <pkg> appended to OVERRIDES so
+		# package-specific variable overrides take effect.
+		localdata = bb.data.createCopy(d)
+		root = "%s/install/%s" % (workdir, pkg)
+
+		bb.data.setVar('ROOT', '', localdata)
+		bb.data.setVar('ROOT_%s' % pkg, root, localdata)
+		bb.data.setVar('PKG', pkg, localdata)
+
+		overrides = bb.data.getVar('OVERRIDES', localdata)
+		if not overrides:
+			raise bb.build.FuncFailed('OVERRIDES not defined')
+		overrides = bb.data.expand(overrides, localdata)
+		bb.data.setVar('OVERRIDES', '%s:%s' % (overrides, pkg), localdata)
+
+		bb.data.update_data(localdata)
+		# After update_data, ROOT_<pkg> has been folded into ROOT.
+		root = bb.data.getVar('ROOT', localdata)
+		bb.mkdirhier(root)
+		basedir = os.path.dirname(root)
+		pkgoutdir = outdir
+		bb.mkdirhier(pkgoutdir)
+		bb.build.exec_func('package_tar_fn', localdata)
+		tarfn = bb.data.getVar('PKGFN', localdata, 1)
+#		if os.path.exists(tarfn):
+#			del localdata
+#			continue
+		os.chdir(root)
+		from glob import glob
+		if not glob('*'):
+			bb.note("Not creating empty archive for %s-%s-%s" % (pkg, bb.data.getVar('PV', localdata, 1), bb.data.getVar('PR', localdata, 1)))
+			continue
+		# NOTE(review): unlike the ipk backend, a failed tar only logs an
+		# error and does not abort the task -- confirm this is deliberate.
+		ret = os.system("tar -czvf %s %s" % (tarfn, '.'))
+		if ret != 0:
+			bb.error("Creation of tar %s failed." % tarfn)
+		del localdata
+}
diff --git a/meta/classes/palmtop.bbclass b/meta/classes/palmtop.bbclass
new file mode 100644
index 0000000000..9d54de8748
--- /dev/null
+++ b/meta/classes/palmtop.bbclass
@@ -0,0 +1,20 @@
+# this build class sets up qmake variables to
+# * build using the Qt Windowing System (QWS)
+# * use qt
+# * link against supc++ instead of stdc++
+# * use threads, if requested via PALMTOP_USE_MULTITHREADED_QT = "yes"
+# inherit this class to build programs against libqpe
+# inherit opie if you want to build programs against libopie2
+# don't override EXTRA_QMAKEVARS_POST if you inherit this class
+
+inherit qmake
+
+# special case for DISTRO = sharprom
+CPP_SUPPORT_LIB = "LIBS-=-lstdc++ LIBS+=-lsupc++"
+CPP_SUPPORT_LIB_sharprom = "LIBS-=-lstdc++"
+EXTRA_QMAKEVARS_POST += "DEFINES+=QWS CONFIG+=qt ${CPP_SUPPORT_LIB}"
+EXTRA_QMAKEVARS_POST += '${@base_conditional("PALMTOP_USE_MULTITHREADED_QT", "yes", "CONFIG+=thread", "CONFIG-=thread",d)}'
+EXTRA_QMAKEVARS_POST += "${@["LIBS+=-lqpe ", ""][(bb.data.getVar('PN', d, 1) == 'libqpe-opie')]}"
+DEPENDS_prepend = "${@["virtual/libqpe1 uicmoc-native ", ""][(bb.data.getVar('PN', d, 1) == 'libqpe-opie')]}"
+
+FILES_${PN} = "${palmtopdir}"
diff --git a/meta/classes/patcher.bbclass b/meta/classes/patcher.bbclass
new file mode 100644
index 0000000000..c8a1b0350f
--- /dev/null
+++ b/meta/classes/patcher.bbclass
@@ -0,0 +1,7 @@
+# Now that BitBake/OpenEmbedded uses Quilt by default, you can simply add an
+# inherit patcher
+# to one of your config files to let BB/OE use patcher again.
+
+PATCHCLEANCMD = "patcher -B"
+PATCHCMD = "patcher -R -p '%s' -n '%s' -i '%s'"
+PATCH_DEPENDS = "${@["patcher-native", ""][(bb.data.getVar('PN', d, 1) == 'patcher-native')]}"
diff --git a/meta/classes/pkg_distribute.bbclass b/meta/classes/pkg_distribute.bbclass
new file mode 100644
index 0000000000..81978e3e3b
--- /dev/null
+++ b/meta/classes/pkg_distribute.bbclass
@@ -0,0 +1,29 @@
+PKG_DISTRIBUTECOMMAND[func] = "1"
+python do_distribute_packages () {
+ cmd = bb.data.getVar('PKG_DISTRIBUTECOMMAND', d, 1)
+ if not cmd:
+ raise bb.build.FuncFailed("Unable to distribute packages, PKG_DISTRIBUTECOMMAND not defined")
+ bb.build.exec_func('PKG_DISTRIBUTECOMMAND', d)
+}
+
+addtask distribute_packages before do_build after do_fetch
+
+PKG_DIST_LOCAL ?= "symlink"
+PKG_DISTRIBUTEDIR ?= "${DEPLOY_DIR}/packages"
+
+PKG_DISTRIBUTECOMMAND () {
+	# Publish the recipe directory into PKG_DISTRIBUTEDIR, either by
+	# copying (excluding SCCS/BitKeeper metadata) or by symlinking,
+	# controlled by PKG_DIST_LOCAL (default: symlink).
+	p=`dirname ${FILE}`
+	d=`basename $p`
+	mkdir -p ${PKG_DISTRIBUTEDIR}
+	case "${PKG_DIST_LOCAL}" in
+	copy)
+		# use this weird tar command to copy because we want to
+		# exclude the BitKeeper directories
+		test -e ${PKG_DISTRIBUTEDIR}/${d} || mkdir ${PKG_DISTRIBUTEDIR}/${d};
+		(cd ${p}; tar -c --exclude SCCS -f - . ) | tar -C ${PKG_DISTRIBUTEDIR}/${d} -xpf -
+		;;
+	symlink)
+		ln -sf $p ${PKG_DISTRIBUTEDIR}/
+		;;
+	esac
+}
diff --git a/meta/classes/pkg_metainfo.bbclass b/meta/classes/pkg_metainfo.bbclass
new file mode 100644
index 0000000000..ac4f73c77b
--- /dev/null
+++ b/meta/classes/pkg_metainfo.bbclass
@@ -0,0 +1,22 @@
+python do_pkg_write_metainfo () {
+ deploydir = bb.data.getVar('DEPLOY_DIR', d, 1)
+ if not deploydir:
+ bb.error("DEPLOY_DIR not defined, unable to write package info")
+ return
+
+ try:
+ infofile = file(os.path.join(deploydir, 'package-metainfo'), 'a')
+ except OSError:
+ raise bb.build.FuncFailed("unable to open package-info file for writing.")
+
+ name = bb.data.getVar('PN', d, 1)
+ version = bb.data.getVar('PV', d, 1)
+ desc = bb.data.getVar('DESCRIPTION', d, 1)
+ page = bb.data.getVar('HOMEPAGE', d, 1)
+ lic = bb.data.getVar('LICENSE', d, 1)
+
+ infofile.write("|| "+ name +" || "+ version + " || "+ desc +" || "+ page +" || "+ lic + " ||\n" )
+ infofile.close()
+}
+
+addtask pkg_write_metainfo after do_package before do_build \ No newline at end of file
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
new file mode 100644
index 0000000000..62f15f312d
--- /dev/null
+++ b/meta/classes/pkgconfig.bbclass
@@ -0,0 +1,28 @@
+inherit base
+
+DEPENDS_prepend = "pkgconfig-native "
+
+# The namespaces can clash here hence the two step replace
+def get_pkgconfig_mangle(d):
+ import bb.data
+ s = "-e ''"
+ if not bb.data.inherits_class('native', d):
+ s += " -e 's:=${libdir}:=OELIBDIR:;'"
+ s += " -e 's:=${includedir}:=OEINCDIR:;'"
+ s += " -e 's:=${datadir}:=OEDATADIR:'"
+ s += " -e 's:=${prefix}:=OEPREFIX:'"
+ s += " -e 's:=${exec_prefix}:=OEEXECPREFIX:'"
+ s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
+ s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
+ s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
+ s += " -e 's:OEPREFIX:${STAGING_LIBDIR}/..:'"
+ s += " -e 's:OEEXECPREFIX:${STAGING_LIBDIR}/..:'"
+ return s
+
+do_stage_append () {
+	# Copy every generated .pc file (skipping *-uninstalled.pc variants)
+	# into PKG_CONFIG_PATH, rewriting its prefixes for the staging area
+	# via the sed expressions from get_pkgconfig_mangle().
+	for pc in `find ${S} -name '*.pc' | grep -v -- '-uninstalled.pc$'`; do
+		pcname=`basename $pc`
+		install -d ${PKG_CONFIG_PATH}
+		cat $pc | sed ${@get_pkgconfig_mangle(d)} > ${PKG_CONFIG_PATH}/$pcname
+	done
+}
diff --git a/meta/classes/poky.bbclass b/meta/classes/poky.bbclass
new file mode 100644
index 0000000000..885fb77441
--- /dev/null
+++ b/meta/classes/poky.bbclass
@@ -0,0 +1,4 @@
+MIRRORS_append () {
+ftp://.*/.*/ http://www.o-hand.com/~richard/poky/sources/
+http://.*/.*/ http://www.o-hand.com/~richard/poky/sources/
+}
diff --git a/meta/classes/qmake-base.bbclass b/meta/classes/qmake-base.bbclass
new file mode 100644
index 0000000000..36ecfb622f
--- /dev/null
+++ b/meta/classes/qmake-base.bbclass
@@ -0,0 +1,44 @@
+DEPENDS_prepend = "qmake-native "
+
+OE_QMAKE_PLATFORM = "${TARGET_OS}-oe-g++"
+QMAKESPEC := "${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}"
+
+# We override this completely to eliminate the -e normally passed in
+EXTRA_OEMAKE = ' MAKEFLAGS= '
+
+export OE_QMAKE_CC="${CC}"
+export OE_QMAKE_CFLAGS="${CFLAGS}"
+export OE_QMAKE_CXX="${CXX}"
+export OE_QMAKE_CXXFLAGS="-fno-exceptions -fno-rtti ${CXXFLAGS}"
+export OE_QMAKE_LDFLAGS="${LDFLAGS}"
+export OE_QMAKE_LINK="${CCLD}"
+export OE_QMAKE_AR="${AR}"
+export OE_QMAKE_STRIP="${STRIP}"
+export OE_QMAKE_UIC="${STAGING_BINDIR}/uic"
+export OE_QMAKE_MOC="${STAGING_BINDIR}/moc"
+export OE_QMAKE_RCC="non-existant"
+export OE_QMAKE_QMAKE="${STAGING_BINDIR}/qmake"
+export OE_QMAKE_RPATH="-Wl,-rpath-link,"
+
+# default to qte2 via bb.conf, inherit qt3x11 to configure for qt3x11
+export OE_QMAKE_INCDIR_QT="${QTDIR}/include"
+export OE_QMAKE_LIBDIR_QT="${QTDIR}/lib"
+export OE_QMAKE_LIBS_QT="qte"
+export OE_QMAKE_LIBS_X11=""
+
+oe_qmake_mkspecs () {
+	# Replicate the OE qmake platform spec into a local mkspecs/ tree,
+	# re-creating symlinks when their target is also present locally and
+	# falling back to plain copies otherwise.
+	mkdir -p mkspecs/${OE_QMAKE_PLATFORM}
+	for f in ${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}/*; do
+		if [ -L $f ]; then
+			lnk=`readlink $f`
+			if [ -f mkspecs/${OE_QMAKE_PLATFORM}/$lnk ]; then
+				ln -s $lnk mkspecs/${OE_QMAKE_PLATFORM}/`basename $f`
+			else
+				cp $f mkspecs/${OE_QMAKE_PLATFORM}/
+			fi
+		else
+			cp $f mkspecs/${OE_QMAKE_PLATFORM}/
+		fi
+	done
+}
+
diff --git a/meta/classes/qmake.bbclass b/meta/classes/qmake.bbclass
new file mode 100644
index 0000000000..4f2fceff35
--- /dev/null
+++ b/meta/classes/qmake.bbclass
@@ -0,0 +1,57 @@
+inherit qmake-base
+
+qmake_do_configure() {
+	# Select a usable qmake spec for the target, then run qmake over the
+	# project's .pro files with EXTRA_QMAKEVARS_PRE/POST substitutions.
+	case ${QMAKESPEC} in
+	*linux-oe-g++|*linux-uclibc-oe-g++|*linux-gnueabi-oe-g++)
+		# Supported OE specs: nothing to do.
+		;;
+	*-oe-g++)
+		die Unsupported target ${TARGET_OS} for oe-g++ qmake spec
+		;;
+	*)
+		# Fall back to searching Qt's stock spec directories.
+		oenote Searching for qmake spec file
+		paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-${TARGET_ARCH}-g++"
+		paths="${QMAKE_MKSPEC_PATH}/${TARGET_OS}-g++ $paths"
+
+		if (echo "${TARGET_ARCH}"|grep -q 'i.86'); then
+			paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-x86-g++ $paths"
+		fi
+		for i in $paths; do
+			if test -e $i; then
+				export QMAKESPEC=$i
+				break
+			fi
+		done
+		;;
+	esac
+
+	oenote "using qmake spec in ${QMAKESPEC}, using profiles '${QMAKE_PROFILES}'"
+
+	if [ -z "${QMAKE_PROFILES}" ]; then
+		PROFILES="`ls *.pro`"
+	else
+		PROFILES="${QMAKE_PROFILES}"
+	fi
+
+	if [ -z "$PROFILES" ]; then
+		die "QMAKE_PROFILES not set and no profiles found in $PWD"
+	fi
+
+	if [ ! -z "${EXTRA_QMAKEVARS_POST}" ]; then
+		AFTER="-after"
+		QMAKE_VARSUBST_POST="${EXTRA_QMAKEVARS_POST}"
+		oenote "qmake postvar substitution: ${EXTRA_QMAKEVARS_POST}"
+	fi
+
+	if [ ! -z "${EXTRA_QMAKEVARS_PRE}" ]; then
+		QMAKE_VARSUBST_PRE="${EXTRA_QMAKEVARS_PRE}"
+		oenote "qmake prevar substitution: ${EXTRA_QMAKEVARS_PRE}"
+	fi
+
+#oenote "Calling '${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST'"
+	# NOTE(review): ${QMAKESPEC} on the next line is expanded by bitbake
+	# before the shell runs, so the 'export QMAKESPEC=$i' from the search
+	# loop above never reaches this command line -- confirm the fallback
+	# search actually takes effect. The unset only clears the env var so
+	# qmake itself does not pick it up.
+	unset QMAKESPEC || true
+	${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST || die "Error calling ${OE_QMAKE_QMAKE} on $PROFILES"
+}
+
+EXPORT_FUNCTIONS do_configure
+
+addtask configure after do_unpack do_patch before do_compile
diff --git a/meta/classes/qpf.bbclass b/meta/classes/qpf.bbclass
new file mode 100644
index 0000000000..d6e58871d5
--- /dev/null
+++ b/meta/classes/qpf.bbclass
@@ -0,0 +1,36 @@
+PACKAGE_ARCH = "all"
+
+do_configure() {
+	# QPF fonts ship prebuilt; nothing to configure.
+	:
+}
+
+do_compile() {
+	# Nothing to compile for font-only packages.
+	:
+}
+
+pkg_postinst_fonts() {
+#!/bin/sh
+set -e
+. /etc/profile
+${sbindir}/update-qtfontdir
+}
+
+pkg_postrm_fonts() {
+#!/bin/sh
+set -e
+. /etc/profile
+${sbindir}/update-qtfontdir -f
+}
+
+python populate_packages_prepend() {
+	# Split each .qpf font under palmtopdir/lib/fonts into its own
+	# qpf-<name> package, attaching the fontdir update scripts.
+	postinst = bb.data.getVar('pkg_postinst_fonts', d, 1)
+	postrm = bb.data.getVar('pkg_postrm_fonts', d, 1)
+	fontdir = bb.data.getVar('palmtopdir', d, 1) + '/lib/fonts'
+	# NOTE(review): the '.' before 'qpf$' is an unescaped regex dot, so
+	# this also matches names like 'fooXqpf' -- probably meant '\.qpf$'.
+	pkgregex = "^([a-z-]*_[0-9]*).*.qpf$"
+	pkgpattern = bb.data.getVar('QPF_PKGPATTERN', d, 1) or 'qpf-%s'
+	pkgdescription = bb.data.getVar('QPF_DESCRIPTION', d, 1) or 'QPF font %s'
+
+	do_split_packages(d, root=fontdir, file_regex=pkgregex, output_pattern=pkgpattern,
+		description=pkgdescription, postinst=postinst, postrm=postrm, recursive=True, hook=None,
+		extra_depends='qpf-font-common')
+}
diff --git a/meta/classes/qt3e.bbclass b/meta/classes/qt3e.bbclass
new file mode 100644
index 0000000000..c34d7c04f5
--- /dev/null
+++ b/meta/classes/qt3e.bbclass
@@ -0,0 +1,11 @@
+#
+# override variables set by qmake-base to compile Qt/X11 apps
+#
+export QTDIR="${STAGING_DIR}/${HOST_SYS}/qte3"
+export QTEDIR="${STAGING_DIR}/${HOST_SYS}/qte3"
+export OE_QMAKE_UIC="${STAGING_BINDIR}/uic3"
+export OE_QMAKE_MOC="${STAGING_BINDIR}/moc3"
+export OE_QMAKE_CXXFLAGS="${CXXFLAGS} "
+export OE_QMAKE_INCDIR_QT="${QTEDIR}/include"
+export OE_QMAKE_LIBDIR_QT="${QTEDIR}/lib"
+export OE_QMAKE_LIBS_QT="qte"
diff --git a/meta/classes/qt3x11.bbclass b/meta/classes/qt3x11.bbclass
new file mode 100644
index 0000000000..6e3d5f8ba2
--- /dev/null
+++ b/meta/classes/qt3x11.bbclass
@@ -0,0 +1,15 @@
+DEPENDS_prepend = "${@["qt3x11 ", ""][(bb.data.getVar('PN', d, 1) == 'qt-x11-free')]}"
+EXTRA_QMAKEVARS_POST += "CONFIG+=thread"
+#
+# override variables set by qmake-base to compile Qt/X11 apps
+#
+export QTDIR = "${STAGING_DIR}/${HOST_SYS}/qt3"
+export OE_QMAKE_UIC = "${STAGING_BINDIR}/uic3"
+export OE_QMAKE_MOC = "${STAGING_BINDIR}/moc3"
+export OE_QMAKE_CXXFLAGS = "${CXXFLAGS} -DQT_NO_XIM"
+export OE_QMAKE_INCDIR_QT = "${QTDIR}/include"
+export OE_QMAKE_LIBDIR_QT = "${QTDIR}/lib"
+export OE_QMAKE_LIBS_QT = "qt"
+export OE_QMAKE_LIBS_X11 = "-lXext -lX11 -lm"
+
+
diff --git a/meta/classes/qt4x11.bbclass b/meta/classes/qt4x11.bbclass
new file mode 100644
index 0000000000..635fc67694
--- /dev/null
+++ b/meta/classes/qt4x11.bbclass
@@ -0,0 +1,17 @@
+DEPENDS_prepend = "qmake2-native "
+DEPENDS_prepend = "${@["qt4x11 ", ""][(bb.data.getVar('PN', d, 1) == 'qt4-x11-free')]}"
+#
+# override variables set by qmake-base to compile Qt4/X11 apps
+#
+export QTDIR = "${STAGING_DIR}/${HOST_SYS}/qt4"
+export QMAKESPEC = "${QTDIR}/mkspecs/${TARGET_OS}-oe-g++"
+export OE_QMAKE_UIC = "${STAGING_BINDIR}/uic4"
+export OE_QMAKE_MOC = "${STAGING_BINDIR}/moc4"
+export OE_QMAKE_RCC = "${STAGING_BINDIR}/rcc4"
+export OE_QMAKE_QMAKE = "${STAGING_BINDIR}/qmake2"
+export OE_QMAKE_LINK = "${CXX}"
+export OE_QMAKE_CXXFLAGS = "${CXXFLAGS}"
+export OE_QMAKE_INCDIR_QT = "${QTDIR}/include"
+export OE_QMAKE_LIBDIR_QT = "${QTDIR}/lib"
+export OE_QMAKE_LIBS_QT = "qt"
+export OE_QMAKE_LIBS_X11 = "-lXext -lX11 -lm"
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
new file mode 100644
index 0000000000..340446917e
--- /dev/null
+++ b/meta/classes/rm_work.bbclass
@@ -0,0 +1,22 @@
+#
+# Removes source after build
+#
+# To use it add that line to conf/local.conf:
+#
+# INHERIT += "rm_work"
+#
+
+do_rm_work () {
+	# Reclaim disk space after packaging: delete everything in WORKDIR
+	# except the 'temp' directory (logs), and empty -- but keep -- the
+	# unpacked source directory itself.
+	cd ${WORKDIR}
+	for dir in *
+	do
+		# POSIX test(1) uses '=' for string equality; '==' is a bashism
+		# and fails under strict /bin/sh implementations (e.g. dash).
+		if [ `basename ${S}` = $dir ]; then
+			rm -rf $dir/*
+		elif [ $dir != 'temp' ]; then
+			rm -rf $dir
+		fi
+	done
+}
+
+addtask rm_work before do_build
+addtask rm_work after do_package
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
new file mode 100644
index 0000000000..2729503507
--- /dev/null
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -0,0 +1,145 @@
+#
+# Creates a root filesystem out of IPKs
+#
+# This rootfs can be mounted via root-nfs or it can be put into a cramfs/jffs etc.
+# See image_ipk.oeclass for a usage of this.
+#
+
+DEPENDS_prepend="ipkg-native ipkg-utils-native fakeroot-native "
+DEPENDS_append=" ${EXTRA_IMAGEDEPENDS}"
+
+PACKAGES = ""
+
+do_rootfs[nostamp] = 1
+do_rootfs[dirs] = ${TOPDIR}
+do_build[nostamp] = 1
+
+IPKG_ARGS = "-f ${T}/ipkg.conf -o ${IMAGE_ROOTFS}"
+
+ROOTFS_POSTPROCESS_COMMAND ?= ""
+
+PID = "${@os.getpid()}"
+
+# some default locales
+IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
+
+LINGUAS_INSTALL = "${@" ".join(map(lambda s: "locale-base-%s" % s, bb.data.getVar('IMAGE_LINGUAS', d, 1).split()))}"
+
+real_do_rootfs () {
+	# Assemble IMAGE_ROOTFS from the ipk feed: regenerate the Packages
+	# index, write a throwaway ipkg.conf, install locales and IPKG_INSTALL,
+	# run deferred maintainer scripts, then post-process and log-check.
+	set -x
+
+	mkdir -p ${IMAGE_ROOTFS}/dev
+
+	if [ -z "${DEPLOY_KEEP_PACKAGES}" ]; then
+		rm -f ${DEPLOY_DIR_IPK}/Packages
+		touch ${DEPLOY_DIR_IPK}/Packages
+		ipkg-make-index -r ${DEPLOY_DIR_IPK}/Packages -p ${DEPLOY_DIR_IPK}/Packages -l ${DEPLOY_DIR_IPK}/Packages.filelist -m ${DEPLOY_DIR_IPK}
+	fi
+	mkdir -p ${T}
+	echo "src oe file:${DEPLOY_DIR_IPK}" > ${T}/ipkg.conf
+	# Earlier arches in IPKG_ARCHS get lower (better) priority numbers.
+	ipkgarchs="${IPKG_ARCHS}"
+	priority=1
+	for arch in $ipkgarchs; do
+		echo "arch $arch $priority" >> ${T}/ipkg.conf
+		priority=$(expr $priority + 5)
+	done
+	ipkg-cl ${IPKG_ARGS} update
+	# Install locale data first so per-language locale packages resolve.
+	if [ ! -z "${LINGUAS_INSTALL}" ]; then
+		ipkg-cl ${IPKG_ARGS} install glibc-localedata-i18n
+		for i in ${LINGUAS_INSTALL}; do
+			ipkg-cl ${IPKG_ARGS} install $i
+		done
+	fi
+	if [ ! -z "${IPKG_INSTALL}" ]; then
+		ipkg-cl ${IPKG_ARGS} install ${IPKG_INSTALL}
+	fi
+
+	export D=${IMAGE_ROOTFS}
+	export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
+	mkdir -p ${IMAGE_ROOTFS}/etc/ipkg/
+	grep "^arch" ${T}/ipkg.conf >${IMAGE_ROOTFS}/etc/ipkg/arch.conf
+
+	# Run the maintainer scripts deferred during offline install; packages
+	# whose scripts fail are flagged back to the 'unpacked' state.
+	for i in ${IMAGE_ROOTFS}${libdir}/ipkg/info/*.preinst; do
+		if [ -f $i ] && ! sh $i; then
+			ipkg-cl ${IPKG_ARGS} flag unpacked `basename $i .preinst`
+		fi
+	done
+	for i in ${IMAGE_ROOTFS}${libdir}/ipkg/info/*.postinst; do
+		if [ -f $i ] && ! sh $i configure; then
+			ipkg-cl ${IPKG_ARGS} flag unpacked `basename $i .postinst`
+		fi
+	done
+
+	install -d ${IMAGE_ROOTFS}/${sysconfdir}
+	echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
+
+	${ROOTFS_POSTPROCESS_COMMAND}
+
+	log_check rootfs
+}
+
+log_check() {
+	# Scan each do_<target> logfile for known error keywords and abort
+	# (exit 1) if any appear outside log_check's own output.
+	set +x
+	for target in $*
+	do
+		lf_path="${WORKDIR}/temp/log.do_$target.${PID}"
+
+		echo "log_check: Using $lf_path as logfile"
+
+		if test -e "$lf_path"
+		then
+			lf_txt="`cat $lf_path`"
+
+			for keyword_die in "Cannot find package" "exit 1" ERR Fail
+			do
+
+				# NOTE(review): '&>' and 'echo -e' below are bashisms;
+				# confirm /bin/sh is bash on the supported build hosts.
+				if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") &>/dev/null
+				then
+					echo "log_check: There were error messages in the logfile"
+					echo -e "log_check: Matched keyword: [$keyword_die]\n"
+					echo "$lf_txt" | grep -v log_check | grep -i "$keyword_die"
+					echo ""
+					do_exit=1
+				fi
+			done
+			test "$do_exit" = 1 && exit 1
+		else
+			echo "Cannot find logfile [$lf_path]"
+		fi
+		# NOTE(review): this prints even when the logfile was missing.
+		echo "Logfile is clean"
+	done
+
+	set -x
+
+}
+
+fakeroot do_rootfs () {
+	# Rebuild the rootfs from scratch under fakeroot so ownership and
+	# permissions inside the image are recorded correctly.
+	rm -rf ${IMAGE_ROOTFS}
+	real_do_rootfs
+}
+
+# set '*' as the rootpassword so the images
+# can decide if they want it or not
+
+zap_root_password () {
+	# Replace root's password hash with '*' so each image can decide on
+	# its own password policy.
+	sed 's%^root:[^:]*:%root:*:%' < ${IMAGE_ROOTFS}/etc/passwd >${IMAGE_ROOTFS}/etc/passwd.new
+	mv ${IMAGE_ROOTFS}/etc/passwd.new ${IMAGE_ROOTFS}/etc/passwd
+}
+
+create_etc_timestamp() {
+	# Stamp the image build time into /etc/timestamp.
+	# NOTE(review): '%2m' style width specifiers are a GNU date extension
+	# ('%m' etc. are already zero-padded) -- confirm on the build hosts.
+	date +%2m%2d%2H%2M%Y >${IMAGE_ROOTFS}/etc/timestamp
+}
+
+# Turn any symbolic /sbin/init link into a file
+remove_init_link () {
+	if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
+		LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
+		rm ${IMAGE_ROOTFS}/sbin/init
+		cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
+	fi
+}
+
+# export the zap_root_password, create_etc_timestamp and remote_init_link
+EXPORT_FUNCTIONS zap_root_password create_etc_timestamp remove_init_link
+
+addtask rootfs before do_build after do_install
diff --git a/meta/classes/rpm_core.bbclass b/meta/classes/rpm_core.bbclass
new file mode 100644
index 0000000000..f28abbb1c3
--- /dev/null
+++ b/meta/classes/rpm_core.bbclass
@@ -0,0 +1,16 @@
+RPMBUILDPATH="${WORKDIR}/rpm"
+
+RPMOPTS="--rcfile=${WORKDIR}/rpmrc"
+RPMOPTS="--rcfile=${WORKDIR}/rpmrc --target ${TARGET_SYS}"
+RPM="rpm ${RPMOPTS}"
+RPMBUILD="rpmbuild --buildroot ${D} --short-circuit ${RPMOPTS}"
+
+rpm_core_do_preprpm() {
+	# Create the rpmbuild directory tree and point rpm at a per-WORKDIR
+	# rcfile/macros pair so builds never touch the host rpm configuration.
+	# NOTE(review): the {a,b} expansion below is a bashism; confirm
+	# /bin/sh is bash on the supported build hosts.
+	mkdir -p ${RPMBUILDPATH}/{SPECS,RPMS/{i386,i586,i686,noarch,ppc,mips,mipsel,arm},SRPMS,SOURCES,BUILD}
+	echo 'macrofiles:/usr/lib/rpm/macros:${WORKDIR}/macros' > ${WORKDIR}/rpmrc
+	echo '%_topdir ${RPMBUILDPATH}' > ${WORKDIR}/macros
+	echo '%_repackage_dir ${WORKDIR}' >> ${WORKDIR}/macros
+}
+
+EXPORT_FUNCTIONS do_preprpm
+addtask preprpm before do_fetch
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
new file mode 100644
index 0000000000..a626162ffb
--- /dev/null
+++ b/meta/classes/sanity.bbclass
@@ -0,0 +1,112 @@
+#
+# Sanity check the users setup for common misconfigurations
+#
+
+def raise_sanity_error(msg):
+    # Abort the build via bb.fatal, wrapping 'msg' in the standard
+    # sanity-checker preamble.
+    import bb
+    bb.fatal(""" Openembedded's config sanity checker detected a potential misconfiguration.
+    Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
+    Following is the list of potential problems / advisories:
+
+    %s""" % msg)
+
+def check_conf_exists(fn, data):
+ import bb, os
+
+ bbpath = []
+ fn = bb.data.expand(fn, data)
+ vbbpath = bb.data.getVar("BBPATH", data)
+ if vbbpath:
+ bbpath += vbbpath.split(":")
+ for p in bbpath:
+ currname = os.path.join(bb.data.expand(p, data), fn)
+ if os.access(currname, os.R_OK):
+ return True
+ return False
+
+def check_app_exists(app, d):
+    # True if 'app' (after variable expansion) is found on PATH by bb.which.
+    from bb import which, data
+
+    app = data.expand(app, d)
+    path = data.getVar('PATH', d)
+    return len(which(path, app)) != 0
+
+
+def check_sanity(e):
+ from bb import note, error, data, __version__
+ from bb.event import Handled, NotHandled, getName
+ try:
+ from distutils.version import LooseVersion
+ except ImportError:
+ def LooseVersion(v): print "WARNING: sanity.bbclass can't compare versions without python-distutils"; return 1
+ import os
+
+ # Check the bitbake version meets minimum requirements
+ minversion = data.getVar('BB_MIN_VERSION', e.data , True)
+ if not minversion:
+ # Hack: BB_MIN_VERSION hasn't been parsed yet so return
+ # and wait for the next call
+ print "Foo %s" % minversion
+ return
+
+ if (LooseVersion(__version__) < LooseVersion(minversion)):
+ raise_sanity_error('Bitbake version %s is required and version %s was found' % (minversion, __version__))
+
+ # Check TARGET_ARCH is set
+ if data.getVar('TARGET_ARCH', e.data, True) == 'INVALID':
+ raise_sanity_error('Please set TARGET_ARCH directly, or choose a MACHINE or DISTRO that does so.')
+
+ # Check TARGET_OS is set
+ if data.getVar('TARGET_OS', e.data, True) == 'INVALID':
+ raise_sanity_error('Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.')
+
+ # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
+ if "diffstat-native" not in data.getVar('ASSUME_PROVIDED', e.data, True).split():
+ raise_sanity_error('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf')
+
+ # Check the MACHINE is valid
+ if not check_conf_exists("conf/machine/${MACHINE}.conf", e.data):
+ raise_sanity_error('Please set a valid MACHINE in your local.conf')
+
+ # Check the distro is valid
+ if not check_conf_exists("conf/distro/${DISTRO}.conf", e.data):
+ raise_sanity_error('Please set a valid DISTRO in your local.conf')
+
+ if not check_app_exists("${MAKE}", e.data):
+ raise_sanity_error('GNU make missing. Please install GNU make')
+
+ if not check_app_exists('${BUILD_PREFIX}gcc', e.data):
+ raise_sanity_error('C Host-Compiler is missing, please install one' )
+
+ if not check_app_exists('${BUILD_PREFIX}g++', e.data):
+ raise_sanity_error('C++ Host-Compiler is missing, please install one' )
+
+ if not check_app_exists('patch', e.data):
+ raise_sanity_error('Please install the patch utility, preferable GNU patch.')
+
+ if not check_app_exists('diffstat', e.data):
+ raise_sanity_error('Please install the diffstat utility')
+
+ if not check_app_exists('texi2html', e.data):
+ raise_sanity_error('Please install the texi2html binary')
+
+ if not check_app_exists('cvs', e.data):
+ raise_sanity_error('Please install the cvs utility')
+
+ if not check_app_exists('svn', e.data):
+ raise_sanity_error('Please install the svn utility')
+
+ oes_bb_conf = data.getVar( 'OES_BITBAKE_CONF', e.data, True )
+ if not oes_bb_conf:
+ raise_sanity_error('You do not include OpenEmbeddeds version of conf/bitbake.conf')
+
+addhandler check_sanity_eventhandler
+python check_sanity_eventhandler() {
+ from bb import note, error, data, __version__
+ from bb.event import getName
+
+ if getName(e) == "BuildStarted":
+ check_sanity(e)
+
+ return NotHandled
+}
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
new file mode 100644
index 0000000000..3160eca69a
--- /dev/null
+++ b/meta/classes/scons.bbclass
@@ -0,0 +1,13 @@
+DEPENDS += "python-scons-native"
+
+scons_do_compile() {
+    # Build with the staged scons; oefatal aborts the task on failure.
+    ${STAGING_BINDIR}/scons || \
+    oefatal "scons build execution failed."
+}
+
+scons_do_install() {
+    # Requires the project's SConstruct to provide an 'install' target.
+    ${STAGING_BINDIR}/scons install || \
+    oefatal "scons install execution failed."
+}
+
+EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes/sdk.bbclass b/meta/classes/sdk.bbclass
new file mode 100644
index 0000000000..bcabbc79bd
--- /dev/null
+++ b/meta/classes/sdk.bbclass
@@ -0,0 +1,26 @@
+# SDK packages are built either explicitly by the user,
+# or indirectly via dependency. No need to be in 'world'.
+EXCLUDE_FROM_WORLD = "1"
+
+SDK_NAME = "${TARGET_ARCH}/oe"
+PACKAGE_ARCH = "${BUILD_ARCH}"
+
+HOST_ARCH = "${BUILD_ARCH}"
+HOST_VENDOR = "${BUILD_VENDOR}"
+HOST_OS = "${BUILD_OS}"
+HOST_PREFIX = "${BUILD_PREFIX}"
+HOST_CC_ARCH = "${BUILD_CC_ARCH}"
+
+CPPFLAGS = "${BUILD_CPPFLAGS}"
+CFLAGS = "${BUILD_CFLAGS}"
+CXXFLAGS = "${BUILD_CFLAGS}"
+LDFLAGS = "${BUILD_LDFLAGS}"
+
+prefix = "/usr/local/${SDK_NAME}"
+exec_prefix = "${prefix}"
+base_prefix = "${exec_prefix}"
+
+FILES_${PN} = "${prefix}"
+
+
+
diff --git a/meta/classes/sdl.bbclass b/meta/classes/sdl.bbclass
new file mode 100644
index 0000000000..c0b21427a4
--- /dev/null
+++ b/meta/classes/sdl.bbclass
@@ -0,0 +1,44 @@
+#
+# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
+#
+
+DEPENDS += "virtual/libsdl libsdl-mixer libsdl-image"
+
+APPDESKTOP ?= "${PN}.desktop"
+APPNAME ?= "${PN}"
+APPIMAGE ?= "${PN}.png"
+
+sdl_do_sdl_install() {
+    # Install launcher symlink, icon and a .desktop entry into the
+    # palmtop (Opie) application layout.
+    install -d ${D}${palmtopdir}/bin
+    install -d ${D}${palmtopdir}/pics
+    install -d ${D}${palmtopdir}/apps/Games
+    ln -sf ${bindir}/${APPNAME} ${D}${palmtopdir}/bin/${APPNAME}
+    install -m 0644 ${APPIMAGE} ${D}${palmtopdir}/pics/${PN}.png
+
+    # Use the packaged .desktop file when present, otherwise synthesize one.
+    if [ -e "${APPDESKTOP}" ]
+    then
+        echo ${APPDESKTOP} present, installing to palmtopdir...
+        install -m 0644 ${APPDESKTOP} ${D}${palmtopdir}/apps/Games/${PN}.desktop
+    else
+        echo ${APPDESKTOP} not present, creating one on-the-fly...
+        cat >${D}${palmtopdir}/apps/Games/${PN}.desktop <<EOF
+[Desktop Entry]
+Note=Auto Generated... this may be not what you want
+Comment=${DESCRIPTION}
+Exec=${APPNAME}
+Icon=${APPIMAGE}
+Type=Application
+Name=${PN}
+EOF
+    fi
+}
+
+EXPORT_FUNCTIONS do_sdl_install
+addtask sdl_install after do_compile before do_populate_staging
+
+SECTION = "x11/games"
+SECTION_${PN}-opie = "opie/games"
+
+PACKAGES += "${PN}-opie"
+RDEPENDS_${PN}-opie += "${PN}"
+FILES_${PN}-opie = "${palmtopdir}"
diff --git a/meta/classes/sip.bbclass b/meta/classes/sip.bbclass
new file mode 100644
index 0000000000..adf179b130
--- /dev/null
+++ b/meta/classes/sip.bbclass
@@ -0,0 +1,58 @@
+# Build Class for Sip based Python Bindings
+# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
+#
+
+DEPENDS =+ "sip-native python-sip"
+
+# default stuff, do not uncomment
+# EXTRA_SIPTAGS = "-tWS_QWS -tQtPE_1_6_0 -tQt_2_3_1"
+
+sip_do_generate() {
+    # Generate C++ binding sources and a qmake .pro for each sip module.
+    if [ -z "${SIP_MODULES}" ]; then
+        MODULES="`ls sip/*mod.sip`"
+    else
+        MODULES="${SIP_MODULES}"
+    fi
+
+    if [ -z "$MODULES" ]; then
+        die "SIP_MODULES not set and no modules found in $PWD"
+    else
+        # Report $MODULES (which may be autodetected); the original printed
+        # ${SIP_MODULES}, which is empty in the autodetected case.
+        oenote "using modules '$MODULES' and tags '${EXTRA_SIPTAGS}'"
+    fi
+
+    if [ -z "${EXTRA_SIPTAGS}" ]; then
+        die "EXTRA_SIPTAGS needs to be set!"
+    else
+        SIPTAGS="${EXTRA_SIPTAGS}"
+    fi
+
+    if [ ! -z "${SIP_FEATURES}" ]; then
+        FEATURES="-z ${SIP_FEATURES}"
+        oenote "sip feature file: ${SIP_FEATURES}"
+    fi
+
+    for module in $MODULES
+    do
+        install -d ${module}/
+        # Message now matches the actual command (output goes to .sbf,
+        # not .pro.in as the original note claimed).
+        oenote "calling 'sip -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf sip/${module}/${module}mod.sip'"
+        sip -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf sip/${module}/${module}mod.sip \
+        || die "Error calling sip on ${module}"
+        # Translate the sip build file (.sbf) into a qmake project file.
+        cat ${module}/${module}.sbf | sed s,target,TARGET, \
+        | sed s,sources,SOURCES, \
+        | sed s,headers,HEADERS, \
+        | sed s,"moc_HEADERS =","HEADERS +=", \
+        >${module}/${module}.pro
+        echo "TEMPLATE=lib" >>${module}/${module}.pro
+        [ "${module}" = "qt" ] && echo "" >>${module}/${module}.pro
+        [ "${module}" = "qtcanvas" ] && echo "" >>${module}/${module}.pro
+        [ "${module}" = "qttable" ] && echo "" >>${module}/${module}.pro
+        [ "${module}" = "qwt" ] && echo "" >>${module}/${module}.pro
+        [ "${module}" = "qtpe" ] && echo "" >>${module}/${module}.pro
+        [ "${module}" = "qtpe" ] && echo "LIBS+=-lqpe" >>${module}/${module}.pro
+        true
+    done
+}
+
+EXPORT_FUNCTIONS do_generate
+
+addtask generate after do_unpack do_patch before do_configure
diff --git a/meta/classes/sourcepkg.bbclass b/meta/classes/sourcepkg.bbclass
new file mode 100644
index 0000000000..390d3684d4
--- /dev/null
+++ b/meta/classes/sourcepkg.bbclass
@@ -0,0 +1,111 @@
+DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/source"
+EXCLUDE_FROM ?= ".pc autom4te.cache"
+
+# used as part of a path. make sure it's set
+DISTRO ?= "openembedded"
+
+def get_src_tree(d):
+ import bb
+ import os, os.path
+
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+ if not workdir:
+ bb.error("WORKDIR not defined, unable to find source tree.")
+ return
+
+ s = bb.data.getVar('S', d, 0)
+ if not s:
+ bb.error("S not defined, unable to find source tree.")
+ return
+
+ s_tree_raw = s.split('/')[1]
+ s_tree = bb.data.expand(s_tree_raw, d)
+
+ src_tree_path = os.path.join(workdir, s_tree)
+ try:
+ os.listdir(src_tree_path)
+ except OSError:
+ bb.fatal("Expected to find source tree in '%s' which doesn't exist." % src_tree_path)
+ bb.debug("Assuming source tree is '%s'" % src_tree_path)
+
+ return s_tree
+
+sourcepkg_do_create_orig_tgz(){
+    # Pack the unpatched source as ${P}.orig.tar.gz and keep a pristine
+    # copy ($src_tree.orig) for the later diff.gz step.
+    mkdir -p ${DEPLOY_DIR_SRC}
+    cd ${WORKDIR}
+    for i in ${EXCLUDE_FROM}; do
+        echo $i >> temp/exclude-from-file
+    done
+
+    src_tree=${@get_src_tree(d)}
+
+    echo $src_tree
+    oenote "Creating .orig.tar.gz in ${DEPLOY_DIR_SRC}/${P}.orig.tar.gz"
+    tar cvzf ${DEPLOY_DIR_SRC}/${P}.orig.tar.gz --exclude-from temp/exclude-from-file $src_tree
+    cp -pPR $src_tree $src_tree.orig
+}
+
+sourcepkg_do_archive_bb() {
+    # Copy the recipe (.bb) file itself into <src_tree>/${DISTRO}/ so it
+    # ships with the source package.
+    src_tree=${@get_src_tree(d)}
+    dest=${WORKDIR}/$src_tree/${DISTRO}
+    mkdir -p $dest
+
+    cp ${FILE} $dest
+}
+
+python sourcepkg_do_dumpdata() {
+    # Dump the full expanded metadata (variables, shell and python
+    # functions) into <src_tree>/${DISTRO}/${P}-${PR}.showdata.dump.
+    import os
+    import os.path
+
+    workdir = bb.data.getVar('WORKDIR', d, 1)
+    distro = bb.data.getVar('DISTRO', d, 1)
+    s_tree = get_src_tree(d)
+    openembeddeddir = os.path.join(workdir, s_tree, distro)
+    dumpfile = os.path.join(openembeddeddir, bb.data.expand("${P}-${PR}.showdata.dump",d))
+
+    try:
+        os.mkdir(openembeddeddir)
+    except OSError:
+        # dir exists
+        pass
+
+    bb.note("Dumping metadata into '%s'" % dumpfile)
+    f = open(dumpfile, "w")
+    # emit variables and shell functions
+    bb.data.emit_env(f, d, True)
+    # emit the metadata which isnt valid shell (python task bodies)
+    for e in d.keys():
+        if bb.data.getVarFlag(e, 'python', d):
+            f.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, 1)))
+    f.close()
+}
+
+sourcepkg_do_create_diff_gz(){
+    # Diff the patched tree against the pristine copy made by
+    # create_orig_tgz and ship the result as ${P}-${PR}.diff.gz.
+    cd ${WORKDIR}
+    for i in ${EXCLUDE_FROM}; do
+        echo $i >> temp/exclude-from-file
+    done
+
+
+    src_tree=${@get_src_tree(d)}
+
+    # Preserve loose WORKDIR files (patches etc.) inside the tree so they
+    # appear in the diff.
+    for i in `find . -maxdepth 1 -type f`; do
+        mkdir -p $src_tree/${DISTRO}/files
+        cp $i $src_tree/${DISTRO}/files
+    done
+
+    oenote "Creating .diff.gz in ${DEPLOY_DIR_SRC}/${P}-${PR}.diff.gz"
+    LC_ALL=C TZ=UTC0 diff --exclude-from=temp/exclude-from-file -Naur $src_tree.orig $src_tree | gzip -c > ${DEPLOY_DIR_SRC}/${P}-${PR}.diff.gz
+    rm -rf $src_tree.orig
+}
+
+EXPORT_FUNCTIONS do_create_orig_tgz do_archive_bb do_dumpdata do_create_diff_gz
+
+addtask create_orig_tgz after do_unpack before do_patch
+addtask archive_bb after do_patch before do_dumpdata
+# Task names must carry the do_ prefix in dependencies; the original
+# referenced non-existent 'archive_bb' and 'do_dump_data' tasks.
+addtask dumpdata after do_archive_bb before do_create_diff_gz
+addtask create_diff_gz after do_dumpdata before do_configure
+
diff --git a/meta/classes/src_distribute.bbclass b/meta/classes/src_distribute.bbclass
new file mode 100644
index 0000000000..5daf526018
--- /dev/null
+++ b/meta/classes/src_distribute.bbclass
@@ -0,0 +1,40 @@
+include conf/licenses.conf
+
+SRC_DISTRIBUTECOMMAND[func] = "1"
+python do_distribute_sources () {
+ l = bb.data.createCopy(d)
+ bb.data.update_data(l)
+ licenses = (bb.data.getVar('LICENSE', d, 1) or "").split()
+ if not licenses:
+ bb.note("LICENSE not defined")
+ src_distribute_licenses = (bb.data.getVar('SRC_DISTRIBUTE_LICENSES', d, 1) or "").split()
+ # Explanation:
+ # Space seperated items in LICENSE must *all* be distributable
+ # Each space seperated item may be used under any number of | seperated licenses.
+ # If any of those | seperated licenses are distributable, then that component is.
+ # i.e. LICENSE = "GPL LGPL"
+ # In this case, both components are distributable.
+ # LICENSE = "GPL|QPL|Proprietary"
+ # In this case, GPL is distributable, so the component is.
+ valid = 1
+ for l in licenses:
+ lvalid = 0
+ for i in l.split("|"):
+ if i in src_distribute_licenses:
+ lvalid = 1
+ if lvalid != 1:
+ valid = 0
+ if valid == 0:
+ bb.note("Licenses (%s) are not all listed in SRC_DISTRIBUTE_LICENSES, skipping source distribution" % licenses)
+ return
+ import re
+ for s in (bb.data.getVar('A', d, 1) or "").split():
+ s = re.sub(';.*$', '', s)
+ cmd = bb.data.getVar('SRC_DISTRIBUTECOMMAND', d, 1)
+ if not cmd:
+ raise bb.build.FuncFailed("Unable to distribute sources, SRC_DISTRIBUTECOMMAND not defined")
+ bb.data.setVar('SRC', s, d)
+ bb.build.exec_func('SRC_DISTRIBUTECOMMAND', d)
+}
+
+addtask distribute_sources before do_build after do_fetch
diff --git a/meta/classes/src_distribute_local.bbclass b/meta/classes/src_distribute_local.bbclass
new file mode 100644
index 0000000000..5f0cef5bec
--- /dev/null
+++ b/meta/classes/src_distribute_local.bbclass
@@ -0,0 +1,31 @@
+inherit src_distribute
+
+# SRC_DIST_LOCAL possible values:
+# copy copies the files from ${A} to the distributedir
+# symlink symlinks the files from ${A} to the distributedir
+# move+symlink moves the files into distributedir, and symlinks them back
+SRC_DIST_LOCAL ?= "move+symlink"
+SRC_DISTRIBUTEDIR ?= "${DEPLOY_DIR}/sources"
+SRC_DISTRIBUTECOMMAND () {
+    # Distribute one fetched file (${SRC}) into SRC_DISTRIBUTEDIR.
+    # Only plain files living under DL_DIR are handled; symlinks and
+    # paths outside DL_DIR are skipped silently.
+    s="${SRC}"
+    if [ ! -L "$s" ] && (echo "$s"|grep "^${DL_DIR}"); then
+        :
+    else
+        exit 0;
+    fi
+    mkdir -p ${SRC_DISTRIBUTEDIR}
+    case "${SRC_DIST_LOCAL}" in
+        copy)
+            test -e $s.md5 && cp -f $s.md5 ${SRC_DISTRIBUTEDIR}/
+            cp -f $s ${SRC_DISTRIBUTEDIR}/
+            ;;
+        symlink)
+            test -e $s.md5 && ln -sf $s.md5 ${SRC_DISTRIBUTEDIR}/
+            ln -sf $s ${SRC_DISTRIBUTEDIR}/
+            ;;
+        move+symlink)
+            # Move the archive away and leave a symlink so DL_DIR stays usable.
+            mv $s ${SRC_DISTRIBUTEDIR}/
+            ln -sf ${SRC_DISTRIBUTEDIR}/`basename $s` $s
+            ;;
+    esac
+}
diff --git a/meta/classes/srec.bbclass b/meta/classes/srec.bbclass
new file mode 100644
index 0000000000..e7bdc6c75d
--- /dev/null
+++ b/meta/classes/srec.bbclass
@@ -0,0 +1,28 @@
+#
+# Creates .srec files from images.
+#
+# Useful for loading with Yamon.
+
+# Define SREC_VMAADDR in your machine.conf.
+
+SREC_CMD = "${TARGET_PREFIX}objcopy -O srec -I binary --adjust-vma ${SREC_VMAADDR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.${type} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.${type}.srec"
+
+# Do not build srec files for these types of images:
+SREC_SKIP = "tar"
+
+do_srec[nostamp] = 1
+
+do_srec () {
+    # Quoted/prefixed test: the original unquoted '[ ${SREC_VMAADDR} = "" ]'
+    # expanded to a shell syntax error precisely when the variable was unset.
+    if [ "x${SREC_VMAADDR}" = "x" ] ; then
+        oefatal "Cannot do_srec without SREC_VMAADDR defined."
+    fi
+    for type in ${IMAGE_FSTYPES}; do
+        # 'continue 2' skips to the next image type when it is in SREC_SKIP.
+        for skiptype in ${SREC_SKIP}; do
+            if [ $type = $skiptype ] ; then continue 2 ; fi
+        done
+        ${SREC_CMD}
+    done
+    return 0
+}
+
+addtask srec after do_rootfs before do_build
diff --git a/meta/classes/tinderclient.bbclass b/meta/classes/tinderclient.bbclass
new file mode 100644
index 0000000000..6e10d0f34b
--- /dev/null
+++ b/meta/classes/tinderclient.bbclass
@@ -0,0 +1,332 @@
+def tinder_http_post(server, selector, content_type, body):
+ import httplib
+ # now post it
+ for i in range(0,5):
+ try:
+ h = httplib.HTTP(server)
+ h.putrequest('POST', selector)
+ h.putheader('content-type', content_type)
+ h.putheader('content-length', str(len(body)))
+ h.endheaders()
+ h.send(body)
+ errcode, errmsg, headers = h.getreply()
+ #print errcode, errmsg, headers
+ return (errcode,errmsg, headers, h.file)
+ except:
+ # try again
+ pass
+
+def tinder_form_data(bound, dict, log):
+    # Build a multipart/form-data body: one part per key in 'dict', plus an
+    # optional file part named "log" when 'log' is non-empty.
+    output = []
+    #br
+    # for each key in the dictionary
+    for name in dict:
+        output.append( "--" + bound )
+        output.append( 'Content-Disposition: form-data; name="%s"' % name )
+        output.append( "" )
+        output.append( dict[name] )
+    if log:
+        output.append( "--" + bound )
+        output.append( 'Content-Disposition: form-data; name="log"; filename="log.txt"' )
+        output.append( '' )
+        output.append( log )
+    output.append( '--' + bound + '--' )
+    output.append( '' )
+
+    return "\r\n".join(output)
+
+def tinder_time_string():
+    """
+    Return the time as GMT
+    """
+    # NOTE(review): stub — always returns "", so every timestamp in the
+    # tinderbox reports is empty. Kept as-is.
+    return ""
+
+def tinder_format_http_post(d,status,log):
+ """
+ Format the Tinderbox HTTP post with the data needed
+ for the tinderbox to be happy.
+ """
+
+ from bb import data, build
+ import os,random
+
+ # the variables we will need to send on this form post
+ variables = {
+ "tree" : data.getVar('TINDER_TREE', d, True),
+ "machine_name" : data.getVar('TINDER_MACHINE', d, True),
+ "os" : os.uname()[0],
+ "os_version" : os.uname()[2],
+ "compiler" : "gcc",
+ "clobber" : data.getVar('TINDER_CLOBBER', d, True)
+ }
+
+ # optionally add the status
+ if status:
+ variables["status"] = str(status)
+
+ # try to load the machine id
+ # we only need on build_status.pl but sending it
+ # always does not hurt
+ try:
+ f = file(data.getVar('TMPDIR',d,True)+'/tinder-machine.id', 'r')
+ id = f.read()
+ variables['machine_id'] = id
+ except:
+ pass
+
+ # the boundary we will need
+ boundary = "----------------------------------%d" % int(random.random()*1000000000000)
+
+ # now format the body
+ body = tinder_form_data( boundary, variables, log )
+
+ return ("multipart/form-data; boundary=%s" % boundary),body
+
+
+def tinder_build_start(d):
+ """
+ Inform the tinderbox that a build is starting. We do this
+ by posting our name and tree to the build_start.pl script
+ on the server.
+ """
+ from bb import data
+
+ # get the body and type
+ content_type, body = tinder_format_http_post(d,None,None)
+ server = data.getVar('TINDER_HOST', d, True )
+ url = data.getVar('TINDER_URL', d, True )
+
+ selector = url + "/xml/build_start.pl"
+
+ #print "selector %s and url %s" % (selector, url)
+
+ # now post it
+ errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
+ #print errcode, errmsg, headers
+ report = h_file.read()
+
+ # now let us find the machine id that was assigned to us
+ search = "<machine id='"
+ report = report[report.find(search)+len(search):]
+ report = report[0:report.find("'")]
+
+ import bb
+ bb.note("Machine ID assigned by tinderbox: %s" % report )
+
+ # now we will need to save the machine number
+ # we will override any previous numbers
+ f = file(data.getVar('TMPDIR', d, True)+"/tinder-machine.id", 'w')
+ f.write(report)
+
+
+def tinder_send_http(d, status, log):
+    """
+    Send this log as build status
+    """
+    from bb import data
+
+
+    # get the body and type
+    content_type, body = tinder_format_http_post(d,status,log)
+    server = data.getVar('TINDER_HOST', d, True )
+    url = data.getVar('TINDER_URL', d, True )
+
+    selector = url + "/xml/build_status.pl"
+
+    # now post it (reply is intentionally ignored)
+    errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
+    #print errcode, errmsg, headers
+    #print h.file.read()
+
+
+def tinder_print_info(d):
+    """
+    Print the TinderBox Info
+    Including informations of the BaseSystem and the Tree
+    we use.
+    Returns the formatted info block as a single string.
+    """
+
+    from bb import data
+    import os
+    # get the local vars
+
+    time = tinder_time_string()
+    ops = os.uname()[0]
+    version = os.uname()[2]
+    url = data.getVar( 'TINDER_URL' , d, True )
+    tree = data.getVar( 'TINDER_TREE', d, True )
+    branch = data.getVar( 'TINDER_BRANCH', d, True )
+    srcdate = data.getVar( 'SRCDATE', d, True )
+    machine = data.getVar( 'MACHINE', d, True )
+    distro = data.getVar( 'DISTRO', d, True )
+    bbfiles = data.getVar( 'BBFILES', d, True )
+    tarch = data.getVar( 'TARGET_ARCH', d, True )
+    fpu = data.getVar( 'TARGET_FPU', d, True )
+    oerev = data.getVar( 'OE_REVISION', d, True ) or "unknown"
+
+    # there is a bug with tipple quoted strings
+    # i will work around but will fix the original
+    # bug as well
+    output = []
+    output.append("== Tinderbox Info" )
+    output.append("Time: %(time)s" )
+    output.append("OS: %(ops)s" )
+    output.append("%(version)s" )
+    output.append("Compiler: gcc" )
+    output.append("Tinderbox Client: 0.1" )
+    output.append("Tinderbox Client Last Modified: yesterday" )
+    output.append("Tinderbox Protocol: 0.1" )
+    output.append("URL: %(url)s" )
+    output.append("Tree: %(tree)s" )
+    output.append("Config:" )
+    output.append("branch = '%(branch)s'" )
+    output.append("TARGET_ARCH = '%(tarch)s'" )
+    output.append("TARGET_FPU = '%(fpu)s'" )
+    output.append("SRCDATE = '%(srcdate)s'" )
+    output.append("MACHINE = '%(machine)s'" )
+    output.append("DISTRO = '%(distro)s'" )
+    output.append("BBFILES = '%(bbfiles)s'" )
+    output.append("OEREV = '%(oerev)s'" )
+    output.append("== End Tinderbox Client Info" )
+
+    # now create the real output: %(name)s placeholders are filled from
+    # the local variables gathered above via vars()
+    return "\n".join(output) % vars()
+
+
+def tinder_print_env():
+    """
+    Print the environment variables of this build
+    """
+    from bb import data
+    import os
+
+    time_start = tinder_time_string()
+    time_end = tinder_time_string()
+
+    # build the environment as KEY=VALUE lines
+    env = ""
+    for var in os.environ:
+        env += "%s=%s\n" % (var, os.environ[var])
+
+    output = []
+    output.append( "---> TINDERBOX RUNNING env %(time_start)s" )
+    output.append( env )
+    output.append( "<--- TINDERBOX FINISHED (SUCCESS) %(time_end)s" )
+
+    return "\n".join(output) % vars()
+
+def tinder_tinder_start(d, event):
+    """
+    PRINT the configuration of this build
+    Returns the header block sent at the start of a build report.
+    """
+
+    time_start = tinder_time_string()
+    config = tinder_print_info(d)
+    #env = tinder_print_env()
+    time_end = tinder_time_string()
+    # packages being built come from the BuildStarted event
+    packages = " ".join( event.getPkgs() )
+
+    output = []
+    output.append( "---> TINDERBOX PRINTING CONFIGURATION %(time_start)s" )
+    output.append( config )
+    #output.append( env )
+    output.append( "<--- TINDERBOX FINISHED PRINTING CONFIGURATION %(time_end)s" )
+    output.append( "---> TINDERBOX BUILDING '%(packages)s'" )
+    output.append( "<--- TINDERBOX STARTING BUILD NOW" )
+
+    output.append( "" )
+
+    return "\n".join(output) % vars()
+
+def tinder_do_tinder_report(event):
+ """
+ Report to the tinderbox:
+ On the BuildStart we will inform the box directly
+ On the other events we will write to the TINDER_LOG and
+ when the Task is finished we will send the report.
+
+ The above is not yet fully implemented. Currently we send
+ information immediately. The caching/queuing needs to be
+ implemented. Also sending more or less information is not
+ implemented yet.
+ """
+ from bb.event import getName
+ from bb import data, mkdirhier, build
+ import os, glob
+
+ # variables
+ name = getName(event)
+ log = ""
+ status = 1
+ #print asd
+ # Check what we need to do Build* shows we start or are done
+ if name == "BuildStarted":
+ tinder_build_start(event.data)
+ log = tinder_tinder_start(event.data,event)
+
+ try:
+ # truncate the tinder log file
+ f = file(data.getVar('TINDER_LOG', event.data, True), 'rw+')
+ f.truncate(0)
+ f.close()
+ except IOError:
+ pass
+
+ # Append the Task-Log (compile,configure...) to the log file
+ # we will send to the server
+ if name == "TaskSucceeded" or name == "TaskFailed":
+ log_file = glob.glob("%s/log.%s.*" % (data.getVar('T', event.data, True), event.task))
+
+ if len(log_file) != 0:
+ to_file = data.getVar('TINDER_LOG', event.data, True)
+ log += "".join(open(log_file[0], 'r').readlines())
+
+ # set the right 'HEADER'/Summary for the TinderBox
+ if name == "TaskStarted":
+ log += "---> TINDERBOX Task %s started\n" % event.task
+ elif name == "TaskSucceeded":
+ log += "<--- TINDERBOX Task %s done (SUCCESS)\n" % event.task
+ elif name == "TaskFailed":
+ log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
+ elif name == "PkgStarted":
+ log += "---> TINDERBOX Package %s started\n" % data.getVar('P', event.data, True)
+ elif name == "PkgSucceeded":
+ log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % data.getVar('P', event.data, True)
+ elif name == "PkgFailed":
+ build.exec_task('do_clean', event.data)
+ log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % data.getVar('P', event.data, True)
+ status = 200
+ elif name == "BuildCompleted":
+ log += "Build Completed\n"
+ status = 100
+ elif name == "MultipleProviders":
+ log += "---> TINDERBOX Multiple Providers\n"
+ log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates())
+ log += "consider defining PREFERRED_PROVIDER_%s\n" % event.getItem()
+ log += "is runtime: %d\n" % event.isRuntime()
+ log += "<--- TINDERBOX Multiple Providers\n"
+ elif name == "NoProvider":
+ log += "Error: No Provider for: %s\n" % event.getItem()
+ log += "Error:Was Runtime: %d\n" % event.isRuntime()
+ status = 200
+
+ # now post the log
+ if len(log) == 0:
+ return
+
+ # for now we will use the http post method as it is the only one
+ log_post_method = tinder_send_http
+ log_post_method(event.data, status, log)
+
+
+# we want to be an event handler
+addhandler tinderclient_eventhandler
+python tinderclient_eventhandler() {
+    # Forward every bitbake event to the tinderbox reporter when
+    # TINDER_REPORT == "1" ('e' is supplied by bitbake's event dispatch).
+    from bb import note, error, data
+    from bb.event import NotHandled
+    do_tinder_report = data.getVar('TINDER_REPORT', e.data, True)
+    if do_tinder_report and do_tinder_report == "1":
+        tinder_do_tinder_report(e)
+
+    return NotHandled
+}
diff --git a/meta/classes/tmake.bbclass b/meta/classes/tmake.bbclass
new file mode 100644
index 0000000000..05b82e496d
--- /dev/null
+++ b/meta/classes/tmake.bbclass
@@ -0,0 +1,77 @@
+DEPENDS_prepend="tmake "
+
+python tmake_do_createpro() {
+ import glob, sys
+ from bb import note
+ out_vartranslate = {
+ "TMAKE_HEADERS": "HEADERS",
+ "TMAKE_INTERFACES": "INTERFACES",
+ "TMAKE_TEMPLATE": "TEMPLATE",
+ "TMAKE_CONFIG": "CONFIG",
+ "TMAKE_DESTDIR": "DESTDIR",
+ "TMAKE_SOURCES": "SOURCES",
+ "TMAKE_DEPENDPATH": "DEPENDPATH",
+ "TMAKE_INCLUDEPATH": "INCLUDEPATH",
+ "TMAKE_TARGET": "TARGET",
+ "TMAKE_LIBS": "LIBS",
+ }
+ s = data.getVar('S', d, 1) or ""
+ os.chdir(s)
+ profiles = (data.getVar('TMAKE_PROFILES', d, 1) or "").split()
+ if not profiles:
+ profiles = ["*.pro"]
+ for pro in profiles:
+ ppro = glob.glob(pro)
+ if ppro:
+ if ppro != [pro]:
+ del profiles[profiles.index(pro)]
+ profiles += ppro
+ continue
+ if ppro[0].find('*'):
+ del profiles[profiles.index(pro)]
+ continue
+ else:
+ del profiles[profiles.index(pro)]
+ if len(profiles) != 0:
+ return
+
+ # output .pro using this metadata store
+ try:
+ from __builtin__ import file
+ profile = file(data.expand('${PN}.pro', d), 'w')
+ except OSError:
+ raise FuncFailed("unable to open pro file for writing.")
+
+# fd = sys.__stdout__
+ fd = profile
+ for var in out_vartranslate.keys():
+ val = data.getVar(var, d, 1)
+ if val:
+ fd.write("%s\t: %s\n" % (out_vartranslate[var], val))
+
+# if fd is not sys.__stdout__:
+ fd.close()
+}
+
+tmake_do_configure() {
+    # Pick the first existing TMAKEPATH matching the target platform,
+    # then let tmake generate a Makefile from the profiles.
+    paths="${STAGING_DATADIR}/tmake/qws/${TARGET_OS}-${TARGET_ARCH}-g++ $STAGING_DIR/share/tmake/$OS-g++"
+    if (echo "${TARGET_ARCH}"|grep -q 'i.86'); then
+        paths="${STAGING_DATADIR}/tmake/qws/${TARGET_OS}-x86-g++ $paths"
+    fi
+    for i in $paths; do
+        if test -e $i; then
+            export TMAKEPATH=$i
+            break
+        fi
+    done
+
+    if [ -z "${TMAKE_PROFILES}" ]; then
+        TMAKE_PROFILES="`ls *.pro`"
+    fi
+    tmake -o Makefile $TMAKE_PROFILES || die "Error calling tmake on ${TMAKE_PROFILES}"
+}
+
+EXPORT_FUNCTIONS do_configure do_createpro
+
+addtask configure after do_unpack do_patch before do_compile
+addtask createpro before do_configure after do_unpack do_patch
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
new file mode 100644
index 0000000000..6b2b547d5f
--- /dev/null
+++ b/meta/classes/update-alternatives.bbclass
@@ -0,0 +1,33 @@
+# defaults
+ALTERNATIVE_PRIORITY = "10"
+ALTERNATIVE_LINK = "${bindir}/${ALTERNATIVE_NAME}"
+
+update_alternatives_postinst() {
+# Register this package's alternative on install.
+update-alternatives --install ${ALTERNATIVE_LINK} ${ALTERNATIVE_NAME} ${ALTERNATIVE_PATH} ${ALTERNATIVE_PRIORITY}
+}
+
+update_alternatives_postrm() {
+# Drop the alternative again on removal.
+update-alternatives --remove ${ALTERNATIVE_NAME} ${ALTERNATIVE_PATH}
+}
+
+python __anonymous() {
+    # Parse-time validation: fail early when a recipe inherits this class
+    # without the mandatory ALTERNATIVE_* settings.
+    if bb.data.getVar('ALTERNATIVE_NAME', d) == None:
+        raise bb.build.FuncFailed, "%s inherits update-alternatives but doesn't set ALTERNATIVE_NAME" % bb.data.getVar('FILE', d)
+    if bb.data.getVar('ALTERNATIVE_PATH', d) == None:
+        raise bb.build.FuncFailed, "%s inherits update-alternatives but doesn't set ALTERNATIVE_PATH" % bb.data.getVar('FILE', d)
+}
+
+python populate_packages_prepend () {
+    # Append the update-alternatives calls to ${PN}'s postinst/postrm,
+    # creating minimal scripts when the recipe defines none.
+    pkg = bb.data.getVar('PN', d, 1)
+    bb.note('adding update-alternatives calls to postinst/postrm for %s' % pkg)
+    postinst = bb.data.getVar('pkg_postinst_%s' % pkg, d, 1) or bb.data.getVar('pkg_postinst', d, 1)
+    if not postinst:
+        postinst = '#!/bin/sh\n'
+    postinst += bb.data.getVar('update_alternatives_postinst', d, 1)
+    bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
+    postrm = bb.data.getVar('pkg_postrm_%s' % pkg, d, 1) or bb.data.getVar('pkg_postrm', d, 1)
+    if not postrm:
+        postrm = '#!/bin/sh\n'
+    postrm += bb.data.getVar('update_alternatives_postrm', d, 1)
+    bb.data.setVar('pkg_postrm_%s' % pkg, postrm, d)
+}
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
new file mode 100644
index 0000000000..0bfba467c1
--- /dev/null
+++ b/meta/classes/update-rc.d.bbclass
@@ -0,0 +1,69 @@
+DEPENDS_append = " update-rc.d"
+RDEPENDS_append = " update-rc.d"
+
+INITSCRIPT_PARAMS ?= "defaults"
+
+INIT_D_DIR = "${sysconfdir}/init.d"
+
+updatercd_postinst() {
+# When installing into a rootfs ($D set) pass -r so update-rc.d operates
+# offline; on-target use -s to also start the service now.
+if test "x$D" != "x"; then
+    D="-r $D"
+else
+    D="-s"
+fi
+update-rc.d $D ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
+}
+
+updatercd_prerm() {
+# Stop the running service, but only when executing on the target itself.
+if test "x$D" != "x"; then
+    D="-r $D"
+else
+    ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
+fi
+}
+
+updatercd_postrm() {
+update-rc.d $D ${INITSCRIPT_NAME} remove
+}
+
+python __anonymous() {
+    # Parse-time validation: INITSCRIPT_NAME/PARAMS are only mandatory when
+    # the recipe doesn't manage scripts per-package via INITSCRIPT_PACKAGES.
+    if bb.data.getVar('INITSCRIPT_PACKAGES', d) == None:
+        if bb.data.getVar('INITSCRIPT_NAME', d) == None:
+            raise bb.build.FuncFailed, "%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % bb.data.getVar('FILE', d)
+        if bb.data.getVar('INITSCRIPT_PARAMS', d) == None:
+            raise bb.build.FuncFailed, "%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % bb.data.getVar('FILE', d)
+}
+
+python populate_packages_prepend () {
+    # Attach the updatercd_* scriptlets to each package listed in
+    # INITSCRIPT_PACKAGES (default: PN, or the first PACKAGES entry).
+    def update_rcd_package(pkg):
+        bb.debug(1, 'adding update-rc.d calls to postinst/postrm for %s' % pkg)
+        # Evaluate the scriptlets with the package name in OVERRIDES so
+        # per-package INITSCRIPT_* values expand correctly.
+        localdata = bb.data.createCopy(d)
+        overrides = bb.data.getVar("OVERRIDES", localdata, 1)
+        bb.data.setVar("OVERRIDES", "%s:%s" % (pkg, overrides), localdata)
+        bb.data.update_data(localdata)
+
+        postinst = bb.data.getVar('pkg_postinst', localdata, 1)
+        if not postinst:
+            postinst = '#!/bin/sh\n'
+        postinst += bb.data.getVar('updatercd_postinst', localdata, 1)
+        bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
+        prerm = bb.data.getVar('pkg_prerm', localdata, 1)
+        if not prerm:
+            prerm = '#!/bin/sh\n'
+        prerm += bb.data.getVar('updatercd_prerm', localdata, 1)
+        bb.data.setVar('pkg_prerm_%s' % pkg, prerm, d)
+        postrm = bb.data.getVar('pkg_postrm', localdata, 1)
+        if not postrm:
+            postrm = '#!/bin/sh\n'
+        postrm += bb.data.getVar('updatercd_postrm', localdata, 1)
+        bb.data.setVar('pkg_postrm_%s' % pkg, postrm, d)
+
+    pkgs = bb.data.getVar('INITSCRIPT_PACKAGES', d, 1)
+    if pkgs == None:
+        pkgs = bb.data.getVar('PN', d, 1)
+        packages = (bb.data.getVar('PACKAGES', d, 1) or "").split()
+        if not pkgs in packages and packages != []:
+            pkgs = packages[0]
+    for pkg in pkgs.split():
+        update_rcd_package(pkg)
+}
diff --git a/meta/classes/wrt-image.bbclass b/meta/classes/wrt-image.bbclass
new file mode 100644
index 0000000000..ba1163a719
--- /dev/null
+++ b/meta/classes/wrt-image.bbclass
@@ -0,0 +1,33 @@
+# We don't need the kernel inside the rootfs image (the kernel image is
+# consumed separately by wrt_create_images below), so strip any copy that
+# packages installed into /boot.
+ROOTFS_POSTPROCESS_COMMAND += "rm -f ${IMAGE_ROOTFS}/boot/zImage*"
+
+# Return the kernel ABI version string for the image being built, or a
+# placeholder when this is not an image_ipk build.
+# NOTE(review): kernel-abiversion is presumably written into
+# STAGING_KERNEL_DIR during kernel staging — confirm against kernel.bbclass.
+def wrt_get_kernel_version(d):
+	import bb
+	if bb.data.inherits_class('image_ipk', d):
+		skd = bb.data.getVar('STAGING_KERNEL_DIR', d, 1)
+		return base_read_file(skd+'/kernel-abiversion')
+	# Fallback message; original text was garbled ("for available").
+	return "-no kernel version available-"
+
+# Build WRT-flashable firmware images for every rootfs type in
+# IMAGE_FSTYPES: pack loader + LZMA kernel + rootfs into a generic TRX
+# container, then derive the per-board variants from it.
+wrt_create_images() {
+	I=${DEPLOY_DIR}/images
+	KERNEL_VERSION="${@wrt_get_kernel_version(d)}"
+
+	for type in ${IMAGE_FSTYPES}; do
+		# generic TRX: loader, kernel (placed at offset 0x10000), rootfs
+		trx -o ${I}/wrt-generic-${type}.trx ${I}/loader.gz \
+		${I}/wrt-kernel-${KERNEL_VERSION}.lzma -a 0x10000 ${I}/${IMAGE_NAME}.rootfs.${type}
+
+		# WRT54GS: prepend the vendor header pattern to the generic TRX
+		addpattern -2 -i ${I}/wrt-generic-${type}.trx -o ${I}/wrt54gs-${type}.trx -g
+
+		# WRT54G: same image with the board magic patched (W54S -> W54G)
+		sed "1s,^W54S,W54G," ${I}/wrt54gs-${type}.trx > ${I}/wrt54g-${type}.trx
+
+		# motorola: wrap the generic TRX with motorola-bin
+		motorola-bin ${I}/wrt-generic-${type}.trx ${I}/motorola-${type}.bin
+	done;
+}
+
+# Hook TRX image generation into the normal image post-processing step.
+IMAGE_POSTPROCESS_COMMAND += "wrt_create_images;"
+
+DEPENDS_prepend = "${@["wrt-imagetools-native ", ""][(bb.data.getVar('PACKAGES', d, 1) == '')]}" \ No newline at end of file
diff --git a/meta/classes/xfce.bbclass b/meta/classes/xfce.bbclass
new file mode 100644
index 0000000000..793348597f
--- /dev/null
+++ b/meta/classes/xfce.bbclass
@@ -0,0 +1,19 @@
+# xfce.bbclass (originally xfce.oeclass)
+# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
+# Released under the MIT license (see packages/COPYING)
+
+# Global class to make it easier to maintain XFCE packages
+
+HOMEPAGE = "http://www.xfce.org"
+LICENSE = "LGPL-2"
+
+# Common download location shared by the XFCE 4.x source tarballs.
+SRC_URI = "http://www.us.xfce.org/archive/xfce-${PV}/src/${PN}-${PV}.tar.gz"
+
+inherit autotools
+
+# Panel plugins are installed into a shared plugin directory.
+EXTRA_OECONF += "--with-pluginsdir=${libdir}/xfce4/panel-plugins/"
+
+# FIXME: Put icons in their own package too?
+
+# Ship icons, .desktop entries and panel modules in the main package;
+# xfce4 documentation goes into the -doc package.
+FILES_${PN} += "${datadir}/icons/* ${datadir}/applications/* ${libdir}/xfce4/modules/*.so*"
+FILES_${PN}-doc += "${datadir}/xfce4/doc"
diff --git a/meta/classes/xlibs.bbclass b/meta/classes/xlibs.bbclass
new file mode 100644
index 0000000000..e797748770
--- /dev/null
+++ b/meta/classes/xlibs.bbclass
@@ -0,0 +1,15 @@
+# Common settings for freedesktop.org xlibs recipes.
+# Fixed: missing space in "LICENSE=" for consistency with the other
+# assignments in these classes.
+LICENSE = "BSD-X"
+SECTION = "x11/libs"
+
+# Base CVS location for the xlibs modules.
+XLIBS_CVS = "${FREEDESKTOP_CVS}/xlibs"
+
+inherit autotools pkgconfig
+
+# Install headers, libraries and data into the staging area so other
+# recipes can build against this library before it is packaged.
+do_stage() {
+	oe_runmake install prefix=${STAGING_DIR} \
+		bindir=${STAGING_BINDIR} \
+		includedir=${STAGING_INCDIR} \
+		libdir=${STAGING_LIBDIR} \
+		datadir=${STAGING_DATADIR} \
+		mandir=${STAGING_DATADIR}/man
+}