Diffstat (limited to 'meta/lib')
-rw-r--r--  meta/lib/bblayers/create.py | 77
-rw-r--r--  meta/lib/bblayers/templates/README | 41
-rw-r--r--  meta/lib/bblayers/templates/example.bb | 11
-rw-r--r--  meta/lib/bblayers/templates/layer.conf | 13
-rw-r--r--  meta/lib/buildstats.py | 161
-rw-r--r--  meta/lib/oe/__init__.py | 4
-rw-r--r--  meta/lib/oe/buildhistory_analysis.py | 312
-rw-r--r--  meta/lib/oe/cachedpath.py | 2
-rw-r--r--  meta/lib/oe/classextend.py | 20
-rw-r--r--  meta/lib/oe/classutils.py | 5
-rw-r--r--  meta/lib/oe/copy_buildsystem.py | 83
-rw-r--r--  meta/lib/oe/data.py | 38
-rw-r--r--  meta/lib/oe/distro_check.py | 313
-rw-r--r--  meta/lib/oe/elf.py | 133
-rw-r--r--  meta/lib/oe/gpg_sign.py | 89
-rw-r--r--  meta/lib/oe/license.py | 30
-rw-r--r--  meta/lib/oe/lsb.py | 91
-rw-r--r--  meta/lib/oe/maketype.py | 12
-rw-r--r--  meta/lib/oe/manifest.py | 36
-rw-r--r--  meta/lib/oe/package.py | 230
-rw-r--r--  meta/lib/oe/package_manager.py | 2077
-rw-r--r--  meta/lib/oe/packagedata.py | 11
-rw-r--r--  meta/lib/oe/packagegroup.py | 14
-rw-r--r--  meta/lib/oe/patch.py | 163
-rw-r--r--  meta/lib/oe/path.py | 120
-rw-r--r--  meta/lib/oe/prservice.py | 29
-rw-r--r--  meta/lib/oe/qa.py | 111
-rw-r--r--  meta/lib/oe/recipeutils.py | 453
-rw-r--r--  meta/lib/oe/rootfs.py | 330
-rw-r--r--  meta/lib/oe/sdk.py | 193
-rw-r--r--  meta/lib/oe/sstatesig.py | 373
-rw-r--r--  meta/lib/oe/terminal.py | 104
-rw-r--r--  meta/lib/oe/tests/test_utils.py | 51
-rw-r--r--  meta/lib/oe/types.py | 37
-rw-r--r--  meta/lib/oe/useradd.py | 71
-rw-r--r--  meta/lib/oe/utils.py | 315
-rw-r--r--  meta/lib/oeqa/buildperf/__init__.py | 8
-rw-r--r--  meta/lib/oeqa/buildperf/base.py | 422
-rw-r--r--  meta/lib/oeqa/buildperf/test_basic.py | 63
-rw-r--r--  meta/lib/oeqa/controllers/__init__.py | 3
-rw-r--r--  meta/lib/oeqa/controllers/masterimage.py | 64
-rw-r--r--  meta/lib/oeqa/controllers/testtargetloader.py | 6
-rw-r--r--  meta/lib/oeqa/core/README | 76
-rw-r--r--  meta/lib/oeqa/core/__init__.py (renamed from meta/lib/oe/tests/__init__.py) | 0
-rw-r--r--  meta/lib/oeqa/core/case.py | 100
-rw-r--r--  meta/lib/oeqa/core/cases/__init__.py | 0
-rw-r--r--  meta/lib/oeqa/core/cases/example/data.json | 1
-rw-r--r--  meta/lib/oeqa/core/cases/example/test_basic.py | 22
-rw-r--r--  meta/lib/oeqa/core/context.py | 212
-rw-r--r--  meta/lib/oeqa/core/decorator/__init__.py | 79
-rw-r--r--  meta/lib/oeqa/core/decorator/data.py | 222
-rw-r--r--  meta/lib/oeqa/core/decorator/depends.py | 98
-rw-r--r--  meta/lib/oeqa/core/decorator/oetimeout.py | 28
-rw-r--r--  meta/lib/oeqa/core/exception.py | 26
-rw-r--r--  meta/lib/oeqa/core/loader.py | 342
-rw-r--r--  meta/lib/oeqa/core/runner.py | 328
-rw-r--r--  meta/lib/oeqa/core/target/__init__.py | 36
-rw-r--r--  meta/lib/oeqa/core/target/qemu.py | 64
-rw-r--r--  meta/lib/oeqa/core/target/ssh.py | 271
-rw-r--r--  meta/lib/oeqa/core/tests/__init__.py | 0
-rw-r--r--  meta/lib/oeqa/core/tests/cases/data.py | 23
-rw-r--r--  meta/lib/oeqa/core/tests/cases/depends.py | 41
-rw-r--r--  meta/lib/oeqa/core/tests/cases/loader/valid/another.py | 12
-rw-r--r--  meta/lib/oeqa/core/tests/cases/oetag.py | 38
-rw-r--r--  meta/lib/oeqa/core/tests/cases/timeout.py | 21
-rw-r--r--  meta/lib/oeqa/core/tests/common.py | 38
-rwxr-xr-x  meta/lib/oeqa/core/tests/test_data.py | 55
-rwxr-xr-x  meta/lib/oeqa/core/tests/test_decorators.py | 137
-rwxr-xr-x  meta/lib/oeqa/core/tests/test_loader.py | 63
-rwxr-xr-x  meta/lib/oeqa/core/tests/test_runner.py | 40
-rw-r--r--  meta/lib/oeqa/core/utils/__init__.py | 0
-rw-r--r--  meta/lib/oeqa/core/utils/concurrencytest.py | 354
-rw-r--r--  meta/lib/oeqa/core/utils/misc.py | 47
-rw-r--r--  meta/lib/oeqa/core/utils/path.py | 22
-rw-r--r--  meta/lib/oeqa/core/utils/test.py | 89
-rw-r--r--  meta/lib/oeqa/files/test.c (renamed from meta/lib/oeqa/runtime/files/test.c) | 0
-rw-r--r--  meta/lib/oeqa/files/test.cpp (renamed from meta/lib/oeqa/runtime/files/test.cpp) | 0
-rw-r--r--  meta/lib/oeqa/files/testresults/testresults.json | 40
-rw-r--r--  meta/lib/oeqa/manual/abat.patch | 64
-rw-r--r--  meta/lib/oeqa/manual/bsp-hw.json | 1000
-rw-r--r--  meta/lib/oeqa/manual/build-appliance.json | 96
-rw-r--r--  meta/lib/oeqa/manual/crops.json | 294
-rw-r--r--  meta/lib/oeqa/manual/eclipse-plugin.json | 322
-rw-r--r--  meta/lib/oeqa/manual/kernel-dev.json | 200
-rw-r--r--  meta/lib/oeqa/manual/oe-core.json | 158
-rw-r--r--  meta/lib/oeqa/manual/sdk.json | 32
-rw-r--r--  meta/lib/oeqa/manual/toaster-managed-mode.json | 2572
-rw-r--r--  meta/lib/oeqa/manual/toaster-unmanaged-mode.json | 1170
-rw-r--r--  meta/lib/oeqa/oetest.py | 170
-rwxr-xr-x  meta/lib/oeqa/runexported.py | 14
-rw-r--r--  meta/lib/oeqa/runtime/_ptest.py | 125
-rw-r--r--  meta/lib/oeqa/runtime/_qemutiny.py | 9
-rw-r--r--  meta/lib/oeqa/runtime/buildcvs.py | 31
-rw-r--r--  meta/lib/oeqa/runtime/buildgalculator.py | 23
-rw-r--r--  meta/lib/oeqa/runtime/buildiptables.py | 31
-rw-r--r--  meta/lib/oeqa/runtime/case.py | 20
-rw-r--r--  meta/lib/oeqa/runtime/cases/_qemutiny.py | 12
-rw-r--r--  meta/lib/oeqa/runtime/cases/apt.py | 53
-rw-r--r--  meta/lib/oeqa/runtime/cases/boot.py | 33
-rw-r--r--  meta/lib/oeqa/runtime/cases/buildcpio.py | 32
-rw-r--r--  meta/lib/oeqa/runtime/cases/buildgalculator.py | 32
-rw-r--r--  meta/lib/oeqa/runtime/cases/buildlzip.py | 34
-rw-r--r--  meta/lib/oeqa/runtime/cases/connman.py | 31
-rw-r--r--  meta/lib/oeqa/runtime/cases/date.py | 42
-rw-r--r--  meta/lib/oeqa/runtime/cases/df.py | 17
-rw-r--r--  meta/lib/oeqa/runtime/cases/dnf.py | 191
-rw-r--r--  meta/lib/oeqa/runtime/cases/gcc.py | 69
-rw-r--r--  meta/lib/oeqa/runtime/cases/gi.py | 19
-rw-r--r--  meta/lib/oeqa/runtime/cases/gstreamer.py | 18
-rw-r--r--  meta/lib/oeqa/runtime/cases/kernelmodule.py | 46
-rw-r--r--  meta/lib/oeqa/runtime/cases/ksample.py | 231
-rw-r--r--  meta/lib/oeqa/runtime/cases/ldd.py | 26
-rw-r--r--  meta/lib/oeqa/runtime/cases/logrotate.py | 49
-rw-r--r--  meta/lib/oeqa/runtime/cases/ltp.py | 118
-rw-r--r--  meta/lib/oeqa/runtime/cases/ltp_compliance.py | 97
-rw-r--r--  meta/lib/oeqa/runtime/cases/ltp_stress.py | 98
-rw-r--r--  meta/lib/oeqa/runtime/cases/multilib.py | 44
-rw-r--r--  meta/lib/oeqa/runtime/cases/oe_syslog.py | 131
-rw-r--r--  meta/lib/oeqa/runtime/cases/opkg.py | 58
-rw-r--r--  meta/lib/oeqa/runtime/cases/pam.py | 35
-rw-r--r--  meta/lib/oeqa/runtime/cases/parselogs.py (renamed from meta/lib/oeqa/runtime/parselogs.py) | 264
-rw-r--r--  meta/lib/oeqa/runtime/cases/perl.py | 17
-rw-r--r--  meta/lib/oeqa/runtime/cases/ping.py | 26
-rw-r--r--  meta/lib/oeqa/runtime/cases/ptest.py | 84
-rw-r--r--  meta/lib/oeqa/runtime/cases/python.py | 19
-rw-r--r--  meta/lib/oeqa/runtime/cases/rpm.py | 153
-rw-r--r--  meta/lib/oeqa/runtime/cases/scons.py | 37
-rw-r--r--  meta/lib/oeqa/runtime/cases/scp.py | 37
-rw-r--r--  meta/lib/oeqa/runtime/cases/skeletoninit.py | 35
-rw-r--r--  meta/lib/oeqa/runtime/cases/ssh.py | 19
-rw-r--r--  meta/lib/oeqa/runtime/cases/stap.py | 37
-rw-r--r--  meta/lib/oeqa/runtime/cases/storage.py | 149
-rw-r--r--  meta/lib/oeqa/runtime/cases/systemd.py | 194
-rw-r--r--  meta/lib/oeqa/runtime/cases/x32lib.py | 21
-rw-r--r--  meta/lib/oeqa/runtime/cases/xorg.py | 21
-rw-r--r--  meta/lib/oeqa/runtime/connman.py | 31
-rw-r--r--  meta/lib/oeqa/runtime/context.py | 226
-rw-r--r--  meta/lib/oeqa/runtime/date.py | 31
-rw-r--r--  meta/lib/oeqa/runtime/decorator/package.py | 56
-rw-r--r--  meta/lib/oeqa/runtime/df.py | 12
-rw-r--r--  meta/lib/oeqa/runtime/files/SConstruct | 1
-rw-r--r--  meta/lib/oeqa/runtime/files/hello.c | 5
-rw-r--r--  meta/lib/oeqa/runtime/files/hello.stp | 1
-rw-r--r--  meta/lib/oeqa/runtime/files/test.pl | 2
-rw-r--r--  meta/lib/oeqa/runtime/files/test.py | 6
-rw-r--r--  meta/lib/oeqa/runtime/gcc.py | 47
-rw-r--r--  meta/lib/oeqa/runtime/kernelmodule.py | 34
-rw-r--r--  meta/lib/oeqa/runtime/ldd.py | 21
-rw-r--r--  meta/lib/oeqa/runtime/loader.py | 19
-rw-r--r--  meta/lib/oeqa/runtime/logrotate.py | 28
-rw-r--r--  meta/lib/oeqa/runtime/multilib.py | 42
-rw-r--r--  meta/lib/oeqa/runtime/pam.py | 25
-rw-r--r--  meta/lib/oeqa/runtime/perl.py | 30
-rw-r--r--  meta/lib/oeqa/runtime/ping.py | 22
-rw-r--r--  meta/lib/oeqa/runtime/python.py | 35
-rw-r--r--  meta/lib/oeqa/runtime/rpm.py | 120
-rw-r--r--  meta/lib/oeqa/runtime/scanelf.py | 28
-rw-r--r--  meta/lib/oeqa/runtime/scp.py | 22
-rw-r--r--  meta/lib/oeqa/runtime/skeletoninit.py | 29
-rw-r--r--  meta/lib/oeqa/runtime/smart.py | 218
-rw-r--r--  meta/lib/oeqa/runtime/ssh.py | 19
-rw-r--r--  meta/lib/oeqa/runtime/syslog.py | 52
-rw-r--r--  meta/lib/oeqa/runtime/systemd.py | 178
-rw-r--r--  meta/lib/oeqa/runtime/utils/__init__.py | 0
-rw-r--r--  meta/lib/oeqa/runtime/utils/targetbuildproject.py | 44
-rw-r--r--  meta/lib/oeqa/runtime/x32lib.py | 18
-rw-r--r--  meta/lib/oeqa/runtime/xorg.py | 16
-rw-r--r--  meta/lib/oeqa/sdk/__init__.py | 3
-rw-r--r--  meta/lib/oeqa/sdk/buildcvs.py | 25
-rw-r--r--  meta/lib/oeqa/sdk/buildgalculator.py | 27
-rw-r--r--  meta/lib/oeqa/sdk/buildiptables.py | 26
-rw-r--r--  meta/lib/oeqa/sdk/case.py | 54
-rw-r--r--  meta/lib/oeqa/sdk/cases/assimp.py | 40
-rw-r--r--  meta/lib/oeqa/sdk/cases/buildcpio.py | 35
-rw-r--r--  meta/lib/oeqa/sdk/cases/buildepoxy.py | 41
-rw-r--r--  meta/lib/oeqa/sdk/cases/buildgalculator.py | 43
-rw-r--r--  meta/lib/oeqa/sdk/cases/buildlzip.py | 37
-rw-r--r--  meta/lib/oeqa/sdk/cases/gcc.py | 50
-rw-r--r--  meta/lib/oeqa/sdk/cases/perl.py | 20
-rw-r--r--  meta/lib/oeqa/sdk/cases/python.py | 31
-rw-r--r--  meta/lib/oeqa/sdk/context.py | 153
-rw-r--r--  meta/lib/oeqa/sdk/files/testsdkmakefile (renamed from meta/lib/oeqa/runtime/files/testsdkmakefile) | 0
-rw-r--r--  meta/lib/oeqa/sdk/gcc.py | 36
-rw-r--r--  meta/lib/oeqa/sdk/perl.py | 28
-rw-r--r--  meta/lib/oeqa/sdk/python.py | 32
-rw-r--r--  meta/lib/oeqa/sdk/testsdk.py | 145
-rw-r--r--  meta/lib/oeqa/sdk/utils/__init__.py | 0
-rw-r--r--  meta/lib/oeqa/sdk/utils/sdkbuildproject.py | 53
-rw-r--r--  meta/lib/oeqa/sdkext/__init__.py | 3
-rw-r--r--  meta/lib/oeqa/sdkext/case.py | 24
-rw-r--r--  meta/lib/oeqa/sdkext/cases/devtool.py | 120
-rw-r--r--  meta/lib/oeqa/sdkext/context.py | 32
-rw-r--r--  meta/lib/oeqa/sdkext/devtool.py | 108
-rw-r--r--  meta/lib/oeqa/sdkext/sdk_update.py | 36
-rw-r--r--  meta/lib/oeqa/sdkext/testsdk.py | 107
-rw-r--r--  meta/lib/oeqa/selftest/__init__.py | 2
-rw-r--r--  meta/lib/oeqa/selftest/_toaster.py | 320
-rw-r--r--  meta/lib/oeqa/selftest/archiver.py | 50
-rw-r--r--  meta/lib/oeqa/selftest/base.py | 235
-rw-r--r--  meta/lib/oeqa/selftest/case.py | 290
-rw-r--r--  meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py (renamed from meta/lib/oeqa/selftest/_sstatetests_noauto.py) | 11
-rw-r--r--  meta/lib/oeqa/selftest/cases/archiver.py | 197
-rw-r--r--  meta/lib/oeqa/selftest/cases/bblayers.py (renamed from meta/lib/oeqa/selftest/bblayers.py) | 60
-rw-r--r--  meta/lib/oeqa/selftest/cases/bbtests.py (renamed from meta/lib/oeqa/selftest/bbtests.py) | 192
-rw-r--r--  meta/lib/oeqa/selftest/cases/binutils.py | 50
-rw-r--r--  meta/lib/oeqa/selftest/cases/buildhistory.py (renamed from meta/lib/oeqa/selftest/buildhistory.py) | 14
-rw-r--r--  meta/lib/oeqa/selftest/cases/buildoptions.py (renamed from meta/lib/oeqa/selftest/buildoptions.py) | 164
-rw-r--r--  meta/lib/oeqa/selftest/cases/containerimage.py | 88
-rw-r--r--  meta/lib/oeqa/selftest/cases/devtool.py (renamed from meta/lib/oeqa/selftest/devtool.py) | 829
-rw-r--r--  meta/lib/oeqa/selftest/cases/distrodata.py | 89
-rw-r--r--  meta/lib/oeqa/selftest/cases/eSDK.py (renamed from meta/lib/oeqa/selftest/eSDK.py) | 91
-rw-r--r--  meta/lib/oeqa/selftest/cases/efibootpartition.py | 46
-rw-r--r--  meta/lib/oeqa/selftest/cases/fetch.py | 51
-rw-r--r--  meta/lib/oeqa/selftest/cases/gcc.py | 152
-rw-r--r--  meta/lib/oeqa/selftest/cases/glibc.py | 89
-rw-r--r--  meta/lib/oeqa/selftest/cases/gotoolchain.py | 71
-rw-r--r--  meta/lib/oeqa/selftest/cases/image_typedep.py | 58
-rw-r--r--  meta/lib/oeqa/selftest/cases/imagefeatures.py | 264
-rw-r--r--  meta/lib/oeqa/selftest/cases/incompatible_lic.py | 135
-rw-r--r--  meta/lib/oeqa/selftest/cases/kerneldevelopment.py | 67
-rw-r--r--  meta/lib/oeqa/selftest/cases/layerappend.py (renamed from meta/lib/oeqa/selftest/layerappend.py) | 22
-rw-r--r--  meta/lib/oeqa/selftest/cases/liboe.py (renamed from meta/lib/oeqa/selftest/liboe.py) | 33
-rw-r--r--  meta/lib/oeqa/selftest/cases/lic_checksum.py (renamed from meta/lib/oeqa/selftest/lic-checksum.py) | 13
-rw-r--r--  meta/lib/oeqa/selftest/cases/manifest.py (renamed from meta/lib/oeqa/selftest/manifest.py) | 61
-rw-r--r--  meta/lib/oeqa/selftest/cases/meta_ide.py | 51
-rw-r--r--  meta/lib/oeqa/selftest/cases/multiconfig.py | 72
-rw-r--r--  meta/lib/oeqa/selftest/cases/oelib/__init__.py | 0
-rw-r--r--  meta/lib/oeqa/selftest/cases/oelib/buildhistory.py | 99
-rw-r--r--  meta/lib/oeqa/selftest/cases/oelib/elf.py | 26
-rw-r--r--  meta/lib/oeqa/selftest/cases/oelib/license.py (renamed from meta/lib/oe/tests/test_license.py) | 41
-rw-r--r--  meta/lib/oeqa/selftest/cases/oelib/path.py (renamed from meta/lib/oe/tests/test_path.py) | 18
-rw-r--r--  meta/lib/oeqa/selftest/cases/oelib/types.py (renamed from meta/lib/oe/tests/test_types.py) | 22
-rw-r--r--  meta/lib/oeqa/selftest/cases/oelib/utils.py | 103
-rw-r--r--  meta/lib/oeqa/selftest/cases/oescripts.py | 188
-rw-r--r--  meta/lib/oeqa/selftest/cases/package.py | 150
-rw-r--r--  meta/lib/oeqa/selftest/cases/pkgdata.py (renamed from meta/lib/oeqa/selftest/pkgdata.py) | 76
-rw-r--r--  meta/lib/oeqa/selftest/cases/prservice.py (renamed from meta/lib/oeqa/selftest/prservice.py) | 39
-rw-r--r--  meta/lib/oeqa/selftest/cases/recipetool.py (renamed from meta/lib/oeqa/selftest/recipetool.py) | 210
-rw-r--r--  meta/lib/oeqa/selftest/cases/recipeutils.py | 140
-rw-r--r--  meta/lib/oeqa/selftest/cases/reproducible.py | 202
-rw-r--r--  meta/lib/oeqa/selftest/cases/resulttooltests.py | 98
-rw-r--r--  meta/lib/oeqa/selftest/cases/runcmd.py | 121
-rw-r--r--  meta/lib/oeqa/selftest/cases/runqemu.py | 211
-rw-r--r--  meta/lib/oeqa/selftest/cases/runtime_test.py | 439
-rw-r--r--  meta/lib/oeqa/selftest/cases/selftest.py | 53
-rw-r--r--  meta/lib/oeqa/selftest/cases/signing.py | 224
-rw-r--r--  meta/lib/oeqa/selftest/cases/sstate.py (renamed from meta/lib/oeqa/selftest/sstate.py) | 34
-rw-r--r--  meta/lib/oeqa/selftest/cases/sstatetests.py (renamed from meta/lib/oeqa/selftest/sstatetests.py) | 297
-rw-r--r--  meta/lib/oeqa/selftest/cases/tinfoil.py | 224
-rw-r--r--  meta/lib/oeqa/selftest/cases/wic.py | 1051
-rw-r--r--  meta/lib/oeqa/selftest/context.py | 341
-rw-r--r--  meta/lib/oeqa/selftest/imagefeatures.py | 127
-rw-r--r--  meta/lib/oeqa/selftest/oescripts.py | 54
-rw-r--r--  meta/lib/oeqa/selftest/runtime-test.py | 105
-rw-r--r--  meta/lib/oeqa/selftest/signing.py | 178
-rw-r--r--  meta/lib/oeqa/selftest/wic.py | 286
-rw-r--r--  meta/lib/oeqa/targetcontrol.py | 133
-rw-r--r--  meta/lib/oeqa/utils/__init__.py | 70
-rw-r--r--  meta/lib/oeqa/utils/buildproject.py | 63
-rw-r--r--  meta/lib/oeqa/utils/commands.py | 201
-rw-r--r--  meta/lib/oeqa/utils/decorators.py | 11
-rw-r--r--  meta/lib/oeqa/utils/dump.py | 22
-rw-r--r--  meta/lib/oeqa/utils/ftools.py | 4
-rw-r--r--  meta/lib/oeqa/utils/git.py | 24
-rw-r--r--  meta/lib/oeqa/utils/gitarchive.py | 237
-rw-r--r--  meta/lib/oeqa/utils/httpserver.py | 43
-rw-r--r--  meta/lib/oeqa/utils/logparser.py | 249
-rw-r--r--  meta/lib/oeqa/utils/metadata.py | 124
-rw-r--r--  meta/lib/oeqa/utils/network.py | 8
-rw-r--r--  meta/lib/oeqa/utils/nfs.py | 39
-rw-r--r--  meta/lib/oeqa/utils/package_manager.py | 200
-rw-r--r--  meta/lib/oeqa/utils/qemurunner.py | 461
-rw-r--r--  meta/lib/oeqa/utils/qemutinyrunner.py | 20
-rw-r--r--  meta/lib/oeqa/utils/sshcontrol.py | 38
-rw-r--r--  meta/lib/oeqa/utils/subprocesstweak.py | 22
-rw-r--r--  meta/lib/oeqa/utils/targetbuild.py | 33
-rw-r--r--  meta/lib/oeqa/utils/testexport.py | 18
-rw-r--r--  meta/lib/rootfspostcommands.py | 60
278 files changed, 25207 insertions, 7228 deletions
diff --git a/meta/lib/bblayers/create.py b/meta/lib/bblayers/create.py
new file mode 100644
index 0000000000..542f31fc81
--- /dev/null
+++ b/meta/lib/bblayers/create.py
@@ -0,0 +1,77 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import logging
+import os
+import sys
+import shutil
+
+import bb.utils
+
+from bblayers.common import LayerPlugin
+
+logger = logging.getLogger('bitbake-layers')
+
+def plugin_init(plugins):
+ return CreatePlugin()
+
+def read_template(template, template_dir='templates'):
+ lines = str()
+ with open(os.path.join(os.path.dirname(__file__), template_dir, template)) as fd:
+ lines = ''.join(fd.readlines())
+ return lines
+
+class CreatePlugin(LayerPlugin):
+ def do_create_layer(self, args):
+ """Create a basic layer"""
+ layerdir = os.path.abspath(args.layerdir)
+ if os.path.exists(layerdir):
+ sys.stderr.write("Specified layer directory already exists\n")
+ return 1
+
+ # create dirs
+ conf = os.path.join(layerdir, 'conf')
+ bb.utils.mkdirhier(conf)
+
+ layername = os.path.basename(os.path.normpath(args.layerdir))
+
+ # Create the README from templates/README
+ readme_template = read_template('README').format(layername=layername)
+ readme = os.path.join(layerdir, 'README')
+ with open(readme, 'w') as fd:
+ fd.write(readme_template)
+
+ # Copy the MIT license from meta
+ copying = 'COPYING.MIT'
+ dn = os.path.dirname
+ license_src = os.path.join(dn(dn(dn(__file__))), copying)
+ license_dst = os.path.join(layerdir, copying)
+ shutil.copy(license_src, license_dst)
+
+ # Get the compat value for core layer.
+ compat = self.tinfoil.config_data.getVar('LAYERSERIES_COMPAT_core') or ""
+
+ # Create the layer.conf from templates/layer.conf
+ layerconf_template = read_template('layer.conf').format(
+ layername=layername, priority=args.priority, compat=compat)
+ layerconf = os.path.join(conf, 'layer.conf')
+ with open(layerconf, 'w') as fd:
+ fd.write(layerconf_template)
+
+ # Create the example from templates/example.bb
+ example_template = read_template('example.bb')
+ example = os.path.join(layerdir, 'recipes-' + args.examplerecipe, args.examplerecipe)
+ bb.utils.mkdirhier(example)
+ with open(os.path.join(example, args.examplerecipe + '_%s.bb') % args.version, 'w') as fd:
+ fd.write(example_template)
+
+ logger.plain('Add your new layer with \'bitbake-layers add-layer %s\'' % args.layerdir)
+
+ def register_commands(self, sp):
+ parser_create_layer = self.add_command(sp, 'create-layer', self.do_create_layer, parserecipes=False)
+ parser_create_layer.add_argument('layerdir', help='Layer directory to create')
+ parser_create_layer.add_argument('--priority', '-p', default=6, help='Priority of recipes in the layer (default: 6)')
+ parser_create_layer.add_argument('--example-recipe-name', '-e', dest='examplerecipe', default='example', help='Filename of the example recipe')
+ parser_create_layer.add_argument('--example-recipe-version', '-v', dest='version', default='0.1', help='Version number for the example recipe')
+
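
For orientation, the create-layer command added above produces the following layout with its defaults (priority 6, example recipe 'example', version 0.1); the layer name meta-example here is just an illustration:

    $ bitbake-layers create-layer meta-example
    meta-example/
    ├── COPYING.MIT
    ├── README
    ├── conf/
    │   └── layer.conf
    └── recipes-example/
        └── example/
            └── example_0.1.bb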
diff --git a/meta/lib/bblayers/templates/README b/meta/lib/bblayers/templates/README
new file mode 100644
index 0000000000..fb2d28e177
--- /dev/null
+++ b/meta/lib/bblayers/templates/README
@@ -0,0 +1,41 @@
+This README file contains information on the contents of the {layername} layer.
+
+Please see the corresponding sections below for details.
+
+Dependencies
+============
+
+ URI: <first dependency>
+ branch: <branch name>
+
+ URI: <second dependency>
+ branch: <branch name>
+
+ .
+ .
+ .
+
+Patches
+=======
+
+Please submit any patches against the {layername} layer to the xxxx mailing list (xxxx@zzzz.org)
+and cc: the maintainer:
+
+Maintainer: XXX YYYYYY <xxx.yyyyyy@zzzzz.com>
+
+Table of Contents
+=================
+
+ I. Adding the {layername} layer to your build
+ II. Misc
+
+
+I. Adding the {layername} layer to your build
+=================================================
+
+Run 'bitbake-layers add-layer {layername}'
+
+II. Misc
+========
+
+--- replace with specific information about the {layername} layer ---
diff --git a/meta/lib/bblayers/templates/example.bb b/meta/lib/bblayers/templates/example.bb
new file mode 100644
index 0000000000..c4b873d593
--- /dev/null
+++ b/meta/lib/bblayers/templates/example.bb
@@ -0,0 +1,11 @@
+SUMMARY = "bitbake-layers recipe"
+DESCRIPTION = "Recipe created by bitbake-layers"
+LICENSE = "MIT"
+
+python do_build() {
+ bb.plain("***********************************************");
+ bb.plain("* *");
+ bb.plain("* Example recipe created by bitbake-layers *");
+ bb.plain("* *");
+ bb.plain("***********************************************");
+}
diff --git a/meta/lib/bblayers/templates/layer.conf b/meta/lib/bblayers/templates/layer.conf
new file mode 100644
index 0000000000..e2eaff4346
--- /dev/null
+++ b/meta/lib/bblayers/templates/layer.conf
@@ -0,0 +1,13 @@
+# We have a conf and classes directory, add to BBPATH
+BBPATH .= ":${{LAYERDIR}}"
+
+# We have recipes-* directories, add to BBFILES
+BBFILES += "${{LAYERDIR}}/recipes-*/*/*.bb \
+ ${{LAYERDIR}}/recipes-*/*/*.bbappend"
+
+BBFILE_COLLECTIONS += "{layername}"
+BBFILE_PATTERN_{layername} = "^${{LAYERDIR}}/"
+BBFILE_PRIORITY_{layername} = "{priority}"
+
+LAYERDEPENDS_{layername} = "core"
+LAYERSERIES_COMPAT_{layername} = "{compat}"
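
Note the doubled braces in this template: because create.py runs it through Python's str.format(), literal BitBake references such as ${LAYERDIR} must be written ${{LAYERDIR}}, while {layername}, {priority} and {compat} are substitution fields. A quick illustration of the escaping, with hypothetical values:

    >>> t = 'BBPATH .= ":${{LAYERDIR}}"\nBBFILE_PRIORITY_{layername} = "{priority}"'
    >>> print(t.format(layername='meta-example', priority=6))
    BBPATH .= ":${LAYERDIR}"
    BBFILE_PRIORITY_meta-example = "6"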
diff --git a/meta/lib/buildstats.py b/meta/lib/buildstats.py
new file mode 100644
index 0000000000..8627ed3c31
--- /dev/null
+++ b/meta/lib/buildstats.py
@@ -0,0 +1,161 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Implements system state sampling. Called by buildstats.bbclass.
+# Because it is a real Python module, it can hold persistent state,
+# like open log files and the time of the last sampling.
+
+import os
+import time
+import re
+import bb.event
+import bb.utils
+
+class SystemStats:
+ def __init__(self, d):
+ bn = d.getVar('BUILDNAME')
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
+ bb.utils.mkdirhier(bsdir)
+
+ self.proc_files = []
+ for filename, handler in (
+ ('diskstats', self._reduce_diskstats),
+ ('meminfo', self._reduce_meminfo),
+ ('stat', self._reduce_stat),
+ ):
+ # The corresponding /proc files might not exist on the host.
+ # For example, /proc/diskstats is not available in virtualized
+ # environments like Linux-VServer. Silently skip collecting
+ # the data.
+ if os.path.exists(os.path.join('/proc', filename)):
+ # In practice, this class gets instantiated only once in
+ # the bitbake cooker process. Therefore 'append' mode is
+ # not strictly necessary, but using it makes the class
+ # more robust should two processes ever write
+ # concurrently.
+ destfile = os.path.join(bsdir, '%sproc_%s.log' % ('reduced_' if handler else '', filename))
+ self.proc_files.append((filename, open(destfile, 'ab'), handler))
+ self.monitor_disk = open(os.path.join(bsdir, 'monitor_disk.log'), 'ab')
+ # Last time we sampled /proc data and recorded disk monitoring data, respectively.
+ self.last_proc = 0
+ self.last_disk_monitor = 0
+ # Minimum number of seconds between recording a sample. This
+ # becomes relevant when we get called very often while many
+ # short tasks get started. Sampling during quiet periods
+ # depends on the heartbeat event, which fires less often.
+ self.min_seconds = 1
+
+ self.meminfo_regex = re.compile(rb'^(MemTotal|MemFree|Buffers|Cached|SwapTotal|SwapFree):\s*(\d+)')
+ self.diskstats_regex = re.compile(rb'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
+ self.diskstats_ltime = None
+ self.diskstats_data = None
+ self.stat_ltimes = None
+
+ def close(self):
+ self.monitor_disk.close()
+ for _, output, _ in self.proc_files:
+ output.close()
+
+ def _reduce_meminfo(self, time, data):
+ """
+ Extracts 'MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree'
+ and writes their values into a single line, in that order.
+ """
+ values = {}
+ for line in data.split(b'\n'):
+ m = self.meminfo_regex.match(line)
+ if m:
+ values[m.group(1)] = m.group(2)
+ if len(values) == 6:
+ return (time,
+ b' '.join([values[x] for x in
+ (b'MemTotal', b'MemFree', b'Buffers', b'Cached', b'SwapTotal', b'SwapFree')]) + b'\n')
+
+ def _diskstats_is_relevant_line(self, linetokens):
+ if len(linetokens) != 14:
+ return False
+ disk = linetokens[2]
+ return self.diskstats_regex.match(disk)
+
+ def _reduce_diskstats(self, time, data):
+ relevant_tokens = filter(self._diskstats_is_relevant_line, map(lambda x: x.split(), data.split(b'\n')))
+ diskdata = [0] * 3
+ reduced = None
+ for tokens in relevant_tokens:
+ # rsect
+ diskdata[0] += int(tokens[5])
+ # wsect
+ diskdata[1] += int(tokens[9])
+ # use
+ diskdata[2] += int(tokens[12])
+ if self.diskstats_ltime:
+ # We need to compute information about the time interval
+ # since the last sampling and record the result as a sample
+ # for that point in the past.
+ interval = time - self.diskstats_ltime
+ if interval > 0:
+ sums = [ a - b for a, b in zip(diskdata, self.diskstats_data) ]
+ readTput = sums[0] / 2.0 * 100.0 / interval
+ writeTput = sums[1] / 2.0 * 100.0 / interval
+ util = float( sums[2] ) / 10 / interval
+ util = max(0.0, min(1.0, util))
+ reduced = (self.diskstats_ltime, (readTput, writeTput, util))
+
+ self.diskstats_ltime = time
+ self.diskstats_data = diskdata
+ return reduced
+
+
+ def _reduce_nop(self, time, data):
+ return (time, data)
+
+ def _reduce_stat(self, time, data):
+ if not data:
+ return None
+ # CPU times {user, nice, system, idle, io_wait, irq, softirq} from first line
+ tokens = data.split(b'\n', 1)[0].split()
+ times = [ int(token) for token in tokens[1:] ]
+ reduced = None
+ if self.stat_ltimes:
+ user = float((times[0] + times[1]) - (self.stat_ltimes[0] + self.stat_ltimes[1]))
+ system = float((times[2] + times[5] + times[6]) - (self.stat_ltimes[2] + self.stat_ltimes[5] + self.stat_ltimes[6]))
+ idle = float(times[3] - self.stat_ltimes[3])
+ iowait = float(times[4] - self.stat_ltimes[4])
+
+ aSum = max(user + system + idle + iowait, 1)
+ reduced = (time, (user/aSum, system/aSum, iowait/aSum))
+
+ self.stat_ltimes = times
+ return reduced
+
+ def sample(self, event, force):
+ now = time.time()
+ if (now - self.last_proc > self.min_seconds) or force:
+ for filename, output, handler in self.proc_files:
+ with open(os.path.join('/proc', filename), 'rb') as input:
+ data = input.read()
+ if handler:
+ reduced = handler(now, data)
+ else:
+ reduced = (now, data)
+ if reduced:
+ if isinstance(reduced[1], bytes):
+ # Use as-is.
+ data = reduced[1]
+ else:
+ # Convert to a single line.
+ data = (' '.join([str(x) for x in reduced[1]]) + '\n').encode('ascii')
+ # Unbuffered raw write: less overhead, and useful
+ # in case we end up with concurrent writes.
+ os.write(output.fileno(),
+ ('%.0f\n' % reduced[0]).encode('ascii') +
+ data +
+ b'\n')
+ self.last_proc = now
+
+ if isinstance(event, bb.event.MonitorDiskEvent) and \
+ ((now - self.last_disk_monitor > self.min_seconds) or force):
+ os.write(self.monitor_disk.fileno(),
+ ('%.0f\n' % now).encode('ascii') +
+ ''.join(['%s: %d\n' % (dev, sample.total_bytes - sample.free_bytes)
+ for dev, sample in event.disk_usage.items()]).encode('ascii') +
+ b'\n')
+ self.last_disk_monitor = now
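
As a worked example of the arithmetic in _reduce_stat() above, consider two consecutive samples of the first /proc/stat line (the jiffy counts are made up):

    # cpu: user nice system idle iowait irq softirq
    prev = [100, 0, 50, 800, 50, 0, 0]
    cur = [160, 0, 70, 940, 80, 10, 0]
    user = float((cur[0] + cur[1]) - (prev[0] + prev[1]))                      # 60.0
    system = float((cur[2] + cur[5] + cur[6]) - (prev[2] + prev[5] + prev[6])) # 30.0
    idle = float(cur[3] - prev[3])                                             # 140.0
    iowait = float(cur[4] - prev[4])                                           # 30.0
    total = max(user + system + idle + iowait, 1)                              # 260.0
    # Recorded sample: (user/total, system/total, iowait/total)
    # ≈ (0.23, 0.12, 0.12), i.e. the fraction of time spent in each state.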
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py
index 3ad9513f40..4e7c09da04 100644
--- a/meta/lib/oe/__init__.py
+++ b/meta/lib/oe/__init__.py
@@ -1,2 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
index b6c0265c15..5b28774c98 100644
--- a/meta/lib/oe/buildhistory_analysis.py
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -1,8 +1,10 @@
# Report significant differences in the buildhistory repository since a specific revision
#
-# Copyright (C) 2012 Intel Corporation
+# Copyright (C) 2012-2013, 2016-2017 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Note: requires GitPython 0.3.1+
#
# You can use this from the command line by running scripts/buildhistory-diff
@@ -13,7 +15,11 @@ import os.path
import difflib
import git
import re
+import shlex
+import hashlib
+import collections
import bb.utils
+import bb.tinfoil
# How to display fields
@@ -28,15 +34,27 @@ ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
-# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
-related_fields = {}
-related_fields['RDEPENDS'] = ['DEPENDS']
-related_fields['RRECOMMENDS'] = ['DEPENDS']
-related_fields['FILELIST'] = ['FILES']
-related_fields['PKGSIZE'] = ['FILELIST']
-related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
-related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+}
+
+def init_colours(use_colours):
+ global colours
+ if use_colours:
+ colours = {
+ 'colour_default': '\033[0m',
+ 'colour_add': '\033[1;32m',
+ 'colour_remove': '\033[1;31m',
+ }
+ else:
+ colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+ }
class ChangeRecord:
def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
@@ -45,7 +63,6 @@ class ChangeRecord:
self.oldvalue = oldvalue
self.newvalue = newvalue
self.monitored = monitored
- self.related = []
self.filechanges = None
def __str__(self):
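
The colour values installed by init_colours() above are standard ANSI SGR escape sequences ('\033[1;32m' bold green, '\033[1;31m' bold red, '\033[0m' reset), and the reporting code splices them into messages with str.format(**colours); for example (the package name is illustrative):

    print('removed "{colour_remove}{}{colour_default}"'.format('libfoo', **colours))
    # With colours enabled this prints: removed "libfoo", with libfoo in bold red.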
@@ -69,24 +86,77 @@ class ChangeRecord:
pkglist.append(k)
return pkglist
+ def detect_renamed_dirs(aitems, bitems):
+ adirs = set(map(os.path.dirname, aitems))
+ bdirs = set(map(os.path.dirname, bitems))
+ files_ab = [(name, sorted(os.path.basename(item) for item in aitems if os.path.dirname(item) == name)) \
+ for name in adirs - bdirs]
+ files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
+ for name in bdirs - adirs]
+ renamed_dirs = []
+ for dir1, files1 in files_ab:
+ rename = False
+ for dir2, files2 in files_ba:
+ if files1 == files2 and not rename:
+ renamed_dirs.append((dir1,dir2))
+ # Make sure that we don't use this (dir, files) pair again.
+ files_ba.remove((dir2,files2))
+ # If a dir has already been found to have a rename, stop and go no further.
+ rename = True
+
+ # remove files that belong to renamed dirs from aitems and bitems
+ for dir1, dir2 in renamed_dirs:
+ aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
+ bitems = [item for item in bitems if os.path.dirname(item) not in (dir1, dir2)]
+ return renamed_dirs, aitems, bitems
+
if self.fieldname in list_fields or self.fieldname in list_order_fields:
+ renamed_dirs = []
+ changed_order = False
if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
aitems = pkglist_combine(depvera)
bitems = pkglist_combine(depverb)
else:
- aitems = self.oldvalue.split()
- bitems = self.newvalue.split()
+ if self.fieldname == 'FILELIST':
+ aitems = shlex.split(self.oldvalue)
+ bitems = shlex.split(self.newvalue)
+ renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
+ else:
+ aitems = self.oldvalue.split()
+ bitems = self.newvalue.split()
+
removed = list(set(aitems) - set(bitems))
added = list(set(bitems) - set(aitems))
+ if not removed and not added and self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+ depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
+ depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
+ for i, j in zip(depvera.items(), depverb.items()):
+ if i[0] != j[0]:
+ changed_order = True
+ break
+
+ lines = []
+ if renamed_dirs:
+ for dfrom, dto in renamed_dirs:
+ lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
if removed or added:
if removed and not bitems:
- out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
+ lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
else:
- out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
+ if removed:
+ lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
+ if added:
+ lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
+ else:
+ lines.append('changed order')
+
+ if not (removed or added or changed_order):
+ out = ''
else:
- out = '%s changed order' % self.fieldname
+ out = '%s: %s' % (self.fieldname, ', '.join(lines))
+
elif self.fieldname in numeric_fields:
aval = int(self.oldvalue or 0)
bval = int(self.newvalue or 0)
@@ -94,9 +164,9 @@ class ChangeRecord:
percentchg = ((bval - aval) / float(aval)) * 100
else:
percentchg = 100
- out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
elif self.fieldname in defaultval_map:
- out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
if self.fieldname == 'PKG' and '[default]' in self.newvalue:
out += ' - may indicate debian renaming failure'
elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
@@ -111,34 +181,30 @@ class ChangeRecord:
diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
out += '\n '.join(list(diff)[2:])
out += '\n --'
- elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
- fieldname = self.fieldname
- if '/image-files/' in self.path:
- fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
- out = 'Changes to %s:\n ' % fieldname
- else:
- if outer:
- prefix = 'Changes to %s ' % self.path
- out = '(%s):\n ' % self.fieldname
- if self.filechanges:
- out += '\n '.join(['%s' % i for i in self.filechanges])
+ elif self.fieldname in img_monitor_files or '/image-files/' in self.path or self.fieldname == "sysroot":
+ if self.filechanges or (self.oldvalue and self.newvalue):
+ fieldname = self.fieldname
+ if '/image-files/' in self.path:
+ fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
+ out = 'Changes to %s:\n ' % fieldname
+ else:
+ if outer:
+ prefix = 'Changes to %s ' % self.path
+ out = '(%s):\n ' % self.fieldname
+ if self.filechanges:
+ out += '\n '.join(['%s' % i for i in self.filechanges])
+ else:
+ alines = self.oldvalue.splitlines()
+ blines = self.newvalue.splitlines()
+ diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
+ out += '\n '.join(list(diff))
+ out += '\n --'
else:
- alines = self.oldvalue.splitlines()
- blines = self.newvalue.splitlines()
- diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
- out += '\n '.join(list(diff))
- out += '\n --'
+ out = ''
else:
- out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)
-
- if self.related:
- for chg in self.related:
- if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
- continue
- for line in chg._str_internal(False).splitlines():
- out += '\n * %s' % line
+ out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
- return '%s%s' % (prefix, out)
+ return '%s%s' % (prefix, out) if out else ''
class FileChange:
changetype_add = 'A'
@@ -216,7 +282,7 @@ def file_list_to_dict(lines):
return adict
-def compare_file_lists(alines, blines):
+def compare_file_lists(alines, blines, compare_ownership=True):
adict = file_list_to_dict(alines)
bdict = file_list_to_dict(blines)
filechanges = []
@@ -228,16 +294,20 @@ def compare_file_lists(alines, blines):
newvalue = newsplitv[0][0]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
+
# Check permissions
oldvalue = splitv[0][1:]
newvalue = newsplitv[0][1:]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
- # Check owner/group
- oldvalue = '%s/%s' % (splitv[1], splitv[2])
- newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
- if oldvalue != newvalue:
- filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+
+ if compare_ownership:
+ # Check owner/group
+ oldvalue = '%s/%s' % (splitv[1], splitv[2])
+ newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+
# Check symlink target
if newsplitv[0][0] == 'l':
if len(splitv) > 3:
@@ -343,15 +413,19 @@ def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
if abs(percentchg) < monitor_numeric_threshold:
continue
elif (not report_all) and key in list_fields:
- if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
+ if key == "FILELIST" and (path.endswith("-dbg") or path.endswith("-src")) and bstr.strip() != '':
continue
if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(astr, bstr)
if depvera == depverb:
continue
- alist = astr.split()
+ if key == 'FILELIST':
+ alist = shlex.split(astr)
+ blist = shlex.split(bstr)
+ else:
+ alist = astr.split()
+ blist = bstr.split()
alist.sort()
- blist = bstr.split()
blist.sort()
# We don't care about the removal of self-dependencies
if pkgname in alist and not pkgname in blist:
@@ -382,13 +456,116 @@ def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
return changes
-def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
+def compare_siglists(a_blob, b_blob, taskdiff=False):
+ # FIXME collapse down a recipe's tasks?
+ alines = a_blob.data_stream.read().decode('utf-8').splitlines()
+ blines = b_blob.data_stream.read().decode('utf-8').splitlines()
+ keys = []
+ pnmap = {}
+ def readsigs(lines):
+ sigs = {}
+ for line in lines:
+ linesplit = line.split()
+ if len(linesplit) > 2:
+ sigs[linesplit[0]] = linesplit[2]
+ if not linesplit[0] in keys:
+ keys.append(linesplit[0])
+ pnmap[linesplit[1]] = linesplit[0].rsplit('.', 1)[0]
+ return sigs
+ adict = readsigs(alines)
+ bdict = readsigs(blines)
+ out = []
+
+ changecount = 0
+ addcount = 0
+ removecount = 0
+ if taskdiff:
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+
+ changes = collections.OrderedDict()
+
+ def compare_hashfiles(pn, taskname, hash1, hash2):
+ hashes = [hash1, hash2]
+ hashfiles = bb.siggen.find_siginfo(pn, taskname, hashes, tinfoil.config_data)
+
+ if not taskname:
+ (pn, taskname) = pn.rsplit('.', 1)
+ pn = pnmap.get(pn, pn)
+ desc = '%s.%s' % (pn, taskname)
+
+ if len(hashfiles) == 0:
+ out.append("Unable to find matching sigdata for %s with hashes %s or %s" % (desc, hash1, hash2))
+ elif not hash1 in hashfiles:
+ out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash1))
+ elif not hash2 in hashfiles:
+ out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
+ else:
+ out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, collapsed=True)
+ for line in out2:
+ m = hashlib.sha256()
+ m.update(line.encode('utf-8'))
+ entry = changes.get(m.hexdigest(), (line, []))
+ if desc not in entry[1]:
+ changes[m.hexdigest()] = (line, entry[1] + [desc])
+
+ # Define recursion callback
+ def recursecb(key, hash1, hash2):
+ compare_hashfiles(key, None, hash1, hash2)
+ return []
+
+ for key in keys:
+ siga = adict.get(key, None)
+ sigb = bdict.get(key, None)
+ if siga is not None and sigb is not None and siga != sigb:
+ changecount += 1
+ (pn, taskname) = key.rsplit('.', 1)
+ compare_hashfiles(pn, taskname, siga, sigb)
+ elif siga is None:
+ addcount += 1
+ elif sigb is None:
+ removecount += 1
+ for key, item in changes.items():
+ line, tasks = item
+ if len(tasks) == 1:
+ desc = tasks[0]
+ elif len(tasks) == 2:
+ desc = '%s and %s' % (tasks[0], tasks[1])
+ else:
+ desc = '%s and %d others' % (tasks[-1], len(tasks)-1)
+ out.append('%s: %s' % (desc, line))
+ else:
+ for key in keys:
+ siga = adict.get(key, None)
+ sigb = bdict.get(key, None)
+ if siga is not None and sigb is not None and siga != sigb:
+ out.append('%s changed from %s to %s' % (key, siga, sigb))
+ changecount += 1
+ elif siga is None:
+ out.append('%s was added' % key)
+ addcount += 1
+ elif sigb is None:
+ out.append('%s was removed' % key)
+ removecount += 1
+ out.append('Summary: %d tasks added, %d tasks removed, %d tasks modified (%.1f%%)' % (addcount, removecount, changecount, (changecount / float(len(bdict)) * 100)))
+ return '\n'.join(out)
+
+
+def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False,
+ sigs=False, sigsdiff=False, exclude_path=None):
repo = git.Repo(repopath)
assert repo.bare == False
commit = repo.commit(revision1)
diff = commit.diff(revision2)
changes = []
+
+ if sigs or sigsdiff:
+ for d in diff.iter_change_type('M'):
+ if d.a_blob.path == 'siglist.txt':
+ changes.append(compare_siglists(d.a_blob, d.b_blob, taskdiff=sigsdiff))
+ return changes
+
for d in diff.iter_change_type('M'):
path = os.path.dirname(d.a_blob.path)
if path.startswith('packages/'):
@@ -398,6 +575,15 @@ def process_changes(repopath, revision1, revision2='HEAD', report_all=False, rep
elif filename.startswith('latest.'):
chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
changes.append(chg)
+ elif filename == 'sysroot':
+ alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
+ blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
+ filechanges = compare_file_lists(alines,blines, compare_ownership=False)
+ if filechanges:
+ chg = ChangeRecord(path, filename, None, None, True)
+ chg.filechanges = filechanges
+ changes.append(chg)
+
elif path.startswith('images/'):
filename = os.path.basename(d.a_blob.path)
if filename in img_monitor_files:
@@ -457,16 +643,18 @@ def process_changes(repopath, revision1, revision2='HEAD', report_all=False, rep
chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
changes.append(chg)
- # Link related changes
- for chg in changes:
- if chg.monitored:
- for chg2 in changes:
- # (Check dirname in the case of fields from recipe info files)
- if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
- if chg2.fieldname in related_fields.get(chg.fieldname, []):
- chg.related.append(chg2)
- elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
- chg.related.append(chg2)
+ # filter out unwanted paths
+ if exclude_path:
+ for chg in changes:
+ if chg.filechanges:
+ fchgs = []
+ for fchg in chg.filechanges:
+ for epath in exclude_path:
+ if fchg.path.startswith(epath):
+ break
+ else:
+ fchgs.append(fchg)
+ chg.filechanges = fchgs
if report_all:
return changes
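
One subtlety in the exclude_path filter above is the for/else: the else branch of the inner loop runs only when no break occurred, i.e. when a file change matched none of the excluded path prefixes and should be kept. A minimal standalone sketch of the same pattern (paths are hypothetical):

    exclude_path = ['/etc', '/var']
    kept = []
    for path in ['/etc/fstab', '/usr/bin/foo']:
        for epath in exclude_path:
            if path.startswith(epath):
                break          # excluded; skips the else clause
        else:
            kept.append(path)  # no prefix matched
    # kept == ['/usr/bin/foo']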
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py
index 0840cc4c3f..254257a83f 100644
--- a/meta/lib/oe/cachedpath.py
+++ b/meta/lib/oe/cachedpath.py
@@ -1,4 +1,6 @@
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Based on standard python library functions but avoid
# repeated stat calls. Its assumed the files will not change from under us
# so we can cache stat calls.
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
index 4c8a00070c..f02fbe9fba 100644
--- a/meta/lib/oe/classextend.py
+++ b/meta/lib/oe/classextend.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import collections
class ClassExtender(object):
@@ -20,12 +24,14 @@ class ClassExtender(object):
if not subs.startswith(self.extname):
return "virtual/" + self.extname + "-" + subs
return name
+ if name.startswith("/"):
+ return name
if not name.startswith(self.extname):
return self.extname + "-" + name
return name
def map_variable(self, varname, setvar = True):
- var = self.d.getVar(varname, True)
+ var = self.d.getVar(varname)
if not var:
return ""
var = var.split()
@@ -38,7 +44,7 @@ class ClassExtender(object):
return newdata
def map_regexp_variable(self, varname, setvar = True):
- var = self.d.getVar(varname, True)
+ var = self.d.getVar(varname)
if not var:
return ""
var = var.split()
@@ -60,7 +66,7 @@ class ClassExtender(object):
return dep
else:
# Do not extend those that already have a multilib prefix
- var = self.d.getVar("MULTILIB_VARIANTS", True)
+ var = self.d.getVar("MULTILIB_VARIANTS")
if var:
var = var.split()
for v in var:
@@ -74,7 +80,7 @@ class ClassExtender(object):
varname = varname + "_" + suffix
orig = self.d.getVar("EXTENDPKGV", False)
self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
- deps = self.d.getVar(varname, True)
+ deps = self.d.getVar(varname)
if not deps:
self.d.setVar("EXTENDPKGV", orig)
return
@@ -87,7 +93,7 @@ class ClassExtender(object):
self.d.setVar("EXTENDPKGV", orig)
def map_packagevars(self):
- for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
+ for pkg in (self.d.getVar("PACKAGES").split() + [""]):
self.map_depends_variable("RDEPENDS", pkg)
self.map_depends_variable("RRECOMMENDS", pkg)
self.map_depends_variable("RSUGGESTS", pkg)
@@ -97,7 +103,7 @@ class ClassExtender(object):
self.map_depends_variable("PKG", pkg)
def rename_packages(self):
- for pkg in (self.d.getVar("PACKAGES", True) or "").split():
+ for pkg in (self.d.getVar("PACKAGES") or "").split():
if pkg.startswith(self.extname):
self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
continue
@@ -114,7 +120,7 @@ class NativesdkClassExtender(ClassExtender):
def map_depends(self, dep):
if dep.startswith(self.extname):
return dep
- if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
+ if dep.endswith(("-gcc", "-g++")):
return dep + "-crosssdk"
elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
return dep
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py
index e7856c86f2..08bb66b365 100644
--- a/meta/lib/oe/classutils.py
+++ b/meta/lib/oe/classutils.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
class ClassRegistryMeta(type):
"""Give each ClassRegistry their own registry"""
@@ -36,7 +39,7 @@ abstract base classes out of the registry)."""
@classmethod
def prioritized(tcls):
return sorted(list(tcls.registry.values()),
- key=lambda v: v.priority, reverse=True)
+ key=lambda v: (v.priority, v.name), reverse=True)
def unregister(cls):
for key in cls.registry.keys():
diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py
index afaff68598..31a84f5b06 100644
--- a/meta/lib/oe/copy_buildsystem.py
+++ b/meta/lib/oe/copy_buildsystem.py
@@ -1,14 +1,28 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This class should provide easy access to the different aspects of the
# buildsystem such as layers, bitbake location, etc.
+#
+# SDK_LAYERS_EXCLUDE: Layers which will be excluded from the SDK layers.
+# SDK_LAYERS_EXCLUDE_PATTERN: Similar to SDK_LAYERS_EXCLUDE, but takes Python
+# regular expressions, separated by spaces,
+# e.g.: ".*-downloads closed-.*"
+#
+
import stat
import shutil
def _smart_copy(src, dest):
+ import subprocess
# smart_copy will choose the correct function depending on whether the
# source is a file or a directory.
mode = os.stat(src).st_mode
if stat.S_ISDIR(mode):
- shutil.copytree(src, dest, symlinks=True, ignore=shutil.ignore_patterns('.git'))
+ bb.utils.mkdirhier(dest)
+ cmd = "tar --exclude='.git' --xattrs --xattrs-include='*' -chf - -C %s -p . \
+ | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
else:
shutil.copyfile(src, dest)
shutil.copymode(src, dest)
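
The tar pipeline above replaces the former shutil.copytree() call; a flag-by-flag reading of what it preserves (copytree would drop the extended attributes):

    # tar --exclude='.git'              skip version-control metadata
    #     --xattrs --xattrs-include='*' carry extended attributes across
    #     -c -h -f -                    create to stdout, dereferencing symlinks (-h)
    #     -C src -p .                   archive the source tree, keeping permissions
    # | tar ... -x -f - -C dest         unpack the stream into the destination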
@@ -17,23 +31,42 @@ class BuildSystem(object):
def __init__(self, context, d):
self.d = d
self.context = context
- self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS', True).split()]
- self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE', True) or "").split()
+ self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
+ self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
+ self.layers_exclude_pattern = d.getVar('SDK_LAYERS_EXCLUDE_PATTERN')
def copy_bitbake_and_layers(self, destdir, workspace_name=None):
+ import re
# Copy in all metadata layers + bitbake (as repositories)
+ copied_corebase = None
layers_copied = []
bb.utils.mkdirhier(destdir)
layers = list(self.layerdirs)
- corebase = os.path.abspath(self.d.getVar('COREBASE', True))
+ corebase = os.path.abspath(self.d.getVar('COREBASE'))
layers.append(corebase)
+ # Get relationship between TOPDIR and COREBASE
+ # Layers should respect it
+ corebase_relative = os.path.dirname(os.path.relpath(os.path.abspath(self.d.getVar('TOPDIR')), corebase))
+ # The bitbake build system uses the meta-skeleton layer as a layout
+ # for common recipes, e.g. the recipetool script to create kernel recipes.
+ # Add the meta-skeleton layer to be included as part of the eSDK installation.
+ layers.append(os.path.join(corebase, 'meta-skeleton'))
# Exclude layers
for layer_exclude in self.layers_exclude:
if layer_exclude in layers:
+ bb.note('Excluded %s from sdk layers since it is in SDK_LAYERS_EXCLUDE' % layer_exclude)
layers.remove(layer_exclude)
+ if self.layers_exclude_pattern:
+ layers_cp = layers[:]
+ for pattern in self.layers_exclude_pattern.split():
+ for layer in layers_cp:
+ if re.match(pattern, layer):
+ bb.note('Excluded %s from sdk layers since it matched SDK_LAYERS_EXCLUDE_PATTERN' % layer)
+ layers.remove(layer)
+
workspace_newname = workspace_name
if workspace_newname:
layernames = [os.path.basename(layer) for layer in layers]
@@ -42,7 +75,7 @@ class BuildSystem(object):
extranum += 1
workspace_newname = '%s-%d' % (workspace_name, extranum)
- corebase_files = self.d.getVar('COREBASE_FILES', True).split()
+ corebase_files = self.d.getVar('COREBASE_FILES').split()
corebase_files = [corebase + '/' +x for x in corebase_files]
# Make sure bitbake goes in
bitbake_dir = bb.__file__.rsplit('/', 3)[0]
@@ -67,22 +100,31 @@ class BuildSystem(object):
layerdestpath = destdir
if corebase == os.path.dirname(layer):
layerdestpath += '/' + os.path.basename(corebase)
+ else:
+ layer_relative = os.path.relpath(layer, corebase)
+ if os.path.dirname(layer_relative) == corebase_relative:
+ layer_relative = os.path.dirname(corebase_relative) + '/' + layernewname
+ layer_relative = os.path.basename(corebase) + '/' + layer_relative
+ if os.path.dirname(layer_relative) != layernewname:
+ layerdestpath += '/' + os.path.dirname(layer_relative)
+
layerdestpath += '/' + layernewname
layer_relative = os.path.relpath(layerdestpath,
destdir)
- layers_copied.append(layer_relative)
-
# Treat corebase as special since it typically will contain
# build directories or other custom items.
if corebase == layer:
+ copied_corebase = layer_relative
bb.utils.mkdirhier(layerdestpath)
for f in corebase_files:
f_basename = os.path.basename(f)
destname = os.path.join(layerdestpath, f_basename)
_smart_copy(f, destname)
else:
- if os.path.exists(layerdestpath):
+ layers_copied.append(layer_relative)
+
+ if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
bb.note("Skipping layer %s, already handled" % layer)
else:
_smart_copy(layer, layerdestpath)
@@ -96,7 +138,7 @@ class BuildSystem(object):
# Drop all bbappends except the one for the image the SDK is being built for
# (because of externalsrc, the workspace bbappends will interfere with the
# locked signatures if present, and we don't need them anyway)
- image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE', True)))[0] + '.bbappend'
+ image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE')))[0] + '.bbappend'
appenddir = os.path.join(layerdestpath, 'appends')
if os.path.isdir(appenddir):
for fn in os.listdir(appenddir):
@@ -119,15 +161,23 @@ class BuildSystem(object):
line = line.replace('workspacelayer', workspace_newname)
f.write(line)
- return layers_copied
+ # meta-skeleton layer is added as part of the build system
+ # but not as a layer included in the build, therefore it is
+ # not reported to the function caller.
+ for layer in layers_copied:
+ if layer.endswith('/meta-skeleton'):
+ layers_copied.remove(layer)
+ break
+
+ return copied_corebase, layers_copied
def generate_locked_sigs(sigfile, d):
bb.utils.mkdirhier(os.path.dirname(sigfile))
depd = d.getVar('BB_TASKDEPDATA', False)
- tasks = ['%s.%s' % (v[2], v[1]) for v in depd.values()]
+ tasks = ['%s:%s' % (v[2], v[1]) for v in depd.values()]
bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
-def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output):
+def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, onlynative, pruned_output):
with open(lockedsigs, 'r') as infile:
bb.utils.mkdirhier(os.path.dirname(pruned_output))
with open(pruned_output, 'w') as f:
@@ -137,7 +187,11 @@ def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output
if line.endswith('\\\n'):
splitval = line.strip().split(':')
if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets:
- f.write(line)
+ if onlynative:
+ if 'nativesdk' in splitval[0]:
+ f.write(line)
+ else:
+ f.write(line)
else:
f.write(line)
invalue = False
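
For context, the locked-sigs files that prune_lockedsigs() filters contain backslash-continued entries of the form target:task:hash inside a variable assignment, e.g. (the hash is shortened and illustrative):

    SIGGEN_LOCKEDSIGS_t-core2-64 = "\
        zlib:do_fetch:3f70c2... \
        "

so splitval[0] is the target and splitval[1] the task name, which is what the exclusion lists and the new nativesdk-only filter test against.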
@@ -204,7 +258,7 @@ def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cac
import shutil
bb.note('Generating sstate-cache...')
- nativelsbstring = d.getVar('NATIVELSBSTRING', True)
+ nativelsbstring = d.getVar('NATIVELSBSTRING')
bb.process.run("gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
if fixedlsbstring and nativelsbstring != fixedlsbstring:
nativedir = output_sstate_cache + '/' + nativelsbstring
@@ -235,6 +289,7 @@ def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, lo
cmd = "%sBB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
env = dict(d.getVar('BB_ORIGENV', False))
env.pop('BUILDDIR', '')
+ env.pop('BBPATH', '')
pathitems = env['PATH'].split(':')
env['PATH'] = ':'.join([item for item in pathitems if not item.endswith('/bitbake/bin')])
bb.process.run(cmd, stderr=subprocess.STDOUT, env=env, cwd=cwd, executable='/bin/bash')
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py
index ee48950a82..602130a904 100644
--- a/meta/lib/oe/data.py
+++ b/meta/lib/oe/data.py
@@ -1,9 +1,14 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import json
import oe.maketype
def typed_value(key, d):
"""Construct a value for the specified metadata variable, using its flags
to determine the type and parameters for construction."""
- var_type = d.getVarFlag(key, 'type', True)
+ var_type = d.getVarFlag(key, 'type')
flags = d.getVarFlags(key)
if flags is not None:
flags = dict((flag, d.expand(value))
@@ -12,6 +17,35 @@ def typed_value(key, d):
flags = {}
try:
- return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
+ return oe.maketype.create(d.getVar(key) or '', var_type, **flags)
except (TypeError, ValueError) as exc:
bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
+
+def export2json(d, json_file, expand=True, searchString="",replaceString=""):
+ data2export = {}
+ keys2export = []
+
+ for key in d.keys():
+ if key.startswith("_"):
+ continue
+ elif key.startswith("BB"):
+ continue
+ elif key.startswith("B_pn"):
+ continue
+ elif key.startswith("do_"):
+ continue
+ elif d.getVarFlag(key, "func"):
+ continue
+
+ keys2export.append(key)
+
+ for key in keys2export:
+ try:
+ data2export[key] = d.getVar(key, expand).replace(searchString,replaceString)
+ except bb.data_smart.ExpansionError:
+ data2export[key] = ''
+ except AttributeError:
+ pass
+
+ with open(json_file, "w") as f:
+ json.dump(data2export, f, skipkeys=True, indent=4, sort_keys=True)
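
A hedged usage sketch for the two helpers above, e.g. from a python task (the [type] flag and the paths are assumptions for illustration):

    # With a variable flagged as:  BB_NUMBER_THREADS[type] = "integer"
    # typed_value() constructs a real int rather than returning a string:
    nthreads = oe.data.typed_value('BB_NUMBER_THREADS', d)

    # Dump the filtered datastore to JSON, rewriting a path prefix in values:
    oe.data.export2json(d, '/tmp/env.json',
                        searchString='/home/builder', replaceString='${HOME}')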
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
index 87c52fae9c..88e46c354d 100644
--- a/meta/lib/oe/distro_check.py
+++ b/meta/lib/oe/distro_check.py
@@ -1,32 +1,21 @@
-from contextlib import contextmanager
-
-from bb.utils import export_proxies
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
def create_socket(url, d):
import urllib
+ from bb.utils import export_proxies
- socket = None
- try:
- export_proxies(d)
- socket = urllib.request.urlopen(url)
- except:
- bb.warn("distro_check: create_socket url %s can't access" % url)
-
- return socket
+ export_proxies(d)
+ return urllib.request.urlopen(url)
def get_links_from_url(url, d):
"Return all the href links found on the web location"
from bs4 import BeautifulSoup, SoupStrainer
+ soup = BeautifulSoup(create_socket(url,d), "html.parser", parse_only=SoupStrainer("a"))
hyperlinks = []
-
- webpage = ''
- sock = create_socket(url,d)
- if sock:
- webpage = sock.read()
-
- soup = BeautifulSoup(webpage, "html.parser", parse_only=SoupStrainer("a"))
for line in soup.find_all('a', href=True):
hyperlinks.append(line['href'].strip('/'))
return hyperlinks
@@ -37,6 +26,7 @@ def find_latest_numeric_release(url, d):
maxstr=""
for link in get_links_from_url(url, d):
try:
+ # TODO use LooseVersion
release = float(link)
except:
release = 0
@@ -47,144 +37,105 @@ def find_latest_numeric_release(url, d):
def is_src_rpm(name):
"Check if the link is pointing to a src.rpm file"
- if name[-8:] == ".src.rpm":
- return True
- else:
- return False
+ return name.endswith(".src.rpm")
def package_name_from_srpm(srpm):
"Strip out the package name from the src.rpm filename"
- strings = srpm.split('-')
- package_name = strings[0]
- for i in range(1, len (strings) - 1):
- str = strings[i]
- if not str[0].isdigit():
- package_name += '-' + str
- return package_name
-
-def clean_package_list(package_list):
- "Removes multiple entries of packages and sorts the list"
- set = {}
- map(set.__setitem__, package_list, [])
- return set.keys()
-
-def get_latest_released_meego_source_package_list(d):
- "Returns list of all the name os packages in the latest meego distro"
-
- package_names = []
- try:
- f = open("/tmp/Meego-1.1", "r")
- for line in f:
- package_names.append(line[:-1] + ":" + "main") # Also strip the '\n' at the end
- except IOError: pass
- package_list=clean_package_list(package_names)
- return "1.0", package_list
+ # ca-certificates-2016.2.7-1.0.fc24.src.rpm
+ # ^name ^ver ^release^removed
+ (name, version, release) = srpm.replace(".src.rpm", "").rsplit("-", 2)
+ return name
def get_source_package_list_from_url(url, section, d):
"Return a sectioned list of package names from a URL list"
bb.note("Reading %s: %s" % (url, section))
links = get_links_from_url(url, d)
- srpms = list(filter(is_src_rpm, links))
- names_list = list(map(package_name_from_srpm, srpms))
+ srpms = filter(is_src_rpm, links)
+ names_list = map(package_name_from_srpm, srpms)
- new_pkgs = []
+ new_pkgs = set()
for pkgs in names_list:
- new_pkgs.append(pkgs + ":" + section)
-
+ new_pkgs.add(pkgs + ":" + section)
return new_pkgs
+def get_source_package_list_from_url_by_letter(url, section, d):
+ import string
+ from urllib.error import HTTPError
+ packages = set()
+ for letter in (string.ascii_lowercase + string.digits):
+ # Not all subfolders may exist, so silently handle 404
+ try:
+ packages |= get_source_package_list_from_url(url + "/" + letter, section, d)
+ except HTTPError as e:
+ if e.code != 404: raise
+ return packages
+
def get_latest_released_fedora_source_package_list(d):
"Returns list of all the name os packages in the latest fedora distro"
latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/", d)
-
- package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main", d)
-
-# package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
- package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
-
- package_list=clean_package_list(package_names)
-
- return latest, package_list
+ package_names = get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Everything/source/tree/Packages/" % latest, "main", d)
+ package_names |= get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
+ return latest, package_names
def get_latest_released_opensuse_source_package_list(d):
"Returns list of all the name os packages in the latest opensuse distro"
- latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/",d)
-
- package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main", d)
- package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates", d)
-
- package_list=clean_package_list(package_names)
- return latest, package_list
+ latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/leap", d)
-def get_latest_released_mandriva_source_package_list(d):
- "Returns list of all the name os packages in the latest mandriva distro"
- latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/", d)
- package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main", d)
-# package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
- package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates", d)
+ package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/leap/%s/repo/oss/suse/src/" % latest, "main", d)
+ package_names |= get_source_package_list_from_url("http://download.opensuse.org/update/leap/%s/oss/src/" % latest, "updates", d)
+ return latest, package_names
- package_list=clean_package_list(package_names)
- return latest, package_list
+def get_latest_released_clear_source_package_list(d):
+ latest = find_latest_numeric_release("https://download.clearlinux.org/releases/", d)
+ package_names = get_source_package_list_from_url("https://download.clearlinux.org/releases/%s/clear/source/SRPMS/" % latest, "main", d)
+ return latest, package_names
def find_latest_debian_release(url, d):
"Find the latest listed debian release on the given url"
- releases = []
- for link in get_links_from_url(url, d):
- if link[:6] == "Debian":
- if ';' not in link:
- releases.append(link)
+ releases = [link.replace("Debian", "")
+ for link in get_links_from_url(url, d)
+ if link.startswith("Debian")]
releases.sort()
try:
- return releases.pop()[6:]
+ return releases[-1]
except:
return "_NotFound_"
def get_debian_style_source_package_list(url, section, d):
"Return the list of package-names stored in the debian style Sources.gz file"
- import tempfile
import gzip
- webpage = ''
- sock = create_socket(url,d)
- if sock:
- webpage = sock.read()
-
- tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
- tmpfilename=tmpfile.name
- tmpfile.write(sock.read())
- tmpfile.close()
- bb.note("Reading %s: %s" % (url, section))
-
- f = gzip.open(tmpfilename)
- package_names = []
- for line in f:
- if line[:9] == "Package: ":
- package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
- os.unlink(tmpfilename)
-
+ package_names = set()
+ for line in gzip.open(create_socket(url, d), mode="rt"):
+ if line.startswith("Package:"):
+ pkg = line.split(":", 1)[1].strip()
+ package_names.add(pkg + ":" + section)
return package_names
def get_latest_released_debian_source_package_list(d):
- "Returns list of all the name os packages in the latest debian distro"
+ "Returns list of all the name of packages in the latest debian distro"
latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/", d)
- url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
+ url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
package_names = get_debian_style_source_package_list(url, "main", d)
-# url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz"
-# package_names += get_debian_style_source_package_list(url, "contrib")
- url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
- package_names += get_debian_style_source_package_list(url, "updates", d)
- package_list=clean_package_list(package_names)
- return latest, package_list
+ url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
+ package_names |= get_debian_style_source_package_list(url, "updates", d)
+ return latest, package_names
def find_latest_ubuntu_release(url, d):
- "Find the latest listed ubuntu release on the given url"
+ """
+ Find the latest listed Ubuntu release on the given ubuntu/dists/ URL.
+
+ To avoid matching development releases look for distributions that have
+ updates, so the resulting distro could be any supported release.
+ """
url += "?C=M;O=D" # Descending Sort by Last Modified
for link in get_links_from_url(url, d):
- if link[-8:] == "-updates":
- return link[:-8]
+ if "-updates" in link:
+ distro = link.replace("-updates", "")
+ return distro
return "_NotFound_"
def get_latest_released_ubuntu_source_package_list(d):
@@ -192,52 +143,44 @@ def get_latest_released_ubuntu_source_package_list(d):
latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/", d)
url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
package_names = get_debian_style_source_package_list(url, "main", d)
-# url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
-# package_names += get_debian_style_source_package_list(url, "multiverse")
-# url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
-# package_names += get_debian_style_source_package_list(url, "universe")
url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
- package_names += get_debian_style_source_package_list(url, "updates", d)
- package_list=clean_package_list(package_names)
- return latest, package_list
+ package_names |= get_debian_style_source_package_list(url, "updates", d)
+ return latest, package_names
def create_distro_packages_list(distro_check_dir, d):
+ import shutil
+
pkglst_dir = os.path.join(distro_check_dir, "package_lists")
- if not os.path.isdir (pkglst_dir):
- os.makedirs(pkglst_dir)
- # first clear old stuff
- for file in os.listdir(pkglst_dir):
- os.unlink(os.path.join(pkglst_dir, file))
-
- per_distro_functions = [
- ["Debian", get_latest_released_debian_source_package_list],
- ["Ubuntu", get_latest_released_ubuntu_source_package_list],
- ["Fedora", get_latest_released_fedora_source_package_list],
- ["OpenSuSE", get_latest_released_opensuse_source_package_list],
- ["Mandriva", get_latest_released_mandriva_source_package_list],
- ["Meego", get_latest_released_meego_source_package_list]
- ]
-
- from datetime import datetime
- begin = datetime.now()
- for distro in per_distro_functions:
- name = distro[0]
- release, package_list = distro[1](d)
+ bb.utils.remove(pkglst_dir, True)
+ bb.utils.mkdirhier(pkglst_dir)
+
+ per_distro_functions = (
+ ("Debian", get_latest_released_debian_source_package_list),
+ ("Ubuntu", get_latest_released_ubuntu_source_package_list),
+ ("Fedora", get_latest_released_fedora_source_package_list),
+ ("openSUSE", get_latest_released_opensuse_source_package_list),
+ ("Clear", get_latest_released_clear_source_package_list),
+ )
+
+ for name, fetcher_func in per_distro_functions:
+ try:
+ release, package_list = fetcher_func(d)
+ except Exception as e:
+ bb.warn("Cannot fetch packages for %s: %s" % (name, e))
bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
+ if len(package_list) == 0:
+ bb.error("Didn't fetch any packages for %s %s" % (name, release))
+
package_list_file = os.path.join(pkglst_dir, name + "-" + release)
- f = open(package_list_file, "w+b")
- for pkg in package_list:
- f.write(pkg + "\n")
- f.close()
- end = datetime.now()
- delta = end - begin
- bb.note("package_list generatiosn took this much time: %d seconds" % delta.seconds)
+ with open(package_list_file, 'w') as f:
+ for pkg in sorted(package_list):
+ f.write(pkg + "\n")
def update_distro_data(distro_check_dir, datetime, d):
"""
- If distro packages list data is old then rebuild it.
- The operations has to be protected by a lock so that
- only one thread performes it at a time.
+ If the distro packages list data is old then rebuild it.
+ The operation has to be protected by a lock so that
+ only one thread performs it at a time.
"""
if not os.path.isdir (distro_check_dir):
try:
@@ -264,71 +207,59 @@ def update_distro_data(distro_check_dir, datetime, d):
f.seek(0)
f.write(datetime)
- except OSError:
- raise Exception('Unable to read/write this file: %s' % (datetime_file))
+ except OSError as e:
+ raise Exception('Unable to open timestamp: %s' % e)
finally:
fcntl.lockf(f, fcntl.LOCK_UN)
f.close()
-
+
def compare_in_distro_packages_list(distro_check_dir, d):
if not os.path.isdir(distro_check_dir):
raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
-
+
localdata = bb.data.createCopy(d)
pkglst_dir = os.path.join(distro_check_dir, "package_lists")
matching_distros = []
- pn = d.getVar('PN', True)
- recipe_name = d.getVar('PN', True)
+ pn = recipe_name = d.getVar('PN')
bb.note("Checking: %s" % pn)
- trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})
-
if pn.find("-native") != -1:
pnstripped = pn.split("-native")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
recipe_name = pnstripped[0]
if pn.startswith("nativesdk-"):
pnstripped = pn.split("nativesdk-")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES'))
recipe_name = pnstripped[1]
if pn.find("-cross") != -1:
pnstripped = pn.split("-cross")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
recipe_name = pnstripped[0]
if pn.find("-initial") != -1:
pnstripped = pn.split("-initial")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
recipe_name = pnstripped[0]
bb.note("Recipe: %s" % recipe_name)
- tmp = localdata.getVar('DISTRO_PN_ALIAS', True)
distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
-
- if tmp:
- list = tmp.split(' ')
- for str in list:
- if str and str.find("=") == -1 and distro_exceptions[str]:
- matching_distros.append(str)
+ tmp = localdata.getVar('DISTRO_PN_ALIAS') or ""
+ for str in tmp.split():
+ if str and str.find("=") == -1 and distro_exceptions[str]:
+ matching_distros.append(str)
distro_pn_aliases = {}
- if tmp:
- list = tmp.split(' ')
- for str in list:
- if str.find("=") != -1:
- (dist, pn_alias) = str.split('=')
- distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
-
+ for str in tmp.split():
+ if "=" in str:
+ (dist, pn_alias) = str.split('=')
+ distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
+
for file in os.listdir(pkglst_dir):
(distro, distro_release) = file.split("-")
- f = open(os.path.join(pkglst_dir, file), "rb")
+ f = open(os.path.join(pkglst_dir, file), "r")
for line in f:
(pkg, section) = line.split(":")
if distro.lower() in distro_pn_aliases:
@@ -341,38 +272,34 @@ def compare_in_distro_packages_list(distro_check_dir, d):
break
f.close()
-
- if tmp != None:
- list = tmp.split(' ')
- for item in list:
- matching_distros.append(item)
+ for item in tmp.split():
+ matching_distros.append(item)
bb.note("Matching: %s" % matching_distros)
return matching_distros
def create_log_file(d, logname):
- import subprocess
- logpath = d.getVar('LOG_DIR', True)
+ logpath = d.getVar('LOG_DIR')
bb.utils.mkdirhier(logpath)
logfn, logsuffix = os.path.splitext(logname)
- logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
+ logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME'), logsuffix))
if not os.path.exists(logfile):
slogfile = os.path.join(logpath, logname)
if os.path.exists(slogfile):
os.remove(slogfile)
- subprocess.call("touch %s" % logfile, shell=True)
+ open(logfile, 'w+').close()
os.symlink(logfile, slogfile)
d.setVar('LOG_FILE', logfile)
return logfile
def save_distro_check_result(result, datetime, result_file, d):
- pn = d.getVar('PN', True)
- logdir = d.getVar('LOG_DIR', True)
+ pn = d.getVar('PN')
+ logdir = d.getVar('LOG_DIR')
if not logdir:
bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
return
- if not os.path.isdir(logdir):
- os.makedirs(logdir)
+ bb.utils.mkdirhier(logdir)
+
line = pn
for i in result:
line = line + "," + i
diff --git a/meta/lib/oe/elf.py b/meta/lib/oe/elf.py
new file mode 100644
index 0000000000..2562cea1dd
--- /dev/null
+++ b/meta/lib/oe/elf.py
@@ -0,0 +1,133 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+def machine_dict(d):
+# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
+ machdata = {
+ "darwin9" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "eabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "elf" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "i586" : (3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "epiphany": (4643, 0, 0, True, 32),
+ "lm32": (138, 0, 0, False, 32),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeeb":(189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ "powerpc": (20, 0, 0, False, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ },
+ "linux" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "arm" : (40, 97, 0, True, 32),
+ "armeb": (40, 97, 0, False, 32),
+ "powerpc": (20, 0, 0, False, 32),
+ "powerpc64": (21, 0, 0, False, 64),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "ia64": (50, 0, 0, True, 64),
+ "alpha": (36902, 0, 0, True, 64),
+ "hppa": (15, 3, 0, False, 32),
+ "m68k": ( 4, 0, 0, False, 32),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "mipsisa32r6": ( 8, 0, 0, False, 32),
+ "mipsisa32r6el": ( 8, 0, 0, True, 32),
+ "mipsisa64r6": ( 8, 0, 0, False, 64),
+ "mipsisa64r6el": ( 8, 0, 0, True, 64),
+ "nios2": (113, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ "s390": (22, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ "sparc": ( 2, 0, 0, False, 32),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeeb":(189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ },
+ "linux-musl" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "arm" : ( 40, 97, 0, True, 32),
+ "armeb": ( 40, 97, 0, False, 32),
+ "powerpc": ( 20, 0, 0, False, 32),
+ "powerpc64": ( 21, 0, 0, False, 64),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": ( 62, 0, 0, True, 64),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeeb":(189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ "sh4": ( 42, 0, 0, True, 32),
+ },
+ "uclinux-uclibc" : {
+ "bfin": ( 106, 0, 0, True, 32),
+ },
+ "linux-gnueabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-musleabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-gnuspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-muslspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-gnu" : {
+ "powerpc": (20, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ },
+ "linux-gnu_ilp32" : {
+ "aarch64" : (183, 0, 0, True, 32),
+ },
+ "linux-gnux32" : {
+ "x86_64": (62, 0, 0, True, 32),
+ },
+ "linux-muslx32" : {
+ "x86_64": (62, 0, 0, True, 32),
+ },
+ "linux-gnun32" : {
+ "mips64": ( 8, 0, 0, False, 32),
+ "mips64el": ( 8, 0, 0, True, 32),
+ "mipsisa64r6": ( 8, 0, 0, False, 32),
+ "mipsisa64r6el":( 8, 0, 0, True, 32),
+ },
+ }
+
+ # Add in any extra user supplied data which may come from a BSP layer, removing the
+ # need to always change this class directly
+ extra_machdata = (d and d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS") or "").split()
+ for m in extra_machdata:
+ call = m + "(machdata, d)"
+ locs = { "machdata" : machdata, "d" : d}
+ machdata = bb.utils.better_eval(call, locs)
+
+ return machdata
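Each machine_dict entry maps a (TARGET_OS, TARGET_ARCH) pair to the expected (machine, osabi, abiversion, little-endian, bits) tuple, which package QA can compare against a binary's ELF header. A hedged sketch of such a comparison, reading the ident bytes and e_machine field directly; this is an illustration, not the actual QA check:

    import struct

    def elf_summary(path):
        # Parse just enough of the ELF header: class, endianness, e_machine.
        with open(path, 'rb') as f:
            header = f.read(20)
        if header[:4] != b'\x7fELF':
            return None
        bits = 32 if header[4] == 1 else 64          # EI_CLASS
        littleendian = (header[5] == 1)              # EI_DATA
        fmt = '<H' if littleendian else '>H'
        machine = struct.unpack(fmt, header[18:20])[0]  # e_machine
        return (machine, littleendian, bits)

    # machine_dict(None)["linux"]["x86_64"] == (62, 0, 0, True, 64), so a
    # matching binary should yield (62, True, 64) here.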
diff --git a/meta/lib/oe/gpg_sign.py b/meta/lib/oe/gpg_sign.py
index 38eb0cb137..7634d7ef1d 100644
--- a/meta/lib/oe/gpg_sign.py
+++ b/meta/lib/oe/gpg_sign.py
@@ -1,55 +1,71 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
"""Helper module for GPG signing"""
import os
import bb
-import oe.utils
+import subprocess
+import shlex
class LocalSigner(object):
"""Class for handling local (on the build host) signing"""
def __init__(self, d):
- self.gpg_bin = d.getVar('GPG_BIN', True) or \
+ self.gpg_bin = d.getVar('GPG_BIN') or \
bb.utils.which(os.getenv('PATH'), 'gpg')
- self.gpg_path = d.getVar('GPG_PATH', True)
- self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpm")
+ self.gpg_cmd = [self.gpg_bin]
+ self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
+ # Without this we see "Cannot allocate memory" errors when running processes in parallel
+ # It needs to be set for any gpg command since any agent launched can stick around in memory
+ # and this parameter must be set.
+ if self.gpg_agent_bin:
+ self.gpg_cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
+ self.gpg_path = d.getVar('GPG_PATH')
+ self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
+ self.gpg_version = self.get_gpg_version()
+
def export_pubkey(self, output_file, keyid, armor=True):
"""Export GPG public key to a file"""
- cmd = '%s --batch --yes --export -o %s ' % \
- (self.gpg_bin, output_file)
+ cmd = self.gpg_cmd + ["--no-permission-warning", "--batch", "--yes", "--export", "-o", output_file]
if self.gpg_path:
- cmd += "--homedir %s " % self.gpg_path
+ cmd += ["--homedir", self.gpg_path]
if armor:
- cmd += "--armor "
- cmd += keyid
- status, output = oe.utils.getstatusoutput(cmd)
- if status:
- raise bb.build.FuncFailed('Failed to export gpg public key (%s): %s' %
- (keyid, output))
-
- def sign_rpms(self, files, keyid, passphrase):
+ cmd += ["--armor"]
+ cmd += [keyid]
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+
+ def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
"""Sign RPM files"""
cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid
- cmd += "--define '_gpg_passphrase %s' " % passphrase
+ gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
+ if self.gpg_version > (2,1,):
+ gpg_args += ' --pinentry-mode=loopback'
+ cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
+ cmd += "--define '_binary_filedigest_algorithm %s' " % digest
if self.gpg_bin:
- cmd += "--define '%%__gpg %s' " % self.gpg_bin
+ cmd += "--define '__gpg %s' " % self.gpg_bin
if self.gpg_path:
cmd += "--define '_gpg_path %s' " % self.gpg_path
- cmd += ' '.join(files)
+ if fsk:
+ cmd += "--signfiles --fskpath %s " % fsk
+ if fsk_password:
+ cmd += "--define '_file_signing_key_password %s' " % fsk_password
- status, output = oe.utils.getstatusoutput(cmd)
- if status:
- raise bb.build.FuncFailed("Failed to sign RPM packages: %s" % output)
+ # Sign in chunks
+ for i in range(0, len(files), sign_chunk):
+ subprocess.check_output(shlex.split(cmd + ' '.join(files[i:i+sign_chunk])), stderr=subprocess.STDOUT)
def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True):
"""Create a detached signature of a file"""
- import subprocess
if passphrase_file and passphrase:
raise Exception("You should use either passphrase_file of passphrase, not both")
- cmd = [self.gpg_bin, '--detach-sign', '--batch', '--no-tty', '--yes',
- '--passphrase-fd', '0', '-u', keyid]
+ cmd = self.gpg_cmd + ['--detach-sign', '--no-permission-warning', '--batch',
+ '--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid]
if self.gpg_path:
cmd += ['--homedir', self.gpg_path]
@@ -58,9 +74,7 @@ class LocalSigner(object):
#gpg > 2.1 supports password pipes only through the loopback interface
#gpg < 2.1 errors out if given unknown parameters
- dots = self.get_gpg_version().split('.')
- assert len(dots) >= 2
- if int(dots[0]) >= 2 and int(dots[1]) >= 1:
+ if self.gpg_version > (2,1,):
cmd += ['--pinentry-mode', 'loopback']
cmd += [input_file]
@@ -74,8 +88,7 @@ class LocalSigner(object):
(_, stderr) = job.communicate(passphrase.encode("utf-8"))
if job.returncode:
- raise bb.build.FuncFailed("GPG exited with code %d: %s" %
- (job.returncode, stderr.decode("utf-8")))
+ bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))
except IOError as e:
bb.error("IO error (%s): %s" % (e.errno, e.strerror))
@@ -87,21 +100,23 @@ class LocalSigner(object):
def get_gpg_version(self):
- """Return the gpg version"""
- import subprocess
+ """Return the gpg version as a tuple of ints"""
try:
- return subprocess.check_output((self.gpg_bin, "--version")).split()[2].decode("utf-8")
+ cmd = self.gpg_cmd + ["--version", "--no-permission-warning"]
+ ver_str = subprocess.check_output(cmd).split()[2].decode("utf-8")
+ return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
except subprocess.CalledProcessError as e:
- raise bb.build.FuncFailed("Could not get gpg version: %s" % e)
+ bb.fatal("Could not get gpg version: %s" % e)
def verify(self, sig_file):
"""Verify signature"""
- cmd = self.gpg_bin + " --verify "
+ cmd = self.gpg_cmd + [" --verify", "--no-permission-warning"]
if self.gpg_path:
- cmd += "--homedir %s " % self.gpg_path
- cmd += sig_file
- status, _ = oe.utils.getstatusoutput(cmd)
+ cmd += ["--homedir", self.gpg_path]
+
+ cmd += [sig_file]
+ status = subprocess.call(cmd)
ret = False if status else True
return ret
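get_gpg_version() now returns a tuple of ints, so checks like self.gpg_version > (2,1,) compare numerically instead of lexically (where the string "2.10" would sort before "2.9"). A small sketch of the parsing and comparison, assuming a typical gpg version string:

    def parse_gpg_version(ver_str):
        # "2.2.27-unknown" -> (2, 2, 27); strip any "-suffix" first.
        return tuple(int(i) for i in ver_str.split("-")[0].split("."))

    assert parse_gpg_version("2.2.27") > (2, 1)
    # Tuple comparison gets this right; string comparison would not:
    assert parse_gpg_version("2.10.0") > parse_gpg_version("2.9.1")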
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
index 8d2fd1709c..c1274a61de 100644
--- a/meta/lib/oe/license.py
+++ b/meta/lib/oe/license.py
@@ -1,4 +1,6 @@
-# vi:sts=4:sw=4:et
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
"""Code for parsing OpenEmbedded license strings"""
import ast
@@ -13,8 +15,8 @@ def license_ok(license, dont_want_licenses):
# will exclude a trailing '+' character from LICENSE in
# case INCOMPATIBLE_LICENSE is not a 'X+' license.
lic = license
- if not re.search('\+$', dwl):
- lic = re.sub('\+', '', license)
+ if not re.search(r'\+$', dwl):
+ lic = re.sub(r'\+', '', license)
if fnmatch(lic, dwl):
return False
return True
@@ -40,8 +42,8 @@ class InvalidLicense(LicenseError):
return "invalid characters in license '%s'" % self.license
license_operator_chars = '&|() '
-license_operator = re.compile('([' + license_operator_chars + '])')
-license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
+license_operator = re.compile(r'([' + license_operator_chars + '])')
+license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')
class LicenseVisitor(ast.NodeVisitor):
"""Get elements based on OpenEmbedded license strings"""
@@ -106,7 +108,8 @@ def is_included(licensestr, whitelist=None, blacklist=None):
license string matches the whitelist and does not match the blacklist.
Returns a tuple holding the boolean state and a list of the applicable
- licenses which were excluded (or None, if the state is True)
+ licenses that were excluded if state is False, or the licenses that were
+ included if the state is True.
"""
def include_license(license):
@@ -117,10 +120,17 @@ def is_included(licensestr, whitelist=None, blacklist=None):
def choose_licenses(alpha, beta):
"""Select the option in an OR which is the 'best' (has the most
- included licenses)."""
- alpha_weight = len(list(filter(include_license, alpha)))
- beta_weight = len(list(filter(include_license, beta)))
- if alpha_weight > beta_weight:
+ included licenses and no excluded licenses)."""
+ # The factor 1000 below is arbitrary, just expected to be much larger
+ # than the number of licenses actually specified. That way the weight
+ # will be negative if the list of licenses contains an excluded license,
+ # but still gives a higher weight to the list with the most included
+ # licenses.
+ alpha_weight = (len(list(filter(include_license, alpha))) -
+ 1000 * (len(list(filter(exclude_license, alpha))) > 0))
+ beta_weight = (len(list(filter(include_license, beta))) -
+ 1000 * (len(list(filter(exclude_license, beta))) > 0))
+ if alpha_weight >= beta_weight:
return alpha
else:
return beta
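The new weighting makes any OR branch containing an excluded license lose to a branch with none, while still preferring the branch with more included licenses otherwise; 1000 only needs to exceed any realistic license count. A toy check of the weight formula, with include/exclude stubbed as plain sets:

    def weight(licenses, included, excluded):
        # Included licenses add 1 each; a single excluded license
        # pushes the branch's weight strongly negative.
        inc = len([l for l in licenses if l in included])
        has_exc = any(l in excluded for l in licenses)
        return inc - 1000 * has_exc

    included = {"MIT", "Apache-2.0"}
    excluded = {"GPL-3.0-only"}
    # Two licenses with one excluded lose to a single clean license:
    assert (weight(["MIT", "GPL-3.0-only"], included, excluded)
            < weight(["MIT"], included, excluded))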
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py
index e0bdfba255..43e46380d7 100644
--- a/meta/lib/oe/lsb.py
+++ b/meta/lib/oe/lsb.py
@@ -1,26 +1,65 @@
-def release_dict():
- """Return the output of lsb_release -ir as a dictionary"""
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+def get_os_release():
+ """Get all key-value pairs from /etc/os-release as a dict"""
+ from collections import OrderedDict
+
+ data = OrderedDict()
+ if os.path.exists('/etc/os-release'):
+ with open('/etc/os-release') as f:
+ for line in f:
+ try:
+ key, val = line.rstrip().split('=', 1)
+ except ValueError:
+ continue
+ data[key.strip()] = val.strip('"')
+ return data
+
+def release_dict_osr():
+ """ Populate a dict with pertinent values from /etc/os-release """
+ data = {}
+ os_release = get_os_release()
+ if 'ID' in os_release:
+ data['DISTRIB_ID'] = os_release['ID']
+ if 'VERSION_ID' in os_release:
+ data['DISTRIB_RELEASE'] = os_release['VERSION_ID']
+
+ return data
+
+def release_dict_lsb():
+ """ Return the output of lsb_release -ir as a dictionary """
from subprocess import PIPE
try:
output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
except bb.process.CmdError as exc:
- return None
+ return {}
+
+ lsb_map = { 'Distributor ID': 'DISTRIB_ID',
+ 'Release': 'DISTRIB_RELEASE'}
+ lsb_keys = lsb_map.keys()
data = {}
for line in output.splitlines():
- if line.startswith("-e"): line = line[3:]
+ if line.startswith("-e"):
+ line = line[3:]
try:
key, value = line.split(":\t", 1)
except ValueError:
continue
- else:
- data[key] = value
+ if key in lsb_keys:
+ data[lsb_map[key]] = value
+
+ if len(data) != 2:
+ return {}
+
return data
def release_dict_file():
- """ Try to gather LSB release information manually when lsb_release tool is unavailable """
- data = None
+ """ Try to gather release information manually when other methods fail """
+ data = {}
try:
if os.path.exists('/etc/lsb-release'):
data = {}
@@ -37,14 +76,6 @@ def release_dict_file():
if match:
data['DISTRIB_ID'] = match.group(1)
data['DISTRIB_RELEASE'] = match.group(2)
- elif os.path.exists('/etc/os-release'):
- data = {}
- with open('/etc/os-release') as f:
- for line in f:
- if line.startswith('NAME='):
- data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
- if line.startswith('VERSION_ID='):
- data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
elif os.path.exists('/etc/SuSE-release'):
data = {}
data['DISTRIB_ID'] = 'SUSE LINUX'
@@ -55,7 +86,7 @@ def release_dict_file():
break
except IOError:
- return None
+ return {}
return data
def distro_identifier(adjust_hook=None):
@@ -64,22 +95,24 @@ def distro_identifier(adjust_hook=None):
import re
- lsb_data = release_dict()
- if lsb_data:
- distro_id, release = lsb_data['Distributor ID'], lsb_data['Release']
- else:
- lsb_data_file = release_dict_file()
- if lsb_data_file:
- distro_id, release = lsb_data_file['DISTRIB_ID'], lsb_data_file.get('DISTRIB_RELEASE', None)
- else:
- distro_id, release = None, None
+ # Try /etc/os-release first, then the output of `lsb_release -ir` and
+ # finally fall back on parsing various release files in order to determine
+ # host distro name and version.
+ distro_data = release_dict_osr()
+ if not distro_data:
+ distro_data = release_dict_lsb()
+ if not distro_data:
+ distro_data = release_dict_file()
+
+ distro_id = distro_data.get('DISTRIB_ID', '')
+ release = distro_data.get('DISTRIB_RELEASE', '')
if adjust_hook:
distro_id, release = adjust_hook(distro_id, release)
if not distro_id:
- return "Unknown"
- # Filter out any non-alphanumerics
- distro_id = re.sub(r'\W', '', distro_id)
+ return "unknown"
+ # Filter out any non-alphanumerics and convert to lowercase
+ distro_id = re.sub(r'\W', '', distro_id).lower()
if release:
id_str = '{0}-{1}'.format(distro_id, release)
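distro_identifier() now tries /etc/os-release, then lsb_release -ir, then the legacy release files; each helper returns a dict (possibly empty), so the fallback is a plain truthiness chain. A sketch of the key=value parsing that release_dict_osr() relies on, fed a typical os-release body:

    from io import StringIO

    sample = StringIO('NAME="Ubuntu"\nID=ubuntu\nVERSION_ID="20.04"\n')

    data = {}
    for line in sample:
        try:
            key, val = line.rstrip().split('=', 1)
        except ValueError:
            continue  # skip blank or malformed lines
        data[key.strip()] = val.strip('"')

    assert data['ID'] == 'ubuntu' and data['VERSION_ID'] == '20.04'
    # release_dict_osr() maps ID/VERSION_ID to DISTRIB_ID/DISTRIB_RELEASE,
    # which distro_identifier() turns into "ubuntu-20.04".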
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py
index f88981dd90..d929c8b3e5 100644
--- a/meta/lib/oe/maketype.py
+++ b/meta/lib/oe/maketype.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
"""OpenEmbedded variable typing support
Types are defined in the metadata by name, using the 'type' flag on a
@@ -7,7 +10,12 @@ the arguments of the type's factory for details.
import inspect
import oe.types as types
-import collections
+try:
+ # Python 3.7+
+ from collections.abc import Callable
+except ImportError:
+ # Python < 3.7
+ from collections import Callable
available_types = {}
@@ -96,7 +104,7 @@ for name in dir(types):
continue
obj = getattr(types, name)
- if not isinstance(obj, collections.Callable):
+ if not isinstance(obj, Callable):
continue
register(name, obj)
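The guarded import keeps maketype working across interpreter versions: the Callable ABC is imported from collections.abc where available, falling back to the old top-level collections alias. The same compatibility pattern in isolation:

    try:
        # Preferred since Python 3.3; the plain 'collections' alias
        # was removed entirely in Python 3.10.
        from collections.abc import Callable
    except ImportError:
        from collections import Callable

    def register_callables(module, registry):
        # Mirrors the registration loop: only callables become types.
        for name in dir(module):
            obj = getattr(module, name)
            if isinstance(obj, Callable):
                registry[name] = obj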
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py
index 95f8eb2df3..f7c88f9a09 100644
--- a/meta/lib/oe/manifest.py
+++ b/meta/lib/oe/manifest.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from abc import ABCMeta, abstractmethod
import os
import re
@@ -59,9 +63,9 @@ class Manifest(object, metaclass=ABCMeta):
if manifest_dir is None:
if manifest_type != self.MANIFEST_TYPE_IMAGE:
- self.manifest_dir = self.d.getVar('SDK_DIR', True)
+ self.manifest_dir = self.d.getVar('SDK_DIR')
else:
- self.manifest_dir = self.d.getVar('WORKDIR', True)
+ self.manifest_dir = self.d.getVar('WORKDIR')
else:
self.manifest_dir = manifest_dir
@@ -82,7 +86,7 @@ class Manifest(object, metaclass=ABCMeta):
This will be used for testing until the class is implemented properly!
"""
def _create_dummy_initial(self):
- image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
+ image_rootfs = self.d.getVar('IMAGE_ROOTFS')
pkg_list = dict()
if image_rootfs.find("core-image-sato-sdk") > 0:
pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
@@ -104,7 +108,7 @@ class Manifest(object, metaclass=ABCMeta):
pkg_list['lgp'] = \
"locale-base-en-us locale-base-en-gb"
elif image_rootfs.find("core-image-minimal") > 0:
- pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot"
+ pkg_list[self.PKG_TYPE_MUST_INSTALL] = "packagegroup-core-boot"
with open(self.initial_manifest, "w+") as manifest:
manifest.write(self.initial_manifest_file_header)
@@ -195,7 +199,7 @@ class RpmManifest(Manifest):
for pkg in pkg_list.split():
pkg_type = self.PKG_TYPE_MUST_INSTALL
- ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
for ml_variant in ml_variants:
if pkg.startswith(ml_variant + '-'):
@@ -216,13 +220,13 @@ class RpmManifest(Manifest):
for var in self.var_maps[self.manifest_type]:
if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var, True))
+ split_pkgs = self._split_multilib(self.d.getVar(var))
if split_pkgs is not None:
pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
else:
- pkg_list = self.d.getVar(var, True)
+ pkg_list = self.d.getVar(var)
if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
for pkg_type in pkgs:
for pkg in pkgs[pkg_type].split():
@@ -245,7 +249,7 @@ class OpkgManifest(Manifest):
for pkg in pkg_list.split():
pkg_type = self.PKG_TYPE_MUST_INSTALL
- ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
for ml_variant in ml_variants:
if pkg.startswith(ml_variant + '-'):
@@ -266,16 +270,16 @@ class OpkgManifest(Manifest):
for var in self.var_maps[self.manifest_type]:
if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var, True))
+ split_pkgs = self._split_multilib(self.d.getVar(var))
if split_pkgs is not None:
pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
else:
- pkg_list = self.d.getVar(var, True)
+ pkg_list = self.d.getVar(var)
if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
- for pkg_type in pkgs:
- for pkg in pkgs[pkg_type].split():
+ for pkg_type in sorted(pkgs):
+ for pkg in sorted(pkgs[pkg_type].split()):
manifest.write("%s,%s\n" % (pkg_type, pkg))
def create_final(self):
@@ -310,7 +314,7 @@ class DpkgManifest(Manifest):
manifest.write(self.initial_manifest_file_header)
for var in self.var_maps[self.manifest_type]:
- pkg_list = self.d.getVar(var, True)
+ pkg_list = self.d.getVar(var)
if pkg_list is None:
continue
@@ -332,7 +336,7 @@ def create_manifest(d, final_manifest=False, manifest_dir=None,
'ipk': OpkgManifest,
'deb': DpkgManifest}
- manifest = manifest_map[d.getVar('IMAGE_PKGTYPE', True)](d, manifest_dir, manifest_type)
+ manifest = manifest_map[d.getVar('IMAGE_PKGTYPE')](d, manifest_dir, manifest_type)
if final_manifest:
manifest.create_final()
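Sorting both the package types and the packages within each type makes the generated manifest byte-for-byte reproducible, where plain dict iteration would depend on insertion history. A minimal illustration (the type keys 'mip'/'mup' are stand-ins, not the real manifest constants):

    pkgs = {"mup": "z y x", "mip": "b a"}

    lines = ["%s,%s" % (pkg_type, pkg)
             for pkg_type in sorted(pkgs)
             for pkg in sorted(pkgs[pkg_type].split())]

    # Deterministic regardless of how 'pkgs' was populated:
    assert lines == ["mip,a", "mip,b", "mup,x", "mup,y", "mup,z"]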
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py
index 02642f29f0..b8585d4253 100644
--- a/meta/lib/oe/package.py
+++ b/meta/lib/oe/package.py
@@ -1,15 +1,21 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import stat
+import mmap
+import subprocess
+
def runstrip(arg):
# Function to strip a single file, called from split_and_strip_files below
# A working 'file' (one which works on the target architecture)
#
- # The elftype is a bit pattern (explained in split_and_strip_files) to tell
+ # The elftype is a bit pattern (explained in is_elf below) to tell
# us what type of file we're processing...
# 4 - executable
# 8 - shared library
# 16 - kernel module
- import stat, subprocess
-
(file, elftype, strip) = arg
newmode = None
@@ -18,30 +24,157 @@ def runstrip(arg):
newmode = origmode | stat.S_IWRITE | stat.S_IREAD
os.chmod(file, newmode)
- extraflags = ""
-
+ stripcmd = [strip]
+ skip_strip = False
# kernel module
if elftype & 16:
- extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
+ if is_kernel_module_signed(file):
+ bb.debug(1, "Skip strip on signed module %s" % file)
+ skip_strip = True
+ else:
+ stripcmd.extend(["--strip-debug", "--remove-section=.comment",
+ "--remove-section=.note", "--preserve-dates"])
# .so and shared library
elif ".so" in file and elftype & 8:
- extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
+ stripcmd.extend(["--remove-section=.comment", "--remove-section=.note", "--strip-unneeded"])
# shared or executable:
elif elftype & 8 or elftype & 4:
- extraflags = "--remove-section=.comment --remove-section=.note"
+ stripcmd.extend(["--remove-section=.comment", "--remove-section=.note"])
- stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
+ stripcmd.append(file)
bb.debug(1, "runstrip: %s" % stripcmd)
- try:
- output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.error("runstrip: '%s' strip command failed with %s (%s)" % (stripcmd, e.returncode, e.output))
+ if not skip_strip:
+ output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT)
if newmode:
os.chmod(file, origmode)
- return
+# Detect .ko module by searching for "vermagic=" string
+def is_kernel_module(path):
+ with open(path) as f:
+ return mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ).find(b"vermagic=") >= 0
+
+# Detect if .ko module is signed
+def is_kernel_module_signed(path):
+ with open(path, "rb") as f:
+ f.seek(-28, 2)
+ module_tail = f.read()
+ return "Module signature appended" in "".join(chr(c) for c in bytearray(module_tail))
+
+# Return type (bits):
+# 0 - not elf
+# 1 - ELF
+# 2 - stripped
+# 4 - executable
+# 8 - shared library
+# 16 - kernel module
+def is_elf(path):
+ exec_type = 0
+ result = subprocess.check_output(["file", "-b", path], stderr=subprocess.STDOUT).decode("utf-8")
+
+ if "ELF" in result:
+ exec_type |= 1
+ if "not stripped" not in result:
+ exec_type |= 2
+ if "executable" in result:
+ exec_type |= 4
+ if "shared" in result:
+ exec_type |= 8
+ if "relocatable" in result:
+ if path.endswith(".ko") and path.find("/lib/modules/") != -1 and is_kernel_module(path):
+ exec_type |= 16
+ return (path, exec_type)
+
+def is_static_lib(path):
+ if path.endswith('.a') and not os.path.islink(path):
+ with open(path, 'rb') as fh:
+ # The magic must include the first slash to avoid
+ # matching golang static libraries
+ magic = b'!<arch>\x0a/'
+ start = fh.read(len(magic))
+ return start == magic
+ return False
+
+def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripped=False):
+ """
+ Strip executable code (like executables, shared libraries) _in_place_
+ - Based on sysroot_strip in staging.bbclass
+ :param dstdir: directory in which to strip files
+ :param strip_cmd: Strip command (usually ${STRIP})
+ :param libdir: ${libdir} - strip .so files in this directory
+ :param base_libdir: ${base_libdir} - strip .so files in this directory
+ :param qa_already_stripped: Set to True if 'already-stripped' is in ${INSANE_SKIP}
+ This is for proper logging and messages only.
+ """
+ import stat, errno, oe.path, oe.utils
+
+ elffiles = {}
+ inodes = {}
+ libdir = os.path.abspath(dstdir + os.sep + libdir)
+ base_libdir = os.path.abspath(dstdir + os.sep + base_libdir)
+ exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+ #
+ # First lets figure out all of the files we may have to process
+ #
+ checkelf = []
+ inodecache = {}
+ for root, dirs, files in os.walk(dstdir):
+ for f in files:
+ file = os.path.join(root, f)
+
+ try:
+ ltarget = oe.path.realpath(file, dstdir, False)
+ s = os.lstat(ltarget)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ # Skip broken symlinks
+ continue
+ if not s:
+ continue
+ # Check it's an executable
+ if s[stat.ST_MODE] & exec_mask \
+ or ((file.startswith(libdir) or file.startswith(base_libdir)) and ".so" in f) \
+ or file.endswith('.ko'):
+ # If it's a symlink, and points to an ELF file, we capture the readlink target
+ if os.path.islink(file):
+ continue
+
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ checkelf.append(file)
+ inodecache[file] = s.st_ino
+ results = oe.utils.multiprocess_launch(is_elf, checkelf, d)
+ for (file, elf_file) in results:
+ if elf_file & 1:
+ if elf_file & 2:
+ if qa_already_stripped:
+ bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dstdir):], pn))
+ else:
+ bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dstdir):], pn))
+ continue
+
+ if inodecache[file] in inodes:
+ os.unlink(file)
+ os.link(inodes[inodecache[file]], file)
+ else:
+ # break hardlinks so that we do not strip the original.
+ inodes[inodecache[file]] = file
+ bb.utils.break_hardlinks(file)
+ elffiles[file] = elf_file
+
+ #
+ # Now strip them (in parallel)
+ #
+ sfiles = []
+ for file in elffiles:
+ elf_file = int(elffiles[file])
+ sfiles.append((file, elf_file, strip_cmd))
+
+ oe.utils.multiprocess_launch(runstrip, sfiles, d)
def file_translate(file):
@@ -60,41 +193,63 @@ def filedeprunner(arg):
provides = {}
requires = {}
- r = re.compile(r'[<>=]+ +[^ ]*')
+ file_re = re.compile(r'\s+\d+\s(.*)')
+ dep_re = re.compile(r'\s+(\S)\s+(.*)')
+ r = re.compile(r'[<>=]+\s+\S*')
def process_deps(pipe, pkg, pkgdest, provides, requires):
- for line in pipe:
- f = line.decode("utf-8").split(" ", 1)[0].strip()
- line = line.decode("utf-8").split(" ", 1)[1].strip()
+ file = None
+ for line in pipe.split("\n"):
+
+ m = file_re.match(line)
+ if m:
+ file = m.group(1)
+ file = file.replace(pkgdest + "/" + pkg, "")
+ file = file_translate(file)
+ continue
- if line.startswith("Requires:"):
+ m = dep_re.match(line)
+ if not m or not file:
+ continue
+
+ type, dep = m.groups()
+
+ if type == 'R':
i = requires
- elif line.startswith("Provides:"):
+ elif type == 'P':
i = provides
else:
- continue
+ continue
- file = f.replace(pkgdest + "/" + pkg, "")
- file = file_translate(file)
- value = line.split(":", 1)[1].strip()
- value = r.sub(r'(\g<0>)', value)
+ if dep.startswith("python("):
+ continue
- if value.startswith("rpmlib("):
+ # Ignore all perl(VMS::...) and perl(Mac::...) dependencies. These
+ # are typically used conditionally from the Perl code, but are
+ # generated as unconditional dependencies.
+ if dep.startswith('perl(VMS::') or dep.startswith('perl(Mac::'):
continue
- if value == "python":
+
+ # Ignore perl dependencies on .pl files.
+ if dep.startswith('perl(') and dep.endswith('.pl)'):
continue
+
+ # Remove perl versions and perl module versions since they typically
+ # do not make sense when used as package versions.
+ if dep.startswith('perl') and r.search(dep):
+ dep = dep.split()[0]
+
+ # Put parentheses around any version specifications.
+ dep = r.sub(r'(\g<0>)',dep)
+
if file not in i:
i[file] = []
- i[file].append(value)
+ i[file].append(dep)
return provides, requires
- try:
- dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE)
- provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires)
- except OSError as e:
- bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e))
- raise e
+ output = subprocess.check_output(shlex.split(rpmdeps) + pkgfiles, stderr=subprocess.STDOUT).decode("utf-8")
+ provides, requires = process_deps(output, pkg, pkgdest, provides, requires)
return (pkg, provides, requires)
@@ -103,14 +258,14 @@ def read_shlib_providers(d):
import re
shlib_provider = {}
- shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
- list_re = re.compile('^(.*)\.list$')
+ shlibs_dirs = d.getVar('SHLIBSDIRS').split()
+ list_re = re.compile(r'^(.*)\.list$')
# Go from least to most specific since the last one found wins
for dir in reversed(shlibs_dirs):
bb.debug(2, "Reading shlib providers in %s" % (dir))
if not os.path.exists(dir):
continue
- for file in os.listdir(dir):
+ for file in sorted(os.listdir(dir)):
m = list_re.match(file)
if m:
dep_pkg = m.group(1)
@@ -149,6 +304,7 @@ def npm_split_package_dirs(pkgdir):
continue
pkgitems.append(pathitem)
pkgname = '-'.join(pkgitems).replace('_', '-')
+ pkgname = pkgname.replace('@', '')
pkgfile = os.path.join(root, dn, 'package.json')
data = None
if os.path.exists(pkgfile):
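is_elf() above packs everything the stripping logic needs into a single bit pattern, so callers test with bitwise AND instead of re-running file(1). A decoding sketch for the flag values documented in the comment:

    FLAGS = ((1, "ELF"), (2, "stripped"), (4, "executable"),
             (8, "shared library"), (16, "kernel module"))

    def describe(exec_type):
        # Translate the bitmask returned by is_elf() into readable labels.
        return [name for bit, name in FLAGS if exec_type & bit] or ["not ELF"]

    # A stripped shared library reports 1|2|8 == 11:
    assert describe(11) == ["ELF", "stripped", "shared library"]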
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
index 434b898d3d..4ff19cf09c 100644
--- a/meta/lib/oe/package_manager.py
+++ b/meta/lib/oe/package_manager.py
@@ -1,9 +1,12 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from abc import ABCMeta, abstractmethod
import os
import glob
import subprocess
import shutil
-import multiprocessing
import re
import collections
import bb
@@ -12,30 +15,25 @@ import oe.utils
import oe.path
import string
from oe.gpg_sign import get_signer
+import hashlib
+import fnmatch
# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
index_cmd = arg
- try:
- bb.note("Executing '%s' ..." % index_cmd)
- result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
- return("Index creation command '%s' failed with return code %d:\n%s" %
- (e.cmd, e.returncode, e.output.decode("utf-8")))
-
+ bb.note("Executing '%s' ..." % index_cmd)
+ result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
if result:
bb.note(result)
- return None
-
-"""
-This method parse the output from the package managerand return
-a dictionary with the information of the packages. This is used
-when the packages are in deb or ipk format.
-"""
def opkg_query(cmd_output):
- verregex = re.compile(' \([=<>]* [^ )]*\)')
+ """
+ This method parses the output from the package manager and returns
+ a dictionary with the information of the packages. This is used
+ when the packages are in deb or ipk format.
+ """
+ verregex = re.compile(r' \([=<>]* [^ )]*\)')
output = dict()
pkg = ""
arch = ""
@@ -90,6 +88,56 @@ def opkg_query(cmd_output):
return output
+def failed_postinsts_abort(pkgs, log_path):
+ bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
+then please place them into pkg_postinst_ontarget_${PN}().
+Deferring to first boot via 'exit 1' is no longer supported.
+Details of the failure are in %s.""" %(pkgs, log_path))
+
+def generate_locale_archive(d, rootfs, target_arch, localedir):
+ # Pretty sure we don't need this for locale archive generation but
+ # keeping it to be safe...
+ locale_arch_options = {
+ "arc": ["--uint32-align=4", "--little-endian"],
+ "arceb": ["--uint32-align=4", "--big-endian"],
+ "arm": ["--uint32-align=4", "--little-endian"],
+ "armeb": ["--uint32-align=4", "--big-endian"],
+ "aarch64": ["--uint32-align=4", "--little-endian"],
+ "aarch64_be": ["--uint32-align=4", "--big-endian"],
+ "sh4": ["--uint32-align=4", "--big-endian"],
+ "powerpc": ["--uint32-align=4", "--big-endian"],
+ "powerpc64": ["--uint32-align=4", "--big-endian"],
+ "mips": ["--uint32-align=4", "--big-endian"],
+ "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
+ "mips64": ["--uint32-align=4", "--big-endian"],
+ "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
+ "mipsel": ["--uint32-align=4", "--little-endian"],
+ "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
+ "mips64el": ["--uint32-align=4", "--little-endian"],
+ "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
+ "riscv64": ["--uint32-align=4", "--little-endian"],
+ "riscv32": ["--uint32-align=4", "--little-endian"],
+ "i586": ["--uint32-align=4", "--little-endian"],
+ "i686": ["--uint32-align=4", "--little-endian"],
+ "x86_64": ["--uint32-align=4", "--little-endian"]
+ }
+ if target_arch in locale_arch_options:
+ arch_options = locale_arch_options[target_arch]
+ else:
+ bb.error("locale_arch_options not found for target_arch=" + target_arch)
+ bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
+
+ # Need to set this so cross-localedef knows where the archive is
+ env = dict(os.environ)
+ env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")
+
+ for name in os.listdir(localedir):
+ path = os.path.join(localedir, name)
+ if os.path.isdir(path):
+ cmd = ["cross-localedef", "--verbose"]
+ cmd += arch_options
+ cmd += ["--add-to-archive", path]
+ subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
class Indexer(object, metaclass=ABCMeta):
def __init__(self, d, deploy_dir):
@@ -102,118 +150,50 @@ class Indexer(object, metaclass=ABCMeta):
class RpmIndexer(Indexer):
- def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
- package_archs = collections.OrderedDict()
- target_os = collections.OrderedDict()
-
- if arch_var is not None and os_var is not None:
- package_archs['default'] = self.d.getVar(arch_var, True).split()
- package_archs['default'].reverse()
- target_os['default'] = self.d.getVar(os_var, True).strip()
- else:
- package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
- # arch order is reversed. This ensures the -best- match is
- # listed first!
- package_archs['default'].reverse()
- target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
- multilibs = self.d.getVar('MULTILIBS', True) or ""
- for ext in multilibs.split():
- eext = ext.split(':')
- if len(eext) > 1 and eext[0] == 'multilib':
- localdata = bb.data.createCopy(self.d)
- default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
- default_tune = localdata.getVar(default_tune_key, False)
- if default_tune is None:
- default_tune_key = "DEFAULTTUNE_ML_" + eext[1]
- default_tune = localdata.getVar(default_tune_key, False)
- if default_tune:
- localdata.setVar("DEFAULTTUNE", default_tune)
- bb.data.update_data(localdata)
- package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
- True).split()
- package_archs[eext[1]].reverse()
- target_os[eext[1]] = localdata.getVar("TARGET_OS",
- True).strip()
-
- ml_prefix_list = collections.OrderedDict()
- for mlib in package_archs:
- if mlib == 'default':
- ml_prefix_list[mlib] = package_archs[mlib]
- else:
- ml_prefix_list[mlib] = list()
- for arch in package_archs[mlib]:
- if arch in ['all', 'noarch', 'any']:
- ml_prefix_list[mlib].append(arch)
- else:
- ml_prefix_list[mlib].append(mlib + "_" + arch)
-
- return (ml_prefix_list, target_os)
-
def write_index(self):
- sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
- all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
+ self.do_write_index(self.deploy_dir)
- mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]
-
- archs = set()
- for item in mlb_prefix_list:
- archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))
-
- if len(archs) == 0:
- archs = archs.union(set(all_mlb_pkg_archs))
-
- archs = archs.union(set(sdk_pkg_archs))
-
- rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
- if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND', True))
+ def do_write_index(self, deploy_dir):
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
else:
signer = None
- index_cmds = []
- repomd_files = []
- rpm_dirs_found = False
- for arch in archs:
- dbpath = os.path.join(self.d.getVar('WORKDIR', True), 'rpmdb', arch)
- if os.path.exists(dbpath):
- bb.utils.remove(dbpath, True)
- arch_dir = os.path.join(self.deploy_dir, arch)
- if not os.path.isdir(arch_dir):
- continue
-
- index_cmds.append("%s --dbpath %s --update -q %s" % \
- (rpm_createrepo, dbpath, arch_dir))
- repomd_files.append(os.path.join(arch_dir, 'repodata', 'repomd.xml'))
-
- rpm_dirs_found = True
- if not rpm_dirs_found:
- bb.note("There are no packages in %s" % self.deploy_dir)
- return
-
- # Create repodata
- result = oe.utils.multiprocess_exec(index_cmds, create_index)
+ createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
+ result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
if result:
- bb.fatal('%s' % ('\n'.join(result)))
+ bb.fatal(result)
+
# Sign repomd
if signer:
- for repomd in repomd_files:
- feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE', True)
- is_ascii_sig = (feed_sig_type.upper() != "BIN")
- signer.detach_sign(repomd,
- self.d.getVar('PACKAGE_FEED_GPG_NAME', True),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True),
- armor=is_ascii_sig)
-
+ sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
+ is_ascii_sig = (sig_type.upper() != "BIN")
+ signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ armor=is_ascii_sig)
+
+class RpmSubdirIndexer(RpmIndexer):
+ def write_index(self):
+ bb.note("Generating package index for %s" %(self.deploy_dir))
+ self.do_write_index(self.deploy_dir)
+ for entry in os.walk(self.deploy_dir):
+ if os.path.samefile(self.deploy_dir, entry[0]):
+ for dir in entry[1]:
+ if dir != 'repodata':
+ dir_path = oe.path.join(self.deploy_dir, dir)
+ bb.note("Generating package index for %s" %(dir_path))
+ self.do_write_index(dir_path)
class OpkgIndexer(Indexer):
def write_index(self):
arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
"SDK_PACKAGE_ARCHS",
- "MULTILIB_ARCHS"]
+ ]
opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
- if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND', True))
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
else:
signer = None
@@ -223,7 +203,7 @@ class OpkgIndexer(Indexer):
index_cmds = set()
index_sign_files = set()
for arch_var in arch_vars:
- archs = self.d.getVar(arch_var, True)
+ archs = self.d.getVar(arch_var)
if archs is None:
continue
@@ -237,7 +217,7 @@ class OpkgIndexer(Indexer):
if not os.path.exists(pkgs_file):
open(pkgs_file, "w").close()
- index_cmds.add('%s -r %s -p %s -m %s' %
+ index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s' %
(opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
index_sign_files.add(pkgs_file)
@@ -246,17 +226,15 @@ class OpkgIndexer(Indexer):
bb.note("There are no packages in %s!" % self.deploy_dir)
return
- result = oe.utils.multiprocess_exec(index_cmds, create_index)
- if result:
- bb.fatal('%s' % ('\n'.join(result)))
+ oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
if signer:
- feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE', True)
+ feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
is_ascii_sig = (feed_sig_type.upper() != "BIN")
for f in index_sign_files:
signer.detach_sign(f,
- self.d.getVar('PACKAGE_FEED_GPG_NAME', True),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True),
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
armor=is_ascii_sig)
@@ -278,8 +256,8 @@ class DpkgIndexer(Indexer):
with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
"apt", "apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
- line = re.sub("#ROOTFS#", "/dev/null", line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+ line = re.sub(r"#ROOTFS#", "/dev/null", line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
def write_index(self):
@@ -290,16 +268,16 @@ class DpkgIndexer(Indexer):
os.environ['APT_CONFIG'] = self.apt_conf_file
- pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
+ pkg_archs = self.d.getVar('PACKAGE_ARCHS')
if pkg_archs is not None:
arch_list = pkg_archs.split()
- sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
+ sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
if sdk_pkg_archs is not None:
for a in sdk_pkg_archs.split():
if a not in pkg_archs:
arch_list.append(a)
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").split()
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
@@ -314,13 +292,13 @@ class DpkgIndexer(Indexer):
cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
- cmd += "%s -fc Packages > Packages.gz;" % gzip
+ cmd += "%s -fcn Packages > Packages.gz;" % gzip
with open(os.path.join(arch_dir, "Release"), "w+") as release:
release.write("Label: %s\n" % arch)
cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
-
+
index_cmds.append(cmd)
deb_dirs_found = True
@@ -329,10 +307,8 @@ class DpkgIndexer(Indexer):
bb.note("There are no packages in %s" % self.deploy_dir)
return
- result = oe.utils.multiprocess_exec(index_cmds, create_index)
- if result:
- bb.fatal('%s' % ('\n'.join(result)))
- if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+ oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
        raise NotImplementedError('Package feed signing not implemented for dpkg')
@@ -346,119 +322,9 @@ class PkgsList(object, metaclass=ABCMeta):
def list_pkgs(self):
pass
-
class RpmPkgsList(PkgsList):
- def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
- super(RpmPkgsList, self).__init__(d, rootfs_dir)
-
- self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')
-
- self.ml_prefix_list, self.ml_os_list = \
- RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)
-
- # Determine rpm version
- cmd = "%s --version" % self.rpm_cmd
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Getting rpm version failed. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- '''
- Translate the RPM/Smart format names to the OE multilib format names
- '''
- def _pkg_translate_smart_to_oe(self, pkg, arch):
- new_pkg = pkg
- new_arch = arch
- fixed_arch = arch.replace('_', '-')
- found = 0
- for mlib in self.ml_prefix_list:
- for cmp_arch in self.ml_prefix_list[mlib]:
- fixed_cmp_arch = cmp_arch.replace('_', '-')
- if fixed_arch == fixed_cmp_arch:
- if mlib == 'default':
- new_pkg = pkg
- new_arch = cmp_arch
- else:
- new_pkg = mlib + '-' + pkg
- # We need to strip off the ${mlib}_ prefix on the arch
- new_arch = cmp_arch.replace(mlib + '_', '')
-
- # Workaround for bug 3565. Simply look to see if we
- # know of a package with that name, if not try again!
- filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
- 'runtime-reverse',
- new_pkg)
- if os.path.exists(filename):
- found = 1
- break
-
- if found == 1 and fixed_arch == fixed_cmp_arch:
- break
- #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
- return new_pkg, new_arch
-
- def _list_pkg_deps(self):
- cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
- "-t", self.image_rpmlib]
-
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get the package dependencies. Command '%s' "
- "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- return output
-
def list_pkgs(self):
- cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
- cmd += ' -D "_dbpath /var/lib/rpm" -qa'
- cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"
-
- try:
- # bb.note(cmd)
- tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get the installed packages list. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- output = dict()
- deps = dict()
- dependencies = self._list_pkg_deps()
-
- # Populate deps dictionary for better manipulation
- for line in dependencies.splitlines():
- try:
- pkg, dep = line.split("|")
- if not pkg in deps:
- deps[pkg] = list()
- if not dep in deps[pkg]:
- deps[pkg].append(dep)
- except:
- # Ignore any other lines they're debug or errors
- pass
-
- for line in tmp_output.split('\n'):
- if len(line.strip()) == 0:
- continue
- pkg = line.split()[0]
- arch = line.split()[1]
- ver = line.split()[2]
- dep = deps.get(pkg, [])
-
- # Skip GPG keys
- if pkg == 'gpg-pubkey':
- continue
-
- pkgorigin = line.split()[3]
- new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)
-
- output[new_pkg] = {"arch":new_arch, "ver":ver,
- "filename":pkgorigin, "deps":dep}
-
- return output
-
+ return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed()
class OpkgPkgsList(PkgsList):
def __init__(self, d, rootfs_dir, config_file):
@@ -466,7 +332,7 @@ class OpkgPkgsList(PkgsList):
self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
- self.opkg_args += self.d.getVar("OPKG_ARGS", True)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
def list_pkgs(self, format=None):
cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
@@ -510,42 +376,129 @@ class PackageManager(object, metaclass=ABCMeta):
This is an abstract class. Do not instantiate this directly.
"""
- def __init__(self, d):
+ def __init__(self, d, target_rootfs):
self.d = d
+ self.target_rootfs = target_rootfs
self.deploy_dir = None
self.deploy_lock = None
- self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""
- self.feed_base_paths = self.d.getVar('PACKAGE_FEED_BASE_PATHS', True) or ""
- self.feed_archs = self.d.getVar('PACKAGE_FEED_ARCHS', True)
+ self._initialize_intercepts()
+
+ def _initialize_intercepts(self):
+ bb.note("Initializing intercept dir for %s" % self.target_rootfs)
+        # As there might be more than one instance of PackageManager operating at the same time,
+        # we need to isolate the intercept_scripts directories from each other;
+        # hence the ugly hash digest in the dir name.
+ self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
+ (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))
+
+ postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
+ if not postinst_intercepts:
+ postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
+ if not postinst_intercepts_path:
+ postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+ postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)
+
+ bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts))
+ bb.utils.remove(self.intercepts_dir, True)
+ bb.utils.mkdirhier(self.intercepts_dir)
+ for intercept in postinst_intercepts:
+ bb.utils.copyfile(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
+
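
A small sketch (plain Python, no bitbake datastore assumed) of the per-rootfs isolation scheme used above; two PackageManager instances with different target rootfs paths always get disjoint intercept directories.

    import hashlib
    import os

    def intercepts_dir_for(workdir, target_rootfs):
        # The digest of the rootfs path keeps concurrent PackageManager
        # instances from sharing (and clobbering) one scripts directory.
        digest = hashlib.sha256(target_rootfs.encode()).hexdigest()
        return os.path.join(workdir, "intercept_scripts-%s" % digest)
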
+ @abstractmethod
+ def _handle_intercept_failure(self, failed_script):
+ pass
+
+ def _postpone_to_first_boot(self, postinst_intercept_hook):
+ with open(postinst_intercept_hook) as intercept:
+ registered_pkgs = None
+ for line in intercept.read().split("\n"):
+ m = re.match(r"^##PKGS:(.*)", line)
+ if m is not None:
+ registered_pkgs = m.group(1).strip()
+ break
+
+ if registered_pkgs is not None:
+ bb.note("If an image is being built, the postinstalls for the following packages "
+ "will be postponed for first boot: %s" %
+ registered_pkgs)
+
+ # call the backend dependent handler
+ self._handle_intercept_failure(registered_pkgs)
+
+
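
The ##PKGS: marker parsed by _postpone_to_first_boot is a plain-text convention; a self-contained sketch of the extraction, with hypothetical sample data:

    import re

    def registered_pkgs(script_text):
        # Intercept scripts record the packages that queued them in a
        # '##PKGS: pkg-a pkg-b' line; the first such line wins.
        for line in script_text.split("\n"):
            m = re.match(r"^##PKGS:(.*)", line)
            if m is not None:
                return m.group(1).strip()
        return None

    # registered_pkgs("#!/bin/sh\n##PKGS: glibc-locale fontcache\n")
    # -> 'glibc-locale fontcache'
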
+ def run_intercepts(self, populate_sdk=None):
+ intercepts_dir = self.intercepts_dir
+
+ bb.note("Running intercept scripts:")
+ os.environ['D'] = self.target_rootfs
+ os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
+ for script in os.listdir(intercepts_dir):
+ script_full = os.path.join(intercepts_dir, script)
+
+ if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
+ continue
+
+ # we do not want to run any multilib variant of this
+ if script.startswith("delay_to_first_boot"):
+ self._postpone_to_first_boot(script_full)
+ continue
+
+ if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ continue
+
+ bb.note("> Executing %s intercept ..." % script)
+
+ try:
+ output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
+ if output: bb.note(output.decode("utf-8"))
+ except subprocess.CalledProcessError as e:
+ bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
+ if populate_sdk == 'host':
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ elif populate_sdk == 'target':
+ if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ else:
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ else:
+ if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ self._postpone_to_first_boot(script_full)
+ else:
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
- """
- Update the package manager package database.
- """
@abstractmethod
def update(self):
+ """
+ Update the package manager package database.
+ """
pass
- """
- Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
- True, installation failures are ignored.
- """
@abstractmethod
def install(self, pkgs, attempt_only=False):
+ """
+ Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+ True, installation failures are ignored.
+ """
pass
- """
- Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
- is False, the any dependencies are left in place.
- """
@abstractmethod
def remove(self, pkgs, with_dependencies=True):
+ """
+ Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+ is False, then any dependencies are left in place.
+ """
pass
- """
- This function creates the index files
- """
@abstractmethod
def write_index(self):
+ """
+ This function creates the index files
+ """
pass
@abstractmethod
@@ -557,31 +510,59 @@ class PackageManager(object, metaclass=ABCMeta):
pass
@abstractmethod
- def insert_feeds_uris(self):
+ def extract(self, pkg):
+ """
+        Returns the path to a tmpdir containing the contents of a package.
+        Deleting the tmpdir is the responsibility of the caller.
+ """
pass
- """
- Install complementary packages based upon the list of currently installed
- packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
- these packages, if they don't exist then no error will occur. Note: every
- backend needs to call this function explicitly after the normal package
- installation
- """
- def install_complementary(self, globs=None):
- # we need to write the list of installed packages to a file because the
- # oe-pkgdata-util reads it from a file
- installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
- "installed_pkgs.txt")
- with open(installed_pkgs_file, "w+") as installed_pkgs:
- pkgs = self.list_installed()
- output = oe.utils.format_pkg_list(pkgs, "arch")
- installed_pkgs.write(output)
+ @abstractmethod
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ """
+        Add remote package feeds into the repository manager configuration. The parameters
+ for the feeds are set by feed_uris, feed_base_paths and feed_archs.
+ See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
+ for their description.
+ """
+ pass
+
+ def install_glob(self, globs, sdk=False):
+ """
+ Install all packages that match a glob.
+ """
+ # TODO don't have sdk here but have a property on the superclass
+ # (and respect in install_complementary)
+ if sdk:
+ pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}")
+ else:
+ pkgdatadir = self.d.getVar("PKGDATA_DIR")
+
+ try:
+ bb.note("Installing globbed packages...")
+ cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
+ pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+ self.install(pkgs.split(), attempt_only=True)
+ except subprocess.CalledProcessError as e:
+ # Return code 1 means no packages matched
+ if e.returncode != 1:
+ bb.fatal("Could not compute globbed packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ def install_complementary(self, globs=None):
+ """
+        Install complementary packages based upon the list of currently installed
+        packages, e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
+        these packages; if they don't exist then no error will occur. Note: every
+        backend needs to call this function explicitly after the normal package
+        installation.
+ """
if globs is None:
- globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True)
+ globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
split_linguas = set()
- for translation in self.d.getVar('IMAGE_LINGUAS', True).split():
+ for translation in self.d.getVar('IMAGE_LINGUAS').split():
split_linguas.add(translation)
split_linguas.add(translation.split('-')[0])
@@ -589,26 +570,42 @@ class PackageManager(object, metaclass=ABCMeta):
for lang in split_linguas:
globs += " *-locale-%s" % lang
+ for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
+ globs += (" " + complementary_linguas) % lang
if globs is None:
return
- cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
- "-p", self.d.getVar('PKGDATA_DIR', True), "glob", installed_pkgs_file,
- globs]
- exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True)
- if exclude:
- cmd.extend(['-x', exclude])
- try:
- bb.note("Installing complementary packages ...")
- bb.note('Running %s' % cmd)
- complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not compute complementary packages list. Command "
- "'%s' returned %d:\n%s" %
- (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- self.install(complementary_pkgs.split(), attempt_only=True)
- os.remove(installed_pkgs_file)
+ # we need to write the list of installed packages to a file because the
+ # oe-pkgdata-util reads it from a file
+ with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
+ pkgs = self.list_installed()
+ output = oe.utils.format_pkg_list(pkgs, "arch")
+ installed_pkgs.write(output)
+ installed_pkgs.flush()
+
+ cmd = ["oe-pkgdata-util",
+ "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
+ globs]
+ exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
+ if exclude:
+ cmd.extend(['--exclude=' + '|'.join(exclude.split())])
+ try:
+ bb.note("Installing complementary packages ...")
+ bb.note('Running %s' % cmd)
+ complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+ self.install(complementary_pkgs.split(), attempt_only=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not compute complementary packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ target_arch = self.d.getVar('TARGET_ARCH')
+ localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
+ if os.path.exists(localedir) and os.listdir(localedir):
+ generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
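
How IMAGE_LINGUAS feeds the glob list above can be shown in isolation; for example 'en-us pt-br' contributes locale globs for both the full codes and the base languages (a sketch mirroring the split_linguas handling, not the patched method itself):

    def locale_globs(image_linguas):
        # Both 'en-us' and its base language 'en' get a *-locale-* glob.
        langs = set()
        for translation in image_linguas.split():
            langs.add(translation)
            langs.add(translation.split('-')[0])
        return ["*-locale-%s" % lang for lang in sorted(langs)]

    # locale_globs("en-us pt-br")
    # -> ['*-locale-en', '*-locale-en-us', '*-locale-pt', '*-locale-pt-br']
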
def deploy_dir_lock(self):
if self.deploy_dir is None:
@@ -626,13 +623,13 @@ class PackageManager(object, metaclass=ABCMeta):
self.deploy_lock = None
- """
- Construct URIs based on the following pattern: uri/base_path where 'uri'
- and 'base_path' correspond to each element of the corresponding array
- argument leading to len(uris) x len(base_paths) elements on the returned
- array
- """
def construct_uris(self, uris, base_paths):
+ """
+        Construct URIs based on the pattern uri/base_path, where 'uri' and
+        'base_path' iterate over the corresponding list arguments, yielding
+        len(uris) x len(base_paths) elements in the returned list.
+ """
def _append(arr1, arr2, sep='/'):
res = []
narr1 = [a.rstrip(sep) for a in arr1]
@@ -646,837 +643,416 @@ class PackageManager(object, metaclass=ABCMeta):
return res
return _append(uris, base_paths)
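
Since the body of _append is elided by the hunk above, here is a behaviour-equivalent sketch (assuming the elided code joins each pair with '/', as the docstring states) together with a worked example:

    def construct_uris(uris, base_paths):
        # Cross product: every URI is combined with every base path,
        # giving len(uris) x len(base_paths) results in URI-major order.
        narr1 = [u.rstrip('/') for u in uris]
        narr2 = [b.strip('/') for b in base_paths]
        return [u + '/' + b if b else u for u in narr1 for b in narr2]

    # construct_uris(['http://feed.example.com'], ['rpm', 'ipk'])
    # -> ['http://feed.example.com/rpm', 'http://feed.example.com/ipk']
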
+def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies):
+ """
+ Go through our do_package_write_X dependencies and hardlink the packages we depend
+    upon into the repo directory. This prevents us from seeing other packages that
+    may have been built but that we don't depend upon, as well as packages for
+    architectures we don't support.
+ """
+ import errno
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ pn = d.getVar("PN")
+ seendirs = set()
+ multilibs = {}
+
+ bb.utils.remove(subrepo_dir, recurse=True)
+ bb.utils.mkdirhier(subrepo_dir)
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps or not filterbydependencies:
+ oe.path.symlink(deploydir, subrepo_dir, True)
+ return
+
+ start = None
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+ pkgdeps = set()
+ start = [start]
+ seen = set(start)
+ # Support direct dependencies (do_rootfs -> do_package_write_X)
+ # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
+ while start:
+ next = []
+ for dep2 in start:
+ for dep in taskdepdata[dep2][3]:
+ if taskdepdata[dep][0] != pn:
+ if "do_" + taskname in dep:
+ pkgdeps.add(dep)
+ elif dep not in seen:
+ next.append(dep)
+ seen.add(dep)
+ start = next
+
+ for dep in pkgdeps:
+ c = taskdepdata[dep][0]
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
+ if not manifest:
+ bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
+ if not os.path.exists(manifest):
+ continue
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ deploydir = os.path.normpath(deploydir)
+ if bb.data.inherits_class('packagefeed-stability', d):
+ dest = l.replace(deploydir + "-prediff", "")
+ else:
+ dest = l.replace(deploydir, "")
+ dest = subrepo_dir + dest
+ if l.endswith("/"):
+ if dest not in seendirs:
+ bb.utils.mkdirhier(dest)
+ seendirs.add(dest)
+ continue
+ # Try to hardlink the file, copy if that fails
+ destdir = os.path.dirname(dest)
+ if destdir not in seendirs:
+ bb.utils.mkdirhier(destdir)
+ seendirs.add(destdir)
+ try:
+ os.link(l, dest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(l, dest)
+ else:
+ raise
+
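
The hardlink-with-copy-fallback at the end of create_packages_dir is a common pattern worth isolating; a sketch using shutil instead of bb.utils for self-containedness:

    import errno
    import os
    import shutil

    def link_or_copy(src, dest):
        # Hardlink into the per-task repo when possible; fall back to a
        # copy when src and dest live on different filesystems (EXDEV).
        try:
            os.link(src, dest)
        except OSError as err:
            if err.errno == errno.EXDEV:
                shutil.copyfile(src, dest)
            else:
                raise
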
class RpmPM(PackageManager):
def __init__(self,
d,
target_rootfs,
target_vendor,
task_name='target',
- providename=None,
arch_var=None,
- os_var=None):
- super(RpmPM, self).__init__(d)
- self.target_rootfs = target_rootfs
+ os_var=None,
+ rpm_repo_workdir="oe-rootfs-repo",
+ filterbydependencies=True,
+ needfeed=True):
+ super(RpmPM, self).__init__(d, target_rootfs)
self.target_vendor = target_vendor
self.task_name = task_name
- self.providename = providename
- self.fullpkglist = list()
- self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
- self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
- self.install_dir_name = "oe_install"
- self.install_dir_path = os.path.join(self.target_rootfs, self.install_dir_name)
- self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
- # 0 = default, only warnings
- # 1 = --log-level=info (includes information about executing scriptlets and their output)
- # 2 = --log-level=debug
- # 3 = --log-level=debug plus dumps of scriplet content and command invocation
- self.debug_level = int(d.getVar('ROOTFS_RPM_DEBUG', True) or "0")
- self.smart_opt = "--log-level=%s --data-dir=%s" % \
- ("warning" if self.debug_level == 0 else
- "info" if self.debug_level == 1 else
- "debug",
- os.path.join(target_rootfs, 'var/lib/smart'))
- self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
+ if arch_var == None:
+ self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
+ else:
+ self.archs = self.d.getVar(arch_var).replace("-","_")
+ if task_name == "host":
+ self.primary_arch = self.d.getVar('SDK_ARCH')
+ else:
+ self.primary_arch = self.d.getVar('MACHINE_ARCH')
+
+ if needfeed:
+ self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
+ create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
+
+ self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
+ self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
self.task_name)
- self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
- self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')
-
if not os.path.exists(self.d.expand('${T}/saved')):
bb.utils.mkdirhier(self.d.expand('${T}/saved'))
- packageindex_dir = os.path.join(self.d.getVar('WORKDIR', True), 'rpms')
- self.indexer = RpmIndexer(self.d, packageindex_dir)
- self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
+ def _configure_dnf(self):
+        # libsolv handles 'noarch' internally; we don't need to specify it explicitly
+ archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
+ # This prevents accidental matching against libsolv's built-in policies
+ if len(archs) <= 1:
+ archs = archs + ["bogusarch"]
+ confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
+ bb.utils.mkdirhier(confdir)
+ open(confdir + "arch", 'w').write(":".join(archs))
+ distro_codename = self.d.getVar('DISTRO_CODENAME')
+ open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
+
+ open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
+
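
The arch list written to etc/dnf/vars/arch can be computed standalone; a sketch with a hypothetical multilib arch set:

    def dnf_arch_var(all_archs):
        # Most-specific arch first; 'any', 'all' and 'noarch' are handled
        # by libsolv itself. The 'bogusarch' padding stops a lone real
        # arch from accidentally matching one of libsolv's built-in
        # arch policies.
        archs = [a for a in reversed(all_archs.split())
                 if a not in ("any", "all", "noarch")]
        if len(archs) <= 1:
            archs.append("bogusarch")
        return ":".join(archs)

    # dnf_arch_var("all any noarch x86_64 core2_64") -> 'core2_64:x86_64'
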
+
+ def _configure_rpm(self):
+ # We need to configure rpm to use our primary package architecture as the installation architecture,
+ # and to make it compatible with other package architectures that we use.
+        # Otherwise it will refuse to proceed with package installation.
+ platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
+ rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
+ bb.utils.mkdirhier(platformconfdir)
+ open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
+ with open(rpmrcconfdir + "rpmrc", 'w') as f:
+ f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
+ f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)
+
+ open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
+ if self.d.getVar('RPM_PREFER_ELF_ARCH'):
+ open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
+ else:
+ open(platformconfdir + "macros", 'a').write("%_prefer_color 7")
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
+ signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
+ pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
+ signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
+ rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
+ cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Importing GPG key failed. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
+ def create_configs(self):
+ self._configure_dnf()
+ self._configure_rpm()
- def insert_feeds_uris(self):
- if self.feed_uris == "":
- return
+ def write_index(self):
+ lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
+ lf = bb.utils.lockfile(lockfilename, False)
+ RpmIndexer(self.d, self.rpm_repo_dir).write_index()
+ bb.utils.unlockfile(lf)
- arch_list = []
- if self.feed_archs is not None:
- # User define feed architectures
- arch_list = self.feed_archs.split()
- else:
- # List must be prefered to least preferred order
- default_platform_extra = list()
- platform_extra = list()
- bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
- for mlib in self.ml_os_list:
- for arch in self.ml_prefix_list[mlib]:
- plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
- if mlib == bbextendvariant:
- if plt not in default_platform_extra:
- default_platform_extra.append(plt)
- else:
- if plt not in platform_extra:
- platform_extra.append(plt)
- platform_extra = default_platform_extra + platform_extra
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ from urllib.parse import urlparse
- for canonical_arch in platform_extra:
- arch = canonical_arch.split('-')[0]
- if not os.path.exists(os.path.join(self.deploy_dir, arch)):
- continue
- arch_list.append(arch)
+ if feed_uris == "":
+ return
- feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
-
- uri_iterator = 0
- channel_priority = 10 + 5 * len(feed_uris) * (len(arch_list) if arch_list else 1)
-
- for uri in feed_uris:
- if arch_list:
- for arch in arch_list:
- bb.note('Adding Smart channel url%d%s (%s)' %
- (uri_iterator, arch, channel_priority))
- self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/%s -y'
- % (uri_iterator, arch, uri, arch))
- self._invoke_smart('channel --set url%d-%s priority=%d' %
- (uri_iterator, arch, channel_priority))
- channel_priority -= 5
+ gpg_opts = ''
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ gpg_opts += 'repo_gpgcheck=1\n'
+ gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
+ gpg_opts += 'gpgcheck=0\n'
+
+ bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
+ remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ for uri in remote_uris:
+ repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
+ if feed_archs is not None:
+ for arch in feed_archs.split():
+ repo_uri = uri + "/" + arch
+ repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
+ open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
+ "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
else:
- bb.note('Adding Smart channel url%d (%s)' %
- (uri_iterator, channel_priority))
- self._invoke_smart('channel --add url%d type=rpm-md baseurl=%s -y'
- % (uri_iterator, uri))
- self._invoke_smart('channel --set url%d priority=%d' %
- (uri_iterator, channel_priority))
- channel_priority -= 5
-
- uri_iterator += 1
-
- '''
- Create configs for rpm and smart, and multilib is supported
- '''
- def create_configs(self):
- target_arch = self.d.getVar('TARGET_ARCH', True)
- platform = '%s%s-%s' % (target_arch.replace('-', '_'),
- self.target_vendor,
- self.ml_os_list['default'])
-
- # List must be prefered to least preferred order
- default_platform_extra = list()
- platform_extra = list()
- bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
- for mlib in self.ml_os_list:
- for arch in self.ml_prefix_list[mlib]:
- plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
- if mlib == bbextendvariant:
- if plt not in default_platform_extra:
- default_platform_extra.append(plt)
- else:
- if plt not in platform_extra:
- platform_extra.append(plt)
- platform_extra = default_platform_extra + platform_extra
-
- self._create_configs(platform, platform_extra)
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
+ repo_uri = uri
+ open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
+ "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
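
Repo ids and display names are derived purely from the feed URI's path; a sketch with a hypothetical feed URI:

    from urllib.parse import urlparse

    def repo_names(uri):
        # '/repo/core2-64' -> id 'oe-remote-repo-repo-core2-64' and a
        # human-readable 'OE Remote Repo: repo core2-64'.
        parts = urlparse(uri).path.split("/")
        return ("oe-remote-repo" + "-".join(parts),
                "OE Remote Repo:" + " ".join(parts))

    # repo_names("http://feed.example.com/repo/core2-64")
    # -> ('oe-remote-repo-repo-core2-64', 'OE Remote Repo: repo core2-64')
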
- def _invoke_smart(self, args):
- cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args)
- # bb.note(cmd)
- try:
- complementary_pkgs = subprocess.check_output(cmd,
- stderr=subprocess.STDOUT,
- shell=True).decode("utf-8")
- # bb.note(complementary_pkgs)
- return complementary_pkgs
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not invoke smart. Command "
- "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- def _search_pkg_name_in_feeds(self, pkg, feed_archs):
- for arch in feed_archs:
- arch = arch.replace('-', '_')
- regex_match = re.compile(r"^%s-[^-]*-[^-]*@%s$" % \
- (re.escape(pkg), re.escape(arch)))
- for p in self.fullpkglist:
- if regex_match.match(p) is not None:
- # First found is best match
- # bb.note('%s -> %s' % (pkg, pkg + '@' + arch))
- return pkg + '@' + arch
-
- # Search provides if not found by pkgname.
- bb.note('Not found %s by name, searching provides ...' % pkg)
- cmd = "%s %s query --provides %s --show-format='$name-$version'" % \
- (self.smart_cmd, self.smart_opt, pkg)
- cmd += " | sed -ne 's/ *Provides://p'"
- bb.note('cmd: %s' % cmd)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- # Found a provider
- if output:
- bb.note('Found providers for %s: %s' % (pkg, output))
- for p in output.split():
- for arch in feed_archs:
- arch = arch.replace('-', '_')
- if p.rstrip().endswith('@' + arch):
- return p
-
- return ""
-
- '''
- Translate the OE multilib format names to the RPM/Smart format names
- It searched the RPM/Smart format names in probable multilib feeds first,
- and then searched the default base feed.
- '''
- def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
- new_pkgs = list()
-
- for pkg in pkgs:
- new_pkg = pkg
- # Search new_pkg in probable multilibs first
- for mlib in self.ml_prefix_list:
- # Jump the default archs
- if mlib == 'default':
- continue
-
- subst = pkg.replace(mlib + '-', '')
- # if the pkg in this multilib feed
- if subst != pkg:
- feed_archs = self.ml_prefix_list[mlib]
- new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
- if not new_pkg:
- # Failed to translate, package not found!
- err_msg = '%s not found in the %s feeds (%s) in %s.' % \
- (pkg, mlib, " ".join(feed_archs), self.d.getVar('DEPLOY_DIR_RPM', True))
- if not attempt_only:
- bb.error(err_msg)
- bb.fatal("This is often caused by an empty package declared " \
- "in a recipe's PACKAGES variable. (Empty packages are " \
- "not constructed unless ALLOW_EMPTY_<pkg> = '1' is used.)")
- bb.warn(err_msg)
- else:
- new_pkgs.append(new_pkg)
+ def _prepare_pkg_transaction(self):
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
- break
- # Apparently not a multilib package...
- if pkg == new_pkg:
- # Search new_pkg in default archs
- default_archs = self.ml_prefix_list['default']
- new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
- if not new_pkg:
- err_msg = '%s not found in the feeds (%s) in %s.' % \
- (pkg, " ".join(default_archs), self.d.getVar('DEPLOY_DIR_RPM', True))
- if not attempt_only:
- bb.error(err_msg)
- bb.fatal("This is often caused by an empty package declared " \
- "in a recipe's PACKAGES variable. (Empty packages are " \
- "not constructed unless ALLOW_EMPTY_<pkg> = '1' is used.)")
- bb.warn(err_msg)
- else:
- new_pkgs.append(new_pkg)
-
- return new_pkgs
-
- def _create_configs(self, platform, platform_extra):
- # Setup base system configuration
- bb.note("configuring RPM platform settings")
-
- # Configure internal RPM environment when using Smart
- os.environ['RPM_ETCRPM'] = self.etcrpm_dir
- bb.utils.mkdirhier(self.etcrpm_dir)
-
- # Setup temporary directory -- install...
- if os.path.exists(self.install_dir_path):
- bb.utils.remove(self.install_dir_path, True)
- bb.utils.mkdirhier(os.path.join(self.install_dir_path, 'tmp'))
-
- channel_priority = 5
- platform_dir = os.path.join(self.etcrpm_dir, "platform")
- sdkos = self.d.getVar("SDK_OS", True)
- with open(platform_dir, "w+") as platform_fd:
- platform_fd.write(platform + '\n')
- for pt in platform_extra:
- channel_priority += 5
- if sdkos:
- tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, pt)
- tmp = re.sub("-linux.*$", "-linux.*\n", tmp)
- platform_fd.write(tmp)
-
- # Tell RPM that the "/" directory exist and is available
- bb.note("configuring RPM system provides")
- sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo")
- bb.utils.mkdirhier(sysinfo_dir)
- with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames:
- dirnames.write("/\n")
-
- if self.providename:
- providename_dir = os.path.join(sysinfo_dir, "Providename")
- if not os.path.exists(providename_dir):
- providename_content = '\n'.join(self.providename)
- providename_content += '\n'
- open(providename_dir, "w+").write(providename_content)
-
- # Configure RPM... we enforce these settings!
- bb.note("configuring RPM DB settings")
- # After change the __db.* cache size, log file will not be
- # generated automatically, that will raise some warnings,
- # so touch a bare log for rpm write into it.
- rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001')
- if not os.path.exists(rpmlib_log):
- bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log'))
- open(rpmlib_log, 'w+').close()
-
- DB_CONFIG_CONTENT = "# ================ Environment\n" \
- "set_data_dir .\n" \
- "set_create_dir .\n" \
- "set_lg_dir ./log\n" \
- "set_tmp_dir ./tmp\n" \
- "set_flags db_log_autoremove on\n" \
- "\n" \
- "# -- thread_count must be >= 8\n" \
- "set_thread_count 64\n" \
- "\n" \
- "# ================ Logging\n" \
- "\n" \
- "# ================ Memory Pool\n" \
- "set_cachesize 0 1048576 0\n" \
- "set_mp_mmapsize 268435456\n" \
- "\n" \
- "# ================ Locking\n" \
- "set_lk_max_locks 16384\n" \
- "set_lk_max_lockers 16384\n" \
- "set_lk_max_objects 16384\n" \
- "mutex_set_max 163840\n" \
- "\n" \
- "# ================ Replication\n"
-
- db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG')
- if not os.path.exists(db_config_dir):
- open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT)
-
- # Create database so that smart doesn't complain (lazy init)
- opt = "-qa"
- cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % (
- self.rpm_cmd, self.target_rootfs, opt)
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Create rpm database failed. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- # Import GPG key to RPM database of the target system
- if self.d.getVar('RPM_SIGN_PACKAGES', True) == '1':
- pubkey_path = self.d.getVar('RPM_GPG_PUBKEY', True)
- cmd = "%s --root %s --dbpath /var/lib/rpm --import %s > /dev/null" % (
- self.rpm_cmd, self.target_rootfs, pubkey_path)
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ def install(self, pkgs, attempt_only = False):
+ if len(pkgs) == 0:
+ return
+ self._prepare_pkg_transaction()
- # Configure smart
- bb.note("configuring Smart settings")
- bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
- True)
- self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs)
- self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm')
- self._invoke_smart('config --set rpm-extra-macros._var=%s' %
- self.d.getVar('localstatedir', True))
- cmd = "config --set rpm-extra-macros._tmppath=/%s/tmp" % (self.install_dir_name)
-
- prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True)
- if prefer_color:
- if prefer_color not in ['0', '1', '2', '4']:
- bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n"
- "\t1: ELF32 wins\n"
- "\t2: ELF64 wins\n"
- "\t4: ELF64 N32 wins (mips64 or mips64el only)" %
- prefer_color)
- if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \
- ['mips64', 'mips64el']:
- bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el "
- "only.")
- self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s'
- % prefer_color)
-
- self._invoke_smart(cmd)
- self._invoke_smart('config --set rpm-ignoresize=1')
-
- # Write common configuration for host and target usage
- self._invoke_smart('config --set rpm-nolinktos=1')
- self._invoke_smart('config --set rpm-noparentdirs=1')
- check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True)
- if check_signature and check_signature.strip() == "0":
- self._invoke_smart('config --set rpm-check-signatures=false')
- for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
- self._invoke_smart('flag --set ignore-recommends %s' % i)
-
- # Do the following configurations here, to avoid them being
- # saved for field upgrade
- if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1":
- self._invoke_smart('config --set ignore-all-recommends=1')
- pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
- for i in pkg_exclude.split():
- self._invoke_smart('flag --set exclude-packages %s' % i)
-
- # Optional debugging
- # self._invoke_smart('config --set rpm-log-level=debug')
- # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile'
- # self._invoke_smart(cmd)
- ch_already_added = []
- for canonical_arch in platform_extra:
- arch = canonical_arch.split('-')[0]
- arch_channel = os.path.join(self.d.getVar('WORKDIR', True), 'rpms', arch)
- oe.path.remove(arch_channel)
- deploy_arch_dir = os.path.join(self.deploy_dir, arch)
- if not os.path.exists(deploy_arch_dir):
- continue
+ bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
+ package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
+ exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
- lockfilename = self.d.getVar('DEPLOY_DIR_RPM', True) + "/rpm.lock"
- lf = bb.utils.lockfile(lockfilename, False)
- oe.path.copyhardlinktree(deploy_arch_dir, arch_channel)
- bb.utils.unlockfile(lf)
-
- if not arch in ch_already_added:
- bb.note('Adding Smart channel %s (%s)' %
- (arch, channel_priority))
- self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y'
- % (arch, arch_channel))
- self._invoke_smart('channel --set %s priority=%d' %
- (arch, channel_priority))
- channel_priority -= 5
-
- ch_already_added.append(arch)
-
- bb.note('adding Smart RPM DB channel')
- self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
-
- # Construct install scriptlet wrapper.
- # Scripts need to be ordered when executed, this ensures numeric order.
- # If we ever run into needing more the 899 scripts, we'll have to.
- # change num to start with 1000.
- #
- scriptletcmd = "$2 $1/$3 $4\n"
- scriptpath = "$1/$3"
-
- # When self.debug_level >= 3, also dump the content of the
- # executed scriptlets and how they get invoked. We have to
- # replace "exit 1" and "ERR" because printing those as-is
- # would trigger a log analysis failure.
- if self.debug_level >= 3:
- dump_invocation = 'echo "Executing ${name} ${kind} with: ' + scriptletcmd + '"\n'
- dump_script = 'cat ' + scriptpath + '| sed -e "s/exit 1/exxxit 1/g" -e "s/ERR/IRR/g"; echo\n'
- else:
- dump_invocation = 'echo "Executing ${name} ${kind}"\n'
- dump_script = ''
-
- SCRIPTLET_FORMAT = "#!/bin/bash\n" \
- "\n" \
- "export PATH=%s\n" \
- "export D=%s\n" \
- 'export OFFLINE_ROOT="$D"\n' \
- 'export IPKG_OFFLINE_ROOT="$D"\n' \
- 'export OPKG_OFFLINE_ROOT="$D"\n' \
- "export INTERCEPT_DIR=%s\n" \
- "export NATIVE_ROOT=%s\n" \
- "\n" \
- "name=`head -1 " + scriptpath + " | cut -d\' \' -f 2`\n" \
- "kind=`head -1 " + scriptpath + " | cut -d\' \' -f 4`\n" \
- + dump_invocation \
- + dump_script \
- + scriptletcmd + \
- "ret=$?\n" \
- "echo Result of ${name} ${kind}: ${ret}\n" \
- "if [ ${ret} -ne 0 ]; then\n" \
- " if [ $4 -eq 1 ]; then\n" \
- " mkdir -p $1/etc/rpm-postinsts\n" \
- " num=100\n" \
- " while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \
- ' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \
- ' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \
- " cat " + scriptpath + " >> $1/etc/rpm-postinsts/${num}-${name}\n" \
- " chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \
- ' echo "Info: deferring ${name} ${kind} install scriptlet to first boot"\n' \
- " else\n" \
- ' echo "Error: ${name} ${kind} remove scriptlet failed"\n' \
- " fi\n" \
- "fi\n"
-
- intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts')
- native_root = self.d.getVar('STAGING_DIR_NATIVE', True)
- scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'],
- self.target_rootfs,
- intercept_dir,
- native_root)
- open(self.scriptlet_wrapper, 'w+').write(scriptlet_content)
-
- bb.note("configuring RPM cross-install scriptlet_wrapper")
- os.chmod(self.scriptlet_wrapper, 0o755)
- cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \
- self.scriptlet_wrapper
- self._invoke_smart(cmd)
-
- # Debug to show smart config info
- # bb.note(self._invoke_smart('config --show'))
+ output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
+ (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
+ (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) +
+ (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
+ ["install"] +
+ pkgs)
- def update(self):
- self._invoke_smart('update rpmsys')
-
- def get_rdepends_recursively(self, pkgs):
- # pkgs will be changed during the loop, so use [:] to make a copy.
- for pkg in pkgs[:]:
- sub_data = oe.packagedata.read_subpkgdata(pkg, self.d)
- sub_rdep = sub_data.get("RDEPENDS_" + pkg)
- if not sub_rdep:
- continue
- done = list(bb.utils.explode_dep_versions2(sub_rdep).keys())
- next = done
- # Find all the rdepends on dependency chain
- while next:
- new = []
- for sub_pkg in next:
- sub_data = oe.packagedata.read_subpkgdata(sub_pkg, self.d)
- sub_pkg_rdep = sub_data.get("RDEPENDS_" + sub_pkg)
- if not sub_pkg_rdep:
- continue
- for p in bb.utils.explode_dep_versions2(sub_pkg_rdep):
- # Already handled, skip it.
- if p in done or p in pkgs:
- continue
- # It's a new dep
- if oe.packagedata.has_subpkgdata(p, self.d):
- done.append(p)
- new.append(p)
- next = new
- pkgs.extend(done)
- return pkgs
+ failed_scriptlets_pkgnames = collections.OrderedDict()
+ for line in output.splitlines():
+ if line.startswith("Error in POSTIN scriptlet in rpm package"):
+ failed_scriptlets_pkgnames[line.split()[-1]] = True
- '''
- Install pkgs with smart, the pkg name is oe format
- '''
- def install(self, pkgs, attempt_only=False):
+ if len(failed_scriptlets_pkgnames) > 0:
+ failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
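
Scriptlet failures are scraped from dnf's textual output rather than its exit status; a standalone sketch of the scan (the dnf message format is the one matched above):

    import collections

    def failed_scriptlet_pkgs(dnf_output):
        # dnf prints 'Error in POSTIN scriptlet in rpm package <pkg>';
        # an OrderedDict keeps each package once, in first-failure order.
        failed = collections.OrderedDict()
        for line in dnf_output.splitlines():
            if line.startswith("Error in POSTIN scriptlet in rpm package"):
                failed[line.split()[-1]] = True
        return list(failed.keys())
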
+ def remove(self, pkgs, with_dependencies = True):
if not pkgs:
- bb.note("There are no packages to install")
return
- bb.note("Installing the following packages: %s" % ' '.join(pkgs))
- if not attempt_only:
- # Pull in multilib requires since rpm may not pull in them
- # correctly, for example,
- # lib32-packagegroup-core-standalone-sdk-target requires
- # lib32-libc6, but rpm may pull in libc6 rather than lib32-libc6
- # since it doesn't know mlprefix (lib32-), bitbake knows it and
- # can handle it well, find out the RDEPENDS on the chain will
- # fix the problem. Both do_rootfs and do_populate_sdk have this
- # issue.
- # The attempt_only packages don't need this since they are
- # based on the installed ones.
- #
- # Separate pkgs into two lists, one is multilib, the other one
- # is non-multilib.
- ml_pkgs = []
- non_ml_pkgs = pkgs[:]
- for pkg in pkgs:
- for mlib in (self.d.getVar("MULTILIB_VARIANTS", True) or "").split():
- if pkg.startswith(mlib + '-'):
- ml_pkgs.append(pkg)
- non_ml_pkgs.remove(pkg)
-
- if len(ml_pkgs) > 0 and len(non_ml_pkgs) > 0:
- # Found both foo and lib-foo
- ml_pkgs = self.get_rdepends_recursively(ml_pkgs)
- non_ml_pkgs = self.get_rdepends_recursively(non_ml_pkgs)
- # Longer list makes smart slower, so only keep the pkgs
- # which have the same BPN, and smart can handle others
- # correctly.
- pkgs_new = []
- for pkg in non_ml_pkgs:
- for mlib in (self.d.getVar("MULTILIB_VARIANTS", True) or "").split():
- mlib_pkg = mlib + "-" + pkg
- if mlib_pkg in ml_pkgs:
- pkgs_new.append(pkg)
- pkgs_new.append(mlib_pkg)
- for pkg in pkgs:
- if pkg not in pkgs_new:
- pkgs_new.append(pkg)
- pkgs = pkgs_new
- new_depends = {}
- deps = bb.utils.explode_dep_versions2(" ".join(pkgs))
- for depend in deps:
- data = oe.packagedata.read_subpkgdata(depend, self.d)
- key = "PKG_%s" % depend
- if key in data:
- new_depend = data[key]
- else:
- new_depend = depend
- new_depends[new_depend] = deps[depend]
- pkgs = bb.utils.join_deps(new_depends, commasep=True).split(', ')
- pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only)
- if not pkgs:
- bb.note("There are no packages to install")
- return
- if not attempt_only:
- bb.note('to be installed: %s' % ' '.join(pkgs))
- cmd = "%s %s install -y %s" % \
- (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
- bb.note(cmd)
- else:
- bb.note('installing attempt only packages...')
- bb.note('Attempting %s' % ' '.join(pkgs))
- cmd = "%s %s install --attempt -y %s" % \
- (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
- try:
- output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to install packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- '''
- Remove pkgs with smart, the pkg name is smart/rpm format
- '''
- def remove(self, pkgs, with_dependencies=True):
- bb.note('to be removed: ' + ' '.join(pkgs))
-
- if not with_dependencies:
- cmd = "%s -e --nodeps " % self.rpm_cmd
- cmd += "--root=%s " % self.target_rootfs
- cmd += "--dbpath=/var/lib/rpm "
- cmd += "--define='_cross_scriptlet_wrapper %s' " % \
- self.scriptlet_wrapper
- cmd += "--define='_tmppath /%s/tmp' %s" % (self.install_dir_name, ' '.join(pkgs))
- else:
- # for pkg in pkgs:
- # bb.note('Debug: What required: %s' % pkg)
- # bb.note(self._invoke_smart('query %s --show-requiredby' % pkg))
+ self._prepare_pkg_transaction()
- cmd = "%s %s remove -y %s" % (self.smart_cmd,
- self.smart_opt,
- ' '.join(pkgs))
+ if with_dependencies:
+ self._invoke_dnf(["remove"] + pkgs)
+ else:
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]
- try:
- bb.note(cmd)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.note("Unable to remove packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ try:
+ bb.note("Running %s" % ' '.join([cmd] + args + pkgs))
+ output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
def upgrade(self):
- bb.note('smart upgrade')
- self._invoke_smart('upgrade')
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["upgrade"])
- def write_index(self):
- result = self.indexer.write_index()
-
- if result is not None:
- bb.fatal(result)
+ def autoremove(self):
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["autoremove"])
def remove_packaging_data(self):
- bb.utils.remove(self.image_rpmlib, True)
- bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
- True)
- bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True)
-
- # remove temp directory
- bb.utils.remove(self.install_dir_path, True)
+ self._invoke_dnf(["clean", "all"])
+ for dir in self.packaging_data_dirs:
+ bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
def backup_packaging_data(self):
- # Save the rpmlib for increment rpm image generation
- if os.path.exists(self.saved_rpmlib):
- bb.utils.remove(self.saved_rpmlib, True)
- shutil.copytree(self.image_rpmlib,
- self.saved_rpmlib,
- symlinks=True)
+        # Save the packaging dirs for incremental rpm image generation
+ if os.path.exists(self.saved_packaging_data):
+ bb.utils.remove(self.saved_packaging_data, True)
+ for i in self.packaging_data_dirs:
+ source_dir = oe.path.join(self.target_rootfs, i)
+ target_dir = oe.path.join(self.saved_packaging_data, i)
+ if os.path.isdir(source_dir):
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+ elif os.path.isfile(source_dir):
+ shutil.copy2(source_dir, target_dir)
def recovery_packaging_data(self):
# Move the rpmlib back
- if os.path.exists(self.saved_rpmlib):
- if os.path.exists(self.image_rpmlib):
- bb.utils.remove(self.image_rpmlib, True)
-
- bb.note('Recovery packaging data')
- shutil.copytree(self.saved_rpmlib,
- self.image_rpmlib,
- symlinks=True)
+ if os.path.exists(self.saved_packaging_data):
+ for i in self.packaging_data_dirs:
+ target_dir = oe.path.join(self.target_rootfs, i)
+ if os.path.exists(target_dir):
+ bb.utils.remove(target_dir, True)
+ source_dir = oe.path.join(self.saved_packaging_data, i)
+ if os.path.isdir(source_dir):
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+ elif os.path.isfile(source_dir):
+ shutil.copy2(source_dir, target_dir)
def list_installed(self):
- return self.pkgs_list.list_pkgs()
-
- '''
- If incremental install, we need to determine what we've got,
- what we need to add, and what to remove...
- The dump_install_solution will dump and save the new install
- solution.
- '''
- def dump_install_solution(self, pkgs):
- bb.note('creating new install solution for incremental install')
- if len(pkgs) == 0:
- return
-
- pkgs = self._pkg_translate_oe_to_smart(pkgs, False)
- install_pkgs = list()
+ output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
+ print_output = False)
+ packages = {}
+ current_package = None
+ current_deps = None
+ current_state = "initial"
+ for line in output.splitlines():
+ if line.startswith("Package:"):
+ package_info = line.split(" ")[1:]
+ current_package = package_info[0]
+ package_arch = package_info[1]
+ package_version = package_info[2]
+ package_rpm = package_info[3]
+ packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
+ current_deps = []
+ elif line.startswith("Dependencies:"):
+ current_state = "dependencies"
+ elif line.startswith("Recommendations"):
+ current_state = "recommendations"
+ elif line.startswith("DependenciesEndHere:"):
+ current_state = "initial"
+ packages[current_package]["deps"] = current_deps
+ elif len(line) > 0:
+ if current_state == "dependencies":
+ current_deps.append(line)
+ elif current_state == "recommendations":
+ current_deps.append("%s [REC]" % line)
+
+ return packages
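
A worked example (hypothetical package data) of the line format the parser above consumes and the structure it produces:

    # Input fragment:
    #   Package: busybox core2_64 1.27.2 busybox-1.27.2-r0.core2_64.rpm
    #   Dependencies:
    #   libc.so.6()(64bit)
    #   Recommendations:
    #   busybox-syslog
    #   DependenciesEndHere:
    expected = {
        "busybox": {"arch": "core2_64", "ver": "1.27.2",
                    "filename": "busybox-1.27.2-r0.core2_64.rpm",
                    "deps": ["libc.so.6()(64bit)", "busybox-syslog [REC]"]}
    }
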
- cmd = "%s %s install -y --dump %s 2>%s" % \
- (self.smart_cmd,
- self.smart_opt,
- ' '.join(pkgs),
- self.solution_manifest)
+ def update(self):
+ self._invoke_dnf(["makecache", "--refresh"])
+
+ def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
+ os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
+
+ dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
+ standard_dnf_args = ["-v", "--rpmverbosity=info", "-y",
+ "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
+ "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
+ "--installroot=%s" % (self.target_rootfs),
+ "--setopt=logdir=%s" % (self.d.getVar('T'))
+ ]
+ if hasattr(self, "rpm_repo_dir"):
+ standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir))
+ cmd = [dnf_cmd] + standard_dnf_args + dnf_args
+ bb.note('Running %s' % ' '.join(cmd))
try:
- # Disable rpmsys channel for the fake install
- self._invoke_smart('channel --disable rpmsys')
-
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- with open(self.solution_manifest, 'r') as manifest:
- for pkg in manifest.read().split('\n'):
- if '@' in pkg:
- install_pkgs.append(pkg)
+ output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
+ if print_output:
+ bb.debug(1, output)
+ return output
except subprocess.CalledProcessError as e:
- bb.note("Unable to dump install packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- # Recovery rpmsys channel
- self._invoke_smart('channel --enable rpmsys')
- return install_pkgs
-
- '''
- If incremental install, we need to determine what we've got,
- what we need to add, and what to remove...
- The load_old_install_solution will load the previous install
- solution
- '''
- def load_old_install_solution(self):
- bb.note('load old install solution for incremental install')
- installed_pkgs = list()
- if not os.path.exists(self.solution_manifest):
- bb.note('old install solution not exist')
- return installed_pkgs
-
- with open(self.solution_manifest, 'r') as manifest:
- for pkg in manifest.read().split('\n'):
- if '@' in pkg:
- installed_pkgs.append(pkg.strip())
-
- return installed_pkgs
-
- '''
- Dump all available packages in feeds, it should be invoked after the
- newest rpm index was created
- '''
- def dump_all_available_pkgs(self):
- available_manifest = self.d.expand('${T}/saved/available_pkgs.txt')
- available_pkgs = list()
- cmd = "%s %s query --output %s" % \
- (self.smart_cmd, self.smart_opt, available_manifest)
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- with open(available_manifest, 'r') as manifest:
- for pkg in manifest.read().split('\n'):
- if '@' in pkg:
- available_pkgs.append(pkg.strip())
- except subprocess.CalledProcessError as e:
- bb.note("Unable to list all available packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ if print_output:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ else:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:" % (' '.join(cmd), e.returncode))
+ return e.output.decode("utf-8")
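
To make the option plumbing concrete, this is roughly the command line that _invoke_dnf() assembles for update(); every path below is an invented example, not a value from the patch:

# Purely illustrative command assembly (all paths invented).
rootfs = "/work/rootfs"
cmd = ["dnf",
       "-v", "--rpmverbosity=info", "-y",
       "-c", "%s/etc/dnf/dnf.conf" % rootfs,
       "--setopt=reposdir=%s/etc/yum.repos.d" % rootfs,
       "--installroot=%s" % rootfs,
       "--setopt=logdir=/work/temp",
       "--repofrompath=oe-repo,/work/oe-rootfs-repo",
       "makecache", "--refresh"]
print(" ".join(cmd))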
- self.fullpkglist = available_pkgs
+ def dump_install_solution(self, pkgs):
+ with open(self.solution_manifest, 'w') as fd:
+ fd.write(" ".join(pkgs))
+ return pkgs
- return
+ def load_old_install_solution(self):
+ if not os.path.exists(self.solution_manifest):
+ return []
+ with open(self.solution_manifest, 'r') as fd:
+ return fd.read().split()
+
+ def _script_num_prefix(self, path):
+ files = os.listdir(path)
+ numbers = set()
+ numbers.add(99)
+ for f in files:
+ numbers.add(int(f.split("-")[0]))
+ return max(numbers) + 1
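
Seeding the set with 99 guarantees the first saved script gets prefix 100, so saved postinstalls always sort after any pre-existing numbered entries. A minimal sketch of the rule (filenames invented):

def next_prefix(existing_files):
    # 99 is always seeded, so an empty directory yields 100
    numbers = {99}
    numbers.update(int(f.split("-")[0]) for f in existing_files)
    return max(numbers) + 1

assert next_prefix([]) == 100
assert next_prefix(["100-acl", "101-bash"]) == 102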
def save_rpmpostinst(self, pkg):
- mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS', False) or "").split()
-
- new_pkg = pkg
- # Remove any multilib prefix from the package name
- for mlib in mlibs:
- if mlib in pkg:
- new_pkg = pkg.replace(mlib + '-', '')
- break
-
- bb.note(' * postponing %s' % new_pkg)
- saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg
-
- cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs
- cmd += ' --dbpath=/var/lib/rpm ' + new_pkg
- cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"'
- cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"'
- cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir
+ bb.note("Saving postinstall script of %s" % (pkg))
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
try:
- bb.note(cmd)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip().decode("utf-8")
- bb.note(output)
- os.chmod(saved_dir, 0o755)
- except subprocess.CalledProcessError as e:
- bb.fatal("Invoke save_rpmpostinst failed. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- '''Write common configuration for target usage'''
- def rpm_setup_smart_target_config(self):
- bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
- True)
-
- self._invoke_smart('config --set rpm-nolinktos=1')
- self._invoke_smart('config --set rpm-noparentdirs=1')
- for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
- self._invoke_smart('flag --set ignore-recommends %s' % i)
- self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
-
- '''
- The rpm db lock files were produced after invoking rpm to query on
- build system, and they caused the rpm on target didn't work, so we
- need to unlock the rpm db by removing the lock files.
- '''
- def unlock_rpm_db(self):
- # Remove rpm db lock files
- rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs)
- for f in rpm_db_locks:
- bb.utils.remove(f, True)
-
- """
- Returns a dictionary with the package info.
- """
- def package_info(self, pkg):
- cmd = "%s %s info --urls %s" % (self.smart_cmd, self.smart_opt, pkg)
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ output = subprocess.check_output([cmd] + args, stderr=subprocess.STDOUT).decode("utf-8")
except subprocess.CalledProcessError as e:
- bb.fatal("Unable to list available packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
- # Set default values to avoid UnboundLocalError
- arch = ""
- ver = ""
- filename = ""
+ # may need to prepend #!/bin/sh to output
- #Parse output
- for line in output.splitlines():
- line = line.rstrip()
- if line.startswith("Name:"):
- pkg = line.split(": ")[1]
- elif line.startswith("Version:"):
- tmp_str = line.split(": ")[1]
- ver, arch = tmp_str.split("@")
- break
-
- # Get filename
- index = re.search("^URLs", output, re.MULTILINE)
- tmp_str = output[index.end():]
- for line in tmp_str.splitlines():
- if "/" in line:
- line = line.lstrip()
- filename = line.split(" ")[0]
- break
-
- # To have the same data type than other package_info methods
- filepath = os.path.join(self.deploy_dir, arch, filename)
- pkg_dict = {}
- pkg_dict[pkg] = {"arch":arch, "ver":ver, "filename":filename,
- "filepath": filepath}
-
- return pkg_dict
+ target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
+ bb.utils.mkdirhier(target_path)
+ num = self._script_num_prefix(target_path)
+ saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
+ with open(saved_script_name, 'w') as fd:
+ fd.write(output)
+ os.chmod(saved_script_name, 0o755)
- """
- Returns the path to a tmpdir where resides the contents of a package.
+ def _handle_intercept_failure(self, registered_pkgs):
+ rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
+ bb.utils.mkdirhier(rpm_postinsts_dir)
- Deleting the tmpdir is responsability of the caller.
+ # Save the package postinstalls in /etc/rpm-postinsts
+ for pkg in registered_pkgs.split():
+ self.save_rpmpostinst(pkg)
- """
def extract(self, pkg):
- pkg_info = self.package_info(pkg)
- if not pkg_info:
- bb.fatal("Unable to get information for package '%s' while "
- "trying to extract the package." % pkg)
-
- pkg_path = pkg_info[pkg]["filepath"]
+ output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
+ pkg_name = output.splitlines()[-1]
+ if not pkg_name.endswith(".rpm"):
+ bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
+ pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
@@ -1508,18 +1084,18 @@ class RpmPM(PackageManager):
class OpkgDpkgPM(PackageManager):
- """
- This is an abstract class. Do not instantiate this directly.
- """
- def __init__(self, d):
- super(OpkgDpkgPM, self).__init__(d)
+ def __init__(self, d, target_rootfs):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ super(OpkgDpkgPM, self).__init__(d, target_rootfs)
- """
- Returns a dictionary with the package info.
-
- This method extracts the common parts for Opkg and Dpkg
- """
def package_info(self, pkg, cmd):
+ """
+ Returns a dictionary with the package info.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
@@ -1528,14 +1104,14 @@ class OpkgDpkgPM(PackageManager):
"returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
return opkg_query(output)
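
opkg_query() (defined earlier in this file) folds the "Field: value" stanza printed by opkg info / apt-cache show into a dictionary. A rough, illustrative re-implementation of the idea:

# Illustrative only; the real opkg_query() lives elsewhere in this file.
def parse_stanza(output):
    info = {}
    for line in output.splitlines():
        if ": " in line:
            # split on the first ": " so values may contain colons
            field, value = line.split(": ", 1)
            info[field.lower()] = value
    return info

print(parse_stanza("Package: attr\nVersion: 2.4.47-r0\nArchitecture: core2-64"))
# {'package': 'attr', 'version': '2.4.47-r0', 'architecture': 'core2-64'}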
- """
- Returns the path to a tmpdir where resides the contents of a package.
+ def extract(self, pkg, pkg_info):
+ """
+ Returns the path to a tmpdir where the contents of a package reside.
- Deleting the tmpdir is responsability of the caller.
+ Deleting the tmpdir is the responsibility of the caller.
- This method extracts the common parts for Opkg and Dpkg
- """
- def extract(self, pkg, pkg_info):
+ This method extracts the common parts for Opkg and Dpkg
+ """
ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
@@ -1548,20 +1124,21 @@ class OpkgDpkgPM(PackageManager):
tmp_dir = tempfile.mkdtemp()
current_dir = os.getcwd()
os.chdir(tmp_dir)
+ data_tar = 'data.tar.xz'
try:
- cmd = "%s x %s" % (ar_cmd, pkg_path)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- cmd = "%s xf data.tar.*" % tar_cmd
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ cmd = [ar_cmd, 'x', pkg_path]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ cmd = [tar_cmd, 'xf', data_tar]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.utils.remove(tmp_dir, recurse=True)
bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
+ "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
except OSError as e:
bb.utils.remove(tmp_dir, recurse=True)
bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
+ "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
@@ -1570,23 +1147,27 @@ class OpkgDpkgPM(PackageManager):
return tmp_dir
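
The shared extract() boils down to two subprocess calls: ar unpacks the archive members, then tar unpacks the data payload. A condensed sketch under those assumptions (package path supplied by the caller):

import subprocess
import tempfile

def extract_deb_or_ipk(pkg_path):
    # ar x unpacks debian-binary, control.tar.* and data.tar.xz
    tmp_dir = tempfile.mkdtemp()
    subprocess.check_output(["ar", "x", pkg_path], cwd=tmp_dir,
                            stderr=subprocess.STDOUT)
    # then the payload is untarred in place
    subprocess.check_output(["tar", "xf", "data.tar.xz"], cwd=tmp_dir,
                            stderr=subprocess.STDOUT)
    return tmp_dir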
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.mark_packages("unpacked", registered_pkgs.split())
class OpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
- super(OpkgPM, self).__init__(d)
+ def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True):
+ super(OpkgPM, self).__init__(d, target_rootfs)
- self.target_rootfs = target_rootfs
self.config_file = config_file
self.pkg_archs = archs
self.task_name = task_name
- self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
+ self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir)
self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
- self.opkg_args += self.d.getVar("OPKG_ARGS", True)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
+ if prepare_index:
+ create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies)
+
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
if opkg_lib_dir[0] == "/":
opkg_lib_dir = opkg_lib_dir[1:]
@@ -1598,7 +1179,7 @@ class OpkgPM(OpkgDpkgPM):
if not os.path.exists(self.d.expand('${T}/saved')):
bb.utils.mkdirhier(self.d.expand('${T}/saved'))
- self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") == "1"
+ self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
if self.from_feeds:
self._create_custom_config()
else:
@@ -1606,12 +1187,12 @@ class OpkgPM(OpkgDpkgPM):
self.indexer = OpkgIndexer(self.d, self.deploy_dir)
- """
- This function will change a package's status in /var/lib/opkg/status file.
- If 'packages' is None then the new_status will be applied to all
- packages
- """
def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in the /var/lib/opkg/status file.
+ If 'packages' is None then the new status will be applied to all
+ packages.
+ """
status_file = os.path.join(self.opkg_dir, "status")
with open(status_file, "r") as sf:
@@ -1643,8 +1224,8 @@ class OpkgPM(OpkgDpkgPM):
config_file.write("arch %s %d\n" % (arch, priority))
priority += 5
- for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
- feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
+ for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
+ feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
if feed_match is not None:
feed_name = feed_match.group(1)
@@ -1660,27 +1241,29 @@ class OpkgPM(OpkgDpkgPM):
specified as compatible for the current machine.
NOTE: Development-helper feature, NOT a full-fledged feed.
"""
- if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
+ if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
for arch in self.pkg_archs.split():
cfg_file_name = os.path.join(self.target_rootfs,
- self.d.getVar("sysconfdir", True),
+ self.d.getVar("sysconfdir"),
"opkg",
"local-%s-feed.conf" % arch)
with open(cfg_file_name, "w+") as cfg_file:
cfg_file.write("src/gz local-%s %s/%s" %
(arch,
- self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
+ self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
arch))
- if self.opkg_dir != '/var/lib/opkg':
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
# There is no command line option for this anymore, we need to add
# info_dir and status_file to config file, if OPKGLIBDIR doesn't have
# the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
- cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info'))
- cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status'))
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
def _create_config(self):
@@ -1698,31 +1281,35 @@ class OpkgPM(OpkgDpkgPM):
config_file.write("src oe-%s file:%s\n" %
(arch, pkgs_dir))
- if self.opkg_dir != '/var/lib/opkg':
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
# There is no command line option for this anymore, we need to add
# info_dir and status_file to config file, if OPKGLIBDIR doesn't have
# the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
- config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info'))
- config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status'))
-
- def insert_feeds_uris(self):
- if self.feed_uris == "":
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
return
rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
% self.target_rootfs)
- feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
- archs = self.pkg_archs.split() if self.feed_archs is None else self.feed_archs.split()
+ os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True)
+
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
with open(rootfs_config, "w+") as config_file:
uri_iterator = 0
for uri in feed_uris:
if archs:
for arch in archs:
- if (self.feed_archs is None) and (not os.path.exists(os.path.join(self.deploy_dir, arch))):
+ if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
continue
bb.note('Adding opkg feed url-%s-%d (%s)' %
(arch, uri_iterator, uri))
@@ -1754,29 +1341,44 @@ class OpkgPM(OpkgDpkgPM):
if not pkgs:
return
- cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+ cmd = "%s %s" % (self.opkg_cmd, self.opkg_args)
+ for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split():
+ cmd += " --add-exclude %s" % exclude
+ for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split():
+ cmd += " --add-ignore-recommends %s" % bad_recommendation
+ cmd += " install "
+ cmd += " ".join(pkgs)
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
- "intercept_scripts")
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
try:
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
bb.note(cmd)
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
bb.note(output)
+ failed_pkgs = []
+ for line in output.split('\n'):
+ if line.endswith("configuration required on target."):
+ bb.warn(line)
+ failed_pkgs.append(line.split(".")[0])
+ if failed_pkgs:
+ failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
except subprocess.CalledProcessError as e:
- (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output.decode("utf-8")))
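
The postinstall scan in install() keys off the trailing text of opkg's warning line and recovers the package name from the text before the first dot; the sample line below is invented to show the shape, not quoted from opkg:

# Invented sample output line; only the trailing text and the leading
# "<pkg>." prefix matter to the scan in install().
output = "bash.postinst returned 1, configuration required on target."

failed_pkgs = []
for line in output.split('\n'):
    if line.endswith("configuration required on target."):
        failed_pkgs.append(line.split(".")[0])
print(failed_pkgs)   # ['bash']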
def remove(self, pkgs, with_dependencies=True):
+ if not pkgs:
+ return
+
if with_dependencies:
- cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
+ cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
(self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
else:
cmd = "%s %s --force-depends remove %s" % \
@@ -1812,59 +1414,23 @@ class OpkgPM(OpkgDpkgPM):
def list_installed(self):
return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
- def handle_bad_recommendations(self):
- bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
- if bad_recommendations.strip() == "":
- return
-
- status_file = os.path.join(self.opkg_dir, "status")
-
- # If status file existed, it means the bad recommendations has already
- # been handled
- if os.path.exists(status_file):
- return
-
- cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
-
- with open(status_file, "w+") as status:
- for pkg in bad_recommendations.split():
- pkg_info = cmd + pkg
-
- try:
- output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get package info. Command '%s' "
- "returned %d:\n%s" % (pkg_info, e.returncode, e.output.decode("utf-8")))
-
- if output == "":
- bb.note("Ignored bad recommendation: '%s' is "
- "not a package" % pkg)
- continue
-
- for line in output.split('\n'):
- if line.startswith("Status:"):
- status.write("Status: deinstall hold not-installed\n")
- else:
- status.write(line + "\n")
-
- # Append a blank line after each package entry to ensure that it
- # is separated from the following entry
- status.write("\n")
-
- '''
- The following function dummy installs pkgs and returns the log of output.
- '''
def dummy_install(self, pkgs):
+ """
+ Do a dummy install of the given packages and return the log of the output.
+ """
if len(pkgs) == 0:
return
# Create a temp dir as the opkg root for the dummy installation
temp_rootfs = self.d.expand('${T}/opkg')
- temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ if opkg_lib_dir[0] == "/":
+ opkg_lib_dir = opkg_lib_dir[1:]
+ temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
bb.utils.mkdirhier(temp_opkg_dir)
opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
- opkg_args += self.d.getVar("OPKG_ARGS", True)
+ opkg_args += self.d.getVar("OPKG_ARGS")
cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
try:
@@ -1906,10 +1472,10 @@ class OpkgPM(OpkgDpkgPM):
self.opkg_dir,
symlinks=True)
- """
- Returns a dictionary with the package info.
- """
def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
@@ -1920,27 +1486,29 @@ class OpkgPM(OpkgDpkgPM):
return pkg_info
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
- """
def extract(self, pkg):
+ """
+ Returns the path to a tmpdir where the contents of a package reside.
+
+ Deleting the tmpdir is the responsibility of the caller.
+ """
pkg_info = self.package_info(pkg)
if not pkg_info:
bb.fatal("Unable to get information for package '%s' while "
"trying to extract the package." % pkg)
tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.gz"))
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
return tmp_dir
class DpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
- super(DpkgPM, self).__init__(d)
- self.target_rootfs = target_rootfs
- self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True)
+ def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True):
+ super(DpkgPM, self).__init__(d, target_rootfs)
+ self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir)
+
+ create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies)
+
if apt_conf_dir is None:
self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
else:
@@ -1949,22 +1517,22 @@ class DpkgPM(OpkgDpkgPM):
self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
- self.apt_args = d.getVar("APT_ARGS", True)
+ self.apt_args = d.getVar("APT_ARGS")
self.all_arch_list = archs.split()
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").split()
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
self._create_configs(archs, base_archs)
self.indexer = DpkgIndexer(self.d, self.deploy_dir)
- """
- This function will change a package's status in /var/lib/dpkg/status file.
- If 'packages' is None then the new_status will be applied to all
- packages
- """
def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in the /var/lib/dpkg/status file.
+ If 'packages' is None then the new status will be applied to all
+ packages.
+ """
status_file = self.target_rootfs + "/var/lib/dpkg/status"
with open(status_file, "r") as sf:
@@ -1987,19 +1555,22 @@ class DpkgPM(OpkgDpkgPM):
os.rename(status_file + ".tmp", status_file)
- """
- Run the pre/post installs for package "package_name". If package_name is
- None, then run all pre/post install scriptlets.
- """
def run_pre_post_installs(self, package_name=None):
+ """
+ Run the pre/post installs for package "package_name". If package_name is
+ None, then run all pre/post install scriptlets.
+ """
info_dir = self.target_rootfs + "/var/lib/dpkg/info"
- suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")]
+ ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
+ control_scripts = [
+ ControlScript(".preinst", "Preinstall", "install"),
+ ControlScript(".postinst", "Postinstall", "configure")]
status_file = self.target_rootfs + "/var/lib/dpkg/status"
installed_pkgs = []
with open(status_file, "r") as status:
for line in status.read().split('\n'):
- m = re.match("^Package: (.*)", line)
+ m = re.match(r"^Package: (.*)", line)
if m is not None:
installed_pkgs.append(m.group(1))
@@ -2010,27 +1581,24 @@ class DpkgPM(OpkgDpkgPM):
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
- "intercept_scripts")
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
- failed_pkgs = []
for pkg_name in installed_pkgs:
- for suffix in suffixes:
- p_full = os.path.join(info_dir, pkg_name + suffix[0])
+ for control_script in control_scripts:
+ p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
if os.path.exists(p_full):
try:
bb.note("Executing %s for package: %s ..." %
- (suffix[1].lower(), pkg_name))
- subprocess.check_output(p_full, stderr=subprocess.STDOUT)
+ (control_script.name.lower(), pkg_name))
+ output = subprocess.check_output([p_full, control_script.argument],
+ stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
except subprocess.CalledProcessError as e:
- bb.note("%s for package %s failed with %d:\n%s" %
- (suffix[1], pkg_name, e.returncode, e.output.decode("utf-8")))
- failed_pkgs.append(pkg_name)
- break
-
- if len(failed_pkgs):
- self.mark_packages("unpacked", failed_pkgs)
+ bb.warn("%s for package %s failed with %d:\n%s" %
+ (control_script.name, pkg_name, e.returncode,
+ e.output.decode("utf-8")))
+ failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
def update(self):
os.environ['APT_CONFIG'] = self.apt_conf_file
@@ -2060,26 +1628,29 @@ class DpkgPM(OpkgDpkgPM):
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
- (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output.decode("utf-8")))
# rename *.dpkg-new files/dirs
for root, dirs, files in os.walk(self.target_rootfs):
for dir in dirs:
- new_dir = re.sub("\.dpkg-new", "", dir)
+ new_dir = re.sub(r"\.dpkg-new", "", dir)
if dir != new_dir:
os.rename(os.path.join(root, dir),
os.path.join(root, new_dir))
for file in files:
- new_file = re.sub("\.dpkg-new", "", file)
+ new_file = re.sub(r"\.dpkg-new", "", file)
if file != new_file:
os.rename(os.path.join(root, file),
os.path.join(root, new_file))
def remove(self, pkgs, with_dependencies=True):
+ if not pkgs:
+ return
+
if with_dependencies:
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
@@ -2105,23 +1676,23 @@ class DpkgPM(OpkgDpkgPM):
if result is not None:
bb.fatal(result)
- def insert_feeds_uris(self):
- if self.feed_uris == "":
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
return
sources_conf = os.path.join("%s/etc/apt/sources.list"
% self.target_rootfs)
arch_list = []
- if self.feed_archs is None:
+ if feed_archs is None:
for arch in self.all_arch_list:
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
arch_list.append(arch)
else:
- arch_list = self.feed_archs.split()
+ arch_list = feed_archs.split()
- feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
with open(sources_conf, "w+") as sources_file:
for uri in feed_uris:
@@ -2135,7 +1706,7 @@ class DpkgPM(OpkgDpkgPM):
sources_file.write("deb %s ./\n" % uri)
def _create_configs(self, archs, base_archs):
- base_archs = re.sub("_", "-", base_archs)
+ base_archs = re.sub(r"_", r"-", base_archs)
if os.path.exists(self.apt_conf_dir):
bb.utils.remove(self.apt_conf_dir, True)
@@ -2161,7 +1732,7 @@ class DpkgPM(OpkgDpkgPM):
priority += 5
- pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
+ pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
for pkg in pkg_exclude.split():
prefs_file.write(
"Package: %s\n"
@@ -2176,21 +1747,20 @@ class DpkgPM(OpkgDpkgPM):
os.path.join(self.deploy_dir, arch))
base_arch_list = base_archs.split()
- multilib_variants = self.d.getVar("MULTILIB_VARIANTS", True);
+ multilib_variants = self.d.getVar("MULTILIB_VARIANTS")
for variant in multilib_variants.split():
localdata = bb.data.createCopy(self.d)
variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
- orig_arch = localdata.getVar("DPKG_ARCH", True)
+ orig_arch = localdata.getVar("DPKG_ARCH")
localdata.setVar("DEFAULTTUNE", variant_tune)
- bb.data.update_data(localdata)
- variant_arch = localdata.getVar("DPKG_ARCH", True)
+ variant_arch = localdata.getVar("DPKG_ARCH")
if variant_arch not in base_arch_list:
base_arch_list.append(variant_arch)
with open(self.apt_conf_file, "w+") as apt_conf:
with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
- match_arch = re.match(" Architecture \".*\";$", line)
+ match_arch = re.match(r" Architecture \".*\";$", line)
architectures = ""
if match_arch:
for base_arch in base_arch_list:
@@ -2198,8 +1768,8 @@ class DpkgPM(OpkgDpkgPM):
apt_conf.write(" Architectures {%s};\n" % architectures);
apt_conf.write(" Architecture \"%s\";\n" % base_archs)
else:
- line = re.sub("#ROOTFS#", self.target_rootfs, line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+ line = re.sub(r"#ROOTFS#", self.target_rootfs, line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
@@ -2214,7 +1784,7 @@ class DpkgPM(OpkgDpkgPM):
def remove_packaging_data(self):
bb.utils.remove(os.path.join(self.target_rootfs,
- self.d.getVar('opkglibdir', True)), True)
+ self.d.getVar('opkglibdir')), True)
bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
def fix_broken_dependencies(self):
@@ -2231,10 +1801,10 @@ class DpkgPM(OpkgDpkgPM):
def list_installed(self):
return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
- """
- Returns a dictionary with the package info.
- """
def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
@@ -2245,12 +1815,12 @@ class DpkgPM(OpkgDpkgPM):
return pkg_info
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
- """
def extract(self, pkg):
+ """
+ Returns the path to a tmpdir where the contents of a package reside.
+
+ Deleting the tmpdir is the responsibility of the caller.
+ """
pkg_info = self.package_info(pkg)
if not pkg_info:
bb.fatal("Unable to get information for package '%s' while "
@@ -2262,12 +1832,12 @@ class DpkgPM(OpkgDpkgPM):
return tmp_dir
def generate_index_files(d):
- classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()
+ classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
indexer_map = {
- "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
- "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
- "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
+ "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
+ "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
+ "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
}
result = None
@@ -2281,12 +1851,3 @@ def generate_index_files(d):
if result is not None:
bb.fatal(result)
-
-if __name__ == "__main__":
- """
- We should be able to run this as a standalone script, from outside bitbake
- environment.
- """
- """
- TBD
- """
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
index 21d4de914f..a82085a792 100644
--- a/meta/lib/oe/packagedata.py
+++ b/meta/lib/oe/packagedata.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import codecs
import os
@@ -13,9 +17,8 @@ def read_pkgdatafile(fn):
if os.access(fn, os.R_OK):
import re
- f = open(fn, 'r')
- lines = f.readlines()
- f.close()
+ with open(fn, 'r') as f:
+ lines = f.readlines()
r = re.compile("([^:]+):\s*(.*)")
for l in lines:
m = r.match(l)
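
Each pkgdata file is a series of "KEY: value" lines, and the regex splits on the first colon. A standalone sketch with invented content:

import re

lines = ["PN: attr", "PKG_attr: attr", "PKGSIZE_attr: 152634"]  # invented
r = re.compile(r"([^:]+):\s*(.*)")
pkgdata = {m.group(1): m.group(2) for m in map(r.match, lines) if m}
print(pkgdata["PKGSIZE_attr"])   # 152634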
@@ -57,7 +60,7 @@ def read_subpkgdata_dict(pkg, d):
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
- pkgdatadir = d.getVar("PKGDATA_DIR", True)
+ pkgdatadir = d.getVar("PKGDATA_DIR")
pkgmap = {}
try:
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py
index 97819279b7..2419cbb6d3 100644
--- a/meta/lib/oe/packagegroup.py
+++ b/meta/lib/oe/packagegroup.py
@@ -1,17 +1,21 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import itertools
def is_optional(feature, d):
- packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
+ packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
if packages:
- return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional", True))
+ return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
else:
- return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional", True))
+ return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))
def packages(features, d):
for feature in features:
- packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
+ packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
if not packages:
- packages = d.getVar("PACKAGE_GROUP_%s" % feature, True)
+ packages = d.getVar("PACKAGE_GROUP_%s" % feature)
for pkg in (packages or "").split():
yield pkg
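
packages() prefers FEATURE_PACKAGES_&lt;feature&gt; and falls back to the legacy PACKAGE_GROUP_&lt;feature&gt;. A sketch with the datastore stubbed by a plain dict (variable values invented):

store = {"FEATURE_PACKAGES_ssh-server-openssh": "openssh",
         "PACKAGE_GROUP_debug-tweaks": "packagegroup-core-debug"}

def packages(features):
    for feature in features:
        # new-style variable wins; legacy PACKAGE_GROUP_* is the fallback
        pkgs = store.get("FEATURE_PACKAGES_%s" % feature) \
               or store.get("PACKAGE_GROUP_%s" % feature)
        for pkg in (pkgs or "").split():
            yield pkg

print(list(packages(["ssh-server-openssh", "debug-tweaks"])))
# ['openssh', 'packagegroup-core-debug']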
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
index 0332f100f1..2b1eee1003 100644
--- a/meta/lib/oe/patch.py
+++ b/meta/lib/oe/patch.py
@@ -1,4 +1,9 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import oe.path
+import oe.types
class NotFoundError(bb.BBHandledException):
def __init__(self, path):
@@ -20,6 +25,7 @@ class CmdError(bb.BBHandledException):
def runcmd(args, dir = None):
import pipes
+ import subprocess
if dir:
olddir = os.path.abspath(os.curdir)
@@ -32,9 +38,14 @@ def runcmd(args, dir = None):
args = [ pipes.quote(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
- (exitstatus, output) = oe.utils.getstatusoutput(cmd)
+ (exitstatus, output) = subprocess.getstatusoutput(cmd)
if exitstatus != 0:
raise CmdError(cmd, exitstatus >> 8, output)
+ if " fuzz " in output:
+ # Write the patch fuzz info to the log with a header and footer so
+ # that insane.bbclass can detect it and raise an error/warning
+ bb.note("--- Patch fuzz start ---\n%s\n--- Patch fuzz end ---" % output)
+
return output
finally:
@@ -81,7 +92,7 @@ class PatchSet(object):
patch[param] = PatchSet.defaults[param]
if patch.get("remote"):
- patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)
+ patch["file"] = self.d.expand(bb.fetch2.localpath(patch["remote"], self.d))
patch["filemd5"] = bb.utils.md5_file(patch["file"])
@@ -211,7 +222,7 @@ class PatchTree(PatchSet):
self.patches.insert(i, patch)
def _applypatch(self, patch, force = False, reverse = False, run = True):
- shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
+ shellcmd = ["cat", patch['file'], "|", "patch", "--no-backup-if-mismatch", "-p", patch['strippath']]
if reverse:
shellcmd.append('-R')
@@ -281,8 +292,8 @@ class GitApplyTree(PatchTree):
def __init__(self, dir, d):
PatchTree.__init__(self, dir, d)
- self.commituser = d.getVar('PATCH_GIT_USER_NAME', True)
- self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL', True)
+ self.commituser = d.getVar('PATCH_GIT_USER_NAME')
+ self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
@staticmethod
def extractPatchHeader(patchfile):
@@ -316,8 +327,8 @@ class GitApplyTree(PatchTree):
@staticmethod
def interpretPatchHeader(headerlines):
import re
- author_re = re.compile('[\S ]+ <\S+@\S+\.\S+>')
- from_commit_re = re.compile('^From [a-z0-9]{40} .*')
+ author_re = re.compile(r'[\S ]+ <\S+@\S+\.\S+>')
+ from_commit_re = re.compile(r'^From [a-z0-9]{40} .*')
outlines = []
author = None
date = None
@@ -371,8 +382,8 @@ class GitApplyTree(PatchTree):
@staticmethod
def gitCommandUserOptions(cmd, commituser=None, commitemail=None, d=None):
if d:
- commituser = d.getVar('PATCH_GIT_USER_NAME', True)
- commitemail = d.getVar('PATCH_GIT_USER_EMAIL', True)
+ commituser = d.getVar('PATCH_GIT_USER_NAME')
+ commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
if commituser:
cmd += ['-c', 'user.name="%s"' % commituser]
if commitemail:
@@ -428,9 +439,10 @@ class GitApplyTree(PatchTree):
def extractPatches(tree, startcommit, outdir, paths=None):
import tempfile
import shutil
+ import re
tempdir = tempfile.mkdtemp(prefix='oepatch')
try:
- shellcmd = ["git", "format-patch", startcommit, "-o", tempdir]
+ shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", startcommit, "-o", tempdir]
if paths:
shellcmd.append('--')
shellcmd.extend(paths)
@@ -443,10 +455,13 @@ class GitApplyTree(PatchTree):
try:
with open(srcfile, 'r', encoding=encoding) as f:
for line in f:
- if line.startswith(GitApplyTree.patch_line_prefix):
+ checkline = line
+ if checkline.startswith('Subject: '):
+ checkline = re.sub(r'\[.+?\]\s*', '', checkline[9:])
+ if checkline.startswith(GitApplyTree.patch_line_prefix):
outfile = line.split()[-1].strip()
continue
- if line.startswith(GitApplyTree.ignore_commit_prefix):
+ if checkline.startswith(GitApplyTree.ignore_commit_prefix):
continue
patchlines.append(line)
except UnicodeDecodeError:
@@ -547,7 +562,7 @@ class GitApplyTree(PatchTree):
class QuiltTree(PatchSet):
def _runcmd(self, args, run = True):
- quiltrc = self.d.getVar('QUILTRCFILE', True)
+ quiltrc = self.d.getVar('QUILTRCFILE')
if not run:
return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)
@@ -723,7 +738,7 @@ class UserResolver(Resolver):
# Patch application failed
patchcmd = self.patchset.Push(True, False, False)
- t = self.patchset.d.getVar('T', True)
+ t = self.patchset.d.getVar('T')
if not t:
bb.msg.fatal("Build", "T not set")
bb.utils.mkdirhier(t)
@@ -765,3 +780,123 @@ class UserResolver(Resolver):
os.chdir(olddir)
raise
os.chdir(olddir)
+
+
+def patch_path(url, fetch, workdir, expand=True):
+ """Return the local path of a patch, or return nothing if this isn't a patch"""
+
+ local = fetch.localpath(url)
+ if os.path.isdir(local):
+ return
+ base, ext = os.path.splitext(os.path.basename(local))
+ if ext in ('.gz', '.bz2', '.xz', '.Z'):
+ if expand:
+ local = os.path.join(workdir, base)
+ ext = os.path.splitext(base)[1]
+
+ urldata = fetch.ud[url]
+ if "apply" in urldata.parm:
+ apply = oe.types.boolean(urldata.parm["apply"])
+ if not apply:
+ return
+ elif ext not in (".diff", ".patch"):
+ return
+
+ return local
+
+def src_patches(d, all=False, expand=True):
+ workdir = d.getVar('WORKDIR')
+ fetch = bb.fetch2.Fetch([], d)
+ patches = []
+ sources = []
+ for url in fetch.urls:
+ local = patch_path(url, fetch, workdir, expand)
+ if not local:
+ if all:
+ local = fetch.localpath(url)
+ sources.append(local)
+ continue
+
+ urldata = fetch.ud[url]
+ parm = urldata.parm
+ patchname = parm.get('pname') or os.path.basename(local)
+
+ apply, reason = should_apply(parm, d)
+ if not apply:
+ if reason:
+ bb.note("Patch %s %s" % (patchname, reason))
+ continue
+
+ patchparm = {'patchname': patchname}
+ if "striplevel" in parm:
+ striplevel = parm["striplevel"]
+ elif "pnum" in parm:
+ #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
+ striplevel = parm["pnum"]
+ else:
+ striplevel = '1'
+ patchparm['striplevel'] = striplevel
+
+ patchdir = parm.get('patchdir')
+ if patchdir:
+ patchparm['patchdir'] = patchdir
+
+ localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
+ patches.append(localurl)
+
+ if all:
+ return sources
+
+ return patches
+
+
+def should_apply(parm, d):
+ import bb.utils
+ if "mindate" in parm or "maxdate" in parm:
+ pn = d.getVar('PN')
+ srcdate = d.getVar('SRCDATE_%s' % pn)
+ if not srcdate:
+ srcdate = d.getVar('SRCDATE')
+
+ if srcdate == "now":
+ srcdate = d.getVar('DATE')
+
+ if "maxdate" in parm and parm["maxdate"] < srcdate:
+ return False, 'is outdated'
+
+ if "mindate" in parm and parm["mindate"] > srcdate:
+ return False, 'is predated'
+
+
+ if "minrev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and srcrev < parm["minrev"]:
+ return False, 'applies to later revisions'
+
+ if "maxrev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and srcrev > parm["maxrev"]:
+ return False, 'applies to earlier revisions'
+
+ if "rev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and parm["rev"] not in srcrev:
+ return False, "doesn't apply to revision"
+
+ if "notrev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and parm["notrev"] in srcrev:
+ return False, "doesn't apply to revision"
+
+ if "maxver" in parm:
+ pv = d.getVar('PV')
+ if bb.utils.vercmp_string_op(pv, parm["maxver"], ">"):
+ return False, "applies to earlier version"
+
+ if "minver" in parm:
+ pv = d.getVar('PV')
+ if bb.utils.vercmp_string_op(pv, parm["minver"], "<"):
+ return False, "applies to later version"
+
+ return True, None
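
A usage sketch for the version filters above (it needs bitbake's bb module on sys.path; the datastore is stubbed and all values are invented):

import bb.utils   # requires a bitbake checkout on sys.path

class StubData:
    """Stand-in for the real datastore, for illustration only."""
    def getVar(self, name):
        return {"PV": "1.9"}.get(name)

d = StubData()
parm = {"minver": "2.0"}   # as it would come from a SRC_URI patch entry
if bb.utils.vercmp_string_op(d.getVar("PV"), parm["minver"], "<"):
    print("skipped: applies to later version")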
+
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py
index 06a5af2659..fa209b9795 100644
--- a/meta/lib/oe/path.py
+++ b/meta/lib/oe/path.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import errno
import glob
import shutil
@@ -50,9 +54,30 @@ def make_relative_symlink(path):
os.remove(path)
os.symlink(base, path)
+def replace_absolute_symlinks(basedir, d):
+ """
+ Walk basedir looking for absolute symlinks and replace them with relative ones.
+ The absolute links are assumed to be relative to basedir
+ (unlike make_relative_symlink above, which tries to compute common ancestors
+ using pattern matching instead).
+ """
+ for walkroot, dirs, files in os.walk(basedir):
+ for file in files + dirs:
+ path = os.path.join(walkroot, file)
+ if not os.path.islink(path):
+ continue
+ link = os.readlink(path)
+ if not os.path.isabs(link):
+ continue
+ walkdir = os.path.dirname(path.rpartition(basedir)[2])
+ base = os.path.relpath(link, walkdir)
+ bb.debug(2, "Replacing absolute path %s with relative path %s" % (link, base))
+ os.remove(path)
+ os.symlink(base, path)
+
def format_display(path, metadata):
""" Prepare a path for display to the user. """
- rel = relative(metadata.getVar("TOPDIR", True), path)
+ rel = relative(metadata.getVar("TOPDIR"), path)
if len(rel) > len(path):
return path
else:
@@ -65,11 +90,11 @@ def copytree(src, dst):
# This way we also preserve hardlinks between files in the tree.
bb.utils.mkdirhier(dst)
- cmd = "tar --xattrs --xattrs-include='*' -cf - -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
+ cmd = "tar --xattrs --xattrs-include='*' -cf - -S -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
def copyhardlinktree(src, dst):
- """ Make the hard link when possible, otherwise copy. """
+ """Make a tree of hard links when possible, otherwise copy."""
bb.utils.mkdirhier(dst)
if os.path.isdir(src) and not len(os.listdir(src)):
return
@@ -77,23 +102,42 @@ def copyhardlinktree(src, dst):
if (os.stat(src).st_dev == os.stat(dst).st_dev):
# Need to copy directories only with tar first since cp will error if two
# writers try and create a directory at the same time
- cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, src, dst)
+ cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -S -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
source = ''
if os.path.isdir(src):
- import glob
if len(glob.glob('%s/.??*' % src)) > 0:
- source = '%s/.??* ' % src
- source = source + '%s/*' % src
+ source = './.??* '
+ source += './*'
+ s_dir = src
else:
source = src
- cmd = 'cp -afl --preserve=xattr %s %s' % (source, dst)
- subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ s_dir = os.getcwd()
+ cmd = 'cp -afl --preserve=xattr %s %s' % (source, os.path.realpath(dst))
+ subprocess.check_output(cmd, shell=True, cwd=s_dir, stderr=subprocess.STDOUT)
else:
copytree(src, dst)
+def copyhardlink(src, dst):
+ """Make a hard link when possible, otherwise copy."""
+
+ # We need to stat the destination directory as the destination file probably
+ # doesn't exist yet.
+ dstdir = os.path.dirname(dst)
+ if os.stat(src).st_dev == os.stat(dstdir).st_dev:
+ os.link(src, dst)
+ else:
+ shutil.copy(src, dst)
+
def remove(path, recurse=True):
- """Equivalent to rm -f or rm -rf"""
+ """
+ Equivalent to rm -f or rm -rf
+ NOTE: be careful about passing paths that may contain filenames with
+ wildcards in them (as opposed to passing an actual wildcarded path) -
+ since we use glob.glob() to expand the path. Filenames containing
+ square brackets are particularly problematic since they may not
+ actually expand to match the original filename.
+ """
for name in glob.glob(path):
try:
os.unlink(name)
@@ -208,3 +252,59 @@ def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False)
raise
return file
+
+def is_path_parent(possible_parent, *paths):
+ """
+ Return True if a path is the parent of another, False otherwise.
+ Multiple paths to test can be specified in which case all
+ specified test paths must be under the parent in order to
+ return True.
+ """
+ def abs_path_trailing(pth):
+ pth_abs = os.path.abspath(pth)
+ if not pth_abs.endswith(os.sep):
+ pth_abs += os.sep
+ return pth_abs
+
+ possible_parent_abs = abs_path_trailing(possible_parent)
+ if not paths:
+ return False
+ for path in paths:
+ path_abs = abs_path_trailing(path)
+ if not path_abs.startswith(possible_parent_abs):
+ return False
+ return True
+
+def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=False):
+ """Search a search path for pathname, supporting wildcards.
+
+ Return all paths in the specified search path matching the wildcard pattern
+ in pathname, returning only the first encountered for each file. If
+ candidates is True, information on all potential candidate paths is
+ included.
+ """
+ paths = (path or os.environ.get('PATH', os.defpath)).split(':')
+ if reverse:
+ paths.reverse()
+
+ seen, files = set(), []
+ for index, element in enumerate(paths):
+ if not os.path.isabs(element):
+ element = os.path.abspath(element)
+
+ candidate = os.path.join(element, pathname)
+ globbed = glob.glob(candidate)
+ if globbed:
+ for found_path in sorted(globbed):
+ if not os.access(found_path, mode):
+ continue
+ rel = os.path.relpath(found_path, element)
+ if rel not in seen:
+ seen.add(rel)
+ if candidates:
+ files.append((found_path, [os.path.join(p, rel) for p in paths[:index+1]]))
+ else:
+ files.append(found_path)
+
+ return files
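
A usage sketch for which_wild() (paths invented, and it assumes a bitbake environment where oe.path is importable): collect every pkgconfig file across a two-entry search path, keeping only the first hit per relative name.

from oe.path import which_wild   # assumes oe.path is importable

for hit in which_wild("pkgconfig/*.pc",
                      path="/work/sysroot/usr/lib:/work/native/usr/lib"):
    print(hit)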
+
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
index 0054f954cc..b1132ccb11 100644
--- a/meta/lib/oe/prservice.py
+++ b/meta/lib/oe/prservice.py
@@ -1,7 +1,10 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
def prserv_make_conn(d, check = False):
import prserv.serv
- host_params = list([_f for _f in (d.getVar("PRSERV_HOST", True) or '').split(':') if _f])
+ host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
try:
conn = None
conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
@@ -15,11 +18,11 @@ def prserv_make_conn(d, check = False):
return conn
def prserv_dump_db(d):
- if not d.getVar('PRSERV_HOST', True):
+ if not d.getVar('PRSERV_HOST'):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN", True)
+ conn = d.getVar("__PRSERV_CONN")
if conn is None:
conn = prserv_make_conn(d)
if conn is None:
@@ -27,18 +30,18 @@ def prserv_dump_db(d):
return None
#dump db
- opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True)
- opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True)
- opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True)
- opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True))
+ opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
+ opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
+ opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
+ opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
- if not d.getVar('PRSERV_HOST', True):
+ if not d.getVar('PRSERV_HOST'):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN", True)
+ conn = d.getVar("__PRSERV_CONN")
if conn is None:
conn = prserv_make_conn(d)
if conn is None:
@@ -58,7 +61,7 @@ def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksu
(filter_checksum and filter_checksum != checksum):
continue
try:
- value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True))
+ value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum))
except BaseException as exc:
bb.debug("Not valid value of %s:%s" % (v,str(exc)))
continue
@@ -72,8 +75,8 @@ def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksu
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
import bb.utils
#initialize the output file
- bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True))
- df = d.getVar('PRSERV_DUMPFILE', True)
+ bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
+ df = d.getVar('PRSERV_DUMPFILE')
#write data
lf = bb.utils.lockfile("%s.lock" % df)
f = open(df, "a")
@@ -114,7 +117,7 @@ def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
bb.utils.unlockfile(lf)
def prserv_check_avail(d):
- host_params = list([_f for _f in (d.getVar("PRSERV_HOST", True) or '').split(':') if _f])
+ host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
try:
if len(host_params) != 2:
raise TypeError
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py
index 75e7df8546..21066c4dc3 100644
--- a/meta/lib/oe/qa.py
+++ b/meta/lib/oe/qa.py
@@ -1,4 +1,8 @@
-import os, struct
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os, struct, mmap
class NotELFFileError(Exception):
pass
@@ -23,9 +27,9 @@ class ELFFile:
EV_CURRENT = 1
# possible values for EI_DATA
- ELFDATANONE = 0
- ELFDATA2LSB = 1
- ELFDATA2MSB = 2
+ EI_DATA_NONE = 0
+ EI_DATA_LSB = 1
+ EI_DATA_MSB = 2
PT_INTERP = 3
@@ -34,51 +38,46 @@ class ELFFile:
#print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
raise NotELFFileError("%s is not an ELF" % self.name)
- def __init__(self, name, bits = 0):
+ def __init__(self, name):
self.name = name
- self.bits = bits
self.objdump_output = {}
- def open(self):
- if not os.path.isfile(self.name):
- raise NotELFFileError("%s is not a normal file" % self.name)
+ # Context Manager functions to close the mmap explicitly
+ def __enter__(self):
+ return self
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.data.close()
+
+ def open(self):
with open(self.name, "rb") as f:
- # Read 4k which should cover most of the headers we're after
- self.data = f.read(4096)
+ try:
+ self.data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
+ except ValueError:
+ # This means the file is empty
+ raise NotELFFileError("%s is empty" % self.name)
+ # Check the file has the minimum number of ELF table entries
if len(self.data) < ELFFile.EI_NIDENT + 4:
raise NotELFFileError("%s is not an ELF" % self.name)
+ # ELF header
self.my_assert(self.data[0], 0x7f)
self.my_assert(self.data[1], ord('E'))
self.my_assert(self.data[2], ord('L'))
self.my_assert(self.data[3], ord('F'))
- if self.bits == 0:
- if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32:
- self.bits = 32
- elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64:
- self.bits = 64
- else:
- # Not 32-bit or 64.. lets assert
- raise NotELFFileError("ELF but not 32 or 64 bit.")
- elif self.bits == 32:
- self.my_assert(self.data[ELFFile.EI_CLASS], ELFFile.ELFCLASS32)
- elif self.bits == 64:
- self.my_assert(self.data[ELFFile.EI_CLASS], ELFFile.ELFCLASS64)
+ if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32:
+ self.bits = 32
+ elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64:
+ self.bits = 64
else:
- raise NotELFFileError("Must specify unknown, 32 or 64 bit size.")
+ # Neither 32-bit nor 64-bit, so raise
+ raise NotELFFileError("ELF but not 32 or 64 bit.")
self.my_assert(self.data[ELFFile.EI_VERSION], ELFFile.EV_CURRENT)
- self.sex = self.data[ELFFile.EI_DATA]
- if self.sex == ELFFile.ELFDATANONE:
- raise NotELFFileError("self.sex == ELFDATANONE")
- elif self.sex == ELFFile.ELFDATA2LSB:
- self.sex = "<"
- elif self.sex == ELFFile.ELFDATA2MSB:
- self.sex = ">"
- else:
- raise NotELFFileError("Unknown self.sex")
+ self.endian = self.data[ELFFile.EI_DATA]
+ if self.endian not in (ELFFile.EI_DATA_LSB, ELFFile.EI_DATA_MSB):
+ raise NotELFFileError("Unexpected EI_DATA %x" % self.endian)
def osAbi(self):
return self.data[ELFFile.EI_OSABI]
@@ -90,16 +89,20 @@ class ELFFile:
return self.bits
def isLittleEndian(self):
- return self.sex == "<"
+ return self.endian == ELFFile.EI_DATA_LSB
def isBigEndian(self):
- return self.sex == ">"
+ return self.endian == ELFFile.EI_DATA_MSB
+
+ def getStructEndian(self):
+ return {ELFFile.EI_DATA_LSB: "<",
+ ELFFile.EI_DATA_MSB: ">"}[self.endian]
def getShort(self, offset):
- return struct.unpack_from(self.sex+"H", self.data, offset)[0]
+ return struct.unpack_from(self.getStructEndian() + "H", self.data, offset)[0]
def getWord(self, offset):
- return struct.unpack_from(self.sex+"i", self.data, offset)[0]
+ return struct.unpack_from(self.getStructEndian() + "i", self.data, offset)[0]
def isDynamic(self):
"""
@@ -118,7 +121,7 @@ class ELFFile:
def machine(self):
"""
- We know the sex stored in self.sex and we
+ We know the endianness stored in self.endian and we
know the position
"""
return self.getShort(ELFFile.E_MACHINE)
@@ -130,11 +133,11 @@ class ELFFile:
if cmd in self.objdump_output:
return self.objdump_output[cmd]
- objdump = d.getVar('OBJDUMP', True)
+ objdump = d.getVar('OBJDUMP')
env = os.environ.copy()
env["LC_ALL"] = "C"
- env["PATH"] = d.getVar('PATH', True)
+ env["PATH"] = d.getVar('PATH')
try:
bb.note("%s %s %s" % (objdump, cmd, self.name))
@@ -144,8 +147,30 @@ class ELFFile:
bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
return ""
+def elf_machine_to_string(machine):
+ """
+ Return the name of a given ELF e_machine field or the hex value as a string
+ if it isn't recognised.
+ """
+ try:
+ return {
+ 0x02: "SPARC",
+ 0x03: "x86",
+ 0x08: "MIPS",
+ 0x14: "PowerPC",
+ 0x28: "ARM",
+ 0x2A: "SuperH",
+ 0x32: "IA-64",
+ 0x3E: "x86-64",
+ 0xB7: "AArch64",
+ 0xF7: "BPF"
+ }[machine]
+ except:
+ return "Unknown (%s)" % repr(machine)
+
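
A quick self-check of the mapping, assuming elf_machine_to_string() from above is in scope:

assert elf_machine_to_string(0x3E) == "x86-64"
assert elf_machine_to_string(0xB7) == "AArch64"
# unknown values fall through to the repr() of the input
assert elf_machine_to_string(0x9999) == "Unknown (39321)"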
if __name__ == "__main__":
import sys
- elf = ELFFile(sys.argv[1])
- elf.open()
- print(elf.isDynamic())
+
+ with ELFFile(sys.argv[1]) as elf:
+ elf.open()
+ print(elf.isDynamic())
diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py
index 58e4028aed..630ae967af 100644
--- a/meta/lib/oe/recipeutils.py
+++ b/meta/lib/oe/recipeutils.py
@@ -2,7 +2,9 @@
#
# Some code borrowed from the OE layer index
#
-# Copyright (C) 2013-2016 Intel Corporation
+# Copyright (C) 2013-2017 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
@@ -16,71 +18,40 @@ import shutil
import re
import fnmatch
import glob
-from collections import OrderedDict, defaultdict
+import bb.tinfoil
+from collections import OrderedDict, defaultdict
+from bb.utils import vercmp_string
# Help us to find places to insert values
recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()']
# Variables that sometimes are a bit long but shouldn't be wrapped
-nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', 'SRC_URI[md5sum]', 'SRC_URI[sha256sum]']
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]']
list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
-def pn_to_recipe(cooker, pn, mc=''):
- """Convert a recipe name (PN) to the path to the recipe file"""
- import bb.providers
-
- if pn in cooker.recipecaches[mc].pkg_pn:
- best = bb.providers.findBestProvider(pn, cooker.data, cooker.recipecaches[mc], cooker.recipecaches[mc].pkg_pn)
- return best[3]
- elif pn in cooker.recipecaches[mc].providers:
- filenames = cooker.recipecaches[mc].providers[pn]
- eligible, foundUnique = bb.providers.filterProviders(filenames, pn, cooker.expanded_data, cooker.recipecaches[mc])
- filename = eligible[0]
- return filename
- else:
- return None
-
-
-def get_unavailable_reasons(cooker, pn):
- """If a recipe could not be found, find out why if possible"""
- import bb.taskdata
- taskdata = bb.taskdata.TaskData(None, skiplist=cooker.skiplist)
- return taskdata.get_reasons(pn)
-
-
-def parse_recipe(cooker, fn, appendfiles):
- """
- Parse an individual recipe file, optionally with a list of
- bbappend files.
- """
- import bb.cache
- parser = bb.cache.NoCache(cooker.databuilder)
- envdata = parser.loadDataFull(fn, appendfiles)
- return envdata
-
-
-def parse_recipe_simple(cooker, pn, d, appends=True):
+def simplify_history(history, d):
"""
- Parse a recipe and optionally all bbappends that apply to it
- in the current configuration.
+ Eliminate any irrelevant events from a variable history
"""
- import bb.providers
-
- recipefile = pn_to_recipe(cooker, pn)
- if not recipefile:
- skipreasons = get_unavailable_reasons(cooker, pn)
- # We may as well re-use bb.providers.NoProvider here
- if skipreasons:
- raise bb.providers.NoProvider(skipreasons)
- else:
- raise bb.providers.NoProvider('Unable to find any recipe file matching %s' % pn)
- if appends:
- appendfiles = cooker.collection.get_file_appends(recipefile)
- else:
- appendfiles = None
- return parse_recipe(cooker, recipefile, appendfiles)
+ ret_history = []
+ has_set = False
+ # Go backwards through the history and remove any immediate operations
+ # before the most recent set
+ for event in reversed(history):
+ if 'flag' in event or 'file' not in event:
+ continue
+ if event['op'] == 'set':
+ if has_set:
+ continue
+ has_set = True
+ elif event['op'] in ('append', 'prepend', 'postdot', 'predot'):
+ # Reminder: "append" and "prepend" mean += and =+ respectively, NOT _append / _prepend
+ if has_set:
+ continue
+ ret_history.insert(0, event)
+ return ret_history
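To illustrate the pruning rule, a small synthetic history (note the d parameter is unused in the body above, so None suffices for this sketch):

history = [
    {'op': 'set', 'file': 'a.inc', 'detail': 'old'},
    {'op': 'append', 'file': 'a.inc', 'detail': ' extra'},
    {'op': 'set', 'file': 'a.bb', 'detail': 'new'},
]
# Only the final 'set' survives; the earlier set and += are dropped
assert simplify_history(history, None) == [history[2]]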
def get_var_files(fn, varlist, d):
@@ -89,11 +60,19 @@ def get_var_files(fn, varlist, d):
"""
varfiles = {}
for v in varlist:
- history = d.varhistory.variable(v)
files = []
- for event in history:
- if 'file' in event and not 'flag' in event:
- files.append(event['file'])
+ if '[' in v:
+ varsplit = v.split('[')
+ varflag = varsplit[1].split(']')[0]
+ history = d.varhistory.variable(varsplit[0])
+ for event in history:
+ if 'file' in event and event.get('flag', '') == varflag:
+ files.append(event['file'])
+ else:
+ history = d.varhistory.variable(v)
+ for event in history:
+ if 'file' in event and 'flag' not in event:
+ files.append(event['file'])
if files:
actualfile = files[-1]
else:
@@ -173,6 +152,10 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
else:
newline = ''
+ nowrap_vars_res = []
+ for item in nowrap_vars:
+ nowrap_vars_res.append(re.compile('^%s$' % item))
+
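A quick illustrative check of what the compiled nowrap patterns now match, since the entries are regular expressions rather than literal variable names:

import re
pat = re.compile(r'^SRC_URI\[(.+\.)?md5sum\]$')
assert pat.match('SRC_URI[md5sum]')
assert pat.match('SRC_URI[archive-one.md5sum]')
assert not pat.match('SRC_URI_append')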
recipe_progression_res = []
recipe_progression_restrs = []
for item in recipe_progression:
@@ -180,7 +163,7 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
key = item[:-2]
else:
key = item
- restr = '%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
+ restr = r'%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
if item.endswith('()'):
recipe_progression_restrs.append(restr + '()')
else:
@@ -203,15 +186,27 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
def outputvalue(name, lines, rewindcomments=False):
if values[name] is None:
return
- rawtext = '%s = "%s"%s' % (name, values[name], newline)
+ if isinstance(values[name], tuple):
+ op, value = values[name]
+ if op == '+=' and value.strip() == '':
+ return
+ else:
+ value = values[name]
+ op = '='
+ rawtext = '%s %s "%s"%s' % (name, op, value, newline)
addlines = []
- if name in nowrap_vars:
+ nowrap = False
+ for nowrap_re in nowrap_vars_res:
+ if nowrap_re.match(name):
+ nowrap = True
+ break
+ if nowrap:
addlines.append(rawtext)
elif name in list_vars:
- splitvalue = split_var_value(values[name], assignment=False)
+ splitvalue = split_var_value(value, assignment=False)
if len(splitvalue) > 1:
linesplit = ' \\\n' + (' ' * (len(name) + 4))
- addlines.append('%s = "%s%s"%s' % (name, linesplit.join(splitvalue), linesplit, newline))
+ addlines.append('%s %s "%s%s"%s' % (name, op, linesplit.join(splitvalue), linesplit, newline))
else:
addlines.append(rawtext)
else:
@@ -219,6 +214,11 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
for wrapline in wrapped[:-1]:
addlines.append('%s \\%s' % (wrapline, newline))
addlines.append('%s%s' % (wrapped[-1], newline))
+
+ # Split on newlines - this isn't strictly necessary if you are only
+ # going to write the output to disk, but if you want to compare it
+ # (as patch_recipe_file() will do if patch=True) then it's important.
+ addlines = [line for l in addlines for line in l.splitlines(True)]
if rewindcomments:
# Ensure we insert the lines before any leading comments
# (that we'd want to ensure remain leading the next value)
@@ -268,7 +268,7 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
return changed, tolines
-def patch_recipe_file(fn, values, patch=False, relpath=''):
+def patch_recipe_file(fn, values, patch=False, relpath='', redirect_output=None):
"""Update or insert variable values into a recipe file (assuming you
have already identified the exact file you want to update.)
Note that some manual inspection/intervention may be required
@@ -280,7 +280,11 @@ def patch_recipe_file(fn, values, patch=False, relpath=''):
_, tolines = patch_recipe_lines(fromlines, values)
- if patch:
+ if redirect_output:
+ with open(os.path.join(redirect_output, os.path.basename(fn)), 'w') as f:
+ f.writelines(tolines)
+ return None
+ elif patch:
relfn = os.path.relpath(fn, relpath)
diff = difflib.unified_diff(fromlines, tolines, 'a/%s' % relfn, 'b/%s' % relfn)
return diff
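A hedged usage sketch of the extended values format: each entry may now be either a plain string (written as NAME = "...") or an (op, value) tuple such as a '+=' append. The recipe path below is hypothetical.

values = {
    'SUMMARY': 'A clearer one-line summary',
    'SRC_URI': ('+=', ' file://fix-build.patch'),
}
# With patch=True the function returns a unified diff rather than
# writing the file in place
diff = patch_recipe_file('/path/to/recipe.bb', values, patch=True)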
@@ -330,17 +334,52 @@ def localise_file_vars(fn, varfiles, varlist):
return filevars
-def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
+def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None):
"""Modify a list of variable values in the specified recipe. Handles inc files if
used by the recipe.
"""
+ overrides = d.getVar('OVERRIDES').split(':')
+ def override_applicable(hevent):
+ op = hevent['op']
+ if '[' in op:
+ opoverrides = op.split('[')[1].split(']')[0].split('_')
+ for opoverride in opoverrides:
+ if not opoverride in overrides:
+ return False
+ return True
+
varlist = varvalues.keys()
+ fn = os.path.abspath(fn)
varfiles = get_var_files(fn, varlist, d)
locs = localise_file_vars(fn, varfiles, varlist)
patches = []
for f,v in locs.items():
vals = {k: varvalues[k] for k in v}
- patchdata = patch_recipe_file(f, vals, patch, relpath)
+ f = os.path.abspath(f)
+ if f == fn:
+ extravals = {}
+ for var, value in vals.items():
+ if var in list_vars:
+ history = simplify_history(d.varhistory.variable(var), d)
+ recipe_set = False
+ for event in history:
+ if os.path.abspath(event['file']) == fn:
+ if event['op'] == 'set':
+ recipe_set = True
+ if not recipe_set:
+ for event in history:
+ if event['op'].startswith('_remove'):
+ continue
+ if not override_applicable(event):
+ continue
+ newvalue = value.replace(event['detail'], '')
+ if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith('_'):
+ op = event['op'].replace('[', '_').replace(']', '')
+ extravals[var + op] = None
+ value = newvalue
+ vals[var] = ('+=', value)
+ vals.update(extravals)
+ patchdata = patch_recipe_file(f, vals, patch, relpath, redirect_output)
if patch:
patches.append(patchdata)
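For clarity, an illustration of the override_applicable() test above, using a hypothetical OVERRIDES value and an op of the shape recorded in variable history:

overrides = 'linux:qemux86:class-target'.split(':')
op = '_append[qemux86]'
# Same extraction as override_applicable(): the overrides named in the op
opoverrides = op.split('[')[1].split(']')[0].split('_')
assert opoverrides == ['qemux86']
# The event applies only if every named override is currently active
assert all(o in overrides for o in opoverrides)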
@@ -351,7 +390,7 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
-def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
+def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=False):
"""Copy (local) recipe files, including both files included via include/require,
and files referred to in the SRC_URI variable."""
import bb.fetch2
@@ -359,18 +398,41 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
# FIXME need a warning if the unexpanded SRC_URI value contains variable references
- uris = (d.getVar('SRC_URI', True) or "").split()
- fetch = bb.fetch2.Fetch(uris, d)
- if download:
- fetch.download()
+ uri_values = []
+ localpaths = []
+ def fetch_urls(rdata):
+ # Collect the local paths from SRC_URI
+ srcuri = rdata.getVar('SRC_URI') or ""
+ if srcuri not in uri_values:
+ fetch = bb.fetch2.Fetch(srcuri.split(), rdata)
+ if download:
+ fetch.download()
+ for pth in fetch.localpaths():
+ if pth not in localpaths:
+ localpaths.append(pth)
+ uri_values.append(srcuri)
+
+ fetch_urls(d)
+ if all_variants:
+ # Get files for other variants e.g. in the case of a SRC_URI_append
+ localdata = bb.data.createCopy(d)
+ variants = (localdata.getVar('BBCLASSEXTEND') or '').split()
+ if variants:
+ # Ensure we handle class-target if we're dealing with one of the variants
+ variants.append('target')
+ for variant in variants:
+ localdata.setVar('CLASSOVERRIDE', 'class-%s' % variant)
+ fetch_urls(localdata)
# Copy local files to target directory and gather any remote files
- bb_dir = os.path.dirname(d.getVar('FILE', True)) + os.sep
+ bb_dir = os.path.abspath(os.path.dirname(d.getVar('FILE'))) + os.sep
remotes = []
copied = []
- includes = [path for path in d.getVar('BBINCLUDED', True).split() if
- path.startswith(bb_dir) and os.path.exists(path)]
- for path in fetch.localpaths() + includes:
+ # Need to do this in two steps since we want to check against the absolute path
+ includes = [os.path.abspath(path) for path in d.getVar('BBINCLUDED').split() if os.path.exists(path)]
+ # We also check this below, but we don't want any items in this list being considered remotes
+ includes = [path for path in includes if path.startswith(bb_dir)]
+ for path in localpaths + includes:
# Only import files that are under the meta directory
if path.startswith(bb_dir):
if not whole_dir:
@@ -389,15 +451,21 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
return copied, remotes
-def get_recipe_local_files(d, patches=False):
+def get_recipe_local_files(d, patches=False, archives=False):
"""Get a list of local files in SRC_URI within a recipe."""
- uris = (d.getVar('SRC_URI', True) or "").split()
+ import oe.patch
+ uris = (d.getVar('SRC_URI') or "").split()
fetch = bb.fetch2.Fetch(uris, d)
+ # FIXME this list should be factored out somewhere else (such as the
+ # fetcher) though note that this only encompasses actual container formats,
+ # i.e. those that can contain multiple files, as opposed to those that only
+ # contain a compressed stream (e.g. .tar.gz as opposed to just .gz)
+ archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.txz', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
ret = {}
for uri in uris:
if fetch.ud[uri].type == 'file':
if (not patches and
- bb.utils.exec_flat_python_func('patch_path', uri, fetch, '')):
+ oe.patch.patch_path(uri, fetch, '', expand=False)):
continue
# Skip files that are referenced by absolute path
fname = fetch.ud[uri].basepath
@@ -409,16 +477,29 @@ def get_recipe_local_files(d, patches=False):
if os.path.isabs(subdir):
continue
fname = os.path.join(subdir, fname)
- ret[fname] = fetch.localpath(uri)
+ localpath = fetch.localpath(uri)
+ if not archives:
+ # Ignore archives that will be unpacked
+ if localpath.endswith(tuple(archive_exts)):
+ unpack = fetch.ud[uri].parm.get('unpack', True)
+ if unpack:
+ continue
+ if os.path.isdir(localpath):
+ for root, dirs, files in os.walk(localpath):
+ for fname in files:
+ fileabspath = os.path.join(root, fname)
+ srcdir = os.path.dirname(localpath)
+ ret[os.path.relpath(fileabspath, srcdir)] = fileabspath
+ else:
+ ret[fname] = localpath
return ret
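A minimal check (illustrative) of the archive-skipping rule above: a local file is dropped only if it carries a container extension and its unpack parameter is left enabled.

archive_exts_sample = ['.tar', '.tar.gz', '.zip']
assert 'foo-1.0.tar.gz'.endswith(tuple(archive_exts_sample))
assert not 'fix-build.patch'.endswith(tuple(archive_exts_sample))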
def get_recipe_patches(d):
"""Get a list of the patches included in SRC_URI within a recipe."""
+ import oe.patch
+ patches = oe.patch.src_patches(d, expand=False)
patchfiles = []
- # Execute src_patches() defined in patch.bbclass - this works since that class
- # is inherited globally
- patches = bb.utils.exec_flat_python_func('src_patches', d)
for patch in patches:
_, _, local, _, _, parm = bb.fetch.decodeurl(patch)
patchfiles.append(local)
@@ -435,14 +516,12 @@ def get_recipe_patched_files(d):
change mode ('A' for add, 'D' for delete or 'M' for modify)
"""
import oe.patch
- # Execute src_patches() defined in patch.bbclass - this works since that class
- # is inherited globally
- patches = bb.utils.exec_flat_python_func('src_patches', d)
+ patches = oe.patch.src_patches(d, expand=False)
patchedfiles = {}
for patch in patches:
_, _, patchfile, _, _, parm = bb.fetch.decodeurl(patch)
striplevel = int(parm['striplevel'])
- patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, os.path.join(d.getVar('S', True), parm.get('patchdir', '')))
+ patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, os.path.join(d.getVar('S'), parm.get('patchdir', '')))
return patchedfiles
@@ -480,9 +559,9 @@ def get_bbfile_path(d, destdir, extrapathhint=None):
confdata.setVar('LAYERDIR', destlayerdir)
destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
- bbfilespecs = (confdata.getVar('BBFILES', True) or '').split()
+ bbfilespecs = (confdata.getVar('BBFILES') or '').split()
if destdir == destlayerdir:
for bbfilespec in bbfilespecs:
if not bbfilespec.endswith('.bbappend'):
@@ -495,8 +574,8 @@ def get_bbfile_path(d, destdir, extrapathhint=None):
# Try to make up a path that matches BBFILES
# this is a little crude, but better than nothing
- bpn = d.getVar('BPN', True)
- recipefn = os.path.basename(d.getVar('FILE', True))
+ bpn = d.getVar('BPN')
+ recipefn = os.path.basename(d.getVar('FILE'))
pathoptions = [destdir]
if extrapathhint:
pathoptions.append(os.path.join(destdir, extrapathhint))
@@ -520,7 +599,7 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
import bb.cookerdata
destlayerdir = os.path.abspath(destlayerdir)
- recipefile = d.getVar('FILE', True)
+ recipefile = d.getVar('FILE')
recipefn = os.path.splitext(os.path.basename(recipefile))[0]
if wildcardver and '_' in recipefn:
recipefn = recipefn.split('_', 1)[0] + '_%'
@@ -540,7 +619,7 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
appendpath = os.path.join(destlayerdir, os.path.relpath(os.path.dirname(recipefile), origlayerdir), appendfn)
closepath = ''
pathok = True
- for bbfilespec in confdata.getVar('BBFILES', True).split():
+ for bbfilespec in confdata.getVar('BBFILES').split():
if fnmatch.fnmatchcase(appendpath, bbfilespec):
# Our append path works, we're done
break
@@ -568,7 +647,7 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
return (appendpath, pathok)
-def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None):
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None):
"""
Writes a bbappend file for a recipe
Parameters:
@@ -595,6 +674,9 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
value pairs, or simply a list of the lines.
removevalues:
Variable values to remove - a dict of names/values.
+ redirect_output:
+ If specified, redirects writing the output file to the
+ specified directory (for dry-run purposes)
"""
if not removevalues:
@@ -609,11 +691,12 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
appenddir = os.path.dirname(appendpath)
- bb.utils.mkdirhier(appenddir)
+ if not redirect_output:
+ bb.utils.mkdirhier(appenddir)
# FIXME check if the bbappend doesn't get overridden by a higher priority layer?
- layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS', True).split()]
+ layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
if not os.path.abspath(destlayerdir) in layerdirs:
bb.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
@@ -649,7 +732,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
else:
bbappendlines.append((varname, op, value))
- destsubdir = rd.getVar('PN', True)
+ destsubdir = rd.getVar('PN')
if srcfiles:
bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
@@ -668,7 +751,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
srcurientry = 'file://%s' % srcfile
# Double-check it's not there already
# FIXME do we care if the entry is added by another bbappend that might go away?
- if not srcurientry in rd.getVar('SRC_URI', True).split():
+ if srcurientry not in rd.getVar('SRC_URI').split():
if machine:
appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
else:
@@ -686,9 +769,18 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
if instfunclines:
bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
- bb.note('Writing append file %s' % appendpath)
+ if redirect_output:
+ bb.note('Writing append file %s (dry-run)' % appendpath)
+ outfile = os.path.join(redirect_output, os.path.basename(appendpath))
+ # Only take a copy if the file isn't already there (this function may be called
+ # multiple times per operation when we're handling overrides)
+ if os.path.exists(appendpath) and not os.path.exists(outfile):
+ shutil.copy2(appendpath, outfile)
+ else:
+ bb.note('Writing append file %s' % appendpath)
+ outfile = appendpath
- if os.path.exists(appendpath):
+ if os.path.exists(outfile):
# Work around lack of nonlocal in python 2
extvars = {'destsubdir': destsubdir}
@@ -760,7 +852,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
if removevalues:
varnames.extend(list(removevalues.keys()))
- with open(appendpath, 'r') as f:
+ with open(outfile, 'r') as f:
(updated, newlines) = bb.utils.edit_metadata(f, varnames, appendfile_varfunc)
destsubdir = extvars['destsubdir']
@@ -777,16 +869,27 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
updated = True
if updated:
- with open(appendpath, 'w') as f:
+ with open(outfile, 'w') as f:
f.writelines(newlines)
if copyfiles:
if machine:
destsubdir = os.path.join(destsubdir, machine)
+ if redirect_output:
+ outdir = redirect_output
+ else:
+ outdir = appenddir
for newfile, srcfile in copyfiles.items():
- filedest = os.path.join(appenddir, destsubdir, os.path.basename(srcfile))
+ filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
if os.path.abspath(newfile) != os.path.abspath(filedest):
- bb.note('Copying %s to %s' % (newfile, filedest))
+ if newfile.startswith(tempfile.gettempdir()):
+ newfiledisp = os.path.basename(newfile)
+ else:
+ newfiledisp = newfile
+ if redirect_output:
+ bb.note('Copying %s to %s (dry-run)' % (newfiledisp, os.path.join(appenddir, destsubdir, os.path.basename(srcfile))))
+ else:
+ bb.note('Copying %s to %s' % (newfiledisp, filedest))
bb.utils.mkdirhier(os.path.dirname(filedest))
shutil.copyfile(newfile, filedest)
@@ -795,7 +898,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
def find_layerdir(fn):
""" Figure out the path to the base of the layer containing a file (e.g. a recipe)"""
- pth = fn
+ pth = os.path.abspath(fn)
layerdir = ''
while pth:
if os.path.exists(os.path.join(pth, 'conf', 'layer.conf')):
@@ -813,7 +916,7 @@ def replace_dir_vars(path, d):
# Sort by length so we get the variables we're interested in first
for var in sorted(list(d.keys()), key=len):
if var.endswith('dir') and var.lower() == var:
- value = d.getVar(var, True)
+ value = d.getVar(var)
if value.startswith('/') and not '\n' in value and value not in dirvars:
dirvars[value] = var
for dirpath in sorted(list(dirvars.keys()), reverse=True):
@@ -831,7 +934,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type):
sfx = ''
if uri_type == 'git':
- git_regex = re.compile("(?P<pfx>v?)(?P<ver>[^\+]*)((?P<sfx>\+(git)?r?(AUTOINC\+))(?P<rev>.*))?")
+ git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+))(?P<rev>.*)")
m = git_regex.match(pv)
if m:
@@ -839,7 +942,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type):
pfx = m.group('pfx')
sfx = m.group('sfx')
else:
- regex = re.compile("(?P<pfx>(v|r)?)(?P<ver>.*)")
+ regex = re.compile(r"(?P<pfx>(v|r)?)(?P<ver>.*)")
m = regex.match(pv)
if m:
pv = m.group('ver')
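A hedged demonstration of the tightened git regex on a typical AUTOINC-style PV, confirming the lazy ver group stops at the suffix:

import re
git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+))(?P<rev>.*)")
m = git_regex.match("v1.2.3+gitAUTOINC+abcdef123456")
assert (m.group('pfx'), m.group('ver'), m.group('rev')) == ('v', '1.2.3', 'abcdef123456')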
@@ -856,25 +959,25 @@ def get_recipe_upstream_version(rd):
FetchError when don't have network access or upstream site don't response.
NoMethodError when uri latest_versionstring method isn't implemented.
- Returns a dictonary with version, type and datetime.
+ Returns a dictionary with version, repository revision, current_version, type and datetime.
Type can be A for Automatic, M for Manual and U for Unknown.
"""
from bb.fetch2 import decodeurl
from datetime import datetime
ru = {}
+ ru['current_version'] = rd.getVar('PV')
ru['version'] = ''
ru['type'] = 'U'
ru['datetime'] = ''
-
- pv = rd.getVar('PV', True)
+ ru['revision'] = ''
# XXX: If don't have SRC_URI means that don't have upstream sources so
# returns the current recipe version, so that upstream version check
# declares a match.
- src_uris = rd.getVar('SRC_URI', True)
+ src_uris = rd.getVar('SRC_URI')
if not src_uris:
- ru['version'] = pv
+ ru['version'] = ru['current_version']
ru['type'] = 'M'
ru['datetime'] = datetime.now()
return ru
@@ -883,13 +986,16 @@ def get_recipe_upstream_version(rd):
src_uri = src_uris.split()[0]
uri_type, _, _, _, _, _ = decodeurl(src_uri)
- manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION", True)
+ (pv, pfx, sfx) = get_recipe_pv_without_srcpv(rd.getVar('PV'), uri_type)
+ ru['current_version'] = pv
+
+ manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
if manual_upstream_version:
# manual tracking of upstream version.
ru['version'] = manual_upstream_version
ru['type'] = 'M'
- manual_upstream_date = rd.getVar("CHECK_DATE", True)
+ manual_upstream_date = rd.getVar("CHECK_DATE")
if manual_upstream_date:
date = datetime.strptime(manual_upstream_date, "%b %d, %Y")
else:
@@ -903,33 +1009,106 @@ def get_recipe_upstream_version(rd):
ru['datetime'] = datetime.now()
else:
ud = bb.fetch2.FetchData(src_uri, rd)
- pupver = ud.method.latest_versionstring(ud, rd)
- (upversion, revision) = pupver
+ if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
+ revision = ud.method.latest_revision(ud, rd, 'default')
+ upversion = pv
+ if revision != rd.getVar("SRCREV"):
+ upversion = upversion + "-new-commits-available"
+ else:
+ pupver = ud.method.latest_versionstring(ud, rd)
+ (upversion, revision) = pupver
+
+ if upversion:
+ ru['version'] = upversion
+ ru['type'] = 'A'
- # format git version version+gitAUTOINC+HASH
- if uri_type == 'git':
- (pv, pfx, sfx) = get_recipe_pv_without_srcpv(pv, uri_type)
+ if revision:
+ ru['revision'] = revision
- # if contains revision but not upversion use current pv
- if upversion == '' and revision:
- upversion = pv
+ ru['datetime'] = datetime.now()
+
+ return ru
+
+def _get_recipe_upgrade_status(data):
+ uv = get_recipe_upstream_version(data)
- if upversion:
- tmp = upversion
- upversion = ''
+ pn = data.getVar('PN')
+ cur_ver = uv['current_version']
- if pfx:
- upversion = pfx + tmp
+ upstream_version_unknown = data.getVar('UPSTREAM_VERSION_UNKNOWN')
+ if not uv['version']:
+ status = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
+ else:
+ cmp = vercmp_string(uv['current_version'], uv['version'])
+ if cmp == -1:
+ status = "UPDATE" if not upstream_version_unknown else "KNOWN_BROKEN"
+ elif cmp == 0:
+ status = "MATCH" if not upstream_version_unknown else "KNOWN_BROKEN"
+ else:
+ status = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
+
+ next_ver = uv['version'] if uv['version'] else "N/A"
+ revision = uv['revision'] if uv['revision'] else "N/A"
+ maintainer = data.getVar('RECIPE_MAINTAINER')
+ no_upgrade_reason = data.getVar('RECIPE_NO_UPDATE_REASON')
+
+ return (pn, status, cur_ver, next_ver, maintainer, revision, no_upgrade_reason)
+
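For reference, the vercmp_string results that drive the status values above (assuming the upstream version is known and UPSTREAM_VERSION_UNKNOWN is unset):

from bb.utils import vercmp_string
assert vercmp_string('1.0', '1.1') == -1   # current < upstream: "UPDATE"
assert vercmp_string('1.1', '1.1') == 0    # equal versions: "MATCH"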
+def get_recipe_upgrade_status(recipes=None):
+ pkgs_list = []
+ data_copy_list = []
+ copy_vars = ('SRC_URI',
+ 'PV',
+ 'GITDIR',
+ 'DL_DIR',
+ 'PN',
+ 'CACHE',
+ 'PERSISTENT_DIR',
+ 'BB_URI_HEADREVS',
+ 'UPSTREAM_CHECK_COMMITS',
+ 'UPSTREAM_CHECK_GITTAGREGEX',
+ 'UPSTREAM_CHECK_REGEX',
+ 'UPSTREAM_CHECK_URI',
+ 'UPSTREAM_VERSION_UNKNOWN',
+ 'RECIPE_MAINTAINER',
+ 'RECIPE_NO_UPDATE_REASON',
+ 'RECIPE_UPSTREAM_VERSION',
+ 'RECIPE_UPSTREAM_DATE',
+ 'CHECK_DATE',
+ )
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False)
+
+ if not recipes:
+ recipes = tinfoil.all_recipe_files(variants=False)
+
+ for fn in recipes:
+ try:
+ if fn.startswith("/"):
+ data = tinfoil.parse_recipe_file(fn)
else:
- upversion = tmp
+ data = tinfoil.parse_recipe(fn)
+ except bb.providers.NoProvider:
+ bb.note(" No provider for %s" % fn)
+ continue
- if sfx:
- upversion = upversion + sfx + revision[:10]
+ unreliable = data.getVar('UPSTREAM_CHECK_UNRELIABLE')
+ if unreliable == "1":
+ bb.note(" Skip package %s as upstream check unreliable" % pn)
+ continue
- if upversion:
- ru['version'] = upversion
- ru['type'] = 'A'
+ data_copy = bb.data.init()
+ for var in copy_vars:
+ data_copy.setVar(var, data.getVar(var))
+ for k in data:
+ if k.startswith('SRCREV'):
+ data_copy.setVar(k, data.getVar(k))
- ru['datetime'] = datetime.now()
+ data_copy_list.append(data_copy)
- return ru
+ from concurrent.futures import ProcessPoolExecutor
+ with ProcessPoolExecutor(max_workers=utils.cpu_count()) as executor:
+ pkgs_list = executor.map(_get_recipe_upgrade_status, data_copy_list)
+
+ return pkgs_list
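A hypothetical driver for the new API, iterating the 7-tuples returned per recipe (the recipe names below are examples only):

for pn, status, cur, next_ver, maintainer, rev, reason in \
        get_recipe_upgrade_status(['busybox', 'zlib']):
    print('%s: %s (%s -> %s)' % (pn, status, cur, next_ver))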
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index a348b975c2..c62fa5f54a 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.package_manager import *
@@ -15,12 +18,13 @@ class Rootfs(object, metaclass=ABCMeta):
This is an abstract class. Do not instantiate this directly.
"""
- def __init__(self, d, progress_reporter=None):
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
self.d = d
self.pm = None
- self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
- self.deploydir = self.d.getVar('IMGDEPLOYDIR', True)
+ self.image_rootfs = self.d.getVar('IMAGE_ROOTFS')
+ self.deploydir = self.d.getVar('IMGDEPLOYDIR')
self.progress_reporter = progress_reporter
+ self.logcatcher = logcatcher
self.install_order = Manifest.INSTALL_ORDER
@@ -53,6 +57,8 @@ class Rootfs(object, metaclass=ABCMeta):
messages = []
with open(log_path, 'r') as log:
for line in log:
+ if self.logcatcher and self.logcatcher.contains(line.rstrip()):
+ continue
for ee in excludes:
m = ee.search(line)
if m:
@@ -69,7 +75,7 @@ class Rootfs(object, metaclass=ABCMeta):
else:
msg = '%d %s messages' % (len(messages), type)
msg = '[log_check] %s: found %s in the logfile:\n%s' % \
- (self.d.getVar('PN', True), msg, ''.join(messages))
+ (self.d.getVar('PN'), msg, ''.join(messages))
if type == 'error':
bb.fatal(msg)
else:
@@ -84,11 +90,10 @@ class Rootfs(object, metaclass=ABCMeta):
def _insert_feed_uris(self):
if bb.utils.contains("IMAGE_FEATURES", "package-management",
True, False, self.d):
- self.pm.insert_feeds_uris()
+ self.pm.insert_feeds_uris(self.d.getVar('PACKAGE_FEED_URIS') or "",
+ self.d.getVar('PACKAGE_FEED_BASE_PATHS') or "",
+ self.d.getVar('PACKAGE_FEED_ARCHS'))
- @abstractmethod
- def _handle_intercept_failure(self, failed_script):
- pass
"""
The _cleanup() method should be used to clean-up stuff that we don't really
@@ -100,7 +105,7 @@ class Rootfs(object, metaclass=ABCMeta):
pass
def _setup_dbg_rootfs(self, dirs):
- gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS', True) or '0'
+ gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
if gen_debugfs != '1':
return
@@ -142,6 +147,20 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note(" Install complementary '*-dbg' packages...")
self.pm.install_complementary('*-dbg')
+ if self.d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ bb.note(" Install complementary '*-src' packages...")
+ self.pm.install_complementary('*-src')
+
+ """
+ Install additional debug packages: packages that are not automatically
+ installed as the complementary package of a standard one, e.g. the
+ debug package of a static library.
+ """
+ extra_debug_pkgs = self.d.getVar('IMAGE_INSTALL_DEBUGFS')
+ if extra_debug_pkgs:
+ bb.note(" Install extra debug packages...")
+ self.pm.install(extra_debug_pkgs.split(), True)
+
bb.note(" Rename debug rootfs...")
try:
shutil.rmtree(self.image_rootfs + '-dbg')
@@ -153,7 +172,7 @@ class Rootfs(object, metaclass=ABCMeta):
os.rename(self.image_rootfs + '-orig', self.image_rootfs)
def _exec_shell_cmd(self, cmd):
- fakerootcmd = self.d.getVar('FAKEROOT', True)
+ fakerootcmd = self.d.getVar('FAKEROOT')
if fakerootcmd is not None:
exec_cmd = [fakerootcmd, cmd]
else:
@@ -168,28 +187,14 @@ class Rootfs(object, metaclass=ABCMeta):
def create(self):
bb.note("###### Generate rootfs #######")
- pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True)
- post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True)
- rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True)
-
- postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR", True)
- if not postinst_intercepts_dir:
- postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
- "intercept_scripts")
-
- bb.utils.remove(intercepts_dir, True)
+ pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND")
+ post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
+ rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
bb.utils.mkdirhier(self.image_rootfs)
bb.utils.mkdirhier(self.deploydir)
- shutil.copytree(postinst_intercepts_dir, intercepts_dir)
-
- shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"),
- self.deploydir +
- "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt")
-
execute_pre_post_process(self.d, pre_process_cmds)
if self.progress_reporter:
@@ -198,14 +203,14 @@ class Rootfs(object, metaclass=ABCMeta):
# call the package manager dependent create method
self._create()
- sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True)
+ sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir')
bb.utils.mkdirhier(sysconfdir)
with open(sysconfdir + "/version", "w+") as ver:
- ver.write(self.d.getVar('BUILDNAME', True) + "\n")
+ ver.write(self.d.getVar('BUILDNAME') + "\n")
execute_pre_post_process(self.d, rootfs_post_install_cmds)
- self._run_intercepts()
+ self.pm.run_intercepts()
execute_pre_post_process(self.d, post_process_cmds)
@@ -220,7 +225,7 @@ class Rootfs(object, metaclass=ABCMeta):
"offline and rootfs is read-only: %s" %
delayed_postinsts)
- if self.d.getVar('USE_DEVFS', True) != "1":
+ if self.d.getVar('USE_DEVFS') != "1":
self._create_devfs()
self._uninstall_unneeded()
@@ -232,7 +237,7 @@ class Rootfs(object, metaclass=ABCMeta):
self._run_ldconfig()
- if self.d.getVar('USE_DEPMOD', True) != "0":
+ if self.d.getVar('USE_DEPMOD') != "0":
self._generate_kernel_module_deps()
self._cleanup()
@@ -248,21 +253,33 @@ class Rootfs(object, metaclass=ABCMeta):
if delayed_postinsts is None:
if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
self._exec_shell_cmd(["update-rc.d", "-f", "-r",
- self.d.getVar('IMAGE_ROOTFS', True),
+ self.d.getVar('IMAGE_ROOTFS'),
"run-postinsts", "remove"])
image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
True, False, self.d)
- image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE', True)
+ image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE')
if image_rorfs or image_rorfs_force == "1":
# Remove components that we don't need if it's a read-only rootfs
- unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED", True).split()
+ unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED").split()
pkgs_installed = image_list_installed_packages(self.d)
- pkgs_to_remove = [pkg for pkg in pkgs_installed if pkg in unneeded_pkgs]
-
+ # Make sure update-alternatives is removed last. This is
+ # because its database has to be available while uninstalling
+ # other packages, allowing alternative symlinks of packages
+ # to be uninstalled or otherwise managed correctly.
+ provider = self.d.getVar("VIRTUAL-RUNTIME_update-alternatives")
+ pkgs_to_remove = sorted([pkg for pkg in pkgs_installed if pkg in unneeded_pkgs], key=lambda x: x == provider)
+
+ # update-alternatives provider is removed in its own remove()
+ # call because not all package managers guarantee that packages
+ # are removed in the order they are given in the list (which is
+ # passed to the command line). The sorting done earlier is
+ # utilized to implement the 2-stage removal.
+ if len(pkgs_to_remove) > 1:
+ self.pm.remove(pkgs_to_remove[:-1], False)
if len(pkgs_to_remove) > 0:
- self.pm.remove(pkgs_to_remove, False)
+ self.pm.remove([pkgs_to_remove[-1]], False)
if delayed_postinsts:
self._save_postinsts()
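The sort above relies on False ordering before True with a stable sort, so the update-alternatives provider always lands last; a quick illustration with made-up package names:

pkgs = ['busybox', 'update-alternatives-opkg', 'netbase']
provider = 'update-alternatives-opkg'
assert sorted(pkgs, key=lambda x: x == provider)[-1] == provider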
@@ -270,7 +287,7 @@ class Rootfs(object, metaclass=ABCMeta):
bb.warn("There are post install scripts "
"in a read-only rootfs")
- post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND", True)
+ post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND")
execute_pre_post_process(self.d, post_uninstall_cmds)
runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
@@ -279,45 +296,8 @@ class Rootfs(object, metaclass=ABCMeta):
# Remove the package manager data files
self.pm.remove_packaging_data()
- def _run_intercepts(self):
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
- "intercept_scripts")
-
- bb.note("Running intercept scripts:")
- os.environ['D'] = self.image_rootfs
- os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE', True)
- for script in os.listdir(intercepts_dir):
- script_full = os.path.join(intercepts_dir, script)
-
- if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
- continue
-
- bb.note("> Executing %s intercept ..." % script)
-
- try:
- subprocess.check_call(script_full)
- except subprocess.CalledProcessError as e:
- bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" %
- (script, e.returncode))
-
- with open(script_full) as intercept:
- registered_pkgs = None
- for line in intercept.read().split("\n"):
- m = re.match("^##PKGS:(.*)", line)
- if m is not None:
- registered_pkgs = m.group(1).strip()
- break
-
- if registered_pkgs is not None:
- bb.warn("The postinstalls for the following packages "
- "will be postponed for first boot: %s" %
- registered_pkgs)
-
- # call the backend dependent handler
- self._handle_intercept_failure(registered_pkgs)
-
def _run_ldconfig(self):
- if self.d.getVar('LDCONFIGDEPEND', True):
+ if self.d.getVar('LDCONFIGDEPEND'):
bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
'new', '-v'])
@@ -337,7 +317,7 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note("No Kernel Modules found, not running depmod")
return
- kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR', True), "kernel-depmod",
+ kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR'), "kernel-depmod",
'kernel-abiversion')
if not os.path.exists(kernel_abi_ver_file):
bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
@@ -359,15 +339,15 @@ class Rootfs(object, metaclass=ABCMeta):
"""
def _create_devfs(self):
devtable_list = []
- devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True)
+ devtable = self.d.getVar('IMAGE_DEVICE_TABLE')
if devtable is not None:
devtable_list.append(devtable)
else:
- devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True)
+ devtables = self.d.getVar('IMAGE_DEVICE_TABLES')
if devtables is None:
devtables = 'files/device_table-minimal.txt'
for devtable in devtables.split():
- devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable))
+ devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH'), devtable))
for devtable in devtable_list:
self._exec_shell_cmd(["makedevs", "-r",
@@ -375,24 +355,24 @@ class Rootfs(object, metaclass=ABCMeta):
class RpmRootfs(Rootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None):
- super(RpmRootfs, self).__init__(d, progress_reporter)
- self.log_check_regex = '(unpacking of archive failed|Cannot find package'\
- '|exit 1|ERROR: |Error: |Error |ERROR '\
- '|Failed |Failed: |Failed$|Failed\(\d+\):)'
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\
+ r'|exit 1|ERROR: |Error: |Error |ERROR '\
+ r'|Failed |Failed: |Failed$|Failed\(\d+\):)'
self.manifest = RpmManifest(d, manifest_dir)
self.pm = RpmPM(d,
- d.getVar('IMAGE_ROOTFS', True),
- self.d.getVar('TARGET_VENDOR', True)
+ d.getVar('IMAGE_ROOTFS'),
+ self.d.getVar('TARGET_VENDOR')
)
- self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True)
+ self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
if self.inc_rpm_image_gen != "1":
bb.utils.remove(self.image_rootfs, True)
else:
self.pm.recovery_packaging_data()
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
self.pm.create_configs()
@@ -424,10 +404,12 @@ class RpmRootfs(Rootfs):
bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
self.pm.remove(pkg_to_remove)
+ self.pm.autoremove()
+
def _create(self):
pkgs_to_install = self.manifest.parse_initial_manifest()
- rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS', True)
- rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS', True)
+ rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
+ rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
# update PM index files
self.pm.write_index()
@@ -437,8 +419,6 @@ class RpmRootfs(Rootfs):
if self.progress_reporter:
self.progress_reporter.next_stage()
- self.pm.dump_all_available_pkgs()
-
if self.inc_rpm_image_gen == "1":
self._create_incremental(pkgs_to_install)
@@ -473,17 +453,13 @@ class RpmRootfs(Rootfs):
if self.progress_reporter:
self.progress_reporter.next_stage()
- self._setup_dbg_rootfs(['/etc/rpm', '/var/lib/rpm', '/var/lib/smart'])
+ self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
execute_pre_post_process(self.d, rpm_post_process_cmds)
- self._log_check()
-
if self.inc_rpm_image_gen == "1":
self.pm.backup_packaging_data()
- self.pm.rpm_setup_smart_target_config()
-
if self.progress_reporter:
self.progress_reporter.next_stage()
@@ -512,35 +488,21 @@ class RpmRootfs(Rootfs):
self._log_check_warn()
self._log_check_error()
- def _handle_intercept_failure(self, registered_pkgs):
- rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
- bb.utils.mkdirhier(rpm_postinsts_dir)
-
- # Save the package postinstalls in /etc/rpm-postinsts
- for pkg in registered_pkgs.split():
- self.pm.save_rpmpostinst(pkg)
-
def _cleanup(self):
- # during the execution of postprocess commands, rpm is called several
- # times to get the files installed, dependencies, etc. This creates the
- # __db.00* (Berkeley DB files that hold locks, rpm specific environment
- # settings, etc.), that should not get into the final rootfs
- self.pm.unlock_rpm_db()
- if os.path.isdir(self.pm.install_dir_path + "/tmp") and not os.listdir(self.pm.install_dir_path + "/tmp"):
- bb.utils.remove(self.pm.install_dir_path + "/tmp", True)
- if os.path.isdir(self.pm.install_dir_path) and not os.listdir(self.pm.install_dir_path):
- bb.utils.remove(self.pm.install_dir_path, True)
+ if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d):
+ self.pm._invoke_dnf(["clean", "all"])
+
class DpkgOpkgRootfs(Rootfs):
- def __init__(self, d, progress_reporter=None):
- super(DpkgOpkgRootfs, self).__init__(d, progress_reporter)
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
+ super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
def _get_pkgs_postinsts(self, status_file):
def _get_pkg_depends_list(pkg_depends):
pkg_depends_list = []
# filter version requirements like libc (>= 1.1)
for dep in pkg_depends.split(', '):
- m_dep = re.match("^(.*) \(.*\)$", dep)
+ m_dep = re.match(r"^(.*) \(.*\)$", dep)
if m_dep:
dep = m_dep.group(1)
pkg_depends_list.append(dep)
@@ -556,21 +518,33 @@ class DpkgOpkgRootfs(Rootfs):
data = status.read()
status.close()
for line in data.split('\n'):
- m_pkg = re.match("^Package: (.*)", line)
- m_status = re.match("^Status:.*unpacked", line)
- m_depends = re.match("^Depends: (.*)", line)
+ m_pkg = re.match(r"^Package: (.*)", line)
+ m_status = re.match(r"^Status:.*unpacked", line)
+ m_depends = re.match(r"^Depends: (.*)", line)
+ # Only one of m_pkg, m_status or m_depends is not None at a time
+ # If m_pkg is not None, we have started a new package
if m_pkg is not None:
- if pkg_name and pkg_status_match:
- pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
-
+ #Get Package name
pkg_name = m_pkg.group(1)
+ #Make sure we reset other variables
pkg_status_match = False
pkg_depends = ""
elif m_status is not None:
+ #New status matched
pkg_status_match = True
elif m_depends is not None:
+ # New depends matched
pkg_depends = m_depends.group(1)
+ else:
+ pass
+
+ #Now check if we can process package depends and postinst
+ if "" != pkg_name and pkg_status_match:
+ pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+ else:
+ #Not enough information
+ pass
# remove package dependencies not in postinsts
pkg_names = list(pkgs.keys())
@@ -600,7 +574,7 @@ class DpkgOpkgRootfs(Rootfs):
pkg_list = []
pkgs = None
- if not self.d.getVar('PACKAGE_INSTALL', True).strip():
+ if not self.d.getVar('PACKAGE_INSTALL').strip():
bb.note("Building empty image")
else:
pkgs = self._get_pkgs_postinsts(status_file)
@@ -616,6 +590,9 @@ class DpkgOpkgRootfs(Rootfs):
return pkg_list
def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d):
+ return
num = 0
for p in self._get_delayed_postinsts():
bb.utils.mkdirhier(dst_postinst_dir)
@@ -627,8 +604,8 @@ class DpkgOpkgRootfs(Rootfs):
num += 1
class DpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None):
- super(DpkgRootfs, self).__init__(d, progress_reporter)
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
self.log_check_regex = '^E:'
self.log_check_expected_regexes = \
[
@@ -636,17 +613,17 @@ class DpkgRootfs(DpkgOpkgRootfs):
]
bb.utils.remove(self.image_rootfs, True)
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
self.manifest = DpkgManifest(d, manifest_dir)
- self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
- d.getVar('PACKAGE_ARCHS', True),
- d.getVar('DPKG_ARCH', True))
+ self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
+ d.getVar('PACKAGE_ARCHS'),
+ d.getVar('DPKG_ARCH'))
def _create(self):
pkgs_to_install = self.manifest.parse_initial_manifest()
- deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS', True)
- deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS', True)
+ deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
+ deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
bb.utils.mkdirhier(alt_dir)
@@ -670,6 +647,7 @@ class DpkgRootfs(DpkgOpkgRootfs):
if pkg_type in pkgs_to_install:
self.pm.install(pkgs_to_install[pkg_type],
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+ self.pm.fix_broken_dependencies()
if self.progress_reporter:
# Don't support attemptonly, so skip that
@@ -707,9 +685,6 @@ class DpkgRootfs(DpkgOpkgRootfs):
src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
def _log_check(self):
self._log_check_warn()
self._log_check_error()
@@ -719,15 +694,15 @@ class DpkgRootfs(DpkgOpkgRootfs):
class OpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None):
- super(OpkgRootfs, self).__init__(d, progress_reporter)
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
self.log_check_regex = '(exit 1|Collected errors)'
self.manifest = OpkgManifest(d, manifest_dir)
- self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
- self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)
+ self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
- self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
+ self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
if self._remove_old_rootfs():
bb.utils.remove(self.image_rootfs, True)
self.pm = OpkgPM(d,
@@ -741,7 +716,7 @@ class OpkgRootfs(DpkgOpkgRootfs):
self.pkg_archs)
self.pm.recover_packaging_data()
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
def _prelink_file(self, root_dir, filename):
bb.note('prelink %s in %s' % (filename, root_dir))
@@ -776,15 +751,16 @@ class OpkgRootfs(DpkgOpkgRootfs):
if filecmp.cmp(f1, f2):
return True
- if self.image_rootfs not in f1:
- self._prelink_file(f1.replace(key, ''), f1)
+ if bb.data.inherits_class('image-prelink', self.d):
+ if self.image_rootfs not in f1:
+ self._prelink_file(f1.replace(key, ''), f1)
- if self.image_rootfs not in f2:
- self._prelink_file(f2.replace(key, ''), f2)
+ if self.image_rootfs not in f2:
+ self._prelink_file(f2.replace(key, ''), f2)
- # Both of them are prelinked
- if filecmp.cmp(f1, f2):
- return True
+ # Both of them are prelinked
+ if filecmp.cmp(f1, f2):
+ return True
# Not equal
return False
@@ -796,11 +772,11 @@ class OpkgRootfs(DpkgOpkgRootfs):
"""
def _multilib_sanity_test(self, dirs):
- allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True)
+ allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
if allow_replace is None:
allow_replace = ""
- allow_rep = re.compile(re.sub("\|$", "", allow_replace))
+ allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace))
error_prompt = "Multilib check error:"
files = {}
@@ -828,12 +804,12 @@ class OpkgRootfs(DpkgOpkgRootfs):
files[key] = item
def _multilib_test_install(self, pkgs):
- ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True)
+ ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
bb.utils.mkdirhier(ml_temp)
dirs = [self.image_rootfs]
- for variant in self.d.getVar("MULTILIB_VARIANTS", True).split():
+ for variant in self.d.getVar("MULTILIB_VARIANTS").split():
ml_target_rootfs = os.path.join(ml_temp, variant)
bb.utils.remove(ml_target_rootfs, True)
@@ -841,7 +817,7 @@ class OpkgRootfs(DpkgOpkgRootfs):
ml_opkg_conf = os.path.join(ml_temp,
variant + "-" + os.path.basename(self.opkg_conf))
- ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
+ ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False)
ml_pm.update()
ml_pm.install(pkgs)
@@ -893,9 +869,9 @@ class OpkgRootfs(DpkgOpkgRootfs):
old_vars_list = open(vars_list_file, 'r+').read()
new_vars_list = '%s:%s:%s\n' % \
- ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(),
- (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(),
- (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip())
+ ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
open(vars_list_file, 'w+').write(new_vars_list)
if old_vars_list != new_vars_list:
@@ -905,12 +881,11 @@ class OpkgRootfs(DpkgOpkgRootfs):
def _create(self):
pkgs_to_install = self.manifest.parse_initial_manifest()
- opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True)
- opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True)
+ opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
+ opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
- # update PM index files, unless users provide their own feeds
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
- self.pm.write_index()
+ # update PM index files
+ self.pm.write_index()
execute_pre_post_process(self.d, opkg_pre_process_cmds)
@@ -921,8 +896,6 @@ class OpkgRootfs(DpkgOpkgRootfs):
self.pm.update()
- self.pm.handle_bad_recommendations()
-
if self.progress_reporter:
self.progress_reporter.next_stage()
@@ -951,7 +924,9 @@ class OpkgRootfs(DpkgOpkgRootfs):
if self.progress_reporter:
self.progress_reporter.next_stage()
- self._setup_dbg_rootfs(['/etc', '/var/lib/opkg', '/usr/lib/ssl'])
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
+ self._setup_dbg_rootfs([opkg_dir])
execute_pre_post_process(self.d, opkg_post_process_cmds)
@@ -967,7 +942,7 @@ class OpkgRootfs(DpkgOpkgRootfs):
def _get_delayed_postinsts(self):
status_file = os.path.join(self.image_rootfs,
- self.d.getVar('OPKGLIBDIR', True).strip('/'),
+ self.d.getVar('OPKGLIBDIR').strip('/'),
"opkg", "status")
return self._get_delayed_postinsts_common(status_file)
@@ -976,9 +951,6 @@ class OpkgRootfs(DpkgOpkgRootfs):
src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
def _log_check(self):
self._log_check_warn()
self._log_check_error()
@@ -992,20 +964,20 @@ def get_class_for_type(imgtype):
"deb": DpkgRootfs}[imgtype]
def variable_depends(d, manifest_dir=None):
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
cls = get_class_for_type(img_type)
return cls._depends_list()
-def create_rootfs(d, manifest_dir=None, progress_reporter=None):
+def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None):
env_bkp = os.environ.copy()
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
if img_type == "rpm":
- RpmRootfs(d, manifest_dir, progress_reporter).create()
+ RpmRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
elif img_type == "ipk":
- OpkgRootfs(d, manifest_dir, progress_reporter).create()
+ OpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
elif img_type == "deb":
- DpkgRootfs(d, manifest_dir, progress_reporter).create()
+ DpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
os.environ.clear()
os.environ.update(env_bkp)
@@ -1013,13 +985,13 @@ def create_rootfs(d, manifest_dir=None, progress_reporter=None):
def image_list_installed_packages(d, rootfs_dir=None):
if not rootfs_dir:
- rootfs_dir = d.getVar('IMAGE_ROOTFS', True)
+ rootfs_dir = d.getVar('IMAGE_ROOTFS')
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
if img_type == "rpm":
return RpmPkgsList(d, rootfs_dir).list_pkgs()
elif img_type == "ipk":
- return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True)).list_pkgs()
+ return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET")).list_pkgs()
elif img_type == "deb":
return DpkgPkgsList(d, rootfs_dir).list_pkgs()
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
index c74525f929..d02a274812 100644
--- a/meta/lib/oe/sdk.py
+++ b/meta/lib/oe/sdk.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.manifest import *
@@ -7,20 +11,19 @@ import shutil
import glob
import traceback
-
class Sdk(object, metaclass=ABCMeta):
def __init__(self, d, manifest_dir):
self.d = d
- self.sdk_output = self.d.getVar('SDK_OUTPUT', True)
- self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/')
- self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/')
- self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/')
+ self.sdk_output = self.d.getVar('SDK_OUTPUT')
+ self.sdk_native_path = self.d.getVar('SDKPATHNATIVE').strip('/')
+ self.target_path = self.d.getVar('SDKTARGETSYSROOT').strip('/')
+ self.sysconfdir = self.d.getVar('sysconfdir').strip('/')
self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
self.sdk_host_sysroot = self.sdk_output
if manifest_dir is None:
- self.manifest_dir = self.d.getVar("SDK_DIR", True)
+ self.manifest_dir = self.d.getVar("SDK_DIR")
else:
self.manifest_dir = manifest_dir
@@ -40,12 +43,12 @@ class Sdk(object, metaclass=ABCMeta):
# Don't ship any libGL in the SDK
self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('libdir_nativesdk', True).strip('/'),
+ self.d.getVar('libdir_nativesdk').strip('/'),
"libGL*"))
# Fix or remove broken .la files
self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('libdir_nativesdk', True).strip('/'),
+ self.d.getVar('libdir_nativesdk').strip('/'),
"*.la"))
# Link the ld.so.cache file into the hosts filesystem
@@ -54,7 +57,7 @@ class Sdk(object, metaclass=ABCMeta):
self.mkdirhier(os.path.dirname(link_name))
os.symlink("/etc/ld.so.cache", link_name)
- execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))
+ execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND'))
def movefile(self, sourcefile, destdir):
try:
@@ -84,8 +87,31 @@ class Sdk(object, metaclass=ABCMeta):
bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
bb.warn("cannot remove SDK dir: %s" % path)
+ def install_locales(self, pm):
+ linguas = self.d.getVar("SDKIMAGE_LINGUAS")
+ if linguas:
+ import fnmatch
+ # Install the binary locales
+ if linguas == "all":
+ pm.install_glob("nativesdk-glibc-binary-localedata-*.utf-8", sdk=True)
+ else:
+ pm.install(["nativesdk-glibc-binary-localedata-%s.utf-8" % \
+ lang for lang in linguas.split()])
+ # Generate a locale archive of them
+ target_arch = self.d.getVar('SDK_ARCH')
+ rootfs = oe.path.join(self.sdk_host_sysroot, self.sdk_native_path)
+ localedir = oe.path.join(rootfs, self.d.getVar("libdir_nativesdk"), "locale")
+ generate_locale_archive(self.d, rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ pkgs = fnmatch.filter(pm.list_installed(), "nativesdk-glibc-binary-localedata-*.utf-8")
+ pm.remove(pkgs)
+ else:
+ # No linguas so do nothing
+ pass
+
+
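A sketch of the package names install_locales() derives, assuming a hypothetical SDKIMAGE_LINGUAS of "en-us en-gb":

linguas = "en-us en-gb"
pkgs = ["nativesdk-glibc-binary-localedata-%s.utf-8" % lang
        for lang in linguas.split()]
# ['nativesdk-glibc-binary-localedata-en-us.utf-8',
#  'nativesdk-glibc-binary-localedata-en-gb.utf-8']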
class RpmSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
+ def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
super(RpmSdk, self).__init__(d, manifest_dir)
self.target_manifest = RpmManifest(d, self.manifest_dir,
@@ -93,36 +119,24 @@ class RpmSdk(Sdk):
self.host_manifest = RpmManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_HOST)
- target_providename = ['/bin/sh',
- '/bin/bash',
- '/usr/bin/env',
- '/usr/bin/perl',
- 'pkgconfig'
- ]
+ rpm_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ rpm_repo_workdir = "oe-sdk-ext-repo"
self.target_pm = RpmPM(d,
self.sdk_target_sysroot,
- self.d.getVar('TARGET_VENDOR', True),
+ self.d.getVar('TARGET_VENDOR'),
'target',
- target_providename
+ rpm_repo_workdir=rpm_repo_workdir
)
- sdk_providename = ['/bin/sh',
- '/bin/bash',
- '/usr/bin/env',
- '/usr/bin/perl',
- 'pkgconfig',
- 'libGL.so()(64bit)',
- 'libGL.so'
- ]
-
self.host_pm = RpmPM(d,
self.sdk_host_sysroot,
- self.d.getVar('SDK_VENDOR', True),
+ self.d.getVar('SDK_VENDOR'),
'host',
- sdk_providename,
"SDK_PACKAGE_ARCHS",
- "SDK_OS"
+ "SDK_OS",
+ rpm_repo_workdir=rpm_repo_workdir
)
def _populate_sysroot(self, pm, manifest):
@@ -130,7 +144,6 @@ class RpmSdk(Sdk):
pm.create_configs()
pm.write_index()
- pm.dump_all_available_pkgs()
pm.update()
pkgs = []
@@ -146,20 +159,27 @@ class RpmSdk(Sdk):
pm.install(pkgs_attempt, True)
def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
bb.note("Installing TARGET packages")
self._populate_sysroot(self.target_pm, self.target_manifest)
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+ self.target_pm.run_intercepts(populate_sdk='target')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.target_pm.remove_packaging_data()
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts(populate_sdk='host')
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.host_pm.remove_packaging_data()
@@ -167,7 +187,7 @@ class RpmSdk(Sdk):
# Move host RPM library data
native_rpm_state_dir = os.path.join(self.sdk_output,
self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk', True).strip('/'),
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
"lib",
"rpm"
)
@@ -188,7 +208,9 @@ class RpmSdk(Sdk):
True).strip('/'),
)
self.mkdirhier(native_sysconf_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
+ self.movefile(f, native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
self.movefile(f, native_sysconf_dir)
self.remove(os.path.join(self.sdk_output, "etc"), True)
@@ -197,24 +219,30 @@ class OpkgSdk(Sdk):
def __init__(self, d, manifest_dir=None):
super(OpkgSdk, self).__init__(d, manifest_dir)
- self.target_conf = self.d.getVar("IPKGCONF_TARGET", True)
- self.host_conf = self.d.getVar("IPKGCONF_SDK", True)
+ self.target_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.host_conf = self.d.getVar("IPKGCONF_SDK")
self.target_manifest = OpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_TARGET)
self.host_manifest = OpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_HOST)
+ ipk_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ ipk_repo_workdir = "oe-sdk-ext-repo"
+
self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
- self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
+ self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
+ ipk_repo_workdir=ipk_repo_workdir)
self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
- self.d.getVar("SDK_PACKAGE_ARCHS", True))
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ ipk_repo_workdir=ipk_repo_workdir)
def _populate_sysroot(self, pm, manifest):
pkgs_to_install = manifest.parse_initial_manifest()
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
+ if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
pm.write_index()
pm.update()
@@ -225,20 +253,27 @@ class OpkgSdk(Sdk):
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
bb.note("Installing TARGET packages")
self._populate_sysroot(self.target_pm, self.target_manifest)
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+ self.target_pm.run_intercepts(populate_sdk='target')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.target_pm.remove_packaging_data()
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts(populate_sdk='host')
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.host_pm.remove_packaging_data()
@@ -257,7 +292,7 @@ class OpkgSdk(Sdk):
os.path.basename(self.host_conf)), 0o644)
native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk', True).strip('/'),
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
"lib", "opkg")
self.mkdirhier(native_opkg_state_dir)
for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
@@ -270,26 +305,32 @@ class DpkgSdk(Sdk):
def __init__(self, d, manifest_dir=None):
super(DpkgSdk, self).__init__(d, manifest_dir)
- self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt")
- self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt-sdk")
+ self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
+ self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
self.target_manifest = DpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_TARGET)
self.host_manifest = DpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_HOST)
+ deb_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ deb_repo_workdir = "oe-sdk-ext-repo"
+
self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
- self.d.getVar("PACKAGE_ARCHS", True),
- self.d.getVar("DPKG_ARCH", True),
- self.target_conf_dir)
+ self.d.getVar("PACKAGE_ARCHS"),
+ self.d.getVar("DPKG_ARCH"),
+ self.target_conf_dir,
+ deb_repo_workdir=deb_repo_workdir)
self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
- self.d.getVar("SDK_PACKAGE_ARCHS", True),
- self.d.getVar("DEB_SDK_ARCH", True),
- self.host_conf_dir)
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ self.d.getVar("DEB_SDK_ARCH"),
+ self.host_conf_dir,
+ deb_repo_workdir=deb_repo_workdir)
def _copy_apt_dir_to(self, dst_dir):
- staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True)
+ staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
self.remove(dst_dir, True)
@@ -307,12 +348,16 @@ class DpkgSdk(Sdk):
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
bb.note("Installing TARGET packages")
self._populate_sysroot(self.target_pm, self.target_manifest)
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_intercepts(populate_sdk='target')
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
@@ -321,8 +366,11 @@ class DpkgSdk(Sdk):
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
"etc", "apt"))
@@ -341,26 +389,26 @@ class DpkgSdk(Sdk):
def sdk_list_installed_packages(d, target, rootfs_dir=None):
if rootfs_dir is None:
- sdk_output = d.getVar('SDK_OUTPUT', True)
- target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/')
+ sdk_output = d.getVar('SDK_OUTPUT')
+ target_path = d.getVar('SDKTARGETSYSROOT').strip('/')
rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
if img_type == "rpm":
arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
os_var = ["SDK_OS", None][target is True]
- return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list_pkgs()
+ return RpmPkgsList(d, rootfs_dir).list_pkgs()
elif img_type == "ipk":
conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
- return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list_pkgs()
+ return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var)).list_pkgs()
elif img_type == "deb":
return DpkgPkgsList(d, rootfs_dir).list_pkgs()
def populate_sdk(d, manifest_dir=None):
env_bkp = os.environ.copy()
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
if img_type == "rpm":
RpmSdk(d, manifest_dir).populate()
elif img_type == "ipk":
@@ -371,5 +419,24 @@ def populate_sdk(d, manifest_dir=None):
os.environ.clear()
os.environ.update(env_bkp)
+def get_extra_sdkinfo(sstate_dir):
+ """
+ Collect per-task and per-file sstate archive sizes, used when generating the target and host manifest files and package information for the eSDK.
+ """
+ import math
+
+ extra_info = {}
+ extra_info['tasksizes'] = {}
+ extra_info['filesizes'] = {}
+ for root, _, files in os.walk(sstate_dir):
+ for fn in files:
+ if fn.endswith('.tgz'):
+ fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
+ task = fn.rsplit(':',1)[1].split('_',1)[1].split(',')[0]
+ origtotal = extra_info['tasksizes'].get(task, 0)
+ extra_info['tasksizes'][task] = origtotal + fsize
+ extra_info['filesizes'][fn] = fsize
+ return extra_info
+
if __name__ == "__main__":
pass
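As a usage illustration, the structure returned by get_extra_sdkinfo() can be consumed like this — a minimal sketch assuming the module is importable as oe.sdk and that an sstate cache exists at the (hypothetical) path shown:

    from oe.sdk import get_extra_sdkinfo

    extra_info = get_extra_sdkinfo('/home/builder/build/sstate-cache')

    # 'tasksizes' accumulates KiB per task across all .tgz archives found;
    # 'filesizes' records each individual archive's size in KiB.
    for task, size in sorted(extra_info['tasksizes'].items()):
        print('%s: %d KiB' % (task, size))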
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index 8224e3a12e..7cecb59d8e 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -1,4 +1,8 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import bb.siggen
+import oe
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# Return True if we should keep the dependency, False to drop it
@@ -20,19 +24,22 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
def isImage(fn):
return "/image.bbclass" in " ".join(dataCache.inherits[fn])
- # Always include our own inter-task dependencies
+ # (Almost) always include our own inter-task dependencies.
+ # The exception is the special do_kernel_configme->do_unpack_and_patch
+ # dependency from archiver.bbclass.
if recipename == depname:
+ if task == "do_kernel_configme" and dep.endswith(".do_unpack_and_patch"):
+ return False
return True
- # Quilt (patch application) changing isn't likely to affect anything
- excludelist = ['quilt-native', 'subversion-native', 'git-native']
- if depname in excludelist and recipename != depname:
- return False
-
# Exclude well defined recipe->dependency
if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
return False
+ # Check for special wildcard
+ if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
+ return False
+
# Don't change native/cross/nativesdk recipe dependencies any further
if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
return True
@@ -41,7 +48,7 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# allarch packagegroups are assumed to have well behaved names which don't change between architectures/tunes
if isPackageGroup(fn) and isAllArch(fn) and not isNative(depname):
- return False
+ return False
# Exclude well defined machine specific configurations which don't change ABI
if depname in siggen.abisaferecipes and not isImage(fn):
@@ -52,7 +59,7 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# is machine specific.
# Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
# and we recommend a kernel-module, we exclude the dependency.
- depfn = dep.rsplit(".", 1)[0]
+ depfn = dep.rsplit(":", 1)[0]
if dataCache and isKernel(depfn) and not isKernel(fn):
for pkg in dataCache.runrecs[fn]:
if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
@@ -63,10 +70,10 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
def sstate_lockedsigs(d):
sigs = {}
- types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES", True) or "").split()
+ types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES") or "").split()
for t in types:
siggen_lockedsigs_var = "SIGGEN_LOCKEDSIGS_%s" % t
- lockedsigs = (d.getVar(siggen_lockedsigs_var, True) or "").split()
+ lockedsigs = (d.getVar(siggen_lockedsigs_var) or "").split()
for ls in lockedsigs:
pn, task, h = ls.split(":", 2)
if pn not in sigs:
@@ -77,24 +84,23 @@ def sstate_lockedsigs(d):
class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
name = "OEBasic"
def init_rundepcheck(self, data):
- self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
- self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+ self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
+ self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
pass
def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
-class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
- name = "OEBasicHash"
+class SignatureGeneratorOEBasicHashMixIn(object):
def init_rundepcheck(self, data):
- self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
- self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+ self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
+ self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
self.lockedsigs = sstate_lockedsigs(data)
self.lockedhashes = {}
self.lockedpnmap = {}
self.lockedhashfn = {}
- self.machine = data.getVar("MACHINE", True)
+ self.machine = data.getVar("MACHINE")
self.mismatch_msgs = []
- self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES", True) or
+ self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
"").split()
self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
pass
@@ -122,12 +128,11 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
def get_taskdata(self):
- data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
- return (data, self.lockedpnmap, self.lockedhashfn)
+ return (self.lockedpnmap, self.lockedhashfn, self.lockedhashes) + super().get_taskdata()
def set_taskdata(self, data):
- coredata, self.lockedpnmap, self.lockedhashfn = data
- super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)
+ self.lockedpnmap, self.lockedhashfn, self.lockedhashes = data[:3]
+ super().set_taskdata(data[3:])
def dump_sigs(self, dataCache, options):
sigfile = os.getcwd() + "/locked-sigs.inc"
@@ -135,8 +140,16 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
self.dump_lockedsigs(sigfile)
return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
- def get_taskhash(self, fn, task, deps, dataCache):
- h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)
+ def get_taskhash(self, tid, deps, dataCache):
+ if tid in self.lockedhashes:
+ if self.lockedhashes[tid]:
+ return self.lockedhashes[tid]
+ else:
+ return super().get_taskhash(tid, deps, dataCache)
+
+ h = super().get_taskhash(tid, deps, dataCache)
+
+ (mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
recipename = dataCache.pkg_fn[fn]
self.lockedpnmap[fn] = recipename
@@ -147,90 +160,105 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
unlocked = True
else:
def recipename_from_dep(dep):
- # The dep entry will look something like
- # /path/path/recipename.bb.task, virtual:native:/p/foo.bb.task,
- # ...
- fn = dep.rsplit('.', 1)[0]
+ fn = bb.runqueue.fn_from_tid(dep)
return dataCache.pkg_fn[fn]
# If any unlocked recipe is in the direct dependencies then the
# current recipe should be unlocked as well.
- depnames = [ recipename_from_dep(x) for x in deps ]
+ depnames = [ recipename_from_dep(x) for x in deps if mc == bb.runqueue.mc_from_tid(x)]
if any(x in y for y in depnames for x in self.unlockedrecipes):
self.unlockedrecipes[recipename] = ''
unlocked = True
if not unlocked and recipename in self.lockedsigs:
if task in self.lockedsigs[recipename]:
- k = fn + "." + task
h_locked = self.lockedsigs[recipename][task][0]
var = self.lockedsigs[recipename][task][1]
- self.lockedhashes[k] = h_locked
- self.taskhash[k] = h_locked
+ self.lockedhashes[tid] = h_locked
+ unihash = super().get_unihash(tid)
+ self.taskhash[tid] = h_locked
#bb.warn("Using %s %s %s" % (recipename, task, h))
- if h != h_locked:
+ if h != h_locked and h_locked != unihash:
self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
% (recipename, task, h, h_locked, var))
return h_locked
+
+ self.lockedhashes[tid] = False
#bb.warn("%s %s %s" % (recipename, task, h))
return h
+ def get_unihash(self, tid):
+ if tid in self.lockedhashes and self.lockedhashes[tid]:
+ return self.lockedhashes[tid]
+ return super().get_unihash(tid)
+
def dump_sigtask(self, fn, task, stampbase, runtime):
- k = fn + "." + task
- if k in self.lockedhashes:
+ tid = fn + ":" + task
+ if tid in self.lockedhashes and self.lockedhashes[tid]:
return
super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)
def dump_lockedsigs(self, sigfile, taskfilter=None):
types = {}
- for k in self.runtaskdeps:
+ for tid in self.runtaskdeps:
if taskfilter:
- if not k in taskfilter:
+ if not tid in taskfilter:
continue
- fn = k.rsplit(".",1)[0]
+ fn = bb.runqueue.fn_from_tid(tid)
t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
t = 't-' + t.replace('_', '-')
if t not in types:
types[t] = []
- types[t].append(k)
+ types[t].append(tid)
with open(sigfile, "w") as f:
- for t in types:
+ l = sorted(types)
+ for t in l:
f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
types[t].sort()
- sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
- for k in sortedk:
- fn = k.rsplit(".",1)[0]
- task = k.rsplit(".",1)[1]
- if k not in self.taskhash:
+ sortedtid = sorted(types[t], key=lambda tid: self.lockedpnmap[bb.runqueue.fn_from_tid(tid)])
+ for tid in sortedtid:
+ (_, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
+ if tid not in self.taskhash:
continue
- f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
+ f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n")
f.write(' "\n')
- f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(list(types.keys()))))
+ f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l)))
- def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
+ def dump_siglist(self, sigfile):
+ with open(sigfile, "w") as f:
+ tasks = []
+ for taskitem in self.taskhash:
+ (fn, task) = taskitem.rsplit(":", 1)
+ pn = self.lockedpnmap[fn]
+ tasks.append((pn, task, fn, self.taskhash[taskitem]))
+ for (pn, task, fn, taskhash) in sorted(tasks):
+ f.write('%s:%s %s %s\n' % (pn, task, fn, taskhash))
+
+ def checkhashes(self, sq_data, missed, found, d):
warn_msgs = []
error_msgs = []
sstate_missing_msgs = []
- for task in range(len(sq_fn)):
- if task not in ret:
+ for tid in sq_data['hash']:
+ if tid not in found:
for pn in self.lockedsigs:
- if sq_hash[task] in iter(self.lockedsigs[pn].values()):
- if sq_task[task] == 'do_shared_workdir':
+ taskname = bb.runqueue.taskname_from_tid(tid)
+ if sq_data['hash'][tid] in iter(self.lockedsigs[pn].values()):
+ if taskname == 'do_shared_workdir':
continue
sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
- % (pn, sq_task[task], sq_hash[task]))
+ % (pn, taskname, sq_data['hash'][tid]))
- checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK", True)
+ checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
if checklevel == 'warn':
warn_msgs += self.mismatch_msgs
elif checklevel == 'error':
error_msgs += self.mismatch_msgs
- checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK", True)
+ checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK")
if checklevel == 'warn':
warn_msgs += sstate_missing_msgs
elif checklevel == 'error':
@@ -241,10 +269,25 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
if error_msgs:
bb.fatal("\n".join(error_msgs))
+class SignatureGeneratorOEBasicHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEBasicHash"
+
+class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorUniHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEEquivHash"
+
+ def init_rundepcheck(self, data):
+ super().init_rundepcheck(data)
+ self.server = data.getVar('BB_HASHSERVE')
+ if not self.server:
+ bb.fatal("OEEquivHash requires BB_HASHSERVE to be set")
+ self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
+ if not self.method:
+ bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
# Insert these classes into siggen's namespace so it can see and select them
bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
+bb.siggen.SignatureGeneratorOEEquivHash = SignatureGeneratorOEEquivHash
def find_siginfo(pn, taskname, taskhashlist, d):
@@ -253,20 +296,24 @@ def find_siginfo(pn, taskname, taskhashlist, d):
import fnmatch
import glob
- if taskhashlist:
- hashfiles = {}
-
if not taskname:
# We have to derive pn and taskname
key = pn
- splitit = key.split('.bb.')
+ splitit = key.split('.bb:')
taskname = splitit[1]
pn = os.path.basename(splitit[0]).split('_')[0]
if key.startswith('virtual:native:'):
pn = pn + '-native'
+ hashfiles = {}
filedates = {}
+ def get_hashval(siginfo):
+ if siginfo.endswith('.siginfo'):
+ return siginfo.rpartition(':')[2].partition('_')[0]
+ else:
+ return siginfo.rpartition('.')[2]
+
# First search in stamps dir
localdata = d.createCopy()
localdata.setVar('MULTIMACH_TARGET_SYS', '*')
@@ -274,7 +321,7 @@ def find_siginfo(pn, taskname, taskhashlist, d):
localdata.setVar('PV', '*')
localdata.setVar('PR', '*')
localdata.setVar('EXTENDPE', '')
- stamp = localdata.getVar('STAMP', True)
+ stamp = localdata.getVar('STAMP')
if pn.startswith("gcc-source"):
# gcc-source shared workdir is a special case :(
stamp = localdata.expand("${STAMPS_DIR}/work-shared/gcc-${PV}-${PR}")
@@ -296,10 +343,12 @@ def find_siginfo(pn, taskname, taskhashlist, d):
filedates[fullpath] = os.stat(fullpath).st_mtime
except OSError:
continue
+ hashval = get_hashval(fullpath)
+ hashfiles[hashval] = fullpath
if not taskhashlist or (len(filedates) < 2 and not foundall):
# That didn't work, look in sstate-cache
- hashes = taskhashlist or ['*']
+ hashes = taskhashlist or ['?' * 64]
localdata = bb.data.createCopy(d)
for hashval in hashes:
localdata.setVar('PACKAGE_ARCH', '*')
@@ -309,30 +358,25 @@ def find_siginfo(pn, taskname, taskhashlist, d):
localdata.setVar('PV', '*')
localdata.setVar('PR', '*')
localdata.setVar('BB_TASKHASH', hashval)
- swspec = localdata.getVar('SSTATE_SWSPEC', True)
+ swspec = localdata.getVar('SSTATE_SWSPEC')
if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
sstatename = taskname[3:]
- filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename)
+ filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG'), sstatename)
- if hashval != '*':
- sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2])
- else:
- sstatedir = d.getVar('SSTATE_DIR', True)
-
- for root, dirs, files in os.walk(sstatedir):
- for fn in files:
- fullpath = os.path.join(root, fn)
- if fnmatch.fnmatch(fullpath, filespec):
- if taskhashlist:
- hashfiles[hashval] = fullpath
- else:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except:
- continue
+ matchedfiles = glob.glob(filespec)
+ for fullpath in matchedfiles:
+ actual_hashval = get_hashval(fullpath)
+ if actual_hashval in hashfiles:
+ continue
+ hashfiles[hashval] = fullpath
+ if not taskhashlist:
+ try:
+ filedates[fullpath] = os.stat(fullpath).st_mtime
+ except:
+ continue
if taskhashlist:
return hashfiles
@@ -348,7 +392,176 @@ def sstate_get_manifest_filename(task, d):
Also returns the datastore that can be used to query related variables.
"""
d2 = d.createCopy()
- extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info', True)
+ extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info')
if extrainf:
d2.setVar("SSTATE_MANMACH", extrainf)
return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
+
+def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
+ d2 = d
+ variant = ''
+ curr_variant = ''
+ if d.getVar("BBEXTENDCURR") == "multilib":
+ curr_variant = d.getVar("BBEXTENDVARIANT")
+ if "virtclass-multilib" not in d.getVar("OVERRIDES"):
+ curr_variant = "invalid"
+ if taskdata2.startswith("virtual:multilib"):
+ variant = taskdata2.split(":")[2]
+ if curr_variant != variant:
+ if variant not in multilibcache:
+ multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
+ d2 = multilibcache[variant]
+
+ if taskdata.endswith("-native"):
+ pkgarchs = ["${BUILD_ARCH}"]
+ elif taskdata.startswith("nativesdk-"):
+ pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
+ elif "-cross-canadian" in taskdata:
+ pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
+ elif "-cross-" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}_${TARGET_ARCH}"]
+ elif "-crosssdk" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
+ else:
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+ pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
+
+ for pkgarch in pkgarchs:
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
+ if os.path.exists(manifest):
+ return manifest, d2
+ bb.warn("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
+ return None, d2
+
+def OEOuthashBasic(path, sigfile, task, d):
+ """
+ Basic output hash function
+
+ Calculates the output hash of a task by hashing all output file metadata,
+ and file contents.
+ """
+ import hashlib
+ import stat
+ import pwd
+ import grp
+
+ def update_hash(s):
+ s = s.encode('utf-8')
+ h.update(s)
+ if sigfile:
+ sigfile.write(s)
+
+ h = hashlib.sha256()
+ prev_dir = os.getcwd()
+ include_owners = os.environ.get('PSEUDO_DISABLED') == '0'
+
+ try:
+ os.chdir(path)
+
+ update_hash("OEOuthashBasic\n")
+
+ # It is currently only useful to get equivalent hashes for things that
+ # can be restored from sstate. Since the sstate object is named using
+ # SSTATE_PKGSPEC and the task name, those should be included in the
+ # output hash calculation.
+ update_hash("SSTATE_PKGSPEC=%s\n" % d.getVar('SSTATE_PKGSPEC'))
+ update_hash("task=%s\n" % task)
+
+ for root, dirs, files in os.walk('.', topdown=True):
+ # Sort directories to ensure consistent ordering when recursing
+ dirs.sort()
+ files.sort()
+
+ def process(path):
+ s = os.lstat(path)
+
+ if stat.S_ISDIR(s.st_mode):
+ update_hash('d')
+ elif stat.S_ISCHR(s.st_mode):
+ update_hash('c')
+ elif stat.S_ISBLK(s.st_mode):
+ update_hash('b')
+ elif stat.S_ISSOCK(s.st_mode):
+ update_hash('s')
+ elif stat.S_ISLNK(s.st_mode):
+ update_hash('l')
+ elif stat.S_ISFIFO(s.st_mode):
+ update_hash('p')
+ else:
+ update_hash('-')
+
+ def add_perm(mask, on, off='-'):
+ if mask & s.st_mode:
+ update_hash(on)
+ else:
+ update_hash(off)
+
+ add_perm(stat.S_IRUSR, 'r')
+ add_perm(stat.S_IWUSR, 'w')
+ if stat.S_ISUID & s.st_mode:
+ add_perm(stat.S_IXUSR, 's', 'S')
+ else:
+ add_perm(stat.S_IXUSR, 'x')
+
+ add_perm(stat.S_IRGRP, 'r')
+ add_perm(stat.S_IWGRP, 'w')
+ if stat.S_ISGID & s.st_mode:
+ add_perm(stat.S_IXGRP, 's', 'S')
+ else:
+ add_perm(stat.S_IXGRP, 'x')
+
+ add_perm(stat.S_IROTH, 'r')
+ add_perm(stat.S_IWOTH, 'w')
+ if stat.S_ISVTX & s.st_mode:
+ update_hash('t')
+ else:
+ add_perm(stat.S_IXOTH, 'x')
+
+ if include_owners:
+ update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
+ update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
+
+ update_hash(" ")
+ if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
+ update_hash("%9s" % ("%d.%d" % (os.major(s.st_rdev), os.minor(s.st_rdev))))
+ else:
+ update_hash(" " * 9)
+
+ update_hash(" ")
+ if stat.S_ISREG(s.st_mode):
+ update_hash("%10d" % s.st_size)
+ else:
+ update_hash(" " * 10)
+
+ update_hash(" ")
+ fh = hashlib.sha256()
+ if stat.S_ISREG(s.st_mode):
+ # Hash file contents
+ with open(path, 'rb') as d:
+ for chunk in iter(lambda: d.read(4096), b""):
+ fh.update(chunk)
+ update_hash(fh.hexdigest())
+ else:
+ update_hash(" " * len(fh.hexdigest()))
+
+ update_hash(" %s" % path)
+
+ if stat.S_ISLNK(s.st_mode):
+ update_hash(" -> %s" % os.readlink(path))
+
+ update_hash("\n")
+
+ # Process this directory and all its child files
+ process(root)
+ for f in files:
+ if f == 'fixmepath':
+ continue
+ process(os.path.join(root, f))
+ finally:
+ os.chdir(prev_dir)
+
+ return h.hexdigest()
+
+
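To make the hashing scheme in OEOuthashBasic easier to see in isolation, here is a distilled, standalone sketch of the same idea — walk a tree in sorted order and feed one metadata record plus a per-file content digest into a single running SHA-256. It deliberately drops the BitBake datastore, pseudo ownership handling, and the full ls-style permission encoding, so it illustrates the technique rather than reproducing the function:

    import hashlib
    import os
    import stat

    def outhash_sketch(path):
        """Deterministically hash a directory tree's metadata and contents."""
        h = hashlib.sha256()

        def update(s):
            h.update(s.encode('utf-8'))

        for root, dirs, files in os.walk(path, topdown=True):
            # Sorting makes the walk order (and hence the hash) reproducible
            dirs.sort()
            files.sort()
            for f in files:
                p = os.path.join(root, f)
                s = os.lstat(p)
                # One record per file: mode, size, relative path ...
                update('%o %d %s' % (s.st_mode, s.st_size, os.path.relpath(p, path)))
                if stat.S_ISLNK(s.st_mode):
                    update(' -> %s' % os.readlink(p))
                elif stat.S_ISREG(s.st_mode):
                    # ... plus a digest of regular file contents, read in chunks
                    fh = hashlib.sha256()
                    with open(p, 'rb') as fd:
                        for chunk in iter(lambda: fd.read(4096), b''):
                            fh.update(chunk)
                    update(' %s' % fh.hexdigest())
                update('\n')
        return h.hexdigest()

    print(outhash_sketch('.'))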
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index 3901ad3f26..a1daa2bed6 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import logging
import oe.classutils
import shlex
@@ -11,7 +14,8 @@ class UnsupportedTerminal(Exception):
pass
class NoSupportedTerminals(Exception):
- pass
+ def __init__(self, terms):
+ self.terms = terms
class Registry(oe.classutils.ClassRegistry):
@@ -38,7 +42,7 @@ class Terminal(Popen, metaclass=Registry):
raise
def format_command(self, sh_cmd, title):
- fmt = {'title': title or 'Terminal', 'command': sh_cmd}
+ fmt = {'title': title or 'Terminal', 'command': sh_cmd, 'cwd': os.getcwd() }
if isinstance(self.command, str):
return shlex.split(self.command.format(**fmt))
else:
@@ -51,7 +55,7 @@ class XTerminal(Terminal):
raise UnsupportedTerminal(self.name)
class Gnome(XTerminal):
- command = 'gnome-terminal -t "{title}" -x {command}'
+ command = 'gnome-terminal -t "{title}" -- {command}'
priority = 2
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -61,31 +65,10 @@ class Gnome(XTerminal):
# Once fixed on the gnome-terminal project, this should be removed.
if os.getenv('LC_ALL'): os.putenv('LC_ALL','')
- # We need to know when the command completes but gnome-terminal gives us no way
- # to do this. We therefore write the pid to a file using a "phonehome" wrapper
- # script, then monitor the pid until it exits. Thanks gnome!
- import tempfile
- pidfile = tempfile.NamedTemporaryFile(delete = False).name
- try:
- sh_cmd = "oe-gnome-terminal-phonehome " + pidfile + " " + sh_cmd
- XTerminal.__init__(self, sh_cmd, title, env, d)
- while os.stat(pidfile).st_size <= 0:
- continue
- with open(pidfile, "r") as f:
- pid = int(f.readline())
- finally:
- os.unlink(pidfile)
-
- import time
- while True:
- try:
- os.kill(pid, 0)
- time.sleep(0.1)
- except OSError:
- return
+ XTerminal.__init__(self, sh_cmd, title, env, d)
class Mate(XTerminal):
- command = 'mate-terminal -t "{title}" -x {command}'
+ command = 'mate-terminal --disable-factory -t "{title}" -x {command}'
priority = 2
class Xfce(XTerminal):
@@ -97,7 +80,7 @@ class Terminology(XTerminal):
priority = 2
class Konsole(XTerminal):
- command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
+ command = 'konsole --separate --workdir . -p tabtitle="{title}" -e {command}'
priority = 2
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -106,6 +89,9 @@ class Konsole(XTerminal):
if vernum and LooseVersion(vernum) < '2.0.0':
# Konsole from KDE 3.x
self.command = 'konsole -T "{title}" -e {command}'
+ elif vernum and LooseVersion(vernum) < '16.08.1':
+ # Konsole pre 16.08.1 has --nofork
+ self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
XTerminal.__init__(self, sh_cmd, title, env, d)
class XTerm(XTerminal):
@@ -129,12 +115,12 @@ class Screen(Terminal):
bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
0.5, 10), d)
else:
- logger.warn(msg)
+ logger.warning(msg)
class TmuxRunning(Terminal):
"""Open a new pane in the current running tmux window"""
name = 'tmux-running'
- command = 'tmux split-window "{command}"'
+ command = 'tmux split-window -c "{cwd}" "{command}"'
priority = 2.75
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -152,7 +138,7 @@ class TmuxRunning(Terminal):
class TmuxNewWindow(Terminal):
"""Open a new window in the current running tmux session"""
name = 'tmux-new-window'
- command = 'tmux new-window -n "{title}" "{command}"'
+ command = 'tmux new-window -c "{cwd}" -n "{title}" "{command}"'
priority = 2.70
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -166,7 +152,7 @@ class TmuxNewWindow(Terminal):
class Tmux(Terminal):
"""Start a new tmux session and window"""
- command = 'tmux new -d -s devshell -n devshell "{command}"'
+ command = 'tmux new -c "{cwd}" -d -s devshell -n devshell "{command}"'
priority = 0.75
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -177,7 +163,7 @@ class Tmux(Terminal):
# devshells, if it's already there, add a new window to it.
window_name = 'devshell-%i' % os.getpid()
- self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
+ self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'.format(window_name)
Terminal.__init__(self, sh_cmd, title, env, d)
attach_cmd = 'tmux att -t {0}'.format(window_name)
@@ -185,19 +171,19 @@ class Tmux(Terminal):
if d:
bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
else:
- logger.warn(msg)
+ logger.warning(msg)
class Custom(Terminal):
command = 'false' # This is a placeholder
priority = 3
def __init__(self, sh_cmd, title=None, env=None, d=None):
- self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True)
+ self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD')
if self.command:
if not '{command}' in self.command:
self.command += ' {command}'
Terminal.__init__(self, sh_cmd, title, env, d)
- logger.warn('Custom terminal was started.')
+ logger.warning('Custom terminal was started.')
else:
logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
@@ -206,6 +192,14 @@ class Custom(Terminal):
def prioritized():
return Registry.prioritized()
+def get_cmd_list():
+ terms = Registry.prioritized()
+ cmds = []
+ for term in terms:
+ if term.command:
+ cmds.append(term.command)
+ return cmds
+
def spawn_preferred(sh_cmd, title=None, env=None, d=None):
"""Spawn the first supported terminal, by priority"""
for terminal in prioritized():
@@ -215,7 +209,7 @@ def spawn_preferred(sh_cmd, title=None, env=None, d=None):
except UnsupportedTerminal:
continue
else:
- raise NoSupportedTerminals()
+ raise NoSupportedTerminals(get_cmd_list())
def spawn(name, sh_cmd, title=None, env=None, d=None):
"""Spawn the specified terminal, by name"""
@@ -225,10 +219,36 @@ def spawn(name, sh_cmd, title=None, env=None, d=None):
except KeyError:
raise UnsupportedTerminal(name)
- pipe = terminal(sh_cmd, title, env, d)
- output = pipe.communicate()[0]
- if pipe.returncode != 0:
- raise ExecutionError(sh_cmd, pipe.returncode, output)
+ # We need to know when the command completes but some terminals (at least
+ # gnome and tmux) give us no way to do this. We therefore write the pid
+ # to a file using a "phonehome" wrapper script, then monitor the pid
+ # until it exits.
+ import tempfile
+ import time
+ pidfile = tempfile.NamedTemporaryFile(delete = False).name
+ try:
+ sh_cmd = bb.utils.which(os.getenv('PATH'), "oe-gnome-terminal-phonehome") + " " + pidfile + " " + sh_cmd
+ pipe = terminal(sh_cmd, title, env, d)
+ output = pipe.communicate()[0]
+ if output:
+ output = output.decode("utf-8")
+ if pipe.returncode != 0:
+ raise ExecutionError(sh_cmd, pipe.returncode, output)
+
+ while os.stat(pidfile).st_size <= 0:
+ time.sleep(0.01)
+ continue
+ with open(pidfile, "r") as f:
+ pid = int(f.readline())
+ finally:
+ os.unlink(pidfile)
+
+ while True:
+ try:
+ os.kill(pid, 0)
+ time.sleep(0.1)
+ except OSError:
+ return
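Distilled from the spawn() changes above: the "phonehome" wrapper records the real command's pid in a temp file, and the caller polls that pid with signal 0 until the process exits. A standalone sketch of just the monitoring half, assuming the wrapper has already written the pidfile:

    import os
    import time

    def wait_for_pid_exit(pidfile):
        """Block until the pid recorded in pidfile has exited."""
        # The wrapper may not have written the pid yet; wait for the file to fill
        while os.stat(pidfile).st_size <= 0:
            time.sleep(0.01)
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
        while True:
            try:
                os.kill(pid, 0)   # signal 0 only probes; raises OSError once gone
                time.sleep(0.1)
            except OSError:
                return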
def check_tmux_pane_size(tmux):
import subprocess as sub
@@ -275,8 +295,12 @@ def check_terminal_version(terminalName):
vernum = ver.split(' ')[-1]
if ver.startswith('GNOME Terminal'):
vernum = ver.split(' ')[-1]
+ if ver.startswith('MATE Terminal'):
+ vernum = ver.split(' ')[-1]
if ver.startswith('tmux'):
vernum = ver.split()[-1]
+ if ver.startswith('tmux next-'):
+ vernum = ver.split()[-1][5:]
return vernum
def distro_name():
diff --git a/meta/lib/oe/tests/test_utils.py b/meta/lib/oe/tests/test_utils.py
deleted file mode 100644
index 5d9ac52e7d..0000000000
--- a/meta/lib/oe/tests/test_utils.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import unittest
-from oe.utils import packages_filter_out_system
-
-class TestPackagesFilterOutSystem(unittest.TestCase):
- def test_filter(self):
- """
- Test that oe.utils.packages_filter_out_system works.
- """
- try:
- import bb
- except ImportError:
- self.skipTest("Cannot import bb")
-
- d = bb.data_smart.DataSmart()
- d.setVar("PN", "foo")
-
- d.setVar("PACKAGES", "foo foo-doc foo-dev")
- pkgs = packages_filter_out_system(d)
- self.assertEqual(pkgs, [])
-
- d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
- pkgs = packages_filter_out_system(d)
- self.assertEqual(pkgs, ["foo-data"])
-
- d.setVar("PACKAGES", "foo foo-locale-en-gb")
- pkgs = packages_filter_out_system(d)
- self.assertEqual(pkgs, [])
-
- d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
- pkgs = packages_filter_out_system(d)
- self.assertEqual(pkgs, ["foo-data"])
-
-
-class TestTrimVersion(unittest.TestCase):
- def test_version_exception(self):
- with self.assertRaises(TypeError):
- trim_version(None, 2)
- with self.assertRaises(TypeError):
- trim_version((1, 2, 3), 2)
-
- def test_num_exception(self):
- with self.assertRaises(ValueError):
- trim_version("1.2.3", 0)
- with self.assertRaises(ValueError):
- trim_version("1.2.3", -1)
-
- def test_valid(self):
- self.assertEqual(trim_version("1.2.3", 1), "1")
- self.assertEqual(trim_version("1.2.3", 2), "1.2")
- self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
- self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py
index 4ae58acfac..bbbabafbf6 100644
--- a/meta/lib/oe/types.py
+++ b/meta/lib/oe/types.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import errno
import re
import os
@@ -103,8 +107,13 @@ def boolean(value):
"""OpenEmbedded 'boolean' type
Valid values for true: 'yes', 'y', 'true', 't', '1'
- Valid values for false: 'no', 'n', 'false', 'f', '0'
+ Valid values for false: 'no', 'n', 'false', 'f', '0', None
"""
+ if value is None:
+ return False
+
+ if isinstance(value, bool):
+ return value
if not isinstance(value, str):
raise TypeError("boolean accepts a string, not '%s'" % type(value))
@@ -145,9 +154,33 @@ def path(value, relativeto='', normalize='true', mustexist='false'):
if boolean(mustexist):
try:
- open(value, 'r')
+ with open(value, 'r'):
+ pass
except IOError as exc:
if exc.errno == errno.ENOENT:
raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
return value
+
+def is_x86(arch):
+ """
+ Check whether arch is x86 or x86_64
+ """
+ if arch.startswith('x86_') or re.match('i.*86', arch):
+ return True
+ else:
+ return False
+
+def qemu_use_kvm(kvm, target_arch):
+ """
+ Enable kvm if target_arch == build_arch or both of them are x86 archs.
+ """
+
+ use_kvm = False
+ if kvm and boolean(kvm):
+ build_arch = os.uname()[4]
+ if is_x86(build_arch) and is_x86(target_arch):
+ use_kvm = True
+ elif build_arch == target_arch:
+ use_kvm = True
+ return use_kvm
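A usage sketch for the new helpers; this assumes the module above is importable as oe.types and that the build host (os.uname()[4]) reports x86_64:

    from oe.types import qemu_use_kvm

    print(qemu_use_kvm('True', 'x86_64'))   # True  - host and target both x86
    print(qemu_use_kvm('True', 'i686'))     # True  - i686 matches the i.*86 pattern
    print(qemu_use_kvm('True', 'aarch64'))  # False - cross arch, KVM disabled
    print(qemu_use_kvm(None, 'x86_64'))     # False - KVM not requested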
diff --git a/meta/lib/oe/useradd.py b/meta/lib/oe/useradd.py
new file mode 100644
index 0000000000..8fc77568ff
--- /dev/null
+++ b/meta/lib/oe/useradd.py
@@ -0,0 +1,71 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import argparse
+import re
+
+class myArgumentParser(argparse.ArgumentParser):
+ def _print_message(self, message, file=None):
+ bb.warn("%s - %s: %s" % (d.getVar('PN'), pkg, message))
+
+ # This should never be called...
+ def exit(self, status=0, message=None):
+ message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN'), pkg))
+ error(message)
+
+ def error(self, message):
+ bb.fatal(message)
+
+def split_commands(params):
+ params = re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
+ # Remove any empty items
+ return [x for x in params if x]
+
+def split_args(params):
+ params = re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
+ # Remove any empty items
+ return [x for x in params if x]
+
+def build_useradd_parser():
+ # The following comes from --help on useradd from shadow
+ parser = myArgumentParser(prog='useradd')
+ parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
+ parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
+ parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
+ parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
+ parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
+ parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
+ parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
+ parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
+ parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
+ parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
+ parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
+ parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
+ parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
+ parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
+ parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
+ parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
+ parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new account")
+ parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
+ parser.add_argument("-r", "--system", help="create a system account", action="store_true")
+ parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
+ parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
+ parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
+ parser.add_argument("LOGIN", help="Login name of the new user")
+
+ return parser
+
+def build_groupadd_parser():
+ # The following comes from --help on groupadd from shadow
+ parser = myArgumentParser(prog='groupadd')
+ parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
+ parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
+ parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
+ parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
+ parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
+ parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new group")
+ parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
+ parser.add_argument("-r", "--system", help="create a system account", action="store_true")
+ parser.add_argument("GROUP", help="Group name of the new group")
+
+ return parser
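A usage sketch for these helpers. Note that outside BitBake, myArgumentParser's error paths reference bb and a datastore that useradd.bbclass injects at execution time, so this example assumes a successful parse; the command line itself is hypothetical:

    from oe.useradd import build_useradd_parser, split_args

    parser = build_useradd_parser()
    # split_args() splits on whitespace but keeps quoted strings intact
    args = parser.parse_args(split_args('-r -s /sbin/nologin -u 999 -c "Daemon user" daemon'))
    print(args.LOGIN, args.uid, args.system, args.comment)
    # daemon 999 True "Daemon user"   (the quotes survive, as split_args does not strip them)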
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index d6545b197d..652b2be145 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -1,9 +1,10 @@
-try:
- # Python 2
- import commands as cmdstatus
-except ImportError:
- # Python 3
- import subprocess as cmdstatus
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import subprocess
+import multiprocessing
+import traceback
def read_file(filename):
try:
@@ -23,27 +24,34 @@ def ifelse(condition, iftrue = True, iffalse = False):
return iffalse
def conditional(variable, checkvalue, truevalue, falsevalue, d):
- if d.getVar(variable, True) == checkvalue:
+ if d.getVar(variable) == checkvalue:
return truevalue
else:
return falsevalue
+def vartrue(var, iftrue, iffalse, d):
+ import oe.types
+ if oe.types.boolean(d.getVar(var)):
+ return iftrue
+ else:
+ return iffalse
+
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- if float(d.getVar(variable, True)) <= float(checkvalue):
+ if float(d.getVar(variable)) <= float(checkvalue):
return truevalue
else:
return falsevalue
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- result = bb.utils.vercmp_string(d.getVar(variable,True), checkvalue)
+ result = bb.utils.vercmp_string(d.getVar(variable), checkvalue)
if result <= 0:
return truevalue
else:
return falsevalue
def both_contain(variable1, variable2, checkvalue, d):
- val1 = d.getVar(variable1, True)
- val2 = d.getVar(variable2, True)
+ val1 = d.getVar(variable1)
+ val2 = d.getVar(variable2)
val1 = set(val1.split())
val2 = set(val2.split())
if isinstance(checkvalue, str):
@@ -66,20 +74,20 @@ def set_intersect(variable1, variable2, d):
s3 = set_intersect(s1, s2)
=> s3 = "b c"
"""
- val1 = set(d.getVar(variable1, True).split())
- val2 = set(d.getVar(variable2, True).split())
+ val1 = set(d.getVar(variable1).split())
+ val2 = set(d.getVar(variable2).split())
return " ".join(val1 & val2)
def prune_suffix(var, suffixes, d):
# See if var ends with any of the suffixes listed and
# remove it if found
for suffix in suffixes:
- if var.endswith(suffix):
- var = var.replace(suffix, "")
+ if suffix and var.endswith(suffix):
+ var = var[:-len(suffix)]
- prefix = d.getVar("MLPREFIX", True)
+ prefix = d.getVar("MLPREFIX")
if prefix and var.startswith(prefix):
- var = var.replace(prefix, "")
+ var = var[len(prefix):]
return var
@@ -91,16 +99,9 @@ def str_filter_out(f, str, d):
from re import match
return " ".join([x for x in str.split() if not match(f, x, 0)])
-def param_bool(cfg, field, dflt = None):
- """Lookup <field> in <cfg> map and convert it to a boolean; take
- <dflt> when this <field> does not exist"""
- value = cfg.get(field, dflt)
- strvalue = str(value).lower()
- if strvalue in ('yes', 'y', 'true', 't', '1'):
- return True
- elif strvalue in ('no', 'n', 'false', 'f', '0'):
- return False
- raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
+def build_depends_string(depends, task):
+ """Append a taskname to a string of dependencies as used by the [depends] flag"""
+ return " ".join(dep + ":" + task for dep in depends.split())
def inherits(d, *classes):
"""Return True if the metadata inherits any of the specified classes"""
@@ -115,9 +116,9 @@ def features_backfill(var,d):
# disturbing distributions that have already set DISTRO_FEATURES.
# Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
# add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
- features = (d.getVar(var, True) or "").split()
- backfill = (d.getVar(var+"_BACKFILL", True) or "").split()
- considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split()
+ features = (d.getVar(var) or "").split()
+ backfill = (d.getVar(var+"_BACKFILL") or "").split()
+ considered = (d.getVar(var+"_BACKFILL_CONSIDERED") or "").split()
addfeatures = []
for feature in backfill:
@@ -127,24 +128,107 @@ def features_backfill(var,d):
if addfeatures:
d.appendVar(var, " " + " ".join(addfeatures))
+def all_distro_features(d, features, truevalue="1", falsevalue=""):
+ """
+ Returns truevalue if *all* given features are set in DISTRO_FEATURES,
+ else falsevalue. The features can be given as single string or anything
+ that can be turned into a set.
+
+ This is a shorter, more flexible version of
+ bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).
+
+ Without explicit true/false values it can be used directly where
+ Python expects a boolean:
+ if oe.utils.all_distro_features(d, "foo bar"):
+ bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")
+
+ With just a truevalue, it can be used to include files that are meant to be
+ used only when requested via DISTRO_FEATURES:
+ require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc")}
+ """
+ return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+
+def any_distro_features(d, features, truevalue="1", falsevalue=""):
+ """
+ Returns truevalue if at least *one* of the given features is set in DISTRO_FEATURES,
+ else falsevalue. The features can be given as single string or anything
+ that can be turned into a set.
+
+ This is a shorter, more flexible version of
+ bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).
+
+ Without explicit true/false values it can be used directly where
+ Python expects a boolean:
+ if not oe.utils.any_distro_features(d, "foo bar"):
+ bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")
+
+ With just a truevalue, it can be used to include files that are meant to be
+ used only when requested via DISTRO_FEATURES:
+ require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc")}
+
+ """
+ return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+
+def parallel_make(d):
+ """
+ Return the integer value for the number of parallel threads to use when
+ building, scraped out of PARALLEL_MAKE. If no parallelization option is
+ found, returns None
+
+ e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
+ """
+ pm = (d.getVar('PARALLEL_MAKE') or '').split()
+ # look for '-j' and throw other options (e.g. '-l') away
+ while pm:
+ opt = pm.pop(0)
+ if opt == '-j':
+ v = pm.pop(0)
+ elif opt.startswith('-j'):
+ v = opt[2:].strip()
+ else:
+ continue
+
+ return int(v)
+
+ return None
+
+def parallel_make_argument(d, fmt, limit=None):
+ """
+ Helper utility to construct a parallel make argument from the number of
+ parallel threads specified in PARALLEL_MAKE.
+
+ Returns the input format string `fmt` where a single '%d' will be expanded
+ with the number of parallel threads to use. If `limit` is specified, the
+ number of parallel threads will be no larger than it. If no parallelization
+ option is found in PARALLEL_MAKE, returns an empty string
+
+ e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
+ "-n 10"
+ """
+ v = parallel_make(d)
+ if v:
+ if limit:
+ v = min(limit, v)
+ return fmt % v
+ return ''
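Illustrating the pair with a stub datastore — a stand-in for the BitBake one, used here only so the sketch runs standalone (it assumes oe.utils is importable):

    from oe.utils import parallel_make, parallel_make_argument

    class StubData:
        """Minimal stand-in for the BitBake datastore."""
        def getVar(self, name):
            return '-j 10 -l 40' if name == 'PARALLEL_MAKE' else None

    d = StubData()
    print(parallel_make(d))                             # 10 ('-l 40' is ignored)
    print(parallel_make_argument(d, '-n %d'))           # -n 10
    print(parallel_make_argument(d, '-n %d', limit=4))  # -n 4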
def packages_filter_out_system(d):
"""
Return a list of packages from PACKAGES with the "system" packages such as
PN-dbg PN-doc PN-locale-en-gb removed.
"""
- pn = d.getVar('PN', True)
- blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')]
+ pn = d.getVar('PN')
+ blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
localepkg = pn + "-locale-"
pkgs = []
- for pkg in d.getVar('PACKAGES', True).split():
+ for pkg in d.getVar('PACKAGES').split():
if pkg not in blacklist and localepkg not in pkg:
pkgs.append(pkg)
return pkgs
def getstatusoutput(cmd):
- return cmdstatus.getstatusoutput(cmd)
+ return subprocess.getstatusoutput(cmd)
def trim_version(version, num_parts=2):
@@ -175,38 +259,87 @@ def execute_pre_post_process(d, cmds):
bb.note("Executing %s ..." % cmd)
bb.build.exec_func(cmd, d)
-def multiprocess_exec(commands, function):
- import signal
- import multiprocessing
-
- if not commands:
- return []
+# For each item in items, call the function 'target' with item as the first
+# argument, extraargs as the other arguments and handle any exceptions in the
+# parent process
+def multiprocess_launch(target, items, d, extraargs=None):
- def init_worker():
- signal.signal(signal.SIGINT, signal.SIG_IGN)
+ class ProcessLaunch(multiprocessing.Process):
+ def __init__(self, *args, **kwargs):
+ multiprocessing.Process.__init__(self, *args, **kwargs)
+ self._pconn, self._cconn = multiprocessing.Pipe()
+ self._exception = None
+ self._result = None
- nproc = min(multiprocessing.cpu_count(), len(commands))
- pool = bb.utils.multiprocessingpool(nproc, init_worker)
- imap = pool.imap(function, commands)
-
- try:
- res = list(imap)
- pool.close()
- pool.join()
- results = []
- for result in res:
- if result is not None:
- results.append(result)
- return results
-
- except KeyboardInterrupt:
- pool.terminate()
- pool.join()
- raise
+ def run(self):
+ try:
+ ret = self._target(*self._args, **self._kwargs)
+ self._cconn.send((None, ret))
+ except Exception as e:
+ tb = traceback.format_exc()
+ self._cconn.send((e, tb))
+
+ def update(self):
+ if self._pconn.poll():
+ (e, tb) = self._pconn.recv()
+ if e is not None:
+ self._exception = (e, tb)
+ else:
+ self._result = tb
+
+ @property
+ def exception(self):
+ self.update()
+ return self._exception
+
+ @property
+ def result(self):
+ self.update()
+ return self._result
+
+ max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
+ launched = []
+ errors = []
+ results = []
+ items = list(items)
+ while (items and not errors) or launched:
+ if not errors and items and len(launched) < max_process:
+ args = (items.pop(),)
+ if extraargs is not None:
+ args = args + extraargs
+ p = ProcessLaunch(target=target, args=args)
+ p.start()
+ launched.append(p)
+ for q in launched:
+ # Have to manually call update() to avoid deadlocks. The pipe can be full and
+ # transfer stalled until we try to read the results object but the subprocess won't exit
+ # as it still has data to write (https://bugs.python.org/issue8426)
+ q.update()
+ # The finished processes are joined when calling is_alive()
+ if not q.is_alive():
+ if q.exception:
+ errors.append(q.exception)
+ if q.result:
+ results.append(q.result)
+ launched.remove(q)
+ # Paranoia doesn't hurt
+ for p in launched:
+ p.join()
+ if errors:
+ msg = ""
+ for (e, tb) in errors:
+ if isinstance(e, subprocess.CalledProcessError) and e.output:
+ msg = msg + str(e) + "\n"
+ msg = msg + "Subprocess output:"
+ msg = msg + e.output.decode("utf-8", errors="ignore")
+ else:
+ msg = msg + str(e) + ": " + str(tb) + "\n"
+ bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
+ return results
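The core trick in multiprocess_launch() is shipping each child's exception (or result) back over a multiprocessing.Pipe so failures surface in the parent. A trimmed, standalone sketch of that pattern — not the full scheduler above:

    import multiprocessing
    import traceback

    class PipedProcess(multiprocessing.Process):
        """Worker that sends (exception, payload) back to the parent."""
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.pconn, self._cconn = multiprocessing.Pipe()

        def run(self):
            try:
                self._cconn.send((None, self._target(*self._args, **self._kwargs)))
            except Exception as e:
                self._cconn.send((e, traceback.format_exc()))

    def work(n):
        if n == 3:
            raise ValueError("item 3 is broken")
        return n * n

    if __name__ == '__main__':
        procs = [PipedProcess(target=work, args=(n,)) for n in range(5)]
        for p in procs:
            p.start()
        for p in procs:
            # Read before join: a full pipe can stall the child (see the
            # bugs.python.org/issue8426 note in the code above)
            exc, payload = p.pconn.recv()
            p.join()
            print('error:' if exc else 'result:', payload)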
def squashspaces(string):
import re
- return re.sub("\s+", " ", string).strip()
+ return re.sub(r"\s+", " ", string).strip()
def format_pkg_list(pkg_dict, ret_format=None):
output = []
@@ -228,7 +361,55 @@ def format_pkg_list(pkg_dict, ret_format=None):
for pkg in sorted(pkg_dict):
output.append(pkg)
- return '\n'.join(output)
+ output_str = '\n'.join(output)
+
+ if output_str:
+ # make sure last line is newline terminated
+ output_str += '\n'
+
+ return output_str
+
+def host_gcc_version(d, taskcontextonly=False):
+ import re, subprocess
+
+ if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
+ return
+
+ compiler = d.getVar("BUILD_CC")
+ # Get rid of ccache since it is not present when parsing.
+ if compiler.startswith('ccache '):
+ compiler = compiler[7:]
+ try:
+ env = os.environ.copy()
+ env["PATH"] = d.getVar("PATH")
+ output = subprocess.check_output("%s --version" % compiler, \
+ shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
+
+ match = re.match(r".* (\d\.\d)\.\d.*", output.split('\n')[0])
+ if not match:
+ bb.fatal("Can't get compiler version from %s --version output" % compiler)
+
+ version = match.group(1)
+ return "-%s" % version if version in ("4.8", "4.9") else ""
+
+
+def get_multilib_datastore(variant, d):
+ localdata = bb.data.createCopy(d)
+ if variant:
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", variant + "-")
+ else:
+ origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
+ if origdefault:
+ localdata.setVar("DEFAULTTUNE", origdefault)
+ overrides = localdata.getVar("OVERRIDES", False).split(":")
+ overrides = ":".join([x for x in overrides if not x.startswith("virtclass-multilib-")])
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", "")
+ return localdata
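+# A usage sketch for get_multilib_datastore. Iterating MULTILIB_VARIANTS this
+# way is illustrative; the variable comes from the multilib configuration:
+#
+#   for variant in (d.getVar("MULTILIB_VARIANTS") or "").split():
+#       localdata = get_multilib_datastore(variant, d)
+#       # ... query per-variant values from localdata ...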
#
# Python 2.7 doesn't have threaded pools (just multiprocessing)
@@ -302,10 +483,10 @@ def write_ld_so_conf(d):
bb.utils.remove(ldsoconf)
bb.utils.mkdirhier(os.path.dirname(ldsoconf))
with open(ldsoconf, "w") as f:
- f.write(d.getVar("base_libdir", True) + '\n')
- f.write(d.getVar("libdir", True) + '\n')
+ f.write(d.getVar("base_libdir") + '\n')
+ f.write(d.getVar("libdir") + '\n')
-class ImageQAFailed(bb.build.FuncFailed):
+class ImageQAFailed(Exception):
def __init__(self, description, name=None, logfile=None):
self.description = description
self.name = name
@@ -317,3 +498,7 @@ class ImageQAFailed(bb.build.FuncFailed):
msg = msg + ' (%s)' % self.description
return msg
+
+def sh_quote(string):
+ import shlex
+ return shlex.quote(string)
diff --git a/meta/lib/oeqa/buildperf/__init__.py b/meta/lib/oeqa/buildperf/__init__.py
index 605f429ecc..b7ebd036ae 100644
--- a/meta/lib/oeqa/buildperf/__init__.py
+++ b/meta/lib/oeqa/buildperf/__init__.py
@@ -1,13 +1,7 @@
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Build performance tests"""
from .base import (BuildPerfTestCase,
diff --git a/meta/lib/oeqa/buildperf/base.py b/meta/lib/oeqa/buildperf/base.py
index 2c102554b9..3b2fed549f 100644
--- a/meta/lib/oeqa/buildperf/base.py
+++ b/meta/lib/oeqa/buildperf/base.py
@@ -1,30 +1,24 @@
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Build performance test base classes and functionality"""
-import glob
import json
import logging
import os
import re
import resource
-import shutil
import socket
+import shutil
import time
-import traceback
import unittest
+import xml.etree.ElementTree as ET
+from collections import OrderedDict
from datetime import datetime, timedelta
from functools import partial
from multiprocessing import Process
from multiprocessing import SimpleQueue
+from xml.dom import minidom
import oe.path
from oeqa.utils.commands import CommandError, runCmd, get_bb_vars
@@ -35,7 +29,7 @@ log = logging.getLogger('build-perf')
# Our own version of runCmd which does not raise AssertionErrors, which would
# cause errors to be interpreted as failures
-runCmd2 = partial(runCmd, assert_error=False)
+runCmd2 = partial(runCmd, assert_error=False, limit_exc_output=40)
class KernelDropCaches(object):
@@ -99,50 +93,34 @@ class BuildPerfTestResult(unittest.TextTestResult):
super(BuildPerfTestResult, self).__init__(*args, **kwargs)
self.out_dir = out_dir
- # Get Git parameters
- try:
- self.repo = GitRepo('.')
- except GitError:
- self.repo = None
- self.git_commit, self.git_commit_count, self.git_branch = \
- self.get_git_revision()
self.hostname = socket.gethostname()
self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
self.start_time = self.elapsed_time = None
self.successes = []
- log.info("Using Git branch:commit %s:%s (%s)", self.git_branch,
- self.git_commit, self.git_commit_count)
-
- def get_git_revision(self):
- """Get git branch and commit under testing"""
- commit = os.getenv('OE_BUILDPERFTEST_GIT_COMMIT')
- commit_cnt = os.getenv('OE_BUILDPERFTEST_GIT_COMMIT_COUNT')
- branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
- if not self.repo and (not commit or not commit_cnt or not branch):
- log.info("The current working directory doesn't seem to be a Git "
- "repository clone. You can specify branch and commit "
- "displayed in test results with OE_BUILDPERFTEST_GIT_BRANCH, "
- "OE_BUILDPERFTEST_GIT_COMMIT and "
- "OE_BUILDPERFTEST_GIT_COMMIT_COUNT environment variables")
- else:
- if not commit:
- commit = self.repo.rev_parse('HEAD^0')
- commit_cnt = self.repo.run_cmd(['rev-list', '--count', 'HEAD^0'])
- if not branch:
- branch = self.repo.get_current_branch()
- if not branch:
- log.debug('Currently on detached HEAD')
- return str(commit), str(commit_cnt), str(branch)
def addSuccess(self, test):
"""Record results from successful tests"""
super(BuildPerfTestResult, self).addSuccess(test)
- self.successes.append((test, None))
+ self.successes.append(test)
+
+ def addError(self, test, err):
+ """Record results from crashed test"""
+ test.err = err
+ super(BuildPerfTestResult, self).addError(test, err)
+
+ def addFailure(self, test, err):
+ """Record results from failed test"""
+ test.err = err
+ super(BuildPerfTestResult, self).addFailure(test, err)
+
+ def addExpectedFailure(self, test, err):
+ """Record results from expectedly failed test"""
+ test.err = err
+ super(BuildPerfTestResult, self).addExpectedFailure(test, err)
def startTest(self, test):
"""Pre-test hook"""
test.base_dir = self.out_dir
- os.mkdir(test.out_dir)
log.info("Executing test %s: %s", test.name, test.shortDescription())
self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
super(BuildPerfTestResult, self).startTest(test)
@@ -154,141 +132,113 @@ class BuildPerfTestResult(unittest.TextTestResult):
    def stopTestRun(self):
        """Post-run hook"""
self.elapsed_time = datetime.utcnow() - self.start_time
- self.write_results_json()
def all_results(self):
- result_map = {'SUCCESS': self.successes,
- 'FAIL': self.failures,
- 'ERROR': self.errors,
- 'EXP_FAIL': self.expectedFailures,
- 'UNEXP_SUCCESS': self.unexpectedSuccesses,
- 'SKIPPED': self.skipped}
- for status, tests in result_map.items():
- for test in tests:
- yield (status, test)
-
-
- def update_globalres_file(self, filename):
- """Write results to globalres csv file"""
- # Map test names to time and size columns in globalres
- # The tuples represent index and length of times and sizes
- # respectively
- gr_map = {'test1': ((0, 1), (8, 1)),
- 'test12': ((1, 1), (None, None)),
- 'test13': ((2, 1), (9, 1)),
- 'test2': ((3, 1), (None, None)),
- 'test3': ((4, 3), (None, None)),
- 'test4': ((7, 1), (10, 2))}
-
- if self.repo:
- git_tag_rev = self.repo.run_cmd(['describe', self.git_commit])
- else:
- git_tag_rev = self.git_commit
+ compound = [('SUCCESS', t, None) for t in self.successes] + \
+ [('FAILURE', t, m) for t, m in self.failures] + \
+ [('ERROR', t, m) for t, m in self.errors] + \
+ [('EXPECTED_FAILURE', t, m) for t, m in self.expectedFailures] + \
+ [('UNEXPECTED_SUCCESS', t, None) for t in self.unexpectedSuccesses] + \
+ [('SKIPPED', t, m) for t, m in self.skipped]
+ return sorted(compound, key=lambda info: info[1].start_time)
+
+
+ def write_buildstats_json(self):
+ """Write buildstats file"""
+ buildstats = OrderedDict()
+ for _, test, _ in self.all_results():
+ for key, val in test.buildstats.items():
+ buildstats[test.name + '.' + key] = val
+ with open(os.path.join(self.out_dir, 'buildstats.json'), 'w') as fobj:
+ json.dump(buildstats, fobj, cls=ResultsJsonEncoder)
- values = ['0'] * 12
- for status, (test, msg) in self.all_results():
- if status in ['ERROR', 'SKIPPED']:
- continue
- (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
- if t_ind is not None:
- values[t_ind:t_ind + t_len] = test.times
- if s_ind is not None:
- values[s_ind:s_ind + s_len] = test.sizes
-
- log.debug("Writing globalres log to %s", filename)
- with open(filename, 'a') as fobj:
- fobj.write('{},{}:{},{},'.format(self.hostname,
- self.git_branch,
- self.git_commit,
- git_tag_rev))
- fobj.write(','.join(values) + '\n')
def write_results_json(self):
"""Write test results into a json-formatted file"""
- results = {'tester_host': self.hostname,
- 'git_branch': self.git_branch,
- 'git_commit': self.git_commit,
- 'git_commit_count': self.git_commit_count,
- 'product': self.product,
- 'start_time': self.start_time,
- 'elapsed_time': self.elapsed_time}
-
- tests = {}
- for status, (test, reason) in self.all_results():
- tests[test.name] = {'name': test.name,
- 'description': test.shortDescription(),
- 'status': status,
- 'start_time': test.start_time,
- 'elapsed_time': test.elapsed_time,
- 'cmd_log_file': os.path.relpath(test.cmd_log_file,
- self.out_dir),
- 'measurements': test.measurements}
- results['tests'] = tests
+ results = OrderedDict([('tester_host', self.hostname),
+ ('start_time', self.start_time),
+ ('elapsed_time', self.elapsed_time),
+ ('tests', OrderedDict())])
+
+ for status, test, reason in self.all_results():
+ test_result = OrderedDict([('name', test.name),
+ ('description', test.shortDescription()),
+ ('status', status),
+ ('start_time', test.start_time),
+ ('elapsed_time', test.elapsed_time),
+ ('measurements', test.measurements)])
+ if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
+ test_result['message'] = str(test.err[1])
+ test_result['err_type'] = test.err[0].__name__
+ test_result['err_output'] = reason
+ elif reason:
+ test_result['message'] = reason
+
+ results['tests'][test.name] = test_result
with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
- json.dump(results, fobj, indent=4, sort_keys=True,
+ json.dump(results, fobj, indent=4,
cls=ResultsJsonEncoder)
-
- def git_commit_results(self, repo_path, branch=None, tag=None):
- """Commit results into a Git repository"""
- repo = GitRepo(repo_path, is_topdir=True)
- if not branch:
- branch = self.git_branch
- else:
- # Replace keywords
- branch = branch.format(git_branch=self.git_branch,
- tester_host=self.hostname)
-
- log.info("Committing test results into %s %s", repo_path, branch)
- tmp_index = os.path.join(repo_path, '.git', 'index.oe-build-perf')
- try:
- # Create new commit object from the new results
- env_update = {'GIT_INDEX_FILE': tmp_index,
- 'GIT_WORK_TREE': self.out_dir}
- repo.run_cmd('add .', env_update)
- tree = repo.run_cmd('write-tree', env_update)
- parent = repo.rev_parse(branch)
- msg = "Results of {}:{}\n".format(self.git_branch, self.git_commit)
- git_cmd = ['commit-tree', tree, '-m', msg]
- if parent:
- git_cmd += ['-p', parent]
- commit = repo.run_cmd(git_cmd, env_update)
-
- # Update branch head
- git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
- if parent:
- git_cmd.append(parent)
- repo.run_cmd(git_cmd)
-
- # Update current HEAD, if we're on branch 'branch'
- if repo.get_current_branch() == branch:
- log.info("Updating %s HEAD to latest commit", repo_path)
- repo.run_cmd('reset --hard')
-
- # Create (annotated) tag
- if tag:
- # Find tags matching the pattern
- tag_keywords = dict(git_branch=self.git_branch,
- git_commit=self.git_commit,
- git_commit_count=self.git_commit_count,
- tester_host=self.hostname,
- tag_num='[0-9]{1,5}')
- tag_re = re.compile(tag.format(**tag_keywords) + '$')
- tag_keywords['tag_num'] = 0
- for existing_tag in repo.run_cmd('tag').splitlines():
- if tag_re.match(existing_tag):
- tag_keywords['tag_num'] += 1
-
- tag = tag.format(**tag_keywords)
- msg = "Test run #{} of {}:{}\n".format(tag_keywords['tag_num'],
- self.git_branch,
- self.git_commit)
- repo.run_cmd(['tag', '-a', '-m', msg, tag, commit])
-
- finally:
- if os.path.exists(tmp_index):
- os.unlink(tmp_index)
+ def write_results_xml(self):
+ """Write test results into a JUnit XML file"""
+ top = ET.Element('testsuites')
+ suite = ET.SubElement(top, 'testsuite')
+ suite.set('name', 'oeqa.buildperf')
+ suite.set('timestamp', self.start_time.isoformat())
+ suite.set('time', str(self.elapsed_time.total_seconds()))
+ suite.set('hostname', self.hostname)
+ suite.set('failures', str(len(self.failures) + len(self.expectedFailures)))
+ suite.set('errors', str(len(self.errors)))
+ suite.set('skipped', str(len(self.skipped)))
+
+ test_cnt = 0
+ for status, test, reason in self.all_results():
+ test_cnt += 1
+ testcase = ET.SubElement(suite, 'testcase')
+ testcase.set('classname', test.__module__ + '.' + test.__class__.__name__)
+ testcase.set('name', test.name)
+ testcase.set('description', test.shortDescription())
+ testcase.set('timestamp', test.start_time.isoformat())
+ testcase.set('time', str(test.elapsed_time.total_seconds()))
+            if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
+                if status in ('FAILURE', 'EXPECTED_FAILURE'):
+ result = ET.SubElement(testcase, 'failure')
+ else:
+ result = ET.SubElement(testcase, 'error')
+ result.set('message', str(test.err[1]))
+ result.set('type', test.err[0].__name__)
+ result.text = reason
+ elif status == 'SKIPPED':
+ result = ET.SubElement(testcase, 'skipped')
+ result.text = reason
+ elif status not in ('SUCCESS', 'UNEXPECTED_SUCCESS'):
+ raise TypeError("BUG: invalid test status '%s'" % status)
+
+ for data in test.measurements.values():
+ measurement = ET.SubElement(testcase, data['type'])
+ measurement.set('name', data['name'])
+ measurement.set('legend', data['legend'])
+ vals = data['values']
+ if data['type'] == BuildPerfTestCase.SYSRES:
+ ET.SubElement(measurement, 'time',
+ timestamp=vals['start_time'].isoformat()).text = \
+ str(vals['elapsed_time'].total_seconds())
+ attrib = dict((k, str(v)) for k, v in vals['iostat'].items())
+ ET.SubElement(measurement, 'iostat', attrib=attrib)
+ attrib = dict((k, str(v)) for k, v in vals['rusage'].items())
+ ET.SubElement(measurement, 'rusage', attrib=attrib)
+ elif data['type'] == BuildPerfTestCase.DISKUSAGE:
+ ET.SubElement(measurement, 'size').text = str(vals['size'])
+ else:
+ raise TypeError('BUG: unsupported measurement type')
+
+ suite.set('tests', str(test_cnt))
+
+ # Use minidom for pretty-printing
+ dom_doc = minidom.parseString(ET.tostring(top, 'utf-8'))
+ with open(os.path.join(self.out_dir, 'results.xml'), 'w') as fobj:
+ dom_doc.writexml(fobj, addindent=' ', newl='\n', encoding='utf-8')
class BuildPerfTestCase(unittest.TestCase):
@@ -303,7 +253,10 @@ class BuildPerfTestCase(unittest.TestCase):
self.base_dir = None
self.start_time = None
self.elapsed_time = None
- self.measurements = []
+ self.measurements = OrderedDict()
+ self.buildstats = OrderedDict()
+ # self.err is supposed to be a tuple from sys.exc_info()
+ self.err = None
self.bb_vars = get_bb_vars()
# TODO: remove 'times' and 'sizes' arrays when globalres support is
# removed
@@ -311,18 +264,23 @@ class BuildPerfTestCase(unittest.TestCase):
self.sizes = []
@property
- def out_dir(self):
- return os.path.join(self.base_dir, self.name)
+ def tmp_dir(self):
+ return os.path.join(self.base_dir, self.name + '.tmp')
- @property
- def cmd_log_file(self):
- return os.path.join(self.out_dir, 'commands.log')
+ def shortDescription(self):
+ return super(BuildPerfTestCase, self).shortDescription() or ""
def setUp(self):
"""Set-up fixture for each test"""
+ if not os.path.isdir(self.tmp_dir):
+ os.mkdir(self.tmp_dir)
if self.build_target:
- self.log_cmd_output(['bitbake', self.build_target,
- '-c', 'fetchall'])
+ self.run_cmd(['bitbake', self.build_target, '--runall=fetch'])
+
+ def tearDown(self):
+ """Tear-down fixture for each test"""
+ if os.path.isdir(self.tmp_dir):
+ shutil.rmtree(self.tmp_dir)
def run(self, *args, **kwargs):
"""Run test"""
@@ -330,17 +288,23 @@ class BuildPerfTestCase(unittest.TestCase):
super(BuildPerfTestCase, self).run(*args, **kwargs)
self.elapsed_time = datetime.now() - self.start_time
- def log_cmd_output(self, cmd):
- """Run a command and log it's output"""
+ def run_cmd(self, cmd):
+ """Convenience method for running a command"""
cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Running command: %s", cmd_str)
try:
- with open(self.cmd_log_file, 'a') as fobj:
- runCmd2(cmd, stdout=fobj)
+ runCmd2(cmd)
except CommandError as err:
log.error("Command failed: %s", err.retcode)
raise
+ def _append_measurement(self, measurement):
+        """Simple helper for adding measurement results"""
+ if measurement['name'] in self.measurements:
+ raise ValueError('BUG: two measurements with the same name in {}'.format(
+ self.__class__.__name__))
+ self.measurements[measurement['name']] = measurement
+
def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
"""Measure system resource usage of a command"""
def _worker(data_q, cmd, **kwargs):
@@ -350,12 +314,12 @@ class BuildPerfTestCase(unittest.TestCase):
ret = runCmd2(cmd, **kwargs)
etime = datetime.now() - start_time
rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
- iostat = {}
+ iostat = OrderedDict()
with open('/proc/{}/io'.format(os.getpid())) as fobj:
for line in fobj.readlines():
key, val = line.split(':')
iostat[key] = int(val)
- rusage = {}
+ rusage = OrderedDict()
# Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
# 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
@@ -374,33 +338,28 @@ class BuildPerfTestCase(unittest.TestCase):
log.info("Timing command: %s", cmd_str)
data_q = SimpleQueue()
try:
- with open(self.cmd_log_file, 'a') as fobj:
- proc = Process(target=_worker, args=(data_q, cmd,),
- kwargs={'stdout': fobj})
- proc.start()
- data = data_q.get()
- proc.join()
+ proc = Process(target=_worker, args=(data_q, cmd,))
+ proc.start()
+ data = data_q.get()
+ proc.join()
if isinstance(data, Exception):
raise data
except CommandError:
- log.error("Command '%s' failed, see %s for more details", cmd_str,
- self.cmd_log_file)
+ log.error("Command '%s' failed", cmd_str)
raise
etime = data['elapsed_time']
- measurement = {'type': self.SYSRES,
- 'name': name,
- 'legend': legend}
- measurement['values'] = {'start_time': data['start_time'],
- 'elapsed_time': etime,
- 'rusage': data['rusage'],
- 'iostat': data['iostat']}
+ measurement = OrderedDict([('type', self.SYSRES),
+ ('name', name),
+ ('legend', legend)])
+ measurement['values'] = OrderedDict([('start_time', data['start_time']),
+ ('elapsed_time', etime),
+ ('rusage', data['rusage']),
+ ('iostat', data['iostat'])])
if save_bs:
- bs_file = self.save_buildstats(legend)
- measurement['values']['buildstats_file'] = \
- os.path.relpath(bs_file, self.base_dir)
+ self.save_buildstats(name)
- self.measurements.append(measurement)
+ self._append_measurement(measurement)
# Append to 'times' array for globalres log
e_sec = etime.total_seconds()
@@ -408,20 +367,25 @@ class BuildPerfTestCase(unittest.TestCase):
int((e_sec % 3600) / 60),
e_sec % 60))
- def measure_disk_usage(self, path, name, legend):
+ def measure_disk_usage(self, path, name, legend, apparent_size=False):
"""Estimate disk usage of a file or directory"""
- ret = runCmd2(['du', '-s', path])
+ cmd = ['du', '-s', '--block-size', '1024']
+ if apparent_size:
+ cmd.append('--apparent-size')
+ cmd.append(path)
+
+ ret = runCmd2(cmd)
size = int(ret.output.split()[0])
log.debug("Size of %s path is %s", path, size)
- measurement = {'type': self.DISKUSAGE,
- 'name': name,
- 'legend': legend}
- measurement['values'] = {'size': size}
- self.measurements.append(measurement)
+ measurement = OrderedDict([('type', self.DISKUSAGE),
+ ('name', name),
+ ('legend', legend)])
+ measurement['values'] = OrderedDict([('size', size)])
+ self._append_measurement(measurement)
# Append to 'sizes' array for globalres log
self.sizes.append(str(size))
- def save_buildstats(self, label=None):
+ def save_buildstats(self, measurement_name):
"""Save buildstats"""
def split_nevr(nevr):
"""Split name and version information from recipe "nevr" string"""
@@ -440,9 +404,9 @@ class BuildPerfTestCase(unittest.TestCase):
def bs_to_json(filename):
"""Convert (task) buildstats file into json format"""
- bs_json = {'iostat': {},
- 'rusage': {},
- 'child_rusage': {}}
+ bs_json = OrderedDict()
+ iostat = OrderedDict()
+ rusage = OrderedDict()
with open(filename) as fobj:
for line in fobj.readlines():
key, val = line.split(':', 1)
@@ -454,7 +418,7 @@ class BuildPerfTestCase(unittest.TestCase):
end_time = datetime.utcfromtimestamp(float(val))
elif key.startswith('IO '):
split = key.split()
- bs_json['iostat'][split[1]] = int(val)
+ iostat[split[1]] = int(val)
elif key.find('rusage') >= 0:
split = key.split()
ru_key = split[-1]
@@ -462,12 +426,12 @@ class BuildPerfTestCase(unittest.TestCase):
val = float(val)
else:
val = int(val)
- ru_type = 'rusage' if split[0] == 'rusage' else \
- 'child_rusage'
- bs_json[ru_type][ru_key] = val
+ rusage[ru_key] = rusage.get(ru_key, 0) + val
elif key == 'Status':
bs_json['status'] = val
bs_json['elapsed_time'] = end_time - start_time
+ bs_json['rusage'] = rusage
+ bs_json['iostat'] = iostat
return bs_json
log.info('Saving buildstats in JSON format')
@@ -483,24 +447,17 @@ class BuildPerfTestCase(unittest.TestCase):
if not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = split_nevr(fname)
- recipe_bs = {'name': name,
- 'epoch': epoch,
- 'version': version,
- 'revision': revision,
- 'tasks': {}}
+ recipe_bs = OrderedDict((('name', name),
+ ('epoch', epoch),
+ ('version', version),
+ ('revision', revision),
+ ('tasks', OrderedDict())))
for task in os.listdir(recipe_dir):
recipe_bs['tasks'][task] = bs_to_json(os.path.join(recipe_dir,
task))
buildstats.append(recipe_bs)
- # Write buildstats into json file
- postfix = '.' + str_to_fn(label) if label else ''
- postfix += '.json'
- outfile = os.path.join(self.out_dir, 'buildstats' + postfix)
- with open(outfile, 'w') as fobj:
- json.dump(buildstats, fobj, indent=4, sort_keys=True,
- cls=ResultsJsonEncoder)
- return outfile
+ self.buildstats[measurement_name] = buildstats
def rm_tmp(self):
"""Cleanup temporary/intermediate files and directories"""
@@ -521,6 +478,7 @@ class BuildPerfTestCase(unittest.TestCase):
@staticmethod
def sync():
"""Sync and drop kernel caches"""
+ runCmd2('bitbake -m', ignore_status=True)
        log.debug("Syncing and dropping kernel caches")
KernelDropCaches.drop()
os.sync()
@@ -542,5 +500,5 @@ class BuildPerfTestRunner(unittest.TextTestRunner):
self.out_dir = out_dir
def _makeResult(self):
- return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
- self.verbosity)
+ return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
+ self.verbosity)
diff --git a/meta/lib/oeqa/buildperf/test_basic.py b/meta/lib/oeqa/buildperf/test_basic.py
index e448ed18c9..2104617ba3 100644
--- a/meta/lib/oeqa/buildperf/test_basic.py
+++ b/meta/lib/oeqa/buildperf/test_basic.py
@@ -1,13 +1,6 @@
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Basic set of build performance tests"""
import os
@@ -15,14 +8,13 @@ import shutil
import oe.path
from oeqa.buildperf import BuildPerfTestCase
-from oeqa.utils.commands import get_bb_vars
-
+from oeqa.utils.commands import get_bb_var, get_bb_vars
class Test1P1(BuildPerfTestCase):
build_target = 'core-image-sato'
def test1(self):
- """Measure wall clock of bitbake core-image-sato and size of tmp dir"""
+ """Build core-image-sato"""
self.rm_tmp()
self.rm_sstate()
self.rm_cache()
@@ -30,16 +22,17 @@ class Test1P1(BuildPerfTestCase):
self.measure_cmd_resources(['bitbake', self.build_target], 'build',
'bitbake ' + self.build_target, save_bs=True)
self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir')
+ self.measure_disk_usage(get_bb_var("IMAGE_ROOTFS", self.build_target), 'rootfs', 'rootfs', True)
class Test1P2(BuildPerfTestCase):
build_target = 'virtual/kernel'
def test12(self):
- """Measure bitbake virtual/kernel"""
+ """Build virtual/kernel"""
        # Build and then clean sstate in order to get all dependencies pre-built
- self.log_cmd_output(['bitbake', self.build_target])
- self.log_cmd_output(['bitbake', self.build_target, '-c', 'cleansstate'])
+ self.run_cmd(['bitbake', self.build_target])
+ self.run_cmd(['bitbake', self.build_target, '-c', 'cleansstate'])
self.sync()
self.measure_cmd_resources(['bitbake', self.build_target], 'build',
@@ -51,30 +44,28 @@ class Test1P3(BuildPerfTestCase):
def test13(self):
"""Build core-image-sato with rm_work enabled"""
- postfile = os.path.join(self.out_dir, 'postfile.conf')
+ postfile = os.path.join(self.tmp_dir, 'postfile.conf')
with open(postfile, 'w') as fobj:
fobj.write('INHERIT += "rm_work"\n')
- try:
- self.rm_tmp()
- self.rm_sstate()
- self.rm_cache()
- self.sync()
- cmd = ['bitbake', '-R', postfile, self.build_target]
- self.measure_cmd_resources(cmd, 'build',
- 'bitbake' + self.build_target,
- save_bs=True)
- self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir')
- finally:
- os.unlink(postfile)
+
+ self.rm_tmp()
+ self.rm_sstate()
+ self.rm_cache()
+ self.sync()
+ cmd = ['bitbake', '-R', postfile, self.build_target]
+ self.measure_cmd_resources(cmd, 'build',
+                                   'bitbake ' + self.build_target,
+ save_bs=True)
+ self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir')
class Test2(BuildPerfTestCase):
build_target = 'core-image-sato'
def test2(self):
- """Measure bitbake core-image-sato -c rootfs with sstate"""
+ """Run core-image-sato do_rootfs with sstate"""
# Build once in order to populate sstate cache
- self.log_cmd_output(['bitbake', self.build_target])
+ self.run_cmd(['bitbake', self.build_target])
self.rm_tmp()
self.rm_cache()
@@ -86,7 +77,7 @@ class Test2(BuildPerfTestCase):
class Test3(BuildPerfTestCase):
def test3(self):
- """Parsing time metrics (bitbake -p)"""
+ """Bitbake parsing (bitbake -p)"""
# Drop all caches and parse
self.rm_cache()
oe.path.remove(os.path.join(self.bb_vars['TMPDIR'], 'cache'), True)
@@ -106,15 +97,16 @@ class Test4(BuildPerfTestCase):
def test4(self):
"""eSDK metrics"""
- self.log_cmd_output("bitbake {} -c do_populate_sdk_ext".format(
- self.build_target))
+ self.run_cmd(['bitbake', '-c', 'do_populate_sdk_ext',
+ self.build_target])
self.bb_vars = get_bb_vars(None, self.build_target)
tmp_dir = self.bb_vars['TMPDIR']
installer = os.path.join(
self.bb_vars['SDK_DEPLOY'],
self.bb_vars['TOOLCHAINEXT_OUTPUTNAME'] + '.sh')
# Measure installer size
- self.measure_disk_usage(installer, 'installer_bin', 'eSDK installer')
+ self.measure_disk_usage(installer, 'installer_bin', 'eSDK installer',
+ apparent_size=True)
# Measure deployment time and deployed size
deploy_dir = os.path.join(tmp_dir, 'esdk-deploy')
if os.path.exists(deploy_dir):
@@ -122,4 +114,7 @@ class Test4(BuildPerfTestCase):
self.sync()
self.measure_cmd_resources([installer, '-y', '-d', deploy_dir],
'deploy', 'eSDK deploy')
- self.measure_disk_usage(deploy_dir, 'deploy_dir', 'deploy dir')
+    # make sure bitbake is unloaded
+ self.sync()
+ self.measure_disk_usage(deploy_dir, 'deploy_dir', 'deploy dir',
+ apparent_size=True)
diff --git a/meta/lib/oeqa/controllers/__init__.py b/meta/lib/oeqa/controllers/__init__.py
index 8eda92763c..cc3836c4bf 100644
--- a/meta/lib/oeqa/controllers/__init__.py
+++ b/meta/lib/oeqa/controllers/__init__.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Enable other layers to have modules in the same named directory
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oeqa/controllers/masterimage.py b/meta/lib/oeqa/controllers/masterimage.py
index 9ce3bf803d..0435dfa125 100644
--- a/meta/lib/oeqa/controllers/masterimage.py
+++ b/meta/lib/oeqa/controllers/masterimage.py
@@ -1,7 +1,7 @@
# Copyright (C) 2014 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
-
+# SPDX-License-Identifier: MIT
+#
# This module adds support to testimage.bbclass to deploy images and run
# tests using a "master image" - this is a "known good" image that is
# installed onto the device as part of initial setup and will be booted into
@@ -32,14 +32,14 @@ class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta
super(MasterImageHardwareTarget, self).__init__(d)
# target ip
- addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
+ addr = d.getVar("TEST_TARGET_IP") or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
self.ip = addr.split(":")[0]
try:
self.port = addr.split(":")[1]
except IndexError:
self.port = None
bb.note("Target IP: %s" % self.ip)
- self.server_ip = d.getVar("TEST_SERVER_IP", True)
+ self.server_ip = d.getVar("TEST_SERVER_IP")
if not self.server_ip:
try:
self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split("\n")[0].split()[-1]
@@ -49,8 +49,8 @@ class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta
# test rootfs + kernel
self.image_fstype = self.get_image_fstype(d)
- self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.' + self.image_fstype)
- self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("KERNEL_IMAGETYPE", False) + '-' + d.getVar('MACHINE', False) + '.bin')
+ self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("IMAGE_LINK_NAME") + '.' + self.image_fstype)
+ self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("KERNEL_IMAGETYPE", False) + '-' + d.getVar('MACHINE', False) + '.bin')
if not os.path.isfile(self.rootfs):
            # we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be
            # the same as the config with which the image was built, i.e.
@@ -64,16 +64,16 @@ class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta
# master ssh connection
self.master = None
# if the user knows what they are doing, then by all means...
- self.user_cmds = d.getVar("TEST_DEPLOY_CMDS", True)
+ self.user_cmds = d.getVar("TEST_DEPLOY_CMDS")
self.deploy_cmds = None
# this is the name of the command that controls the power for a board
# e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
# the command should take as the last argument "off" and "on" and "cycle" (off, on)
- self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD", True) or None
+ self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD") or None
self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS", False) or ""
- self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD", True) or None
+ self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD") or None
self.serialcontrol_args = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS", False) or ""
self.origenv = os.environ
@@ -82,7 +82,7 @@ class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta
# ssh + keys means we need the original user env
bborigenv = d.getVar("BB_ORIGENV", False) or {}
for key in bborigenv:
- val = bborigenv.getVar(key, True)
+ val = bborigenv.getVar(key)
if val is not None:
self.origenv[key] = str(val)
@@ -108,7 +108,7 @@ class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta
time.sleep(10)
self.power_ctl("cycle")
else:
- status, output = conn.run("reboot")
+ status, output = conn.run("sync; { sleep 1; reboot; } > /dev/null &")
if status != 0:
bb.error("Failed rebooting target and no power control command defined. You need to manually reset the device.\n%s" % output)
@@ -143,7 +143,7 @@ class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta
def _deploy(self):
pass
- def start(self, params=None):
+ def start(self, extra_bootparams=None):
bb.plain("%s - boot test image on target" % self.pn)
self._start()
# set the ssh object for the target/test image
@@ -156,47 +156,7 @@ class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta
def stop(self):
bb.plain("%s - reboot/powercycle target" % self.pn)
- self.power_cycle(self.connection)
-
-
-class GummibootTarget(MasterImageHardwareTarget):
-
- def __init__(self, d):
- super(GummibootTarget, self).__init__(d)
- # this the value we need to set in the LoaderEntryOneShot EFI variable
- # so the system boots the 'test' bootloader label and not the default
- # The first four bytes are EFI bits, and the rest is an utf-16le string
- # (EFI vars values need to be utf-16)
- # $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
- # 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
- self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'
- self.deploy_cmds = [
- 'mount -L boot /boot',
- 'mkdir -p /mnt/testrootfs',
- 'mount -L testrootfs /mnt/testrootfs',
- 'modprobe efivarfs',
- 'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
- 'cp ~/test-kernel /boot',
- 'rm -rf /mnt/testrootfs/*',
- 'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype,
- 'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
- ]
-
- def _deploy(self):
- # make sure these aren't mounted
- self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
- # from now on, every deploy cmd should return 0
- # else an exception will be thrown by sshcontrol
- self.master.ignore_status = False
- self.master.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
- self.master.copy_to(self.kernel, "~/test-kernel")
- for cmd in self.deploy_cmds:
- self.master.run(cmd)
-
- def _start(self, params=None):
self.power_cycle(self.master)
- # there are better ways than a timeout but this should work for now
- time.sleep(120)
class SystemdbootTarget(MasterImageHardwareTarget):
diff --git a/meta/lib/oeqa/controllers/testtargetloader.py b/meta/lib/oeqa/controllers/testtargetloader.py
index a1b7b1d92b..23101c7371 100644
--- a/meta/lib/oeqa/controllers/testtargetloader.py
+++ b/meta/lib/oeqa/controllers/testtargetloader.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import types
import bb
import os
@@ -61,8 +65,6 @@ class TestTargetLoader:
obj = getattr(module, target)
if obj:
from oeqa.targetcontrol import BaseTarget
- if (not isinstance(obj, (type, types.ClassType))):
- bb.warn("Target {0} found, but not of type Class".format(target))
if( not issubclass(obj, BaseTarget)):
bb.warn("Target {0} found, but subclass is not BaseTarget".format(target))
except:
diff --git a/meta/lib/oeqa/core/README b/meta/lib/oeqa/core/README
new file mode 100644
index 0000000000..d4fcda41f2
--- /dev/null
+++ b/meta/lib/oeqa/core/README
@@ -0,0 +1,76 @@
+= OEQA (v2) Framework =
+
+== Introduction ==
+
+This is version 2 of the OEQA framework. Base classes are located in the
+'oeqa/core' directory and subsequent components must extend from these.
+
+The main design consideration was to implement the needed functionality on
+top of the Python unittest framework. To achieve this goal, the following
+modules are used:
+
+ * oeqa/core/runner.py: Provides OETestResult and OETestRunner base
+ classes extending the unittest class. These classes support exporting
+   results to different formats; currently RAW and XML are supported.
+
+ * oeqa/core/loader.py: Provides OETestLoader extending the unittest class.
+   It also features a unified implementation of decorator support and
+   test case filtering.
+
+ * oeqa/core/case.py: Provides OETestCase base class extending
+ unittest.TestCase and provides access to the Test data (td), Test context
+ and Logger functionality.
+
+ * oeqa/core/decorator: Provides OETestDecorator, a new class to implement
+ decorators for Test cases.
+
+ * oeqa/core/context: Provides OETestContext, a high-level API for
+   loadTests and runTests of a certain Test component, and
+   OETestContextExecutor, a base class to enable oe-test to discover/use
+   the Test component.
+
+Also, a new 'oe-test' runner is located under 'scripts'; it scans for components
+that support OETestContextExecutor (see below).
+
+== Terminology ==
+
+ * Test component: The area of testing in the project, for example: runtime, SDK, eSDK, selftest.
+
+ * Test data: Data associated with the Test component. Currently we use the
+   bitbake datastore as Test data input.
+
+ * Test context: A context of which tests need to be run and how to run them; this
+   additionally provides access to the Test data and may have custom methods and/or attributes.
+
+== oe-test ==
+
+The new tool, oe-test, scans the code base for test components and provides a
+unified way to run test cases. Internally it scans folders inside the oeqa module
+in order to find specific classes that implement a test component.
+
+== Usage ==
+
+Executing the example test component:
+
+ $ source oe-init-build-env
+ $ oe-test core
+
+Getting help:
+
+ $ oe-test -h
+
+== Creating a new Test Component ==
+
+To add a new test component, the developer needs to extend
+OETestContext/OETestContextExecutor (from context.py) and OETestCase (from
+case.py), as sketched below.
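+
+A minimal sketch of such a component (all names are illustrative, not part of
+the framework):
+
+    # mycomponent/context.py
+    from oeqa.core.context import OETestContext, OETestContextExecutor
+
+    class MyTestContext(OETestContext):
+        pass
+
+    class MyTestContextExecutor(OETestContextExecutor):
+        _context_class = MyTestContext
+        name = 'mycomponent'
+        help = 'my test component'
+        description = 'executes my test suite'
+
+    _executor_class = MyTestContextExecutor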
+
+== Selftesting the framework ==
+
+Run all tests:
+
+ $ PATH=$PATH:../../ python3 -m unittest discover -s tests
+
+Run a specific test:
+
+ $ cd tests/
+ $ ./test_data.py
diff --git a/meta/lib/oe/tests/__init__.py b/meta/lib/oeqa/core/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/meta/lib/oe/tests/__init__.py
+++ b/meta/lib/oeqa/core/__init__.py
diff --git a/meta/lib/oeqa/core/case.py b/meta/lib/oeqa/core/case.py
new file mode 100644
index 0000000000..aae451fef2
--- /dev/null
+++ b/meta/lib/oeqa/core/case.py
@@ -0,0 +1,100 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import base64
+import zlib
+import unittest
+
+from oeqa.core.exception import OEQAMissingVariable
+
+def _validate_td_vars(td, td_vars, type_msg):
+ if td_vars:
+ for v in td_vars:
+            if v not in td:
+                raise OEQAMissingVariable("Test %s needs %s variable but"\
+                        " it isn't in td" % (type_msg, v))
+
+class OETestCase(unittest.TestCase):
+ # TestContext and Logger instance set by OETestLoader.
+ tc = None
+ logger = None
+
+    # td has all the variables needed by the test cases;
+    # it is the same across all the test cases.
+ td = None
+
+    # td_vars has the variables needed by a test class
+    # or test case instance; if some var isn't in td, an
+    # OEQAMissingVariable exception is raised
+ td_vars = None
+
+ @classmethod
+ def _oeSetUpClass(clss):
+ _validate_td_vars(clss.td, clss.td_vars, "class")
+ if hasattr(clss, 'setUpHooker') and callable(getattr(clss, 'setUpHooker')):
+ clss.setUpHooker()
+ clss.setUpClassMethod()
+
+ @classmethod
+ def _oeTearDownClass(clss):
+ clss.tearDownClassMethod()
+
+ def _oeSetUp(self):
+ for d in self.decorators:
+ d.setUpDecorator()
+ self.setUpMethod()
+
+ def _oeTearDown(self):
+ for d in self.decorators:
+ d.tearDownDecorator()
+ self.tearDownMethod()
+
+class OEPTestResultTestCase:
+ """
+ Mix-in class to provide functions to make interacting with extraresults for
+ the purposes of storing ptestresult data.
+ """
+ @staticmethod
+ def _compress_log(log):
+ logdata = log.encode("utf-8") if isinstance(log, str) else log
+ logdata = zlib.compress(logdata)
+ logdata = base64.b64encode(logdata).decode("utf-8")
+ return {"compressed" : logdata}
+
+ def ptest_rawlog(self, log):
+ if not hasattr(self, "extraresults"):
+ self.extraresults = {"ptestresult.sections" : {}}
+ self.extraresults["ptestresult.rawlogs"] = {"log" : self._compress_log(log)}
+
+ def ptest_section(self, section, duration = None, log = None, logfile = None, exitcode = None):
+ if not hasattr(self, "extraresults"):
+ self.extraresults = {"ptestresult.sections" : {}}
+
+ sections = self.extraresults.get("ptestresult.sections")
+ if section not in sections:
+ sections[section] = {}
+
+ if log is not None:
+ sections[section]["log"] = self._compress_log(log)
+ elif logfile is not None:
+ with open(logfile, "rb") as f:
+ sections[section]["log"] = self._compress_log(f.read())
+
+ if duration is not None:
+ sections[section]["duration"] = duration
+ if exitcode is not None:
+ sections[section]["exitcode"] = exitcode
+
+ def ptest_result(self, section, test, result):
+ if not hasattr(self, "extraresults"):
+ self.extraresults = {"ptestresult.sections" : {}}
+
+ sections = self.extraresults.get("ptestresult.sections")
+ if section not in sections:
+ sections[section] = {}
+ resultname = "ptestresult.{}.{}".format(section, test)
+ self.extraresults[resultname] = {"status" : result}
+
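+# A usage sketch for the mix-in above (class, section and test names are
+# illustrative):
+#
+#   class MyPtestCase(OETestCase, OEPTestResultTestCase):
+#       def test_ptest_run(self):
+#           self.ptest_section("glib-2.0", duration=12.5, exitcode=0)
+#           self.ptest_result("glib-2.0", "gvariant", "PASS")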
diff --git a/meta/lib/oeqa/core/cases/__init__.py b/meta/lib/oeqa/core/cases/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/meta/lib/oeqa/core/cases/__init__.py
diff --git a/meta/lib/oeqa/core/cases/example/data.json b/meta/lib/oeqa/core/cases/example/data.json
new file mode 100644
index 0000000000..21d6b16d17
--- /dev/null
+++ b/meta/lib/oeqa/core/cases/example/data.json
@@ -0,0 +1 @@
+{"ARCH": "x86", "IMAGE": "core-image-minimal"} \ No newline at end of file
diff --git a/meta/lib/oeqa/core/cases/example/test_basic.py b/meta/lib/oeqa/core/cases/example/test_basic.py
new file mode 100644
index 0000000000..d77edcdcec
--- /dev/null
+++ b/meta/lib/oeqa/core/cases/example/test_basic.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator.depends import OETestDepends
+
+class OETestExample(OETestCase):
+ def test_example(self):
+ self.logger.info('IMAGE: %s' % self.td.get('IMAGE'))
+ self.assertEqual('core-image-minimal', self.td.get('IMAGE'))
+ self.logger.info('ARCH: %s' % self.td.get('ARCH'))
+ self.assertEqual('x86', self.td.get('ARCH'))
+
+class OETestExampleDepend(OETestCase):
+ @OETestDepends(['OETestExample.test_example'])
+ def test_example_depends(self):
+ pass
+
+ def test_example_no_depends(self):
+ pass
diff --git a/meta/lib/oeqa/core/context.py b/meta/lib/oeqa/core/context.py
new file mode 100644
index 0000000000..14fc6a54f4
--- /dev/null
+++ b/meta/lib/oeqa/core/context.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
+import json
+import time
+import logging
+import collections
+import unittest
+
+from oeqa.core.loader import OETestLoader
+from oeqa.core.runner import OETestRunner
+from oeqa.core.exception import OEQAMissingManifest, OEQATestNotFound
+
+class OETestContext(object):
+ loaderClass = OETestLoader
+ runnerClass = OETestRunner
+
+ files_dir = os.path.abspath(os.path.join(os.path.dirname(
+ os.path.abspath(__file__)), "../files"))
+
+ def __init__(self, td=None, logger=None):
+        if not isinstance(td, dict):
+            raise TypeError("td isn't a dictionary")
+
+ self.td = td
+ self.logger = logger
+ self._registry = {}
+ self._registry['cases'] = collections.OrderedDict()
+
+ def _read_modules_from_manifest(self, manifest):
+ if not os.path.exists(manifest):
+ raise OEQAMissingManifest("Manifest does not exist on %s" % manifest)
+
+ modules = []
+ for line in open(manifest).readlines():
+ line = line.strip()
+ if line and not line.startswith("#"):
+ modules.append(line)
+
+ return modules
+
+ def skipTests(self, skips):
+ if not skips:
+ return
+ def skipfuncgen(skipmsg):
+ def func():
+ raise unittest.SkipTest(skipmsg)
+ return func
+ class_ids = {}
+ for test in self.suites:
+ if test.__class__ not in class_ids:
+ class_ids[test.__class__] = '.'.join(test.id().split('.')[:-1])
+ for skip in skips:
+ if (test.id()+'.').startswith(skip+'.'):
+ setattr(test, 'setUp', skipfuncgen('Skip by the command line argument "%s"' % skip))
+ for tclass in class_ids:
+ cid = class_ids[tclass]
+ for skip in skips:
+ if (cid + '.').startswith(skip + '.'):
+ setattr(tclass, 'setUpHooker', skipfuncgen('Skip by the command line argument "%s"' % skip))
+
+ def loadTests(self, module_paths, modules=[], tests=[],
+ modules_manifest="", modules_required=[], **kwargs):
+ if modules_manifest:
+ modules = self._read_modules_from_manifest(modules_manifest)
+
+ self.loader = self.loaderClass(self, module_paths, modules, tests,
+ modules_required, **kwargs)
+ self.suites = self.loader.discover()
+
+ def runTests(self, processes=None, skips=[]):
+ self.runner = self.runnerClass(self, descriptions=False, verbosity=2)
+
+        # Dynamically skip those tests specified through arguments
+ self.skipTests(skips)
+
+ self._run_start_time = time.time()
+ if processes:
+ from oeqa.core.utils.concurrencytest import ConcurrentTestSuite
+
+ concurrent_suite = ConcurrentTestSuite(self.suites, processes)
+ result = self.runner.run(concurrent_suite)
+ else:
+ self.runner.buffer = True
+ result = self.runner.run(self.suites)
+ self._run_end_time = time.time()
+
+ return result
+
+ def listTests(self, display_type):
+ self.runner = self.runnerClass(self, verbosity=2)
+ return self.runner.list_tests(self.suites, display_type)
+
+class OETestContextExecutor(object):
+ _context_class = OETestContext
+ _script_executor = 'oe-test'
+
+ name = 'core'
+ help = 'core test component example'
+ description = 'executes core test suite example'
+
+ default_cases = [os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'cases/example')]
+ default_test_data = os.path.join(default_cases[0], 'data.json')
+ default_tests = None
+
+ def register_commands(self, logger, subparsers):
+ self.parser = subparsers.add_parser(self.name, help=self.help,
+ description=self.description, group='components')
+
+ self.default_output_log = '%s-results-%s.log' % (self.name,
+ time.strftime("%Y%m%d%H%M%S"))
+ self.parser.add_argument('--output-log', action='store',
+ default=self.default_output_log,
+ help="results output log, default: %s" % self.default_output_log)
+
+ group = self.parser.add_mutually_exclusive_group()
+ group.add_argument('--run-tests', action='store', nargs='+',
+ default=self.default_tests,
+ help="tests to run in <module>[.<class>[.<name>]]")
+ group.add_argument('--list-tests', action='store',
+ choices=('module', 'class', 'name'),
+ help="lists available tests")
+
+ if self.default_test_data:
+ self.parser.add_argument('--test-data-file', action='store',
+ default=self.default_test_data,
+ help="data file to load, default: %s" % self.default_test_data)
+ else:
+ self.parser.add_argument('--test-data-file', action='store',
+ help="data file to load")
+
+ if self.default_cases:
+ self.parser.add_argument('CASES_PATHS', action='store',
+ default=self.default_cases, nargs='*',
+ help="paths to directories with test cases, default: %s"\
+ % self.default_cases)
+ else:
+ self.parser.add_argument('CASES_PATHS', action='store',
+ nargs='+', help="paths to directories with test cases")
+
+ self.parser.set_defaults(func=self.run)
+
+ def _setup_logger(self, logger, args):
+ formatter = logging.Formatter('%(asctime)s - ' + self.name + \
+ ' - %(levelname)s - %(message)s')
+ sh = logger.handlers[0]
+ sh.setFormatter(formatter)
+ fh = logging.FileHandler(args.output_log)
+ fh.setFormatter(formatter)
+ logger.addHandler(fh)
+
+ return logger
+
+ def _process_args(self, logger, args):
+ self.tc_kwargs = {}
+ self.tc_kwargs['init'] = {}
+ self.tc_kwargs['load'] = {}
+ self.tc_kwargs['list'] = {}
+ self.tc_kwargs['run'] = {}
+
+ self.tc_kwargs['init']['logger'] = self._setup_logger(logger, args)
+ if args.test_data_file:
+ self.tc_kwargs['init']['td'] = json.load(
+ open(args.test_data_file, "r"))
+ else:
+ self.tc_kwargs['init']['td'] = {}
+
+ if args.run_tests:
+ self.tc_kwargs['load']['modules'] = args.run_tests
+ self.tc_kwargs['load']['modules_required'] = args.run_tests
+ else:
+ self.tc_kwargs['load']['modules'] = []
+
+ self.tc_kwargs['run']['skips'] = []
+
+ self.module_paths = args.CASES_PATHS
+
+ def _pre_run(self):
+ pass
+
+ def run(self, logger, args):
+ self._process_args(logger, args)
+
+ self.tc = self._context_class(**self.tc_kwargs['init'])
+ try:
+ self.tc.loadTests(self.module_paths, **self.tc_kwargs['load'])
+ except OEQATestNotFound as ex:
+ logger.error(ex)
+ sys.exit(1)
+
+ if args.list_tests:
+ rc = self.tc.listTests(args.list_tests, **self.tc_kwargs['list'])
+ else:
+ self._pre_run()
+ rc = self.tc.runTests(**self.tc_kwargs['run'])
+ rc.logDetails()
+ rc.logSummary(self.name)
+
+ output_link = os.path.join(os.path.dirname(args.output_log),
+ "%s-results.log" % self.name)
+ if os.path.exists(output_link):
+ os.remove(output_link)
+ os.symlink(args.output_log, output_link)
+
+ return rc
+
+_executor_class = OETestContextExecutor
diff --git a/meta/lib/oeqa/core/decorator/__init__.py b/meta/lib/oeqa/core/decorator/__init__.py
new file mode 100644
index 0000000000..1a82518ab6
--- /dev/null
+++ b/meta/lib/oeqa/core/decorator/__init__.py
@@ -0,0 +1,79 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from functools import wraps
+from abc import abstractmethod, ABCMeta
+from oeqa.core.utils.misc import strToList
+
+decoratorClasses = set()
+
+def registerDecorator(cls):
+ decoratorClasses.add(cls)
+ return cls
+
+class OETestDecorator(object, metaclass=ABCMeta):
+ case = None # Reference of OETestCase decorated
+ attrs = None # Attributes to be loaded by decorator implementation
+
+ def __init__(self, *args, **kwargs):
+ if not self.attrs:
+ return
+
+ for idx, attr in enumerate(self.attrs):
+ if attr in kwargs:
+ value = kwargs[attr]
+ else:
+ value = args[idx]
+ setattr(self, attr, value)
+
+ def __call__(self, func):
+ @wraps(func)
+ def wrapped_f(*args, **kwargs):
+ self.attrs = self.attrs # XXX: Enables OETestLoader discover
+ return func(*args, **kwargs)
+ return wrapped_f
+
+    # OETestLoader calls this when it is loading test cases.
+    # XXX: Most methods would change the registry for later
+    # processing; be aware that the filtrate method needs to
+    # run later than bind, so there could be data (in the
+    # registry) from cases that were filtered out.
+ def bind(self, registry, case):
+ self.case = case
+ self.logger = case.tc.logger
+ self.case.decorators.append(self)
+
+    # OETestRunner calls this method when it tries to run
+    # the test case.
+ def setUpDecorator(self):
+ pass
+
+    # OETestRunner calls this after a test method has been
+    # called, even if the method raised an exception.
+ def tearDownDecorator(self):
+ pass
+
+class OETestDiscover(OETestDecorator):
+
+    # OETestLoader calls this after discovering test cases;
+    # it needs to return the cases to be run.
+ @staticmethod
+ def discover(registry):
+ return registry['cases']
+
+def OETestTag(*tags):
+ expandedtags = []
+ for tag in tags:
+ expandedtags += strToList(tag)
+ def decorator(item):
+ if hasattr(item, "__oeqa_testtags"):
+ # do not append, create a new list (to handle classes with inheritance)
+ item.__oeqa_testtags = list(item.__oeqa_testtags) + expandedtags
+ else:
+ item.__oeqa_testtags = expandedtags
+ return item
+ return decorator
+
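+# A usage sketch for OETestTag (the tag and class names are illustrative):
+#
+#   @OETestTag("machine")
+#   class MyMachineTests(OETestCase):
+#       def test_boot(self):
+#           pass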
diff --git a/meta/lib/oeqa/core/decorator/data.py b/meta/lib/oeqa/core/decorator/data.py
new file mode 100644
index 0000000000..bc4939e87c
--- /dev/null
+++ b/meta/lib/oeqa/core/decorator/data.py
@@ -0,0 +1,222 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.core.exception import OEQAMissingVariable
+
+from . import OETestDecorator, registerDecorator
+
+def has_feature(td, feature):
+ """
+ Checks for feature in DISTRO_FEATURES or IMAGE_FEATURES.
+ """
+
+ if (feature in td.get('DISTRO_FEATURES', '') or
+ feature in td.get('IMAGE_FEATURES', '')):
+ return True
+ return False
+
+def has_machine(td, machine):
+ """
+ Checks for MACHINE.
+ """
+
+ if (machine in td.get('MACHINE', '')):
+ return True
+ return False
+
+def is_qemu(td, qemu):
+ """
+ Checks if MACHINE is qemu.
+ """
+
+ machine = td.get('MACHINE', '')
+ if (qemu in td.get('MACHINE', '') or
+ machine.startswith('qemu')):
+ return True
+ return False
+
+@registerDecorator
+class skipIfDataVar(OETestDecorator):
+ """
+ Skip test based on value of a data store's variable.
+
+    It will get the value of var from the data store and will
+    check it against value; if they are equal it will skip the
+    test with msg as the reason.
+ """
+
+ attrs = ('var', 'value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %r value is %r to skip test' %
+ (self.var, self.value))
+ self.logger.debug(msg)
+ if self.case.td.get(self.var) == self.value:
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfNotDataVar(OETestDecorator):
+ """
+ Skip test based on value of a data store's variable.
+
+    It will get the value of var from the data store and will
+    check it against value; if they are not equal it will skip
+    the test with msg as the reason.
+ """
+
+ attrs = ('var', 'value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %r value is not %r to skip test' %
+ (self.var, self.value))
+ self.logger.debug(msg)
+ if not self.case.td.get(self.var) == self.value:
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfInDataVar(OETestDecorator):
+ """
+ Skip test if value is in data store's variable.
+ """
+
+ attrs = ('var', 'value', 'msg')
+ def setUpDecorator(self):
+ msg = ('Checking if %r value contains %r to skip '
+ 'the test' % (self.var, self.value))
+ self.logger.debug(msg)
+        if self.value in (self.case.td.get(self.var) or ""):
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfNotInDataVar(OETestDecorator):
+ """
+ Skip test if value is not in data store's variable.
+ """
+
+ attrs = ('var', 'value', 'msg')
+ def setUpDecorator(self):
+ msg = ('Checking if %r value contains %r to run '
+ 'the test' % (self.var, self.value))
+ self.logger.debug(msg)
+ if not self.value in (self.case.td.get(self.var) or ""):
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class OETestDataDepends(OETestDecorator):
+ attrs = ('td_depends',)
+
+ def setUpDecorator(self):
+ for v in self.td_depends:
+ try:
+ value = self.case.td[v]
+ except KeyError:
+                raise OEQAMissingVariable("Test case needs %s variable but"\
+                        " it isn't in td" % v)
+
+@registerDecorator
+class skipIfNotFeature(OETestDecorator):
+ """
+ Skip test based on DISTRO_FEATURES.
+
+ value must be in distro features or it will skip the test
+ with msg as the reason.
+ """
+
+ attrs = ('value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %s is in DISTRO_FEATURES '
+ 'or IMAGE_FEATURES' % (self.value))
+ self.logger.debug(msg)
+ if not has_feature(self.case.td, self.value):
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfFeature(OETestDecorator):
+ """
+ Skip test based on DISTRO_FEATURES.
+
+ value must not be in distro features or it will skip the test
+ with msg as the reason.
+ """
+
+ attrs = ('value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %s is not in DISTRO_FEATURES '
+ 'or IMAGE_FEATURES' % (self.value))
+ self.logger.debug(msg)
+ if has_feature(self.case.td, self.value):
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfNotMachine(OETestDecorator):
+ """
+ Skip test based on MACHINE.
+
+    value must match MACHINE or it will skip the test
+ with msg as the reason.
+ """
+
+ attrs = ('value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %s is not this MACHINE' % self.value)
+ self.logger.debug(msg)
+ if not has_machine(self.case.td, self.value):
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfMachine(OETestDecorator):
+ """
+ Skip test based on MACHINE.
+
+ value must not match MACHINE or it will skip the test
+ with msg as the reason.
+ """
+
+ attrs = ('value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %s is this MACHINE' % self.value)
+ self.logger.debug(msg)
+ if has_machine(self.case.td, self.value):
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfNotQemu(OETestDecorator):
+ """
+ Skip test based on MACHINE.
+
+ value must be a qemu MACHINE or it will skip the test
+ with msg as the reason.
+ """
+
+ attrs = ('value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %s is not this MACHINE' % self.value)
+ self.logger.debug(msg)
+ if not is_qemu(self.case.td, self.value):
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfQemu(OETestDecorator):
+ """
+ Skip test based on MACHINE being qemu.
+
+ value must not be a qemu machine or it will skip the test
+ with msg as the reason.
+ """
+
+ attrs = ('value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %s is this MACHINE' % self.value)
+ self.logger.debug(msg)
+ if is_qemu(self.case.td, self.value):
+ self.case.skipTest(self.msg)
+
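A minimal sketch of how these skip decorators are typically applied (test and variable names are hypothetical); each decorator takes its attrs as positional arguments:

    from oeqa.core.case import OETestCase
    from oeqa.core.decorator.data import (skipIfDataVar, skipIfNotFeature,
                                          skipIfNotMachine)

    class ExampleTest(OETestCase):
        # Skipped when the data store has DISTRO set to 'poky-tiny'
        @skipIfDataVar('DISTRO', 'poky-tiny', 'Not supported on poky-tiny')
        def test_distro_specific(self):
            self.assertTrue(True)

        # Runs only when 'systemd' is in DISTRO_FEATURES or IMAGE_FEATURES
        @skipIfNotFeature('systemd', 'systemd feature is required')
        def test_needs_systemd(self):
            self.assertTrue(True)

        # Runs only when MACHINE matches 'qemux86-64'
        @skipIfNotMachine('qemux86-64', 'qemux86-64 only')
        def test_machine_specific(self):
            self.assertTrue(True)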
diff --git a/meta/lib/oeqa/core/decorator/depends.py b/meta/lib/oeqa/core/decorator/depends.py
new file mode 100644
index 0000000000..33f0841cab
--- /dev/null
+++ b/meta/lib/oeqa/core/decorator/depends.py
@@ -0,0 +1,98 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from unittest import SkipTest
+
+from oeqa.core.exception import OEQADependency
+
+from . import OETestDiscover, registerDecorator
+
+def _add_depends(registry, case, depends):
+ module_name = case.__module__
+ class_name = case.__class__.__name__
+
+ case_id = case.id()
+
+ for depend in depends:
+ dparts = depend.split('.')
+
+ if len(dparts) == 1:
+ depend_id = ".".join((module_name, class_name, dparts[0]))
+ elif len(dparts) == 2:
+ depend_id = ".".join((module_name, dparts[0], dparts[1]))
+ else:
+ depend_id = depend
+
+ if not case_id in registry:
+ registry[case_id] = []
+ if not depend_id in registry[case_id]:
+ registry[case_id].append(depend_id)
+
+def _validate_test_case_depends(cases, depends):
+ for case in depends:
+ if not case in cases:
+ continue
+ for dep in depends[case]:
+ if not dep in cases:
+ raise OEQADependency("TestCase %s depends on %s and isn't available"\
+ ", cases available %s." % (case, dep, str(cases.keys())))
+
+def _order_test_case_by_depends(cases, depends):
+ def _dep_resolve(graph, node, resolved, seen):
+ seen.append(node)
+ for edge in graph[node]:
+ if edge not in resolved:
+ if edge in seen:
+ raise OEQADependency("Test cases %s and %s have a circular" \
+ " dependency." % (node, edge))
+ _dep_resolve(graph, edge, resolved, seen)
+ resolved.append(node)
+
+ dep_graph = {}
+ dep_graph['__root__'] = cases.keys()
+ for case in cases:
+ if case in depends:
+ dep_graph[case] = depends[case]
+ else:
+ dep_graph[case] = []
+
+ cases_ordered = []
+ _dep_resolve(dep_graph, '__root__', cases_ordered, [])
+ cases_ordered.remove('__root__')
+
+ return [cases[case_id] for case_id in cases_ordered]
+
+def _skipTestDependency(case, depends):
+ for dep in depends:
+ found = False
+ for test, _ in case.tc.results.successes:
+ if test.id() == dep:
+ found = True
+ break
+ if not found:
+ raise SkipTest("Test case %s depends on %s but it didn't pass/run." \
+ % (case.id(), dep))
+
+@registerDecorator
+class OETestDepends(OETestDiscover):
+ attrs = ('depends',)
+
+ def bind(self, registry, case):
+ super(OETestDepends, self).bind(registry, case)
+ if not registry.get('depends'):
+ registry['depends'] = {}
+ _add_depends(registry['depends'], case, self.depends)
+
+ @staticmethod
+ def discover(registry):
+ if registry.get('depends'):
+ _validate_test_case_depends(registry['cases'], registry['depends'])
+ return _order_test_case_by_depends(registry['cases'], registry['depends'])
+ else:
+ return [registry['cases'][case_id] for case_id in registry['cases']]
+
+ def setUpDecorator(self):
+ _skipTestDependency(self.case, self.depends)
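A usage sketch (test names hypothetical): a bare method name in the depends list is resolved against the same module and class by _add_depends, and setUpDecorator skips the dependent test unless its dependency already passed:

    from oeqa.core.case import OETestCase
    from oeqa.core.decorator.depends import OETestDepends

    class ExampleDepends(OETestCase):
        def test_setup(self):
            self.assertTrue(True)

        # Ordered after test_setup; skipped if test_setup didn't pass/run
        @OETestDepends(['test_setup'])
        def test_uses_setup(self):
            self.assertTrue(True)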
diff --git a/meta/lib/oeqa/core/decorator/oetimeout.py b/meta/lib/oeqa/core/decorator/oetimeout.py
new file mode 100644
index 0000000000..df90d1c798
--- /dev/null
+++ b/meta/lib/oeqa/core/decorator/oetimeout.py
@@ -0,0 +1,28 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import signal
+from . import OETestDecorator, registerDecorator
+from oeqa.core.exception import OEQATimeoutError
+
+@registerDecorator
+class OETimeout(OETestDecorator):
+ attrs = ('oetimeout',)
+
+ def setUpDecorator(self):
+ timeout = self.oetimeout
+ def _timeoutHandler(signum, frame):
+ raise OEQATimeoutError("Timed out after %s "
+ "seconds of execution" % timeout)
+
+ self.logger.debug("Setting up a %d second(s) timeout" % self.oetimeout)
+ self.alarmSignal = signal.signal(signal.SIGALRM, _timeoutHandler)
+ signal.alarm(self.oetimeout)
+
+ def tearDownDecorator(self):
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.alarmSignal)
+ self.logger.debug("Removed SIGALRM handler")
diff --git a/meta/lib/oeqa/core/exception.py b/meta/lib/oeqa/core/exception.py
new file mode 100644
index 0000000000..05be0ed21f
--- /dev/null
+++ b/meta/lib/oeqa/core/exception.py
@@ -0,0 +1,26 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+class OEQAException(Exception):
+ pass
+
+class OEQATimeoutError(OEQAException):
+ pass
+
+class OEQAMissingVariable(OEQAException):
+ pass
+
+class OEQADependency(OEQAException):
+ pass
+
+class OEQAMissingManifest(OEQAException):
+ pass
+
+class OEQAPreRun(OEQAException):
+ pass
+
+class OEQATestNotFound(OEQAException):
+ pass
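Since every framework error derives from OEQAException, callers can catch narrowly or broadly, as this small illustration shows:

    from oeqa.core.exception import OEQAException, OEQATimeoutError

    try:
        raise OEQATimeoutError("Timed out after 60 seconds of execution")
    except OEQAException as e:
        # Also catches OEQADependency, OEQAMissingVariable, etc.
        print("oeqa error: %s" % e)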
diff --git a/meta/lib/oeqa/core/loader.py b/meta/lib/oeqa/core/loader.py
new file mode 100644
index 0000000000..0d7970d49e
--- /dev/null
+++ b/meta/lib/oeqa/core/loader.py
@@ -0,0 +1,342 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import re
+import sys
+import unittest
+import inspect
+
+from oeqa.core.utils.path import findFile
+from oeqa.core.utils.test import getSuiteModules, getCaseID
+
+from oeqa.core.exception import OEQATestNotFound
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator import decoratorClasses, OETestDecorator, \
+ OETestDiscover
+
+# When loading tests, the unittest framework stores any exceptions and
+# displays them only when the run method is called.
+#
+# For our purposes, it is better to raise the exceptions in the loading
+# step rather than waiting to run the test suite.
+#
+# Generate the function definition because this differs across Python versions.
+# Python >= 3.4.4 uses three parameters instead of four, but e.g. Python 3.5.3
+# uses four parameters, so the change isn't incremental across versions.
+_failed_test_args = inspect.getfullargspec(unittest.loader._make_failed_test).args
+exec("""def _make_failed_test(%s): raise exception""" % ', '.join(_failed_test_args))
+unittest.loader._make_failed_test = _make_failed_test
+
+def _find_duplicated_modules(suite, directory):
+ for module in getSuiteModules(suite):
+ path = findFile('%s.py' % module, directory)
+ if path:
+ raise ImportError("Duplicated %s module found in %s" % (module, path))
+
+def _built_modules_dict(modules):
+ modules_dict = {}
+
+ if modules is None:
+ return modules_dict
+
+ for module in modules:
+ # Assumption: package and module names do not contain upper case
+ # characters, whereas class names do
+ m = re.match(r'^(\w+)(?:\.(\w[^.]*)(?:\.([^.]+))?)?$', module, flags=re.ASCII)
+ if not m:
+ continue
+
+ module_name, class_name, test_name = m.groups()
+
+ if module_name and module_name not in modules_dict:
+ modules_dict[module_name] = {}
+ if class_name and class_name not in modules_dict[module_name]:
+ modules_dict[module_name][class_name] = []
+ if test_name and test_name not in modules_dict[module_name][class_name]:
+ modules_dict[module_name][class_name].append(test_name)
+
+ return modules_dict
+
+class OETestLoader(unittest.TestLoader):
+ caseClass = OETestCase
+
+ kwargs_names = ['testMethodPrefix', 'sortTestMethodUsing', 'suiteClass',
+ '_top_level_dir']
+
+ def __init__(self, tc, module_paths, modules, tests, modules_required,
+ *args, **kwargs):
+ self.tc = tc
+
+ self.modules = _built_modules_dict(modules)
+
+ self.tests = tests
+ self.modules_required = modules_required
+
+ self.tags_filter = kwargs.get("tags_filter", None)
+
+ if isinstance(module_paths, str):
+ module_paths = [module_paths]
+ elif not isinstance(module_paths, list):
+ raise TypeError('module_paths must be a str or a list of str')
+ self.module_paths = module_paths
+
+ for kwname in self.kwargs_names:
+ if kwname in kwargs:
+ setattr(self, kwname, kwargs[kwname])
+
+ self._patchCaseClass(self.caseClass)
+
+ super(OETestLoader, self).__init__()
+
+ def _patchCaseClass(self, testCaseClass):
+ # Adds custom attributes to the OETestCase class
+ setattr(testCaseClass, 'tc', self.tc)
+ setattr(testCaseClass, 'td', self.tc.td)
+ setattr(testCaseClass, 'logger', self.tc.logger)
+
+ def _registerTestCase(self, case):
+ case_id = case.id()
+ self.tc._registry['cases'][case_id] = case
+
+ def _handleTestCaseDecorators(self, case):
+ def _handle(obj):
+ if isinstance(obj, OETestDecorator):
+ if not obj.__class__ in decoratorClasses:
+ raise Exception("Decorator %s isn't registered" \
+ " in decoratorClasses." % obj.__name__)
+ obj.bind(self.tc._registry, case)
+
+ def _walk_closure(obj):
+ if hasattr(obj, '__closure__') and obj.__closure__:
+ for f in obj.__closure__:
+ obj = f.cell_contents
+ _handle(obj)
+ _walk_closure(obj)
+ method = getattr(case, case._testMethodName, None)
+ _walk_closure(method)
+
+ def _filterTest(self, case):
+ """
+ Returns True if test case must be filtered, False otherwise.
+ """
+ # XXX: If the module has more than one namespace, only use
+ # the first one, to support running the whole module by specifying
+ # <module_name>.[test_class].[test_name]
+ module_name_small = case.__module__.split('.')[0]
+ module_name = case.__module__
+
+ class_name = case.__class__.__name__
+ test_name = case._testMethodName
+
+ # 'auto' is a reserved keyword to run test cases automatically;
+ # warn users if their test case belongs to a module named 'auto'
+ if module_name_small == "auto":
+ bb.warn("'auto' is a reserved keyword for TEST_SUITES. "
+ "But test case '%s' was detected to belong to an 'auto' module. "
+ "Please consider using a new name for your module." % str(case))
+
+ # check if case belongs to any specified module
+ # if 'auto' is specified, such check is skipped
+ if self.modules and not 'auto' in self.modules:
+ module = None
+ try:
+ module = self.modules[module_name_small]
+ except KeyError:
+ try:
+ module = self.modules[module_name]
+ except KeyError:
+ return True
+
+ if module:
+ if not class_name in module:
+ return True
+
+ if module[class_name]:
+ if test_name not in module[class_name]:
+ return True
+
+ # Decorator filters
+ if self.tags_filter is not None and callable(self.tags_filter):
+ alltags = set()
+ # pull tags from the case class
+ if hasattr(case, "__oeqa_testtags"):
+ for t in getattr(case, "__oeqa_testtags"):
+ alltags.add(t)
+ # pull tags from the method itself
+ if hasattr(case, test_name):
+ method = getattr(case, test_name)
+ if hasattr(method, "__oeqa_testtags"):
+ for t in getattr(method, "__oeqa_testtags"):
+ alltags.add(t)
+
+ if self.tags_filter(alltags):
+ return True
+
+ return False
+
+ def _getTestCase(self, testCaseClass, tcName):
+ if not hasattr(testCaseClass, '__oeqa_loader') and \
+ issubclass(testCaseClass, OETestCase):
+ # In order to support data_vars validation
+ # monkey patch the default setUp/tearDown{Class} to use
+ # the ones provided by OETestCase
+ setattr(testCaseClass, 'setUpClassMethod',
+ getattr(testCaseClass, 'setUpClass'))
+ setattr(testCaseClass, 'tearDownClassMethod',
+ getattr(testCaseClass, 'tearDownClass'))
+ setattr(testCaseClass, 'setUpClass',
+ testCaseClass._oeSetUpClass)
+ setattr(testCaseClass, 'tearDownClass',
+ testCaseClass._oeTearDownClass)
+
+ # In order to support decorators initialization
+ # monkey patch the default setUp/tearDown to use
+ # a setUpDecorators/tearDownDecorators that methods
+ # will call setUp/tearDown original methods.
+ setattr(testCaseClass, 'setUpMethod',
+ getattr(testCaseClass, 'setUp'))
+ setattr(testCaseClass, 'tearDownMethod',
+ getattr(testCaseClass, 'tearDown'))
+ setattr(testCaseClass, 'setUp', testCaseClass._oeSetUp)
+ setattr(testCaseClass, 'tearDown', testCaseClass._oeTearDown)
+
+ setattr(testCaseClass, '__oeqa_loader', True)
+
+ case = testCaseClass(tcName)
+ if isinstance(case, OETestCase):
+ setattr(case, 'decorators', [])
+
+ return case
+
+ def loadTestsFromTestCase(self, testCaseClass):
+ """
+ Returns a suite of all test cases contained in testCaseClass.
+ """
+ if issubclass(testCaseClass, unittest.suite.TestSuite):
+ raise TypeError("Test cases should not be derived from TestSuite." \
+ " Maybe you meant to derive %s from TestCase?" \
+ % testCaseClass.__name__)
+ if not issubclass(testCaseClass, unittest.case.TestCase):
+ raise TypeError("Test %s is not derived from %s" % \
+ (testCaseClass.__name__, unittest.case.TestCase.__name__))
+
+ testCaseNames = self.getTestCaseNames(testCaseClass)
+ if not testCaseNames and hasattr(testCaseClass, 'runTest'):
+ testCaseNames = ['runTest']
+
+ suite = []
+ for tcName in testCaseNames:
+ case = self._getTestCase(testCaseClass, tcName)
+ # Filter by case id
+ if not (self.tests and not 'auto' in self.tests
+ and not getCaseID(case) in self.tests):
+ self._handleTestCaseDecorators(case)
+
+ # Filter by decorators
+ if not self._filterTest(case):
+ self._registerTestCase(case)
+ suite.append(case)
+
+ return self.suiteClass(suite)
+
+ def _required_modules_validation(self):
+ """
+ Search in Test context registry if a required
+ test is found, raise an exception when not found.
+ """
+
+ for module in self.modules_required:
+ found = False
+
+ # The module name is split so that only the
+ # first part of a test case id is compared.
+ comp_len = len(module.split('.'))
+ for case in self.tc._registry['cases']:
+ case_comp = '.'.join(case.split('.')[0:comp_len])
+ if module == case_comp:
+ found = True
+ break
+
+ if not found:
+ raise OEQATestNotFound("%s not found in loaded test cases" % \
+ module)
+
+ def discover(self):
+ big_suite = self.suiteClass()
+ for path in self.module_paths:
+ _find_duplicated_modules(big_suite, path)
+ suite = super(OETestLoader, self).discover(path,
+ pattern='*.py', top_level_dir=path)
+ big_suite.addTests(suite)
+
+ cases = None
+ discover_classes = [clss for clss in decoratorClasses
+ if issubclass(clss, OETestDiscover)]
+ for clss in discover_classes:
+ cases = clss.discover(self.tc._registry)
+
+ if self.modules_required:
+ self._required_modules_validation()
+
+ return self.suiteClass(cases) if cases else big_suite
+
+ def _filterModule(self, module):
+ if module.__name__ in sys.builtin_module_names:
+ msg = 'Tried to import %s test module but it is a built-in'
+ raise ImportError(msg % module.__name__)
+
+ # XXX: If the module has more than one namespace, only use
+ # the first one, to support running the whole module by specifying
+ # <module_name>.[test_class].[test_name]
+ module_name_small = module.__name__.split('.')[0]
+ module_name = module.__name__
+
+ # Normal test modules are loaded if no modules were specified,
+ # if module is in the specified module list or if 'auto' is in
+ # module list.
+ # Underscore modules are loaded only if specified in module list.
+ load_module = (not module_name.startswith('_')
+ and (not self.modules
+ or module_name in self.modules
+ or module_name_small in self.modules
+ or 'auto' in self.modules))
+
+ load_underscore = (module_name.startswith('_')
+ and (module_name in self.modules
+ or module_name_small in self.modules))
+
+ return (load_module, load_underscore)
+
+
+ # XXX After Python 3.5, remove backward compatibility hacks for
+ # use_load_tests deprecation via *args and **kws. See issue 16662.
+ if sys.version_info >= (3,5):
+ def loadTestsFromModule(self, module, *args, pattern=None, **kws):
+ """
+ Returns a suite of all test cases contained in module.
+ """
+ load_module, load_underscore = self._filterModule(module)
+
+ if load_module or load_underscore:
+ return super(OETestLoader, self).loadTestsFromModule(
+ module, *args, pattern=pattern, **kws)
+ else:
+ return self.suiteClass()
+ else:
+ def loadTestsFromModule(self, module, use_load_tests=True):
+ """
+ Returns a suite of all test cases contained in module.
+ """
+ load_module, load_underscore = self._filterModule(module)
+
+ if load_module or load_underscore:
+ return super(OETestLoader, self).loadTestsFromModule(
+ module, use_load_tests)
+ else:
+ return self.suiteClass()
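To make the filtering structure concrete, a sketch (module names hypothetical) of what _built_modules_dict produces for the three supported specification forms, <module>, <module>.<class> and <module>.<class>.<test>:

    specs = ['ping', 'ssh.SSHTest', 'scp.SCPTest.test_scp_file']
    # _built_modules_dict(specs) returns:
    # {'ping': {},
    #  'ssh': {'SSHTest': []},
    #  'scp': {'SCPTest': ['test_scp_file']}}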
diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py
new file mode 100644
index 0000000000..f656e1a9c5
--- /dev/null
+++ b/meta/lib/oeqa/core/runner.py
@@ -0,0 +1,328 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import time
+import unittest
+import logging
+import re
+import json
+import sys
+
+from unittest import TextTestResult as _TestResult
+from unittest import TextTestRunner as _TestRunner
+
+class OEStreamLogger(object):
+ def __init__(self, logger):
+ self.logger = logger
+ self.buffer = ""
+
+ def write(self, msg):
+ if len(msg) > 1 and msg[0] != '\n':
+ if '...' in msg:
+ self.buffer += msg
+ elif self.buffer:
+ self.buffer += msg
+ self.logger.log(logging.INFO, self.buffer)
+ self.buffer = ""
+ else:
+ self.logger.log(logging.INFO, msg)
+
+ def flush(self):
+ for handler in self.logger.handlers:
+ handler.flush()
+
+class OETestResult(_TestResult):
+ def __init__(self, tc, *args, **kwargs):
+ super(OETestResult, self).__init__(*args, **kwargs)
+
+ self.successes = []
+ self.starttime = {}
+ self.endtime = {}
+ self.progressinfo = {}
+ self.extraresults = {}
+
+ # Inject into tc so that TestDepends decorator can see results
+ tc.results = self
+
+ self.tc = tc
+
+ # stdout and stderr for each test case
+ self.logged_output = {}
+
+ def startTest(self, test):
+ # May have been set by concurrencytest
+ if test.id() not in self.starttime:
+ self.starttime[test.id()] = time.time()
+ super(OETestResult, self).startTest(test)
+
+ def stopTest(self, test):
+ self.endtime[test.id()] = time.time()
+ if self.buffer:
+ self.logged_output[test.id()] = (
+ sys.stdout.getvalue(), sys.stderr.getvalue())
+ super(OETestResult, self).stopTest(test)
+ if test.id() in self.progressinfo:
+ self.tc.logger.info(self.progressinfo[test.id()])
+
+ # Print the errors/failures early to aid/speed debugging; it's a pain
+ # to wait until selftest finishes to see them.
+ for t in ['failures', 'errors', 'skipped', 'expectedFailures']:
+ for (scase, msg) in getattr(self, t):
+ if test.id() == scase.id():
+ self.tc.logger.info(str(msg))
+ break
+
+ def logSummary(self, component, context_msg=''):
+ elapsed_time = self.tc._run_end_time - self.tc._run_start_time
+ self.tc.logger.info("SUMMARY:")
+ self.tc.logger.info("%s (%s) - Ran %d test%s in %.3fs" % (component,
+ context_msg, self.testsRun, self.testsRun != 1 and "s" or "",
+ elapsed_time))
+
+ if self.wasSuccessful():
+ msg = "%s - OK - All required tests passed" % component
+ else:
+ msg = "%s - FAIL - Required tests failed" % component
+ msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
+ self.tc.logger.info(msg)
+
+ def _getTestResultDetails(self, case):
+ result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
+ 'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED',
+ 'unexpectedSuccesses' : 'PASSED'}
+
+ for rtype in result_types:
+ found = False
+ for resultclass in getattr(self, rtype):
+ # unexpectedSuccesses are just lists, not lists of tuples
+ if isinstance(resultclass, tuple):
+ scase, msg = resultclass
+ else:
+ scase, msg = resultclass, None
+ if case.id() == scase.id():
+ found = True
+ break
+ scase_str = str(scase.id())
+
+ # When a failure occurs at module or class level, the class name is
+ # passed as a string, so check whether it matches
+ m = re.search(r"^setUpModule \((?P<module_name>.*)\).*$", scase_str)
+ if m:
+ if case.__class__.__module__ == m.group('module_name'):
+ found = True
+ break
+
+ m = re.search(r"^setUpClass \((?P<class_name>.*)\).*$", scase_str)
+ if m:
+ class_name = "%s.%s" % (case.__class__.__module__,
+ case.__class__.__name__)
+
+ if class_name == m.group('class_name'):
+ found = True
+ break
+
+ if found:
+ return result_types[rtype], msg
+
+ return 'UNKNOWN', None
+
+ def extractExtraResults(self, test, details = None):
+ extraresults = None
+ if details is not None and "extraresults" in details:
+ extraresults = details.get("extraresults", {})
+ elif hasattr(test, "extraresults"):
+ extraresults = test.extraresults
+
+ if extraresults is not None:
+ for k, v in extraresults.items():
+ # handle updating already existing entries (e.g. ptestresults.sections)
+ if k in self.extraresults:
+ self.extraresults[k].update(v)
+ else:
+ self.extraresults[k] = v
+
+ def addError(self, test, *args, details = None):
+ self.extractExtraResults(test, details = details)
+ return super(OETestResult, self).addError(test, *args)
+
+ def addFailure(self, test, *args, details = None):
+ self.extractExtraResults(test, details = details)
+ return super(OETestResult, self).addFailure(test, *args)
+
+ def addSuccess(self, test, details = None):
+ # Added so we can keep track of successes too
+ self.successes.append((test, None))
+ self.extractExtraResults(test, details = details)
+ return super(OETestResult, self).addSuccess(test)
+
+ def addExpectedFailure(self, test, *args, details = None):
+ self.extractExtraResults(test, details = details)
+ return super(OETestResult, self).addExpectedFailure(test, *args)
+
+ def addUnexpectedSuccess(self, test, details = None):
+ self.extractExtraResults(test, details = details)
+ return super(OETestResult, self).addUnexpectedSuccess(test)
+
+ def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
+ dump_streams=False):
+ self.tc.logger.info("RESULTS:")
+
+ result = self.extraresults
+ logs = {}
+ if hasattr(self.tc, "extraresults"):
+ result.update(self.tc.extraresults)
+
+ for case_name in self.tc._registry['cases']:
+ case = self.tc._registry['cases'][case_name]
+
+ (status, log) = self._getTestResultDetails(case)
+
+ t = ""
+ if case.id() in self.starttime and case.id() in self.endtime:
+ t = " (" + "{0:.2f}".format(self.endtime[case.id()] - self.starttime[case.id()]) + "s)"
+
+ if status not in logs:
+ logs[status] = []
+ logs[status].append("RESULTS - %s: %s%s" % (case.id(), status, t))
+ report = {'status': status}
+ if log:
+ report['log'] = log
+ if dump_streams and case.id() in self.logged_output:
+ (stdout, stderr) = self.logged_output[case.id()]
+ report['stdout'] = stdout
+ report['stderr'] = stderr
+ result[case.id()] = report
+
+ for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
+ if i not in logs:
+ continue
+ for l in logs[i]:
+ self.tc.logger.info(l)
+
+ if json_file_dir:
+ tresultjsonhelper = OETestResultJSONHelper()
+ tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)
+
+ def wasSuccessful(self):
+ # Override as unexpected successes aren't failures for us
+ return (len(self.failures) == len(self.errors) == 0)
+
+class OEListTestsResult(object):
+ def wasSuccessful(self):
+ return True
+
+class OETestRunner(_TestRunner):
+ streamLoggerClass = OEStreamLogger
+
+ def __init__(self, tc, *args, **kwargs):
+ kwargs['stream'] = self.streamLoggerClass(tc.logger)
+ super(OETestRunner, self).__init__(*args, **kwargs)
+ self.tc = tc
+ self.resultclass = OETestResult
+
+ def _makeResult(self):
+ return self.resultclass(self.tc, self.stream, self.descriptions,
+ self.verbosity)
+
+ def _walk_suite(self, suite, func):
+ for obj in suite:
+ if isinstance(obj, unittest.suite.TestSuite):
+ if len(obj._tests):
+ self._walk_suite(obj, func)
+ elif isinstance(obj, unittest.case.TestCase):
+ func(self.tc.logger, obj)
+ self._walked_cases = self._walked_cases + 1
+
+ def _list_tests_name(self, suite):
+ self._walked_cases = 0
+
+ def _list_cases(logger, case):
+ oetags = []
+ if hasattr(case, '__oeqa_testtags'):
+ oetags = getattr(case, '__oeqa_testtags')
+ if oetags:
+ logger.info("%s (%s)" % (case.id(), ",".join(oetags)))
+ else:
+ logger.info("%s" % (case.id()))
+
+ self.tc.logger.info("Listing all available tests:")
+ self.tc.logger.info("test (tags)")
+ self.tc.logger.info("-" * 80)
+ self._walk_suite(suite, _list_cases)
+ self.tc.logger.info("-" * 80)
+ self.tc.logger.info("Total found:\t%s" % self._walked_cases)
+
+ def _list_tests_class(self, suite):
+ self._walked_cases = 0
+
+ curr = {}
+ def _list_classes(logger, case):
+ if not 'module' in curr or curr['module'] != case.__module__:
+ curr['module'] = case.__module__
+ logger.info(curr['module'])
+
+ if not 'class' in curr or curr['class'] != \
+ case.__class__.__name__:
+ curr['class'] = case.__class__.__name__
+ logger.info(" -- %s" % curr['class'])
+
+ logger.info(" -- -- %s" % case._testMethodName)
+
+ self.tc.logger.info("Listing all available test classes:")
+ self._walk_suite(suite, _list_classes)
+
+ def _list_tests_module(self, suite):
+ self._walked_cases = 0
+
+ listed = []
+ def _list_modules(logger, case):
+ if not case.__module__ in listed:
+ if case.__module__.startswith('_'):
+ logger.info("%s (hidden)" % case.__module__)
+ else:
+ logger.info(case.__module__)
+ listed.append(case.__module__)
+
+ self.tc.logger.info("Listing all available test modules:")
+ self._walk_suite(suite, _list_modules)
+
+ def list_tests(self, suite, display_type):
+ if display_type == 'name':
+ self._list_tests_name(suite)
+ elif display_type == 'class':
+ self._list_tests_class(suite)
+ elif display_type == 'module':
+ self._list_tests_module(suite)
+
+ return OEListTestsResult()
+
+class OETestResultJSONHelper(object):
+
+ testresult_filename = 'testresults.json'
+
+ def _get_existing_testresults_if_available(self, write_dir):
+ testresults = {}
+ file = os.path.join(write_dir, self.testresult_filename)
+ if os.path.exists(file):
+ with open(file, "r") as f:
+ testresults = json.load(f)
+ return testresults
+
+ def _write_file(self, write_dir, file_name, file_content):
+ file_path = os.path.join(write_dir, file_name)
+ with open(file_path, 'w') as the_file:
+ the_file.write(file_content)
+
+ def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
+ bb.utils.mkdirhier(write_dir)
+ lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
+ test_results = self._get_existing_testresults_if_available(write_dir)
+ test_results[result_id] = {'configuration': configuration, 'result': test_result}
+ json_testresults = json.dumps(test_results, sort_keys=True, indent=4)
+ self._write_file(write_dir, self.testresult_filename, json_testresults)
+ bb.utils.unlockfile(lf)
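An illustrative sketch (identifiers hypothetical) of the testresults.json layout that dump_testresult_file writes: one entry per result_id, each holding the run configuration and one report per test case id:

    # testresults.json
    # {
    #     "runtime_core-image-minimal_qemux86-64_20160101": {
    #         "configuration": {"MACHINE": "qemux86-64"},
    #         "result": {
    #             "ping.PingTest.test_ping": {"status": "PASSED"}
    #         }
    #     }
    # }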
diff --git a/meta/lib/oeqa/core/target/__init__.py b/meta/lib/oeqa/core/target/__init__.py
new file mode 100644
index 0000000000..1382aa9b52
--- /dev/null
+++ b/meta/lib/oeqa/core/target/__init__.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from abc import abstractmethod
+
+class OETarget(object):
+
+ def __init__(self, logger, *args, **kwargs):
+ self.logger = logger
+
+ @abstractmethod
+ def start(self):
+ pass
+
+ @abstractmethod
+ def stop(self):
+ pass
+
+ @abstractmethod
+ def run(self, cmd, timeout=None):
+ pass
+
+ @abstractmethod
+ def copyTo(self, localSrc, remoteDst):
+ pass
+
+ @abstractmethod
+ def copyFrom(self, remoteSrc, localDst):
+ pass
+
+ @abstractmethod
+ def copyDirTo(self, localSrc, remoteDst):
+ pass
diff --git a/meta/lib/oeqa/core/target/qemu.py b/meta/lib/oeqa/core/target/qemu.py
new file mode 100644
index 0000000000..758703c0d1
--- /dev/null
+++ b/meta/lib/oeqa/core/target/qemu.py
@@ -0,0 +1,64 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
+import signal
+import time
+
+from .ssh import OESSHTarget
+from oeqa.utils.qemurunner import QemuRunner
+
+supported_fstypes = ['ext3', 'ext4', 'cpio.gz', 'wic']
+
+class OEQemuTarget(OESSHTarget):
+ def __init__(self, logger, server_ip, timeout=300, user='root',
+ port=None, machine='', rootfs='', kernel='', kvm=False, slirp=False,
+ dump_dir='', dump_host_cmds='', display='', bootlog='',
+ tmpdir='', dir_image='', boottime=60, **kwargs):
+
+ super(OEQemuTarget, self).__init__(logger, None, server_ip, timeout,
+ user, port)
+
+ self.server_ip = server_ip
+ self.server_port = 0
+ self.machine = machine
+ self.rootfs = rootfs
+ self.kernel = kernel
+ self.kvm = kvm
+ self.use_slirp = slirp
+
+ self.runner = QemuRunner(machine=machine, rootfs=rootfs, tmpdir=tmpdir,
+ deploy_dir_image=dir_image, display=display,
+ logfile=bootlog, boottime=boottime,
+ use_kvm=kvm, use_slirp=slirp, dump_dir=dump_dir,
+ dump_host_cmds=dump_host_cmds, logger=logger)
+
+ def start(self, params=None, extra_bootparams=None, runqemuparams=''):
+ if self.use_slirp and not self.server_ip:
+ self.logger.error("Could not start qemu with slirp without server ip - provide 'TEST_SERVER_IP'")
+ raise RuntimeError("FAILED to start qemu - check the task log and the boot log")
+ if self.runner.start(params, extra_bootparams=extra_bootparams, runqemuparams=runqemuparams):
+ self.ip = self.runner.ip
+ if self.use_slirp:
+ target_ip_port = self.runner.ip.split(':')
+ if len(target_ip_port) == 2:
+ target_ip = target_ip_port[0]
+ port = target_ip_port[1]
+ self.ip = target_ip
+ self.ssh = self.ssh + ['-p', port]
+ self.scp = self.scp + ['-P', port]
+ else:
+ self.logger.error("Could not get host machine port to connect qemu with slirp, ssh will not be "
+ "able to connect to qemu with slirp")
+ if self.runner.server_ip:
+ self.server_ip = self.runner.server_ip
+ else:
+ self.stop()
+ raise RuntimeError("FAILED to start qemu - check the task log and the boot log")
+
+ def stop(self):
+ self.runner.stop()
diff --git a/meta/lib/oeqa/core/target/ssh.py b/meta/lib/oeqa/core/target/ssh.py
new file mode 100644
index 0000000000..63fc9468b3
--- /dev/null
+++ b/meta/lib/oeqa/core/target/ssh.py
@@ -0,0 +1,271 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import time
+import select
+import logging
+import subprocess
+import codecs
+
+from . import OETarget
+
+class OESSHTarget(OETarget):
+ def __init__(self, logger, ip, server_ip, timeout=300, user='root',
+ port=None, server_port=0, **kwargs):
+ if not logger:
+ logger = logging.getLogger('target')
+ logger.setLevel(logging.INFO)
+ filePath = os.path.join(os.getcwd(), 'remoteTarget.log')
+ fileHandler = logging.FileHandler(filePath, 'w', 'utf-8')
+ formatter = logging.Formatter(
+ '%(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
+ '%H:%M:%S')
+ fileHandler.setFormatter(formatter)
+ logger.addHandler(fileHandler)
+
+ super(OESSHTarget, self).__init__(logger)
+ self.ip = ip
+ self.server_ip = server_ip
+ self.server_port = server_port
+ self.timeout = timeout
+ self.user = user
+ ssh_options = [
+ '-o', 'UserKnownHostsFile=/dev/null',
+ '-o', 'StrictHostKeyChecking=no',
+ '-o', 'LogLevel=ERROR'
+ ]
+ self.ssh = ['ssh', '-l', self.user ] + ssh_options
+ self.scp = ['scp'] + ssh_options
+ if port:
+ self.ssh = self.ssh + [ '-p', port ]
+ self.scp = self.scp + [ '-P', port ]
+
+ def start(self, **kwargs):
+ pass
+
+ def stop(self, **kwargs):
+ pass
+
+ def _run(self, command, timeout=None, ignore_status=True):
+ """
+ Runs command in target using SSHProcess.
+ """
+ self.logger.debug("[Running]$ %s" % " ".join(command))
+
+ starttime = time.time()
+ status, output = SSHCall(command, self.logger, timeout)
+ self.logger.debug("[Command returned '%d' after %.2f seconds]"
+ "" % (status, time.time() - starttime))
+
+ if status and not ignore_status:
+ raise AssertionError("Command '%s' returned non-zero exit "
+ "status %d:\n%s" % (command, status, output))
+
+ return (status, output)
+
+ def run(self, command, timeout=None):
+ """
+ Runs command in target.
+
+ command: Command to run on target.
+ timeout: <value>: Kill command after <value> seconds.
+ None: Kill command after the default timeout (self.timeout).
+ 0: No timeout; run until the command returns.
+ """
+ targetCmd = 'export PATH=/usr/sbin:/sbin:/usr/bin:/bin; %s' % command
+ sshCmd = self.ssh + [self.ip, targetCmd]
+
+ if timeout:
+ processTimeout = timeout
+ elif timeout==0:
+ processTimeout = None
+ else:
+ processTimeout = self.timeout
+
+ status, output = self._run(sshCmd, processTimeout, True)
+ self.logger.debug('Command: %s\nOutput: %s\n' % (command, output))
+ return (status, output)
+
+ def copyTo(self, localSrc, remoteDst):
+ """
+ Copy file to target.
+
+ If local file is symlink, recreate symlink in target.
+ """
+ if os.path.islink(localSrc):
+ link = os.readlink(localSrc)
+ dstDir, dstBase = os.path.split(remoteDst)
+ sshCmd = 'cd %s; ln -s %s %s' % (dstDir, link, dstBase)
+ return self.run(sshCmd)
+
+ else:
+ remotePath = '%s@%s:%s' % (self.user, self.ip, remoteDst)
+ scpCmd = self.scp + [localSrc, remotePath]
+ return self._run(scpCmd, ignore_status=False)
+
+ def copyFrom(self, remoteSrc, localDst):
+ """
+ Copy file from target.
+ """
+ remotePath = '%s@%s:%s' % (self.user, self.ip, remoteSrc)
+ scpCmd = self.scp + [remotePath, localDst]
+ return self._run(scpCmd, ignore_status=False)
+
+ def copyDirTo(self, localSrc, remoteDst):
+ """
+ Recursively copy the localSrc directory to remoteDst on the target.
+ """
+
+ for root, dirs, files in os.walk(localSrc):
+ # Create directories in the target as needed
+ for d in dirs:
+ tmpDir = os.path.join(root, d).replace(localSrc, "")
+ newDir = os.path.join(remoteDst, tmpDir.lstrip("/"))
+ cmd = "mkdir -p %s" % newDir
+ self.run(cmd)
+
+ # Copy files into the target
+ for f in files:
+ tmpFile = os.path.join(root, f).replace(localSrc, "")
+ dstFile = os.path.join(remoteDst, tmpFile.lstrip("/"))
+ srcFile = os.path.join(root, f)
+ self.copyTo(srcFile, dstFile)
+
+ def deleteFiles(self, remotePath, files):
+ """
+ Deletes files in target's remotePath.
+ """
+
+ cmd = "rm"
+ if not isinstance(files, list):
+ files = [files]
+
+ for f in files:
+ cmd = "%s %s" % (cmd, os.path.join(remotePath, f))
+
+ self.run(cmd)
+
+
+ def deleteDir(self, remotePath):
+ """
+ Deletes target's remotePath directory.
+ """
+
+ cmd = "rmdir %s" % remotePath
+ self.run(cmd)
+
+
+ def deleteDirStructure(self, localPath, remotePath):
+ """
+ Recursively delete the localPath directory structure in target's remotePath.
+
+ This function is very useful for deleting a package that is
+ installed on the DUT when the host running the test has that
+ package extracted in a tmp directory.
+
+ Example:
+ pwd: /home/user/tmp
+ tree: .
+ └── work
+ ├── dir1
+ │   └── file1
+ └── dir2
+
+ localPath = "/home/user/tmp" and remotePath = "/home/user"
+
+ With the above variables this function will try to delete the
+ directory in the DUT in this order:
+ /home/user/work/dir1/file1
+ /home/user/work/dir1 (if dir is empty)
+ /home/user/work/dir2 (if dir is empty)
+ /home/user/work (if dir is empty)
+ """
+
+ for root, dirs, files in os.walk(localPath, topdown=False):
+ # Delete files first
+ tmpDir = os.path.join(root).replace(localPath, "")
+ remoteDir = os.path.join(remotePath, tmpDir.lstrip("/"))
+ self.deleteFiles(remoteDir, files)
+
+ # Remove dirs if empty
+ for d in dirs:
+ tmpDir = os.path.join(root, d).replace(localPath, "")
+ remoteDir = os.path.join(remotePath, tmpDir.lstrip("/"))
+ self.deleteDir(remoteDir)
+
+def SSHCall(command, logger, timeout=None, **opts):
+
+ def run():
+ nonlocal output
+ nonlocal process
+ starttime = time.time()
+ process = subprocess.Popen(command, **options)
+ if timeout:
+ endtime = starttime + timeout
+ eof = False
+ while time.time() < endtime and not eof:
+ logger.debug('time: %s, endtime: %s' % (time.time(), endtime))
+ try:
+ if select.select([process.stdout], [], [], 5)[0] != []:
+ reader = codecs.getreader('utf-8')(process.stdout, 'ignore')
+ data = reader.read(1024, 4096)
+ if not data:
+ process.stdout.close()
+ eof = True
+ else:
+ output += data
+ logger.debug('Partial data from SSH call: %s' % data)
+ endtime = time.time() + timeout
+ except InterruptedError:
+ continue
+
+ # process hasn't returned yet
+ if not eof:
+ process.terminate()
+ time.sleep(5)
+ try:
+ process.kill()
+ except OSError:
+ pass
+ endtime = time.time() - starttime
+ lastline = ("\nProcess killed - no output for %d seconds. Total"
+ " running time: %d seconds." % (timeout, endtime))
+ logger.debug('Received data from SSH call %s ' % lastline)
+ output += lastline
+
+ else:
+ output = process.communicate()[0].decode('utf-8', errors='ignore')
+ logger.debug('Data from SSH call: %s' % output.rstrip())
+
+ options = {
+ "stdout": subprocess.PIPE,
+ "stderr": subprocess.STDOUT,
+ "stdin": None,
+ "shell": False,
+ "bufsize": -1,
+ "preexec_fn": os.setsid,
+ }
+ options.update(opts)
+ output = ''
+ process = None
+
+ # Unset DISPLAY which means we won't trigger SSH_ASKPASS
+ env = os.environ.copy()
+ if "DISPLAY" in env:
+ del env['DISPLAY']
+ options['env'] = env
+
+ try:
+ run()
+ except:
+ # Need to guard against a SystemExit or other exception occurring
+ # whilst running and ensure we don't leave a process behind.
+ if process.poll() is None:
+ process.kill()
+ logger.debug('Something went wrong, killing SSH process')
+ raise
+ return (process.wait(), output.rstrip())
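A usage sketch (addresses hypothetical) tying OESSHTarget and SSHCall together; run() returns the remote command's exit status and combined stdout/stderr:

    import logging
    from oeqa.core.target.ssh import OESSHTarget

    logger = logging.getLogger('target')
    target = OESSHTarget(logger, ip='192.168.7.2', server_ip='192.168.7.1')

    status, output = target.run('uname -a', timeout=60)
    if status == 0:
        target.copyTo('/tmp/report.txt', '/home/root/report.txt')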
diff --git a/meta/lib/oeqa/core/tests/__init__.py b/meta/lib/oeqa/core/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/__init__.py
diff --git a/meta/lib/oeqa/core/tests/cases/data.py b/meta/lib/oeqa/core/tests/cases/data.py
new file mode 100644
index 0000000000..61f88547f7
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/cases/data.py
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator import OETestTag
+from oeqa.core.decorator.data import OETestDataDepends
+
+class DataTest(OETestCase):
+ data_vars = ['IMAGE', 'ARCH']
+
+ @OETestDataDepends(['MACHINE',])
+ @OETestTag('dataTestOk')
+ def testDataOk(self):
+ self.assertEqual(self.td.get('IMAGE'), 'core-image-minimal')
+ self.assertEqual(self.td.get('ARCH'), 'x86')
+ self.assertEqual(self.td.get('MACHINE'), 'qemuarm')
+
+ @OETestTag('dataTestFail')
+ def testDataFail(self):
+ pass
diff --git a/meta/lib/oeqa/core/tests/cases/depends.py b/meta/lib/oeqa/core/tests/cases/depends.py
new file mode 100644
index 0000000000..46e7db900d
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/cases/depends.py
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator.depends import OETestDepends
+
+class DependsTest(OETestCase):
+
+ def testDependsFirst(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsFirst'])
+ def testDependsSecond(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsSecond'])
+ def testDependsThird(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsSecond'])
+ def testDependsFourth(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsThird', 'testDependsFourth'])
+ def testDependsFifth(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsCircular3'])
+ def testDependsCircular1(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsCircular1'])
+ def testDependsCircular2(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsCircular2'])
+ def testDependsCircular3(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/meta/lib/oeqa/core/tests/cases/loader/valid/another.py b/meta/lib/oeqa/core/tests/cases/loader/valid/another.py
new file mode 100644
index 0000000000..bedc20c8a6
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/cases/loader/valid/another.py
@@ -0,0 +1,12 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.core.case import OETestCase
+
+class AnotherTest(OETestCase):
+
+ def testAnother(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/meta/lib/oeqa/core/tests/cases/oetag.py b/meta/lib/oeqa/core/tests/cases/oetag.py
new file mode 100644
index 0000000000..52f97dfda6
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/cases/oetag.py
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator import OETestTag
+
+class TagTest(OETestCase):
+ @OETestTag('goodTag')
+ def testTagGood(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestTag('otherTag')
+ def testTagOther(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestTag('otherTag', 'multiTag')
+ def testTagOtherMulti(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ def testTagNone(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+@OETestTag('classTag')
+class TagClassTest(OETestCase):
+ @OETestTag('otherTag')
+ def testTagOther(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestTag('otherTag', 'multiTag')
+ def testTagOtherMulti(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ def testTagNone(self):
+ self.assertTrue(True, msg='How is this possible?')
+
diff --git a/meta/lib/oeqa/core/tests/cases/timeout.py b/meta/lib/oeqa/core/tests/cases/timeout.py
new file mode 100644
index 0000000000..5dfecc7b7c
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/cases/timeout.py
@@ -0,0 +1,21 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from time import sleep
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator.oetimeout import OETimeout
+
+class TimeoutTest(OETestCase):
+
+ @OETimeout(1)
+ def testTimeoutPass(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETimeout(1)
+ def testTimeoutFail(self):
+ sleep(2)
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/meta/lib/oeqa/core/tests/common.py b/meta/lib/oeqa/core/tests/common.py
new file mode 100644
index 0000000000..88cc758ad3
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/common.py
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import sys
+import os
+
+import unittest
+import logging
+
+logger = logging.getLogger("oeqa")
+logger.setLevel(logging.INFO)
+consoleHandler = logging.StreamHandler()
+formatter = logging.Formatter('OEQATest: %(message)s')
+consoleHandler.setFormatter(formatter)
+logger.addHandler(consoleHandler)
+
+def setup_sys_path():
+ directory = os.path.dirname(os.path.abspath(__file__))
+ oeqa_lib = os.path.realpath(os.path.join(directory, '../../../'))
+ if not oeqa_lib in sys.path:
+ sys.path.insert(0, oeqa_lib)
+
+class TestBase(unittest.TestCase):
+ def setUp(self):
+ self.logger = logger
+ directory = os.path.dirname(os.path.abspath(__file__))
+ self.cases_path = os.path.join(directory, 'cases')
+
+ def _testLoader(self, d={}, modules=[], tests=[], **kwargs):
+ from oeqa.core.context import OETestContext
+ tc = OETestContext(d, self.logger)
+ tc.loadTests(self.cases_path, modules=modules, tests=tests,
+ **kwargs)
+ return tc
diff --git a/meta/lib/oeqa/core/tests/test_data.py b/meta/lib/oeqa/core/tests/test_data.py
new file mode 100755
index 0000000000..ac74098b78
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/test_data.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import unittest
+import logging
+import os
+
+from common import setup_sys_path, TestBase
+setup_sys_path()
+
+from oeqa.core.exception import OEQAMissingVariable
+from oeqa.core.utils.test import getCaseMethod, getSuiteCasesNames
+
+class TestData(TestBase):
+ modules = ['data']
+
+ def test_data_fail_missing_variable(self):
+ expectedException = "oeqa.core.exception.OEQAMissingVariable"
+
+ tc = self._testLoader(modules=self.modules)
+ results = tc.runTests()
+ self.assertFalse(results.wasSuccessful())
+ expect = False
+ for test, data in results.errors:
+ if expectedException in data:
+ expect = True
+ break
+
+ self.assertTrue(expect)
+
+ def test_data_fail_wrong_variable(self):
+ expectedError = 'AssertionError'
+ d = {'IMAGE' : 'core-image-sato', 'ARCH' : 'arm'}
+
+ tc = self._testLoader(d=d, modules=self.modules)
+ results = tc.runTests()
+ self.assertFalse(results.wasSuccessful())
+ expect = False
+ for test, data in results.failures:
+ if expectedError in data:
+ expect = True
+ break
+
+ self.assertTrue(expect)
+
+ def test_data_ok(self):
+ d = {'IMAGE' : 'core-image-minimal', 'ARCH' : 'x86', 'MACHINE' : 'qemuarm'}
+
+ tc = self._testLoader(d=d, modules=self.modules)
+ self.assertEqual(True, tc.runTests().wasSuccessful())
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/meta/lib/oeqa/core/tests/test_decorators.py b/meta/lib/oeqa/core/tests/test_decorators.py
new file mode 100755
index 0000000000..b798bf7d33
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/test_decorators.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import signal
+import unittest
+
+from common import setup_sys_path, TestBase
+setup_sys_path()
+
+from oeqa.core.exception import OEQADependency
+from oeqa.core.utils.test import getCaseMethod, getSuiteCasesNames, getSuiteCasesIDs
+
+class TestTagDecorator(TestBase):
+ def _runTest(self, modules, filterfn, expect):
+ tc = self._testLoader(modules = modules, tags_filter = filterfn)
+ test_loaded = set(getSuiteCasesIDs(tc.suites))
+ self.assertEqual(expect, test_loaded)
+
+ def test_oetag(self):
+ # get all cases without any filtering
+ self._runTest(['oetag'], None, {
+ 'oetag.TagTest.testTagGood',
+ 'oetag.TagTest.testTagOther',
+ 'oetag.TagTest.testTagOtherMulti',
+ 'oetag.TagTest.testTagNone',
+ 'oetag.TagClassTest.testTagOther',
+ 'oetag.TagClassTest.testTagOtherMulti',
+ 'oetag.TagClassTest.testTagNone',
+ })
+
+ # exclude any case with tags
+ self._runTest(['oetag'], lambda tags: tags, {
+ 'oetag.TagTest.testTagNone',
+ })
+
+ # exclude any case with otherTag
+ self._runTest(['oetag'], lambda tags: "otherTag" in tags, {
+ 'oetag.TagTest.testTagGood',
+ 'oetag.TagTest.testTagNone',
+ 'oetag.TagClassTest.testTagNone',
+ })
+
+ # exclude any case with classTag
+ self._runTest(['oetag'], lambda tags: "classTag" in tags, {
+ 'oetag.TagTest.testTagGood',
+ 'oetag.TagTest.testTagOther',
+ 'oetag.TagTest.testTagOtherMulti',
+ 'oetag.TagTest.testTagNone',
+ })
+
+ # include any case with classTag
+ self._runTest(['oetag'], lambda tags: "classTag" not in tags, {
+ 'oetag.TagClassTest.testTagOther',
+ 'oetag.TagClassTest.testTagOtherMulti',
+ 'oetag.TagClassTest.testTagNone',
+ })
+
+ # include any case with classTag or no tags
+ self._runTest(['oetag'], lambda tags: tags and "classTag" not in tags, {
+ 'oetag.TagTest.testTagNone',
+ 'oetag.TagClassTest.testTagOther',
+ 'oetag.TagClassTest.testTagOtherMulti',
+ 'oetag.TagClassTest.testTagNone',
+ })
+
+class TestDependsDecorator(TestBase):
+ modules = ['depends']
+
+ def test_depends_order(self):
+ tests = ['depends.DependsTest.testDependsFirst',
+ 'depends.DependsTest.testDependsSecond',
+ 'depends.DependsTest.testDependsThird',
+ 'depends.DependsTest.testDependsFourth',
+ 'depends.DependsTest.testDependsFifth']
+ tests2 = list(tests)
+ tests2[2], tests2[3] = tests[3], tests[2]
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ test_loaded = getSuiteCasesIDs(tc.suites)
+ result = test_loaded in (tests, tests2)
+ msg = 'Failed to order tests using OETestDepends decorator.\nTest order:'\
+ ' %s.\nExpected: %s\nOr: %s' % (test_loaded, tests, tests2)
+ self.assertTrue(result, msg=msg)
+
+ def test_depends_fail_missing_dependency(self):
+ expect = "TestCase depends.DependsTest.testDependsSecond depends on "\
+ "depends.DependsTest.testDependsFirst and isn't available"
+ tests = ['depends.DependsTest.testDependsSecond']
+ try:
+ # Must throw OEQADependency because 'testDependsFirst' is missing
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ self.fail('Expected OEQADependency exception')
+ except OEQADependency as e:
+ result = expect in str(e)
+ msg = 'Expected OEQADependency exception missing testDependsFirst test'
+ self.assertTrue(result, msg=msg)
+
+ def test_depends_fail_circular_dependency(self):
+ expect = 'have a circular dependency'
+ tests = ['depends.DependsTest.testDependsCircular1',
+ 'depends.DependsTest.testDependsCircular2',
+ 'depends.DependsTest.testDependsCircular3']
+ try:
+ # Must throw OEQADependency because of a circular dependency
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ self.fail('Expected OEQADependency exception')
+ except OEQADependency as e:
+ result = expect in str(e)
+ msg = 'Expected OEQADependency exception having a circular dependency'
+ self.assertTrue(result, msg=msg)
+
+class TestTimeoutDecorator(TestBase):
+ modules = ['timeout']
+
+ def test_timeout(self):
+ tests = ['timeout.TimeoutTest.testTimeoutPass']
+ msg = 'Failed to run test using OETestTimeout'
+ alarm_signal = signal.getsignal(signal.SIGALRM)
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ self.assertTrue(tc.runTests().wasSuccessful(), msg=msg)
+ msg = "OETestTimeout didn't restore SIGALRM"
+ self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)
+
+ def test_timeout_fail(self):
+ tests = ['timeout.TimeoutTest.testTimeoutFail']
+ msg = "OETestTimeout test didn't timeout as expected"
+ alarm_signal = signal.getsignal(signal.SIGALRM)
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ self.assertFalse(tc.runTests().wasSuccessful(), msg=msg)
+ msg = "OETestTimeout didn't restore SIGALRM"
+ self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/meta/lib/oeqa/core/tests/test_loader.py b/meta/lib/oeqa/core/tests/test_loader.py
new file mode 100755
index 0000000000..cb38ac845e
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/test_loader.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import unittest
+
+from common import setup_sys_path, TestBase
+setup_sys_path()
+
+from oeqa.core.exception import OEQADependency
+from oeqa.core.utils.test import getSuiteModules, getSuiteCasesIDs
+
+class TestLoader(TestBase):
+ @unittest.skip("invalid directory is missing oetag.py")
+ def test_fail_duplicated_module(self):
+ cases_path = self.cases_path
+ invalid_path = os.path.join(cases_path, 'loader', 'invalid')
+ self.cases_path = [self.cases_path, invalid_path]
+ expect = 'Duplicated oetag module found in'
+ msg = 'Expected ImportError exception for having duplicated module'
+ try:
+ # Must throw ImportError because of a duplicated module
+ tc = self._testLoader()
+ self.fail(msg)
+ except ImportError as e:
+ result = expect in str(e)
+ self.assertTrue(result, msg=msg)
+ finally:
+ self.cases_path = cases_path
+
+ def test_filter_modules(self):
+ expected_modules = {'oetag'}
+ tc = self._testLoader(modules=expected_modules)
+ modules = getSuiteModules(tc.suites)
+ msg = 'Expected just %s modules' % ', '.join(expected_modules)
+ self.assertEqual(modules, expected_modules, msg=msg)
+
+ def test_filter_cases(self):
+ modules = ['oetag', 'data']
+ expected_cases = {'data.DataTest.testDataOk',
+ 'oetag.TagTest.testTagGood'}
+ tc = self._testLoader(modules=modules, tests=expected_cases)
+ cases = set(getSuiteCasesIDs(tc.suites))
+ msg = 'Expected just %s cases' % ', '.join(expected_cases)
+ self.assertEqual(cases, expected_cases, msg=msg)
+
+ def test_import_from_paths(self):
+ cases_path = self.cases_path
+ cases2_path = os.path.join(cases_path, 'loader', 'valid')
+ expected_modules = {'another'}
+ self.cases_path = [self.cases_path, cases2_path]
+ tc = self._testLoader(modules=expected_modules)
+ modules = getSuiteModules(tc.suites)
+ self.cases_path = cases_path
+ msg = 'Expected modules from two different paths'
+ self.assertEqual(modules, expected_modules, msg=msg)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/meta/lib/oeqa/core/tests/test_runner.py b/meta/lib/oeqa/core/tests/test_runner.py
new file mode 100755
index 0000000000..205464cfae
--- /dev/null
+++ b/meta/lib/oeqa/core/tests/test_runner.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import unittest
+import logging
+import tempfile
+
+from common import setup_sys_path, TestBase
+setup_sys_path()
+
+from oeqa.core.runner import OEStreamLogger
+
+class TestRunner(TestBase):
+ def test_stream_logger(self):
+ fp = tempfile.TemporaryFile(mode='w+')
+
+ logging.basicConfig(format='%(message)s', stream=fp)
+ logger = logging.getLogger()
+ logger.setLevel(logging.INFO)
+
+ oeSL = OEStreamLogger(logger)
+
+ lines = ['init', 'bigline_' * 65535, 'morebigline_' * 65535 * 4, 'end']
+ for line in lines:
+ oeSL.write(line)
+
+ fp.seek(0)
+ fp_lines = fp.readlines()
+ for i, fp_line in enumerate(fp_lines):
+ fp_line = fp_line.strip()
+ self.assertEqual(lines[i], fp_line)
+
+ fp.close()
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/meta/lib/oeqa/core/utils/__init__.py b/meta/lib/oeqa/core/utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/meta/lib/oeqa/core/utils/__init__.py
diff --git a/meta/lib/oeqa/core/utils/concurrencytest.py b/meta/lib/oeqa/core/utils/concurrencytest.py
new file mode 100644
index 0000000000..71ec0df5fa
--- /dev/null
+++ b/meta/lib/oeqa/core/utils/concurrencytest.py
@@ -0,0 +1,354 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Modified for use in OE by Richard Purdie, 2018
+#
+# Modified by: Corey Goldberg, 2013
+# License: GPLv2+
+#
+# Original code from:
+# Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
+# Copyright (C) 2005-2011 Canonical Ltd
+# License: GPLv2+
+
+import os
+import sys
+import traceback
+import unittest
+import subprocess
+import testtools
+import threading
+import time
+import io
+import json
+import subunit
+
+from queue import Queue
+from itertools import cycle
+from subunit import ProtocolTestCase, TestProtocolClient
+from subunit.test_results import AutoTimingTestResultDecorator
+from testtools import ThreadsafeForwardingResult, iterate_tests
+from testtools.content import Content
+from testtools.content_type import ContentType
+from oeqa.utils.commands import get_test_layer
+
+import bb.utils
+import oe.path
+
+__all__ = [
+ 'ConcurrentTestSuite',
+ 'fork_for_tests',
+ 'partition_tests',
+]
+
+#
+# Patch the version from testtools to allow access to _test_start and allow
+# computation of timing information and threading progress
+#
+class BBThreadsafeForwardingResult(ThreadsafeForwardingResult):
+
+ def __init__(self, target, semaphore, threadnum, totalinprocess, totaltests):
+ super(BBThreadsafeForwardingResult, self).__init__(target, semaphore)
+ self.threadnum = threadnum
+ self.totalinprocess = totalinprocess
+ self.totaltests = totaltests
+
+ def _add_result_with_semaphore(self, method, test, *args, **kwargs):
+ self.semaphore.acquire()
+ try:
+ if self._test_start:
+ self.result.starttime[test.id()] = self._test_start.timestamp()
+ self.result.threadprogress[self.threadnum].append(test.id())
+ totalprogress = sum(len(x) for x in self.result.threadprogress.values())
+ self.result.progressinfo[test.id()] = "%s: %s/%s %s/%s (%ss) (%s)" % (
+ self.threadnum,
+ len(self.result.threadprogress[self.threadnum]),
+ self.totalinprocess,
+ totalprogress,
+ self.totaltests,
+ "{0:.2f}".format(time.time()-self._test_start.timestamp()),
+ test.id())
+ finally:
+ self.semaphore.release()
+ super(BBThreadsafeForwardingResult, self)._add_result_with_semaphore(method, test, *args, **kwargs)
+
+class ProxyTestResult:
+ # a very basic TestResult proxy, in order to modify add* calls
+ def __init__(self, target):
+ self.result = target
+
+ def _addResult(self, method, test, *args, exception = False, **kwargs):
+ return method(test, *args, **kwargs)
+
+ def addError(self, test, err = None, **kwargs):
+ self._addResult(self.result.addError, test, err, exception = True, **kwargs)
+
+ def addFailure(self, test, err = None, **kwargs):
+ self._addResult(self.result.addFailure, test, err, exception = True, **kwargs)
+
+ def addSuccess(self, test, **kwargs):
+ self._addResult(self.result.addSuccess, test, **kwargs)
+
+ def addExpectedFailure(self, test, err = None, **kwargs):
+ self._addResult(self.result.addExpectedFailure, test, err, exception = True, **kwargs)
+
+ def addUnexpectedSuccess(self, test, **kwargs):
+ self._addResult(self.result.addUnexpectedSuccess, test, **kwargs)
+
+ def __getattr__(self, attr):
+ return getattr(self.result, attr)
+
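+# Illustrative note on ProxyTestResult: every add* call above routes through
+# _addResult, while any attribute the proxy does not define itself
+# (e.g. wasSuccessful()) falls through __getattr__ to the wrapped result.
+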
+class ExtraResultsDecoderTestResult(ProxyTestResult):
+ def _addResult(self, method, test, *args, exception = False, **kwargs):
+ if "details" in kwargs and "extraresults" in kwargs["details"]:
+ if isinstance(kwargs["details"]["extraresults"], Content):
+ kwargs = kwargs.copy()
+ kwargs["details"] = kwargs["details"].copy()
+ extraresults = kwargs["details"]["extraresults"]
+ data = bytearray()
+ for b in extraresults.iter_bytes():
+ data += b
+ extraresults = json.loads(data.decode())
+ kwargs["details"]["extraresults"] = extraresults
+ return method(test, *args, **kwargs)
+
+class ExtraResultsEncoderTestResult(ProxyTestResult):
+ def _addResult(self, method, test, *args, exception = False, **kwargs):
+ if hasattr(test, "extraresults"):
+ extras = lambda : [json.dumps(test.extraresults).encode()]
+ kwargs = kwargs.copy()
+ if "details" not in kwargs:
+ kwargs["details"] = {}
+ else:
+ kwargs["details"] = kwargs["details"].copy()
+ kwargs["details"]["extraresults"] = Content(ContentType("application", "json", {'charset': 'utf8'}), extras)
+ # if using details, need to encode any exceptions into the details obj,
+ # testtools does not handle "err" and "details" together.
+ if "details" in kwargs and exception and (len(args) >= 1 and args[0] is not None):
+ kwargs["details"]["traceback"] = testtools.content.TracebackContent(args[0], test)
+ args = []
+ return method(test, *args, **kwargs)
+
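+# Example (sketch): a test case may set, e.g.,
+#   self.extraresults = {'ptestresult.foo': {'status': 'PASS'}}
+# ExtraResultsEncoderTestResult serialises that dictionary into a JSON
+# "details" Content object before it crosses the subunit stream, and
+# ExtraResultsDecoderTestResult turns it back into a plain dictionary on the
+# receiving side, so the final result object sees the original data.
+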
+#
+# We have to patch subunit since it doesn't understand how to handle addError
+# outside of a running test case. This can happen if setUpClass() fails
+# for a class of tests. This unfortunately has horrible internal knowledge.
+#
+def outSideTestaddError(self, offset, line):
+ """An 'error:' directive has been read."""
+ test_name = line[offset:-1].decode('utf8')
+ self.parser._current_test = subunit.RemotedTestCase(test_name)
+ self.parser.current_test_description = test_name
+ self.parser._state = self.parser._reading_error_details
+ self.parser._reading_error_details.set_simple()
+ self.parser.subunitLineReceived(line)
+
+subunit._OutSideTest.addError = outSideTestaddError
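+# With this patch applied, an "error:" directive received between test cases
+# (e.g. from a failing setUpClass) is wrapped in a RemotedTestCase instead of
+# crashing the parser.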
+
+
+#
+# A dummy structure to add to io.StringIO so that the .buffer object
+# is available and accepts writes. This allows unittest with buffer=True
+# to interact correctly with subunit, which wants to access sys.stdout.buffer.
+#
+class dummybuf(object):
+ def __init__(self, parent):
+ self.p = parent
+ def write(self, data):
+ self.p.write(data.decode("utf-8"))
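+
+# Example (illustrative): s = io.StringIO(); s.buffer = dummybuf(s);
+# s.buffer.write(b"hi") decodes the bytes and appends "hi" to s.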
+
+#
+# Taken from testtools.ConcurrentTestSuite but modified for OE use
+#
+class ConcurrentTestSuite(unittest.TestSuite):
+
+ def __init__(self, suite, processes):
+ super(ConcurrentTestSuite, self).__init__([suite])
+ self.processes = processes
+
+ def run(self, result):
+ tests, totaltests = fork_for_tests(self.processes, self)
+ try:
+ threads = {}
+ queue = Queue()
+ semaphore = threading.Semaphore(1)
+ result.threadprogress = {}
+ for i, (test, testnum) in enumerate(tests):
+ result.threadprogress[i] = []
+ process_result = BBThreadsafeForwardingResult(
+ ExtraResultsDecoderTestResult(result),
+ semaphore, i, testnum, totaltests)
+ # Force buffering of stdout/stderr so the console doesn't get corrupted by test output
+ # as per default in parent code
+ process_result.buffer = True
+ # We have to add a buffer object to stdout to keep subunit happy
+ process_result._stderr_buffer = io.StringIO()
+ process_result._stderr_buffer.buffer = dummybuf(process_result._stderr_buffer)
+ process_result._stdout_buffer = io.StringIO()
+ process_result._stdout_buffer.buffer = dummybuf(process_result._stdout_buffer)
+ reader_thread = threading.Thread(
+ target=self._run_test, args=(test, process_result, queue))
+ threads[test] = reader_thread, process_result
+ reader_thread.start()
+ while threads:
+ finished_test = queue.get()
+ threads[finished_test][0].join()
+ del threads[finished_test]
+ except:
+ for thread, process_result in threads.values():
+ process_result.stop()
+ raise
+ finally:
+ for test in tests:
+ test[0]._stream.close()
+
+ def _run_test(self, test, process_result, queue):
+ try:
+ try:
+ test.run(process_result)
+ except Exception:
+ # The run logic itself failed
+ case = testtools.ErrorHolder(
+ "broken-runner",
+ error=sys.exc_info())
+ case.run(process_result)
+ finally:
+ queue.put(test)
+
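+# Usage sketch (assuming a populated unittest.TestSuite named "suite" and a
+# result object providing the starttime/threadprogress/progressinfo
+# attributes this module fills in):
+#
+#   concurrent_suite = ConcurrentTestSuite(suite, processes=4)
+#   concurrent_suite.run(result)
+#
+# Each forked child streams its results back over a pipe via subunit, and
+# the parent merges them into "result" as the tests finish.
+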
+def removebuilddir(d):
+ delay = 5
+ while delay and os.path.exists(d + "/bitbake.lock"):
+ time.sleep(1)
+ delay = delay - 1
+ # Deleting these directories takes a lot of time, so use the autobuilder
+ # clobberdir script if it's available
+ clobberdir = os.path.expanduser("~/yocto-autobuilder-helper/janitor/clobberdir")
+ if os.path.exists(clobberdir):
+ try:
+ subprocess.check_call([clobberdir, d])
+ return
+ except subprocess.CalledProcessError:
+ pass
+ bb.utils.prunedir(d, ionice=True)
+
+def fork_for_tests(concurrency_num, suite):
+ result = []
+ if 'BUILDDIR' in os.environ:
+ selftestdir = get_test_layer()
+
+ test_blocks = partition_tests(suite, concurrency_num)
+ # Clear the tests from the original suite so it doesn't keep them alive
+ suite._tests[:] = []
+ totaltests = sum(len(x) for x in test_blocks)
+ for process_tests in test_blocks:
+ numtests = len(process_tests)
+ process_suite = unittest.TestSuite(process_tests)
+ # Also clear each split list so the new suite holds the only reference
+ process_tests[:] = []
+ c2pread, c2pwrite = os.pipe()
+ # Clear buffers before fork to avoid duplicate output
+ sys.stdout.flush()
+ sys.stderr.flush()
+ pid = os.fork()
+ if pid == 0:
+ ourpid = os.getpid()
+ try:
+ newbuilddir = None
+ stream = os.fdopen(c2pwrite, 'wb', 1)
+ os.close(c2pread)
+
+ # Create a new separate BUILDDIR for each group of tests
+ if 'BUILDDIR' in os.environ:
+ builddir = os.environ['BUILDDIR']
+ newbuilddir = builddir + "-st-" + str(ourpid)
+ newselftestdir = newbuilddir + "/meta-selftest"
+
+ bb.utils.mkdirhier(newbuilddir)
+ oe.path.copytree(builddir + "/conf", newbuilddir + "/conf")
+ oe.path.copytree(builddir + "/cache", newbuilddir + "/cache")
+ oe.path.copytree(selftestdir, newselftestdir)
+
+ for e in os.environ:
+ if builddir in os.environ[e]:
+ os.environ[e] = os.environ[e].replace(builddir, newbuilddir)
+
+ subprocess.check_output("git init; git add *; git commit -a -m 'initial'", cwd=newselftestdir, shell=True)
+
+ # Tried to use bitbake-layers add/remove, but it requires recipe parsing and hence is too slow
+ subprocess.check_output("sed %s/conf/bblayers.conf -i -e 's#%s#%s#g'" % (newbuilddir, selftestdir, newselftestdir), cwd=newbuilddir, shell=True)
+
+ os.chdir(newbuilddir)
+
+ for t in process_suite:
+ if not hasattr(t, "tc"):
+ continue
+ cp = t.tc.config_paths
+ for p in cp:
+ if selftestdir in cp[p] and newselftestdir not in cp[p]:
+ cp[p] = cp[p].replace(selftestdir, newselftestdir)
+ if builddir in cp[p] and newbuilddir not in cp[p]:
+ cp[p] = cp[p].replace(builddir, newbuilddir)
+
+ # Leave stderr and stdout open so we can see test noise
+ # Close stdin so that the child goes away if it decides to
+ # read from stdin (otherwise it's a roulette to see which
+ # child actually gets keystrokes for pdb etc).
+ newsi = os.open(os.devnull, os.O_RDWR)
+ os.dup2(newsi, sys.stdin.fileno())
+
+ subunit_client = TestProtocolClient(stream)
+ # Force buffering of stdout/stderr so the console doesn't get corrupted by test output
+ # as per default in parent code
+ subunit_client.buffer = True
+ subunit_result = AutoTimingTestResultDecorator(subunit_client)
+ process_suite.run(ExtraResultsEncoderTestResult(subunit_result))
+ if ourpid != os.getpid():
+ os._exit(0)
+ if newbuilddir:
+ removebuilddir(newbuilddir)
+ except:
+ # Don't do anything with process children
+ if ourpid != os.getpid():
+ os._exit(1)
+ # Try and report traceback on stream, but exit with error
+ # even if stream couldn't be created or something else
+ # goes wrong. The traceback is formatted to a string and
+ # written in one go to avoid interleaving lines from
+ # multiple failing children.
+ try:
+ stream.write(traceback.format_exc().encode('utf-8'))
+ except:
+ sys.stderr.write(traceback.format_exc())
+ finally:
+ if newbuilddir:
+ removebuilddir(newbuilddir)
+ stream.flush()
+ os._exit(1)
+ stream.flush()
+ os._exit(0)
+ else:
+ os.close(c2pwrite)
+ stream = os.fdopen(c2pread, 'rb', 1)
+ test = ProtocolTestCase(stream)
+ result.append((test, numtests))
+ return result, totaltests
+
+def partition_tests(suite, count):
+ # Keep tests from the same class together, but allow tests from different
+ # modules to go to different processes to aid parallelisation.
+ modules = {}
+ for test in iterate_tests(suite):
+ m = test.__module__ + "." + test.__class__.__name__
+ if m not in modules:
+ modules[m] = []
+ modules[m].append(test)
+
+ # Simply divide the test blocks between the available processes
+ partitions = [list() for _ in range(count)]
+ for partition, m in zip(cycle(partitions), modules):
+ partition.extend(modules[m])
+
+ # No point in keeping empty partitions, so drop them
+ return [p for p in partitions if p]
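+
+# Example (illustrative): with test classes A (3 tests), B (1 test) and C
+# (2 tests) and count=2, the round-robin over cycle(partitions) yields
+# [A1, A2, A3, C1, C2] and [B1]: classes stay together, partitions may be
+# uneven, and any empty partition is dropped.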
+
diff --git a/meta/lib/oeqa/core/utils/misc.py b/meta/lib/oeqa/core/utils/misc.py
new file mode 100644
index 0000000000..e1a59588eb
--- /dev/null
+++ b/meta/lib/oeqa/core/utils/misc.py
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+def toList(obj, obj_type, obj_name="Object"):
+ if isinstance(obj, obj_type):
+ return [obj]
+ elif isinstance(obj, list):
+ return obj
+ else:
+ raise TypeError("%s must be %s or list" % (obj_name, obj_type))
+
+def toSet(obj, obj_type, obj_name="Object"):
+ if isinstance(obj, obj_type):
+ return {obj}
+ elif isinstance(obj, list):
+ return set(obj)
+ elif isinstance(obj, set):
+ return obj
+ else:
+ raise TypeError("%s must be %s or set" % (obj_name, obj_type))
+
+def strToList(obj, obj_name="Object"):
+ return toList(obj, str, obj_name)
+
+def strToSet(obj, obj_name="Object"):
+ return toSet(obj, str, obj_name)
+
+def intToList(obj, obj_name="Object"):
+ return toList(obj, int, obj_name)
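+
+# Example (illustrative): strToList("x") returns ["x"], strToList(["x", "y"])
+# is returned unchanged, and strToList(1) raises TypeError because an int is
+# neither a str nor a list.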
+
+def dataStoreToDict(d, variables):
+ data = {}
+
+ for v in variables:
+ data[v] = d.getVar(v)
+
+ return data
+
+def updateTestData(d, td, variables):
+ """
+ Updates the test data dictionary with variable values from the data store.
+ """
+ for var in variables:
+ td[var] = d.getVar(var)
diff --git a/meta/lib/oeqa/core/utils/path.py b/meta/lib/oeqa/core/utils/path.py
new file mode 100644
index 0000000000..c086dcb0b0
--- /dev/null
+++ b/meta/lib/oeqa/core/utils/path.py
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
+
+def findFile(file_name, directory):
+ """
+ Searches for a file in a directory tree and returns its full path.
+ """
+ for r, d, f in os.walk(directory):
+ if file_name in f:
+ return os.path.join(r, file_name)
+ return None
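+
+# Example (illustrative): findFile("local.conf", builddir) walks builddir
+# top-down and returns the path of the first matching "local.conf", or None
+# if the file is absent.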
+
+def remove_safe(path):
+ if os.path.exists(path):
+ os.remove(path)
+
diff --git a/meta/lib/oeqa/core/utils/test.py b/meta/lib/oeqa/core/utils/test.py
new file mode 100644
index 0000000000..d38cab8a51
--- /dev/null
+++ b/meta/lib/oeqa/core/utils/test.py
@@ -0,0 +1,89 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import inspect
+import unittest
+
+def getSuiteCases(suite):
+ """
+ Returns the individual test cases from a test suite.
+ """
+ tests = []
+
+ if isinstance(suite, unittest.TestCase):
+ tests.append(suite)
+ elif isinstance(suite, unittest.suite.TestSuite):
+ for item in suite:
+ tests.extend(getSuiteCases(item))
+
+ return tests
+
+def getSuiteModules(suite):
+ """
+ Returns modules in a test suite.
+ """
+ modules = set()
+ for test in getSuiteCases(suite):
+ modules.add(getCaseModule(test))
+ return modules
+
+def getSuiteCasesInfo(suite, func):
+ """
+ Returns test case info from a suite; the info for each case is fetched by calling func.
+ """
+ tests = []
+ for test in getSuiteCases(suite):
+ tests.append(func(test))
+ return tests
+
+def getSuiteCasesNames(suite):
+ """
+ Returns test case names from suite.
+ """
+ return getSuiteCasesInfo(suite, getCaseMethod)
+
+def getSuiteCasesIDs(suite):
+ """
+ Returns test case ids from suite.
+ """
+ return getSuiteCasesInfo(suite, getCaseID)
+
+def getSuiteCasesFiles(suite):
+ """
+ Returns test case file paths from suite.
+ """
+ return getSuiteCasesInfo(suite, getCaseFile)
+
+def getCaseModule(test_case):
+ """
+ Returns test case module name.
+ """
+ return test_case.__module__
+
+def getCaseClass(test_case):
+ """
+ Returns test case class name.
+ """
+ return test_case.__class__.__name__
+
+def getCaseID(test_case):
+ """
+ Returns test case complete id.
+ """
+ return test_case.id()
+
+def getCaseFile(test_case):
+ """
+ Returns test case file path.
+ """
+ return inspect.getsourcefile(test_case.__class__)
+
+def getCaseMethod(test_case):
+ """
+ Returns test case method name.
+ """
+ return getCaseID(test_case).split('.')[-1]
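+
+# Example (illustrative): for a suite holding tests.test_a.TestA.test_one,
+# getSuiteCasesIDs(suite) returns ["tests.test_a.TestA.test_one"],
+# getSuiteCasesNames(suite) returns ["test_one"], and getSuiteModules(suite)
+# returns {"tests.test_a"}.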
diff --git a/meta/lib/oeqa/runtime/files/test.c b/meta/lib/oeqa/files/test.c
index 2d8389c92e..2d8389c92e 100644
--- a/meta/lib/oeqa/runtime/files/test.c
+++ b/meta/lib/oeqa/files/test.c
diff --git a/meta/lib/oeqa/runtime/files/test.cpp b/meta/lib/oeqa/files/test.cpp
index 9e1a76473d..9e1a76473d 100644
--- a/meta/lib/oeqa/runtime/files/test.cpp
+++ b/meta/lib/oeqa/files/test.cpp
diff --git a/meta/lib/oeqa/files/testresults/testresults.json b/meta/lib/oeqa/files/testresults/testresults.json
new file mode 100644
index 0000000000..1a62155618
--- /dev/null
+++ b/meta/lib/oeqa/files/testresults/testresults.json
@@ -0,0 +1,40 @@
+{
+ "runtime_core-image-minimal_qemuarm_20181225195701": {
+ "configuration": {
+ "DISTRO": "poky",
+ "HOST_DISTRO": "ubuntu-16.04",
+ "IMAGE_BASENAME": "core-image-minimal",
+ "IMAGE_PKGTYPE": "rpm",
+ "LAYERS": {
+ "meta": {
+ "branch": "master",
+ "commit": "801745d918e83f976c706f29669779f5b292ade3",
+ "commit_count": 52782
+ },
+ "meta-poky": {
+ "branch": "master",
+ "commit": "801745d918e83f976c706f29669779f5b292ade3",
+ "commit_count": 52782
+ },
+ "meta-yocto-bsp": {
+ "branch": "master",
+ "commit": "801745d918e83f976c706f29669779f5b292ade3",
+ "commit_count": 52782
+ }
+ },
+ "MACHINE": "qemuarm",
+ "STARTTIME": "20181225195701",
+ "TEST_TYPE": "runtime"
+ },
+ "result": {
+ "apt.AptRepoTest.test_apt_install_from_repo": {
+ "log": "Test requires apt to be installed",
+ "status": "PASSED"
+ },
+ "buildcpio.BuildCpioTest.test_cpio": {
+ "log": "Test requires autoconf to be installed",
+ "status": "ERROR"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/meta/lib/oeqa/manual/abat.patch b/meta/lib/oeqa/manual/abat.patch
new file mode 100644
index 0000000000..1541ac80cb
--- /dev/null
+++ b/meta/lib/oeqa/manual/abat.patch
@@ -0,0 +1,64 @@
+########
+diff --git a/glxgears_check.sh b/glxgears_check.sh
+index 17622b8..c4d3b97 100755
+--- a/glxgears_check.sh
++++ b/glxgears_check.sh
+@@ -31,7 +31,7 @@ else
+
+ sleep 6
+
+- XPID=$( ps ax | awk '{print $1, $5}' | grep glxgears | awk '{print $1}')
++ XPID=$( ps | awk '{print $1, $5}' | grep glxgears | awk '{print $1}')
+ if [ ! -z "$XPID" ]; then
+ kill -9 $XPID >/dev/null 2>&1
+ echo "glxgears can run, PASS!"
+diff --git a/x_close.sh b/x_close.sh
+index e287be1..3429f1a 100755
+--- a/x_close.sh
++++ b/x_close.sh
+@@ -22,7 +22,7 @@
+ #
+ function close_proc(){
+ echo "kill process Xorg"
+-XPID=$( ps ax | awk '{print $1, $5}' | egrep "X$|Xorg$" | awk '{print $1}')
++XPID=$( ps | awk '{print $1, $6}' | egrep "X$|Xorg$" | awk '{print $1}')
+ if [ ! -z "$XPID" ]; then
+ kill $XPID
+ sleep 4
+diff --git a/x_start.sh b/x_start.sh
+index 9cf6eab..2305796 100755
+--- a/x_start.sh
++++ b/x_start.sh
+@@ -24,7 +24,7 @@
+ X_ERROR=0
+
+ #test whether X has started
+-PXID=$(ps ax |awk '{print $1,$5}' |egrep "Xorg$|X$" |grep -v grep | awk '{print $1}')
++PXID=$(ps |awk '{print $1,$6}' |egrep "Xorg$|X$" |grep -v grep | awk '{print $1}')
+ if [ ! -z "$PXID" ]; then
+ echo "[WARNING] Xorg has started!"
+ XORG_STATUS="started"
+@@ -35,9 +35,11 @@ else
+ #start up the x server
+ echo "Start up the X server for test in display $DISPLAY................"
+
+- $XORG_DIR/bin/X >/dev/null 2>&1 &
++ #$XORG_DIR/bin/X >/dev/null 2>&1 &
++ #sleep 8
++ #xterm &
++ /etc/init.d/xserver-nodm start &
+ sleep 8
+- xterm &
+ fi
+ XLOG_FILE=/var/log/Xorg.0.log
+ [ -f $XORG_DIR/var/log/Xorg.0.log ] && XLOG_FILE=$XORG_DIR/var/log/Xorg.0.log
+@@ -54,7 +56,7 @@ fi
+ X_ERROR=1
+ fi
+
+- XPID=$( ps ax | awk '{print $1, $5}' | egrep "X$|Xorg$" |grep -v grep| awk '{print $1}')
++ XPID=$( ps | awk '{print $1, $6}' | egrep "X$|Xorg$" |grep -v grep| awk '{print $1}')
+ if [ -z "$XPID" ]; then
+ echo "Start up X server FAIL!"
+ echo
+######## \ No newline at end of file
diff --git a/meta/lib/oeqa/manual/bsp-hw.json b/meta/lib/oeqa/manual/bsp-hw.json
new file mode 100644
index 0000000000..5c5b9b50bb
--- /dev/null
+++ b/meta/lib/oeqa/manual/bsp-hw.json
@@ -0,0 +1,1000 @@
+[
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.boot_and_install_from_USB",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "plugin usb which contains live image burned",
+ "expected_results": "User can choose install system from usb stick onto harddisk from boot menu or command line option \n"
+ },
+ "2": {
+ "action": "configure device BIOS to firstly boot from USB if necessary",
+ "expected_results": "Installed system can boot up"
+ },
+ "3": {
+ "action": "boot the device and select option \"Install\" from boot menu",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "proceed through default install process",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Remove USB, and reboot into new installed system. \nNote: If installation was successfully completed and received this message \"\"(sdx): Volume was not properly unmounted...Please run fsck.\"\" ignore it because this was whitelisted according to bug 9652.",
+ "expected_results": ""
+ }
+ },
+ "summary": "boot_and_install_from_USB"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.live_boot_from_USB",
+ "author": [
+ {
+ "email": "juan.fernandox.ramos.frayle@intel.com",
+ "name": "juan.fernandox.ramos.frayle@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Plugin usb which contains live image burned.",
+ "expected_results": "User can choose boot from live image on usb stick from boot menu or command line option"
+ },
+ "2": {
+ "action": "Configure device BIOS to firstly boot from USB if necessary.",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Reboot the device and boot from USB stick.",
+ "expected_results": "Live image can boot up with usb stick"
+ }
+ },
+ "summary": "live_boot_from_USB"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.boot_from_runlevel_3",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Boot into system and edit /etc/inittab to make sure that system enter at the run level 3 by default, this is done by changing the line \n\n\nid:5:initdefault \n\nto \n\nid:3:initdefault \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Reboot system, and press \"Tab\" to enter \"grub\"",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Get into the \"kernel\" line with the edit option \"e\" and add \"psplash=false text\" at the end line.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Press \"F10\" or \"ctrl+x\" to boot system",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "If system ask you for a login type \"root\"",
+ "expected_results": "System should boot to run level 3, showing the command prompt."
+ }
+ },
+ "summary": "boot_from_runlevel_3"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.boot_from_runlevel_5",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Boot into system and edit /etc/inittab to make sure that system enter at the run level 5 by default, this is done by changing the line \n\nid:3:initdefault \n\nto \n\nid:5:initdefault \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Reboot system, and press \"Tab\" to enter \"grub\"",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Get into the \"kernel\" line with the edit option \"e\" and add \"psplash=false text\" at the end line.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Press \"F10\" or \"ctrl+x\" to boot system \nNote: The test is only for sato image.",
+ "expected_results": "System should boot to runlevel 5 ."
+ }
+ },
+ "summary": "boot_from_runlevel_5"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.shutdown_system",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "launch terminal and run \"shutdown -h now\" or \"poweroff\"",
+ "expected_results": "System can be shutdown successfully . "
+ }
+ },
+ "summary": "shutdown_system"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.switch_among_multi_applications_and_desktop",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "launch several applications(like contacts, file manager, notes, etc)",
+ "expected_results": "user could switch among multi applications and desktop"
+ },
+ "2": {
+ "action": "launch terminal",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "switch among multi applications and desktop",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "close applications \nNote: The case is for sato image only. ",
+ "expected_results": ""
+ }
+ },
+ "summary": "switch_among_multi_applications_and_desktop"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.ethernet_static_ip_set_in_connman",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Boot the system and check internet connection is on . ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Launch connmand-properties (up-right corner on desktop)",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Choose Ethernet device and set a valid static ip address for it. \nFor example, in our internal network, we can set as following: \nip address: 10.239.48.xxx \nMask: 255.255.255.0 \nGateway (Broadcast): 10.239.48.255",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check the Network configuration with \"ifconfig\"",
+ "expected_results": "Static IP was set successfully \n"
+ },
+ "5": {
+ "action": "ping to another IP adress",
+ "expected_results": "Ping works correclty\n"
+ }
+ },
+ "summary": "ethernet_static_ip_set_in_connman"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.ethernet_get_IP_in_connman_via_DHCP",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch connmand-properties (up-right corner on your desktop). ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Check if Ethernet device can work properly with static IP, doing \"ping XXX.XXX.XXX.XXX\", once this is set.",
+ "expected_results": "Ping executed successfully . \n\n"
+ },
+ "3": {
+ "action": "Then choose DHCP method for Ethernet device in connmand-properties.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check with 'ifconfig\" and \"ping\" if Ethernet device get IP address via DHCP.",
+ "expected_results": "Ethernet device can get dynamic IP address via DHCP in connmand ."
+ }
+ },
+ "summary": "ethernet_get_IP_in_connman_via_DHCP"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.connman_offline_mode_in_connman-gnome",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch connman-properties after system booting \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "choose \"offline mode\" and check the connection of all network interfaces ",
+ "expected_results": "All connection should be off after clicking \"offline mode\" . "
+ }
+ },
+ "summary": "connman_offline_mode_in_connman-gnome"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.X_server_can_start_up_with_runlevel_5_boot",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot up system with default runlevel \n\n",
+ "expected_results": "X server can start up well and desktop display has no problem . \n\n"
+ },
+ "2": {
+ "action": "type runlevel at command prompt",
+ "expected_results": "Output:N 5"
+ }
+ },
+ "summary": "X_server_can_start_up_with_runlevel_5_boot"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.standby",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system and launch terminal; check output of \"date\" and launch script \"continue.sh\"",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "echo \"mem\" > /sys/power/state",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "After system go into S3 mode, move mouse or press any key to make it resume (on NUC press power button)",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check \"date\" and script \"continue.sh\"",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check if application can work as normal \ncontinue.sh as below: \n \n#!/bin/sh \n \ni=1 \nwhile [ 0 ] \ndo \n echo $i \n sleep 1 \n i=$((i+1)) \ndone ",
+ "expected_results": "Screen should resume back and script can run continuously incrementing the i's value from where it was before going to standby state. Date should be the same with the corresponding time increment."
+ }
+ },
+ "summary": "standby"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.check_CPU_utilization_after_standby",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start up system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "run \"top\" command and check if there is any process eating CPU time",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "make system into standby and resume it",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "run \"top\" command and check if there is any difference with the data before standby",
+ "expected_results": "There should be no big difference before/after standby with \"top\" . "
+ }
+ },
+ "summary": "check_CPU_utilization_after_standby"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Test_if_LAN_device_works_well_after_resume_from_suspend_state",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system and launch terminal",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "echo \"mem\" > /sys/power/state",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "After system go into S3 mode, move mouse or press any key to make it resume",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "check ping status \n\nNote: This TC apply only for core-image-full-cmd.",
+ "expected_results": "ping should always work before/after standby"
+ }
+ },
+ "summary": "Test_if_LAN_device_works_well_after_resume_from_suspend_state"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Test_if_usb_hid_device_works_well_after_resume_from_suspend_state",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system and launch terminal",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "echo \"mem\" > /sys/power/state",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "After system go into S3 mode, resume the device by pressing the power button or using HID devices",
+ "expected_results": "Devices resumes "
+ },
+ "4": {
+ "action": "check usb mouse and keyboard",
+ "expected_results": "Usb mouse and keyboard should work"
+ }
+ },
+ "summary": "Test_if_usb_hid_device_works_well_after_resume_from_suspend_state"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.click_terminal_icon_on_X_desktop",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "After system launch and X start up, click terminal icon on desktop",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Check if only one terminal window launched and no other problem met",
+ "expected_results": "There should be no problem after launching terminal . "
+ }
+ },
+ "summary": "click_terminal_icon_on_X_desktop"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Add_multiple_files_in_media_player",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch media player",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Add multiple files(5 files) in media player at same time (ogg or wav)",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Verify the sound.",
+ "expected_results": "Media player should be OK with this action, it reproduce files correctly."
+ }
+ },
+ "summary": "Add_multiple_files_in_media_player"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.video_-_libva_check_(ogg_video_play)",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "check if libva is installed on system (or libogg)",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "copy sample ogg file to system",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "launch media player can play the ogg file",
+ "expected_results": "ogg file can be played without problem when libva is used (or libogg) "
+ }
+ },
+ "summary": "video_-_libva_check_(ogg_video_play)"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.media_player_-_play_video_(ogv)",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "copy sample ogv file to system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "launch media player and make sure it can play the ogv file",
+ "expected_results": "ogv file can be played without problem"
+ }
+ },
+ "summary": "media_player_-_play_video_(ogv)"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.media_player_-_stop/play_button_(ogv)",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "copy sample ogv file to system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "launch media player can play the ogv file",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "click \"stop\" button to stop playing",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "click \"start\" button to resume playing",
+ "expected_results": "ogv file can be start/stop without problem"
+ }
+ },
+ "summary": "media_player_-_stop/play_button_(ogv)"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.audio_-_play_(ogg)_with_HDMI",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "copy sample ogg file to system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "connect system with a monitor with HDMI",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "launch media player and play the ogg file",
+ "expected_results": "ogg file can be played without problem with HDMI"
+ }
+ },
+ "summary": "audio_-_play_(ogg)_with_HDMI"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.audio_-_play_(wav)_with_HDMI",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "copy sample wav file to system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "connect system with a monitor with HDMI",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "launch media player and play the wav file",
+ "expected_results": "wav file can be played without problem, with HDMI"
+ }
+ },
+ "summary": "audio_-_play_(wav)_with_HDMI"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Graphics_-_ABAT",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Download ABAT test suite from internal git repository, git clone git://tinderbox.sh.intel.com/git/abat",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Apply following patch to make it work on yocto environment",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Run \"./abat.sh\" to run ABAT test refer to abat.patch",
+ "expected_results": "All ABAT test should pass. \nNote : If below 3 fails appears ignore them. \n- start up X server fail.. due is already up \n- module [intel_agp] \n- module [i915]"
+ }
+ },
+ "summary": "Graphics_-_ABAT"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Graphics_-_x11perf_-_2D",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Run \"x11perf -aa10text\" and \"x11perf -rgb10text\"",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Get the FPS result and compare it with upstream graphics data on Sandybridge",
+ "expected_results": "There should not be big regression between Yocto and upstream linux . "
+ }
+ },
+ "summary": "Graphics_-_x11perf_-_2D"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Check_if_SATA_disk_can_work_correctly",
+ "author": [
+ {
+ "email": "yi.zhao@windriver.com",
+ "name": "yi.zhao@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Run fdisk command to create partition on SATA disk. ",
+ "expected_results": "The SATA device can mount, umount, read and write. "
+ },
+ "2": {
+ "action": "Mount/Umount \n mke2fs /dev/sda1 \n mount -t ext2 /dev/sda1 /mnt/disk \n umount /mnt/disk",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Read/Write (filesystem) \n touch /mnt/disk/test.txt \n echo abcd > /mnt/disk/test.txt \n cat /mnt/disk/test.txt",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Read/Write (raw) \n dd if=/dev/sda1 of=/tmp/test bs=1k count=1k \n This command will read 1MB from /dev/sda1 to /tmp/test",
+ "expected_results": ""
+ }
+ },
+ "summary": "Check_if_SATA_disk_can_work_correctly"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Install_and_boot_from_USB-drive_to_HDD-drive",
+ "author": [
+ {
+ "email": "david.israelx.rodriguez.castellanos@intel.com",
+ "name": "david.israelx.rodriguez.castellanos@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Get a HDD drive.",
+ "expected_results": "User can choose install system from USB stick on HDD drive from boot menu or command line option \n"
+ },
+ "2": {
+ "action": "Plugin USB which contains live image burned (USB1).",
+ "expected_results": "Installed system can boot up."
+ },
+ "3": {
+ "action": "Configure device BIOS to firstly boot from USB if necessary",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Boot the device and select option \"Install\" from boot menu.",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Make sure that the divice in which image is going to be installed is the HDD drive.",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Proceed through default install process.",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Remove USB1, and reboot into new installed system.",
+ "expected_results": ""
+ }
+ },
+ "summary": "Install_and_boot_from_USB-drive_to_HDD-drive"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Install_and_boot_from_USB-drive_to_SD-drive",
+ "author": [
+ {
+ "email": "david.israelx.rodriguez.castellanos@intel.com",
+ "name": "david.israelx.rodriguez.castellanos@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Get a SD-drive with enough free space to install an image.",
+ "expected_results": "User can choose install system from USB stick on SD-drive from boot menu or command line option. \n"
+ },
+ "2": {
+ "action": "Plugin USB which contains live image burned (USB1).",
+ "expected_results": "Installed system can boot up."
+ },
+ "3": {
+ "action": "Configure device BIOS to firstly boot from USB if necessary",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Boot the device and select option \"Install\" from boot menu.",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Make sure that the device in which image is going to be installed is the SD-drive.",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Proceed through default install process.",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Remove USB1, and reboot into new installed system.",
+ "expected_results": ""
+ }
+ },
+ "summary": "Install_and_boot_from_USB-drive_to_SD-drive"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Test_boot_on_serial_communication_SD",
+ "author": [
+ {
+ "email": "juan.fernandox.ramos.frayle@intel.com",
+ "name": "juan.fernandox.ramos.frayle@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "1.- Create a yocto project image in a SD card \nexample \n2 - Configure a connection like shown in the link avobe: \nhttps://wiki.yoctoproject.org/wiki/MinnowMax_board_Serial_video_connection_guide \n3 - Verify the Minow Max board is connected to the host \n4 - Boot the system to desktop \n5 - Open a Terminal and check the IP \nIn Terminal type $ifconfig\"",
+ "expected_results": "Verify you can create a live image \n"
+ }
+ },
+ "summary": "Test_boot_on_serial_communication_SD"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Test_boot_on_serial_communication_HDD",
+ "author": [
+ {
+ "email": "juan.fernandox.ramos.frayle@intel.com",
+ "name": "juan.fernandox.ramos.frayle@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "1 - Create a yocto project image in a HDD \nexample \n2 - Configure a connection like shown in the link avobe: \nhttps://wiki.yoctoproject.org/wiki/MinnowMax_board_Serial_video_connection_guide \n3 - Verify the Minow Max board is connected to the host \n4 - Boot the system to desktop \n5 - Open a Terminal and check the IP \nIn Terminal type $ifconfig\"> ",
+ "expected_results": "Verify you can create a live image \n"
+ }
+ },
+ "summary": "Test_boot_on_serial_communication_HDD"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Test_boot_on_serial_communication_USB",
+ "author": [
+ {
+ "email": "juan.fernandox.ramos.frayle@intel.com",
+ "name": "juan.fernandox.ramos.frayle@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "1.- Create a yocto project image in a USB \nexample <dd if= core-image-sato-sdk.hddimg of =/dev/sdb>",
+ "expected_results": "Verify you can create a live image \n"
+ },
+ "2": {
+ "action": "Configure a connection like shown in the link avobe: \nhttps://wiki.yoctoproject.org/wiki/MinnowMax_board_Serial_video_connection_guide\n\n",
+ "expected_results": "Video signal is present and not delayed \n"
+ },
+ "3": {
+ "action": " Verify the Minow Max board is connected to the host",
+ "expected_results": "Verify the system boot ok and no errors are present \n"
+ },
+ "4": {
+ "action": " Boot the system to desktop",
+ "expected_results": " Check that a valid IP is retrieved"
+ },
+ "5": {
+ "action": " Open a Terminal and check the IP \nIn Terminal type $ifconfig\" ",
+ "expected_results": ""
+ }
+ },
+ "summary": "Test_boot_on_serial_communication_USB"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Test_Seek_bar_and_volume_control",
+ "author": [
+ {
+ "email": "juan.fernandox.ramos.frayle@intel.com",
+ "name": "juan.fernandox.ramos.frayle@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Run media player and load a media file ",
+ "expected_results": "Media player correctly open audio/video file \n"
+ },
+ "2": {
+ "action": "Verify that seek and volume control are present ",
+ "expected_results": "Seek bar and volume control are present \n"
+ },
+ "3": {
+ "action": "Verify that selecting the speaker icon opens the volume control",
+ "expected_results": "Volume control bar must appear \n"
+ },
+ "4": {
+ "action": "Verify you can increase and decrease volume level with the volume control",
+ "expected_results": "Volume level must be increased and decreased \n"
+ },
+ "5": {
+ "action": "Observe that slider on the seek bar moves along with the video/audio play",
+ "expected_results": "Video/audio file can be played and slider moves along with the video/audio play \n"
+ },
+ "6": {
+ "action": "Verify you can navigate the video with the slider back and forward",
+ "expected_results": "The slider can move back and forward in the seek bar \n"
+ },
+ "7": {
+ "action": "Verify that seek and volume control are functional in full screen mode",
+ "expected_results": "Press the full screen mode icon, seek bar and volume control must work fine \n"
+ },
+ "8": {
+ "action": "Verify that pressing << or >> while playing a file makes the slide goes slow/backwards or faster",
+ "expected_results": "Verify << and >> works correctly"
+ }
+ },
+ "summary": "Test_Seek_bar_and_volume_control"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Check_if_watchdog_can_reset_the_target_system",
+ "author": [
+ {
+ "email": "yi.zhao@windriver.com",
+ "name": "yi.zhao@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "1.Check if watchdog device exist in /dev/ directory. Run command echo 1 > /dev/watchdog and wait for 60s. Then, the target will reboot.",
+ "expected_results": "The watchdog device exist in /dev/ directory and can reboot the target.\n"
+ }
+ },
+ "summary": "Check_if_watchdog_can_reset_the_target_system"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Check_if_RTC_(Real_Time_Clock)_can_work_correctly",
+ "author": [
+ {
+ "email": "yi.zhao@windriver.com",
+ "name": "yi.zhao@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Read time from RTC registers. root@localhost:/root> hwclock -r Sun Mar 22 04:05:47 1970 -0.001948 seconds ",
+ "expected_results": "Can read and set the time from RTC.\n"
+ },
+ "2": {
+ "action": "Set system current time root@localhost:/root> date 062309452008 ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Synchronize the system current time to RTC registers root@localhost:/root> hwclock -w ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Read time from RTC registers root@localhost:/root> hwclock -r ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Reboot target and read time from RTC again\n",
+ "expected_results": ""
+ }
+ },
+ "summary": "Check_if_RTC_(Real_Time_Clock)_can_work_correctly"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Check_if_target_can_support_EEPROM",
+ "author": [
+ {
+ "email": "yi.zhao@windriver.com",
+ "name": "yi.zhao@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Check eeprom device exist in /sys/bus/i2c/devices/ ",
+ "expected_results": "Hexdump can read data from eeprom.\n"
+ },
+ "2": {
+ "action": "Run \"hexdump eeprom\" commandroot@mpc8315e-rdb:/sys/bus/i2c/devices/1-0051> hexdump eeprom0000000 9210 0b02 0211 0009 0b52 0108 0c00 3c000000010 6978 6930 6911 208c 7003 3c3c 00f0 8381\u2026\n",
+ "expected_results": ""
+ }
+ },
+ "summary": "Check_if_target_can_support_EEPROM"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.System_can_boot_up_via_NFS",
+ "author": [
+ {
+ "email": "yi.zhao@windriver.com",
+ "name": "yi.zhao@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Connect the board's first serial port to your workstation and then start up your favourite serial terminal so that you will be able to interact with the serial console. If you don't have a favourite, picocom is suggested: $ picocom /dev/ttyS0 -b 115200 ",
+ "expected_results": "The system can boot up without problem\n"
+ },
+ "2": {
+ "action": "Power up or reset the board and press a key on the terminal when prompted to get to the U-Boot command line ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Set up the environment in U-Boot: => setenv ipaddr => setenv serverip ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Download the kernel and boot: => tftp tftp $loadaddr vmlinux => bootoctlinux $loadaddr coremask=0x3 root=/dev/nfs rw nfsroot=: ip=::::edgerouter:eth0:off mtdparts=phys_mapped_flash:512k(boot0),512k(boot1),64k@3072k(eeprom)\n",
+ "expected_results": ""
+ }
+ },
+ "summary": "System_can_boot_up_via_NFS"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-hw.Boot_from_JFFS2_image",
+ "author": [
+ {
+ "email": "yi.zhao@windriver.com",
+ "name": "yi.zhao@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "First boot the board with NFS root. ",
+ "expected_results": "The system can boot up without problem\n"
+ },
+ "2": {
+ "action": "Install mtd-utils package. Erase the MTD partition which will be used as root: $ flash_eraseall /dev/mtd3 ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Copy the JFFS2 image to the MTD partition: $ flashcp core-image-minimal-mpc8315e-rdb.jffs2 /dev/mtd3 ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Then reboot the board and set up the environment in U-Boot: => setenv bootargs root=/dev/mtdblock3 rootfstype=jffs2 console=ttyS0,115200 ",
+ "expected_results": ""
+ }
+ },
+ "summary": "Boot_from_JFFS2_image"
+ }
+ }
+]
diff --git a/meta/lib/oeqa/manual/build-appliance.json b/meta/lib/oeqa/manual/build-appliance.json
new file mode 100644
index 0000000000..70f8c72c9b
--- /dev/null
+++ b/meta/lib/oeqa/manual/build-appliance.json
@@ -0,0 +1,96 @@
+[
+ {
+ "test": {
+ "@alias": "build-appliance.build-appliance.Build_core-image-minimal_with_build-appliance-image",
+ "author": [
+ {
+ "email": "corneliux.stoicescu@intel.com",
+ "name": "corneliux.stoicescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Build with AUTOREV or download from Autobuilder an image for Yocto Build Appliance. ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Boot the image under VMWare Player. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Build qemux86 core-image-minimal using bitbake command line in the build-appliance-image ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Launch the image just built using runqemu. ",
+ "expected_results": "core-image-minimal should build and boot. "
+ }
+ },
+ "summary": "Build_core-image-minimal_with_build-appliance-image"
+ }
+ },
+ {
+ "test": {
+ "@alias": "build-appliance.build-appliance.Build_a_image_without_error_(added_recipe)",
+ "author": [
+ {
+ "email": "sstncr@gmail.com",
+ "name": "sstncr@gmail.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch Build Appliance",
+ "expected_results": "User could build a image without error and the added package is in the image"
+ },
+ "2": {
+ "action": "Set \"Machine\" in conf/local.conf, for example, qemuarm",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Install a new package to the image, for example, acpid. Set the following line in conf/local.conf: IMAGE_INSTALL_append = \" acpid\"",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Build a image using bitbake command line, for example, bitbake core-image-minimal",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "After build finished, launch the image and check if the added package built into image",
+ "expected_results": ""
+ }
+ },
+ "summary": "Build_a_image_without_error_(added_recipe)."
+ }
+ },
+ {
+ "test": {
+ "@alias": "build-appliance.build-appliance.Create_core-image-sato-sdk_using_build_appliance",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Build with AUTOREV or download from Autobuilder an image for Yocto Build Appliance. ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Boot the image under VMWare Player. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Build qemux86 core-image-sato-sdk using bitbake command line in the build-appliance-image ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Launch the image just built using runqemu. ",
+ "expected_results": ""
+ }
+ },
+ "summary": "Create_core-image-sato-sdk_using_build_appliance"
+ }
+ }
+] \ No newline at end of file
diff --git a/meta/lib/oeqa/manual/crops.json b/meta/lib/oeqa/manual/crops.json
new file mode 100644
index 0000000000..5cfa653843
--- /dev/null
+++ b/meta/lib/oeqa/manual/crops.json
@@ -0,0 +1,294 @@
+[
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_eSDK_devtool_build_make",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 5 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Create the following tree of files <crops-esdk-workdir-workspace>/sdkext/files/myapp <crops-esdk-workdir-workspace>/sdkext/files/myapp_cmake \n\n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": " Create the following files withing the myapp directory myapp.c and the Makefile. Write the following inside of each file: \n---------------------------------------- \nMakefile should contain \n\nall: myapp \n\nmyapp: myapp.o \n\t$(CC) $(LDFLAGS) $< -o $@ \n\nmyapp.o: myapp.c \n\t$(CC) $(CFLAGS) -c $< -o $@ \n\nclean: \n\trm -rf myapp.o myapp \n\n----------------------------- \nmyapp.c shold contain \n\n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n \n\treturn 0; \n} \n------------------------------------ \n\n",
+ "expected_results": "be sure that the indentations on the makefile are tabs not spaces. \n\n"
+ },
+ "5": {
+ "action": " Create the following files within the myapp_cmake directory CMakeLists.txt and myapp.c. Write the following inside each file: \n\n------------------------------------ \nCMakeLists.txt should contain: \n\ncmake_minimum_required (VERSION 2.6) \nproject (myapp) \n# The version number. \nset (myapp_VERSION_MAJOR 1) \nset (myapp_VERSION_MINOR 0) \n\n# add the executable \nadd_executable (myapp myapp.c) \n\ninstall(TARGETS myapp \nRUNTIME DESTINATION bin) \n\n------------------------------------------ \nmyapp.c should contain: \n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n\n\treturn 0; \n} \n------------------------------------------------- \n\n",
+ "expected_results": "Be sure that the indentations on CMakeLists.txt is tabs not spaces."
+ },
+ "6": {
+ "action": " source environment-setup-i586-poky-linux \n\n",
+ "expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
+ },
+ "7": {
+ "action": " run command which devtool \n\n",
+ "expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n "
+ },
+ "8": {
+ "action": "devtool add myapp <directory>(this is myapp dir) \n\n\n",
+ "expected_results": "The directory you should input is the myapp directory. This should automatically create the recipe myapp.bb under <crops-esdk-workdir-workspace>/recipes/myapp/myapp.bb"
+ },
+ "9": {
+ "action": " devtool build myapp \n\n",
+ "expected_results": "This should compile an image"
+ },
+ "10": {
+ "action": " devtool reset myapp ",
+ "expected_results": "This cleans sysroot of the myapp recipe, but it leaves the source tree intact. meaning it does not erase."
+ }
+ },
+ "summary": "sdkext_eSDK_devtool_build_make"
+ }
+ },
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_devtool_build_esdk_package",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 5 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": " Create the following tree of files <crops-esdk-workdir-workspace>/sdkext/files/myapp/ \n <crops-esdk-workdir-workspace>/sdkext/files/myapp_cmake \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": " Create the following files withing the myapp directory myapp.c and the Makefile. Write the following inside of each file: \n---------------------------------------- \nMakefile should contain \n\nall: myapp \n\nmyapp: myapp.o \n\t$(CC) $(LDFLAGS) $< -o $@ \n\nmyapp.o: myapp.c \n\t$(CC) $(CFLAGS) -c $< -o $@ \n\nclean: \n\trm -rf myapp.o myapp \n\n----------------------------- \nmyapp.c shold contain \n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n \n\treturn 0; \n} \n------------------------------------ \n\n",
+ "expected_results": "be sure that the indentations on the makefile are tabs not spaces. \n\n"
+ },
+ "5": {
+ "action": " Create the following files within the myapp_cmake directory CMakeLists.txt and myapp.c. Write the following inside each file: \n\n------------------------------------ \nCMakeLists.txt should contain: \n\ncmake_minimum_required (VERSION 2.6) \nproject (myapp) \n# The version number. \nset (myapp_VERSION_MAJOR 1) \nset (myapp_VERSION_MINOR 0) \n\n# add the executable \nadd_executable (myapp myapp.c) \n\ninstall(TARGETS myapp \nRUNTIME DESTINATION bin) \n\n------------------------------------------ \nmyapp.c should contain: \n\n#include<stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n\n\treturn 0; \n} \n------------------------------------------------- \n\n",
+ "expected_results": "Be sure that the indentations on CMakeLists.txt is tabs not spaces. \n\n"
+ },
+ "6": {
+ "action": " source environment-setup-i586-poky-linux \n\n",
+ "expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
+ },
+ "7": {
+ "action": " run command which devtool \n\n",
+ "expected_results": " this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
+ },
+ "8": {
+ "action": " devtool add myapp <directory> (this is myapp dir) \n\n",
+ "expected_results": " The directory you should input is the myapp directory. This should automatically create the recipe myapp.bb under <crops-esdk-workdir-workspace>/recipes/myapp/myapp.bb \n\n"
+ },
+ "9": {
+ "action": " devtool package myapp \n\n",
+ "expected_results": " you should expect a package creation of myapp and it should be under the /tmp/deploy/ \n\n"
+ },
+ "10": {
+ "action": " devtool reset myapp ",
+ "expected_results": "This cleans sysroot of the myapp recipe, but it leaves the source tree intact. meaning it does not erase.\n</package_format>"
+ }
+ },
+ "summary": "sdkext_devtool_build_esdk_package"
+ }
+ },
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_devtool_build_cmake",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 5 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": " Create the following tree of files <crops-esdk-workdir-workspace>/sdkext/files/myapp \n <crops-esdk-workdir-workspace>/sdkext/files/myapp_cmake \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": " Create the following files withing the myapp directory myapp.c and the Makefile. Write the following inside of each file: \n---------------------------------------- \nMakefile should contain \n\nall: myapp \n\nmyapp: myapp.o \n\t$(CC) $(LDFLAGS) $< -o $@ \n\nmyapp.o: myapp.c \n\t$(CC) $(CFLAGS) -c $< -o $@ \n\nclean: \n\trm -rf myapp.o myapp \n\n----------------------------- \nmyapp.c shold contain \n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n \n\treturn 0; \n} \n------------------------------------ \n\n",
+ "expected_results": "be sure that the indentations on the makefile are tabs not spaces. \n\n"
+ },
+ "5": {
+ "action": " Create the following files within the myapp_cmake directory CMakeLists.txt and myapp.c. Write the following inside each file: \n\n------------------------------------ \nCMakeLists.txt should contain: \n\ncmake_minimum_required (VERSION 2.6) \nproject (myapp) \n# The version number. \nset (myapp_VERSION_MAJOR 1) \nset (myapp_VERSION_MINOR 0) \n\n# add the executable \nadd_executable (myapp myapp.c) \n\ninstall(TARGETS myapp \nRUNTIME DESTINATION bin) \n\n------------------------------------------ \nmyapp.c should contain: \n\n#include \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n\n\treturn 0; \n} \n------------------------------------------------- \n\n",
+ "expected_results": "Be sure that the indentations on CMakeLists.txt is tabs not spaces. \n\n"
+ },
+ "6": {
+ "action": " source environment-setup-i586-poky-linux \n\n",
+ "expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
+ },
+ "7": {
+ "action": " run command which devtool \n\n",
+ "expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
+ },
+ "8": {
+ "action": " devtool add myapp <directory> (this is myapp_cmake dir) \n\n",
+ "expected_results": "The directory you should input is the myapp_cmake directory. This should automatically create the recipe myapp.bb under <crops-esdk-workdir-workspace>/recipes/myapp/myapp.bb \n\n"
+ },
+ "9": {
+ "action": " devtool build myapp \n\n",
+ "expected_results": "This should compile an image \n\n"
+ },
+ "10": {
+ "action": " devtool reset myapp ",
+ "expected_results": "This cleans sysroot of the myapp recipe, but it leaves the source tree intact. meaning it does not erase. "
+ }
+ },
+ "summary": "sdkext_devtool_build_cmake"
+ }
+ },
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_extend_autotools_recipe_creation",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 2 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": " source environment-setup-i586-poky-linux \n\n",
+ "expected_results": " This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
+ },
+ "4": {
+ "action": "run command which devtool \n\n",
+ "expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
+ },
+ "5": {
+ "action": "devtool sdk-install -s libxml2 \n\n",
+ "expected_results": "this should install libxml2 \n\n"
+ },
+ "6": {
+ "action": "devtool add librdfa https://github.com/rdfa/librdfa \n\n",
+ "expected_results": "This should automatically create the recipe librdfa.bb under /recipes/librdfa/librdfa.bb \n\n"
+ },
+ "7": {
+ "action": "devtool build librdfa \n\n",
+ "expected_results": "This should compile \n\n"
+ },
+ "8": {
+ "action": "devtool reset librdfa ",
+ "expected_results": "This cleans sysroot of the librdfa recipe, but it leaves the source tree intact. meaning it does not erase."
+ }
+ },
+ "summary": "sdkext_extend_autotools_recipe_creation"
+ }
+ },
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_devtool_kernelmodule",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 2 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source environment-setup-i586-poky-linux \n\n",
+ "expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n \n"
+ },
+ "4": {
+ "action": "run command which devtool \n\n",
+ "expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
+ },
+ "5": {
+ "action": "devtool add kernel-module-hello-world https://git.yoctoproject.org/git/kernel-module-hello-world \n\n",
+ "expected_results": "This should automatically create the recipe kernel-module-hello-world.bb under <crops-esdk-workdir-workspace>/recipes/kernel-module-hello-world/kernel-module-hello-world.bb "
+ },
+ "6": {
+ "action": "devtool build kernel-module-hello-world \n\n",
+ "expected_results": "This should compile an image \n\n"
+ },
+ "7": {
+ "action": "devtool reset kernel-module-hello-world ",
+ "expected_results": "This cleans sysroot of the kernel-module-hello-world recipe, but it leaves the source tree intact. meaning it does not erase."
+ }
+ },
+ "summary": "sdkext_devtool_kernelmodule"
+ }
+ },
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_recipes_for_nodejs",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 2 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\nlets say variable npm = npm://registry.npmjs.org;name=winston;version=2.2.0 \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source environment-setup-i586-poky-linux \n\n",
+ "expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
+ },
+ "4": {
+ "action": "run command which devtool \n\n",
+ "expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
+ },
+ "5": {
+ "action": " 4a) git clone git://git.openembedded.org/meta-openembedded in layers/build directory \n \n4b) Add meta-openembedded/meta-oe in bblayer.conf as mentioned below: ${SDKBASEMETAPATH}/layers/build/meta-openembedded/meta-oe \\ \n\n4c) devtool add \"npm://registry.npmjs.org;name=npm;version=2.2.0\" \n\n",
+ "expected_results": " This should automatically create the recipe npm.bb under /recipes/npm/npm.bb \n\n"
+ },
+ "6": {
+ "action": "devtool build npm \n\n",
+ "expected_results": "This should compile an image \n\n"
+ },
+ "7": {
+ "action": " devtool reset npm",
+ "expected_results": "This cleans sysroot of the npm recipe, but it leaves the source tree intact. meaning it does not erase."
+ }
+ },
+ "summary": "sdkext_recipes_for_nodejs"
+ }
+ }
+]
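For quick reference, the devtool flow exercised by the CROPS-eSDK cases above collapses into one command sequence. A minimal sketch, assuming a CROPS-eSDK container with the i586 poky toolchain and the myapp sources laid out as in steps 3-5 (paths and names follow the test steps and are illustrative, not prescriptive):

    # Set up the eSDK environment; prints "SDK environment now set up ..."
    source environment-setup-i586-poky-linux
    which devtool    # should resolve inside the SDK workdir

    # Generate a recipe from local source, then build, package and reset it
    devtool add myapp <crops-esdk-workdir-workspace>/sdkext/files/myapp
    devtool build myapp      # builds via the generated recipes/myapp/myapp.bb
    devtool package myapp    # the package lands under tmp/deploy/
    devtool reset myapp      # removes the recipe; the source tree is left intact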
diff --git a/meta/lib/oeqa/manual/eclipse-plugin.json b/meta/lib/oeqa/manual/eclipse-plugin.json
new file mode 100644
index 0000000000..9869150dcf
--- /dev/null
+++ b/meta/lib/oeqa/manual/eclipse-plugin.json
@@ -0,0 +1,322 @@
+[
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.support_SSH_connection_to_Target",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "In Eclipse, swich to Remote System Explorer to create a connention baseed on SSH, input the remote target IP address as the Host name, make sure disable the proxy in Window->Preferences->General->Network Connection, set Direct as Active Provider field. ",
+ "expected_results": "the connection based on SSH could be set up."
+ },
+ "2": {
+ "action": "Configure connection from Eclipse: Run->Run Configurations->C/C++ Remote Application\\ ->New Connection->General->SSH Only ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Then right click to connect, input the user ID and password. ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "expand the connection, it will show the Sftp Files etc. \nNOTE. Might need to change dropbear to openssh and add the packagegroup-core-eclipse-debug recipe",
+ "expected_results": ""
+ }
+ },
+ "summary": "support_SSH_connection_to_Target"
+ }
+ },
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.Launch_QEMU_from_Eclipse",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Set the Yocto ADT's toolchain root location, sysroot location and kernel, in the menu Window -> Preferences -> Yocto ADT. \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "wget autobuilder.yoctoproject.org/pub/releases//machines/qemu/qemux86/qemu (ex:core-image-sato-sdk-qemux86-date-rootfs-tar-bz2) \nsource /opt/poky/version/environment-setup-i585-poky-linux \n\nExtract qemu with runqemu-extract-sdk /home/user/file(ex.core-image-sato-sdk-qemux86.bz2) \n/home/user/qemux86-sato-sdk \n\n",
+ "expected_results": " Qemu can be lauched normally."
+ },
+ "3": {
+ "action": "(a)Point to the Toolchain: \n \nIf you are using a stand-alone pre-built toolchain, you should be pointing to the /opt/poky/{test-version} directory as Toolchain Root Location. This is the default location for toolchains installed by the ADT Installer or by hand. If ADT is installed in other location, use that location as Toolchain location.\nIf you are using a system-derived toolchain, the path you provide for the Toolchain Root Location field is the Yocto Project's build directory. \n \n E.g:/home/user/yocto/poky/build \n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "(b)Specify the Sysroot Location: \nSysroot Location is the location where the root filesystem for the target hardware is created on the development system by the ADT Installer (SYSROOT in step 2 of the case ADT installer Installation). \n \n Local : e.g: /home/user/qemux86-sato-sdk \nUsing ADT : e.g :/home/user/test-yocto/qemux86 \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "(c)Select the Target Architecture: \n \nThe target architecture is the type of hardware you are going to use or emulate. Use the pull-down Target Architecture menu to make your selection. \n \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "(d) QEMU: \nSelect this option if you will be using the QEMU emulator. Specify the Kernel matching the QEMU architecture you are using. \n wget autobuilder.yoctoproject.org/pub/releases//machines/qemu/qemux86/bzImage-qemux86.bin \n e.g: /home/$USER/yocto/adt-installer/download_image/bzImage-qemux86.bin \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "(e) select OK to save the settings. \n\n\n1: In the Eclipse toolbar, expose the Run -> External Tools menu. Your image should appear as a selectable menu item. \n2: Select your image in the navigation pane to launch the emulator in a new window. \n3: If needed, enter your host root password in the shell window at the prompt. This sets up a Tap 0 connection needed for running in user-space NFS mode. \n",
+ "expected_results": ""
+ }
+ },
+ "summary": "Launch_QEMU_from_Eclipse"
+ }
+ },
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.Relocatable_SDK_-_C_-_Build_Hello_World_ANSI_C_Autotools_Project",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch a QEMU of target enviroment.(Reference to case \"ADT - Launch qemu by eclipse\") ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Select File -> New -> Project.",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Double click C/C++.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Click C or C++ Project to create the project.",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Expand Yocto ADT Project.",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Select Hello World ANSI C Autotools Project.",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Put a name in the Project name. Do not use hyphens as part of the name. \n \n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Click Next.",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Add information in the Author and Copyright notice fields. \n1",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "Click Finish. \n1",
+ "expected_results": ""
+ },
+ "11": {
+ "action": "If the \"open perspective\" prompt appears, click \"Yes\" so that you open the C/C++ perspective. \n1",
+ "expected_results": ""
+ },
+ "12": {
+ "action": "In the Project Explorer window, right click the project -> Reconfigure project. \n1",
+ "expected_results": ""
+ },
+ "13": {
+ "action": "In the Project Explorer window, right click the project -> Build project. \n1",
+ "expected_results": "Under the Project files, a new folder appears called Binaries. This indicates that the compilation have been successful and the project binary have been created. \n"
+ },
+ "14": {
+ "action": "Right click it again and Run as -> Run Configurations. \n\t\t\tUnder Run Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \n\t\t\tin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application. e.g.: /home/root/myapplication \n\t\t\tIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button. \n1",
+ "expected_results": "step 14 to step 16 -> Build succeed and the console outputs Hello world, you can also check the output on target."
+ },
+ "15": {
+ "action": "After all settings are done, select the Run button on the bottom right corner \n\n1",
+ "expected_results": ""
+ },
+ "16": {
+ "action": "Repeat the steps 14-15, but instead of using Run Configurations use Debug Configurations: \nRight click it again and Debug as -> Debug Configurations \nUnder Debug Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \nin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application.\ne.g.: /home/root/myapplication \nIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button \n1",
+ "expected_results": ""
+ },
+ "17": {
+ "action": "After all settings are done, select the Debug button on the bottom right corner",
+ "expected_results": ""
+ }
+ },
+ "summary": "Relocatable_SDK_-_C_-_Build_Hello_World_ANSI_C_Autotools_Project"
+ }
+ },
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.Relocatable_SDK_-_C++_-_Build_Hello_World_C++_Autotools_project",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch a QEMU of target enviroment.(Reference to case \"ADT - Launch qemu by eclipse\") ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Select File -> New -> Project. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Double click C/C++. ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Click C or C++ Project to create the project. ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Expand Yocto ADT Project. ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Select Hello World ANSI C++ Autotools Project. ",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Put a name in the Project name. Do not use hyphens as part of the name. \n \n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Click Next.",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Add information in the Author and Copyright notice fields.",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "Click Finish. \n1",
+ "expected_results": ""
+ },
+ "11": {
+ "action": "If the \"open perspective\" prompt appears, click \"Yes\" so that you open the C/C++ perspective. \n1",
+ "expected_results": ""
+ },
+ "12": {
+ "action": "In the Project Explorer window, right click the project -> Reconfigure project. \n1",
+ "expected_results": ""
+ },
+ "13": {
+ "action": "In the Project Explorer window, right click the project -> Build project. \n\n1",
+ "expected_results": "under the Project files, a new folder appears called Binaries. This indicates that the compilation have been successful and the project binary have been created. \n"
+ },
+ "14": {
+ "action": "Right click it again and Run as -> Run Configurations. \n\t\t\tUnder Run Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \n\t\t\tin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application. e.g.: /home/root/myapplication \n\t\t\tIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button. \n1",
+ "expected_results": "step 14 to step 16 -> Build succeed and the console outputs Hello world, you can also check the output on target."
+ },
+ "15": {
+ "action": "After all settings are done, select the Run button on the bottom right corner \n\n1",
+ "expected_results": ""
+ },
+ "16": {
+ "action": "Repeat the steps 14-15, but instead of using Run Configurations use Debug Configurations: \n\t\tRight click it again and Debug as -> Debug Configurations \n\t\tUnder Debug Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \n\t\tin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application. \n\t\te.g.: /home/root/myapplication \n\t\tIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button \n1",
+ "expected_results": ""
+ },
+ "17": {
+ "action": "After all settings are done, select the Debug button on the bottom right corner",
+ "expected_results": ""
+ }
+ },
+ "summary": "Relocatable_SDK_-_C++_-_Build_Hello_World_C++_Autotools_project"
+ }
+ },
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.Build_Eclipse_Plugin_from_source",
+ "author": [
+ {
+ "email": "laurentiu.serban@intel.com",
+ "name": "laurentiu.serban@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Clone eclipse-poky source. \n \n - git clone git://git.yoctoproject.org/eclipse-poky \n\n",
+ "expected_results": "Eclipse plugin is successfully installed \n\nDocumentation is there. For example if you have release yocto-2.0.1 you will found on http://autobuilder.yoctoproject.org/pub/releases/yocto-2.0.1/eclipse-plugin/mars/ archive with documentation like org.yocto.doc-development-$date.zip \n \n"
+ },
+ "2": {
+ "action": "Checkout correct tag. \n\n - git checkout <eclipse-version>/<yocto-version> \n\n",
+ "expected_results": "After plugin is build you must have 4 archive in foder scripts from eclipse-poky: \n - org.yocto.bc - mars-master-$date.zip \n - org.yocto.doc - mars-master-$date.zip --> documentation \n - org.yocto.sdk - mars-master-$date.zip \n - org.yocto.sdk - mars-master-$date.-archive.zip --> plugin "
+ },
+ "3": {
+ "action": "Move to scripts/ folder. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Run ./setup.sh \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "When the script finishes, it prompts a command to issue to build the plugin. It should look similar to the following: \n\n$ ECLIPSE_HOME=/eclipse-poky/scripts/eclipse ./build.sh /&1 | tee -a build.log \n\nHere, the three arguments to the build script are tag name, branch for documentation and release name. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "On an eclipse without the Yocto Plugin, select \"Install New Software\" from Help pull-down menu \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Select Add and from the dialog choose Archive... Look for the *archive.zip file that was built previously with the build.sh script. Click OK. \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Select all components and proceed with Installation of plugin. Restarting eclipse might be required.\n",
+ "expected_results": ""
+ }
+ },
+ "summary": "Build_Eclipse_Plugin_from_source"
+ }
+ },
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.Eclipse_Poky_installation_and_setup",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Install SDK \n\ta)Download https://autobuilder.yocto.io/pub/releases//toolchain/x86_64/poky-glibc-x86_64-core-\timage-sato-i586-toolchain-.sh \n\tb)Run the SDK installer and accept the default installation directory ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Install \"Eclipse IDE for C/C++ Developers\" Oxygen release (4.7.0) \n\ta) Go to https://www.eclipse.org/downloads/packages/all, click \"Oxygen R\" \n\tb) Click to download the build for your OS \n\tc) Click \"Download\" button to download from a mirror \n\td) Run \"tar xf\" to extract the downloaded archive ",
+ "expected_result": ""
+ },
+ "3": {
+ "action": "Install \"Eclipse IDE for C/C++ Developers\" Oxygen release (4.7.0) (Continue) \n\te) Run \"eclipse/eclipse\" to start Eclipse \n\tf) Optional step for host machine within Intel network: In Eclipse workbench window, go to \"Window\" menu -> \"Preferences...\". \n\tg) In \"Preferences\" dialog, go to \"General\" -> \"Network Connections\", set \"Active Provider\" to \"Manual\". In \"Proxy \tentries\" table, select HTTP and click \"Edit\" and enter host \"proxy-chain.intel.com\" port 911, click OK. Repeat for HTTPS with port 912 \nClick OK to close \"Preferences\" dialog. \n\th) Go to \"File\" menu -> \"Restart\" to restart Eclipse for proxy settings to take effect. ",
+ "expected_result": ""
+ },
+ "4": {
+ "action": "Install Eclipse Poky plugins \n\ta) Download https://autobuilder.yocto.io/pub/releases/<yocto-version>/eclipse-plugin/<eclipse-version>/org.yocto.sdk-development-<date>-archive.zip \n\tb) In Eclipse workbench window, go to \"Help\" menu -> \"Install New Software...\" \n\tc) In \"Install\" dialog, click \"Add...\" button \n\td) In \"Add Repository\" dialog, enter \"Eclipse Poky\" for (repository) Name, click \"Archive...\" ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Install Eclipse Poky plugins (continue) \n\te) In \"Repository archive\" browse dialog, select the downloaded Eclipse Poky repository archive \n\tf) Back in \"Add Repository\" dialog, click \"OK\" \n\tg) Back in \"Install\" dialog, make sure \"Work with:\" is set to \"Eclipse Poky\" repository, tick \"Yocto Project \tDocumentation Plug-in\" and \"Yocto Project SDK Plug-in\", click \"Next >\" and verify plugins/features name/version, \tclick \"Next >\" and accept license agreement, click \"Finish\" \n\th) If \"Security Warning\" dialog appears, click \"OK\" to install unsigned content. \n\ti) In \"Software Updates\" dialog, click \"Yes\" to restart Eclipse to complete Eclipse Poky plugins installation. ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Setup Eclipse Poky to use SDK \n\ta) In Eclipse workbench window, go to \"Window\" menu -> \"Preferences\". \n\tb) In \"Preferences\" window, go to \"Yocto Project SDK\", in \"Cross Compiler Options\" frame, select \"Standalone pre-\tbuilt toolchain\". ",
+ "expected_results": "Eclipse Poky plugins installed and running successfully, e.g. observe that \"Yocto Project Tools\" menu is available on Eclipse workbench window."
+ }
+ },
+ "summary": "Eclipse_Poky_installation_and_setup"
+ }
+ }
+] \ No newline at end of file
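A condensed sketch of the "Build_Eclipse_Plugin_from_source" flow above; tag, branch and release names are placeholders, and the exact build command is the one printed by setup.sh, not this line:

    git clone git://git.yoctoproject.org/eclipse-poky
    cd eclipse-poky
    git checkout <eclipse-version>/<yocto-version>
    cd scripts
    ./setup.sh
    # setup.sh prints the build command to run; it looks similar to:
    ECLIPSE_HOME=$PWD/eclipse ./build.sh <tag-name> <doc-branch> <release-name> 2>&1 | tee -a build.log
    ls *.zip    # org.yocto.sdk-*-archive.zip is the installable plugin repository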
diff --git a/meta/lib/oeqa/manual/kernel-dev.json b/meta/lib/oeqa/manual/kernel-dev.json
new file mode 100644
index 0000000000..0dd99199dc
--- /dev/null
+++ b/meta/lib/oeqa/manual/kernel-dev.json
@@ -0,0 +1,200 @@
+[
+ {
+ "test": {
+ "@alias": "kernel-dev.kernel-dev.Kernel_dev_defconfig",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_7 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_7",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_7"
+ }
+ },
+ "summary": "Kernel_dev_defconfig"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-dev.kernel-dev.Kernel_dev_defconfig+fragments",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_8 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_8",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_8"
+ }
+ },
+ "summary": "Kernel_dev_defconfig+fragments"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-dev.kernel-dev.Kernel_dev_Applying_patches",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results"
+ }
+ },
+ "summary": "Kernel_dev_Applying_patches"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-dev.kernel-dev.Kernel_dev_linux-yocto-local-source",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_2 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_2",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_2"
+ }
+ },
+ "summary": "Kernel_dev_linux-yocto-local-source"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-dev.kernel-dev.Kernel_dev_linux-yocto-custom-local-source",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_3 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_3",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_3"
+ }
+ },
+ "summary": "Kernel_dev_linux-yocto-custom-local-source"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-dev.kernel-dev.Kernel_dev_recipe-space_meta",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_5 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_5",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_5"
+ }
+ },
+ "summary": "Kernel_dev_recipe-space_meta"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-dev.kernel-dev.Kernel_dev_External_source",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_6 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_6",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_6"
+ }
+ },
+ "summary": "Kernel_dev_External_source"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-dev.kernel-dev.Kernel_dev_building_external_modules(hello-mod)",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_10 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup_10",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_10"
+ }
+ },
+ "summary": "Kernel_dev_building_external_modules(hello-mod)"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-dev.kernel-dev.Kernel_dev_local_parallel_meta",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wikioproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_4 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_4",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_4"
+ }
+ },
+ "summary": "Kernel_dev_local_parallel_meta"
+ }
+ }
+] \ No newline at end of file
diff --git a/meta/lib/oeqa/manual/oe-core.json b/meta/lib/oeqa/manual/oe-core.json
new file mode 100644
index 0000000000..fb47c5ec36
--- /dev/null
+++ b/meta/lib/oeqa/manual/oe-core.json
@@ -0,0 +1,158 @@
+[
+ {
+ "test": {
+ "@alias": "oe-core.bitbake.Test_bitbake_menuconfig",
+ "author": [
+ {
+ "email": "jose.perez.carranza@intel.com",
+ "name": "jose.perez.carranza@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "clone poky \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "cd poky \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source oe-init-build-env && cd build \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "set below in local.conf \n\n \tMACHINE = \"qemux86\" \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "bitbake linux-yocto -c kernel_configme -f \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "bitbake linux-yocto -c menuconfig \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Once menuconfig launches, use the interface to navigate through the selections and \n enable option \"64-bit kernel\" \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Save changes and set name of the file as \"test.config\" ",
+ "expected_results": "Open file: \n \npoky/build//tmp/work/qemux86-poky-linux/linux-yocto/4.X.X+*/linux-qemux86-standard-build/test.config \n \n \n\nand verify that changes are present in the file as follows: \n \nCONFIG_64BIT=y \n \nCONFIG_X86_64=y"
+ }
+ },
+ "summary": "Test_bitbake_menuconfig"
+ }
+ },
+ {
+ "test": {
+ "@alias": "oe-core.bitbake.test_bitbake_devshell",
+ "author": [
+ {
+ "email": "jose.perez.carranza@intel.com",
+ "name": "jose.perez.carranza@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "clone poky ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "cd poky ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source oe-init-build-env && cd build ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "bitbake matchbox-desktop ",
+ "expected_results": "Package was build correctly "
+ },
+ "5": {
+ "action": "bitbake matchbox-desktop -c devshell ",
+ "expected_results": "A terminal with a shell prompt within the OpenEmbedded build environment is opened "
+ },
+ "6": {
+ "action": "Verify that \"matchbox-desktop\" binary file is not created under\"src\" directory ",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Run command:./configure && make ",
+ "expected_results": "Verify that \"matchbox-desktop\" binary file was created successfully under \"src/\" directory "
+ },
+ "8": {
+ "action": "Exit fromthe devshell terminal,exit ",
+ "expected_results": "Terminal back to the build directory"
+ }
+ },
+ "summary": "test_bitbake_devshell"
+ }
+ },
+ {
+ "test": {
+ "@alias": "oe-core.bitbake.test_dependency_explorer_is_launched",
+ "author": [
+ {
+ "email": "jose.perez.carranza@intel.com",
+ "name": "jose.perez.carranza@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "clone poky ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "cd poky ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source oe-init-build-env ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "bitbake -u taskexp -g core-image-full-cmdline \n\nNOTE: To execute the last command of this test, it's necessary that the machine is executing an X11 server, or if that's not the case (for example, if running the test on a headless server), it is required to enable ssh X11 forwarding on both, the server and the client, and have the X11 server running on the client. \n\nThe instructions to enable X11 forwarding vary between distributions. But for example, these are the steps to enable it between a server running openSUSE Leap 42.1 and a client with Fedora 24: \nA. On the server, make sure /home//.ssh/config contains the line: \n ForwardX11 yes \nB. On the server, make sure xauth is installed by running: \n which xauth \nC. On the client, connect to the server, enabling X11 forwarding, for example by using: \n ssh -X user@server \nNOTE 2: depexp was renamed to taskexp on 2.3 M4",
+ "expected_results": "Verify that a \"dependency explorer\" is opened and file \n dependencies are listed "
+ }
+ },
+ "summary": "test_dependency_explorer_is_launched"
+ }
+ },
+ {
+ "test": {
+ "@alias": "oe-core.bitbake.test_bitbake_sane_error_for_invalid_layer",
+ "author": [
+ {
+ "email": "jose.perez.carranza@intel.com",
+ "name": "jose.perez.carranza@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "clone poky \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "cd poky \n \n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source oe-init-build-env && cd build \n \n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Add a invalid layer to conf/bblayers.conf \"<poky dir>/my-invalid-layer\" \n\t\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "bitbake core-image-minimal",
+ "expected_results": "Below error should be displayed:\n\"ERROR: Layer directory does not exist! Please check BBLAYERS in <poky dir>/<build dir>/conf/bblayers.conf\""
+ }
+ },
+ "summary": "test_bitbake_sane_error_for_invalid_layer"
+ }
+ }
+]
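The bitbake cases above reduce to a short command sequence. A minimal sketch, assuming a poky checkout with MACHINE set to "qemux86" as in Test_bitbake_menuconfig; the glob in the grep path stands in for the version-specific work directory:

    source oe-init-build-env    # drops you into the build directory
    echo 'MACHINE = "qemux86"' >> conf/local.conf
    bitbake linux-yocto -c kernel_configme -f
    bitbake linux-yocto -c menuconfig    # enable "64-bit kernel", save as test.config
    grep -E 'CONFIG_(64BIT|X86_64)=y' \
        tmp/work/qemux86-poky-linux/linux-yocto/*/linux-qemux86-standard-build/test.config

    bitbake matchbox-desktop                # build the package first
    bitbake matchbox-desktop -c devshell    # opens a shell in the build environment
    # inside the devshell: ./configure && make, then exit

    bitbake -u taskexp -g core-image-full-cmdline    # needs a working X11 display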
diff --git a/meta/lib/oeqa/manual/sdk.json b/meta/lib/oeqa/manual/sdk.json
new file mode 100644
index 0000000000..434982f7f5
--- /dev/null
+++ b/meta/lib/oeqa/manual/sdk.json
@@ -0,0 +1,32 @@
+[
+ {
+ "test": {
+ "@alias": "sdk.sdk_runqemu.test_install_cross_toolchain_can_run_multiple_qemu_for_x86",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Prepare kernel, rootfs tar.bz2 image, and qemu configuration \n \ta. Download kernel, rootfs tar.bz2 image and qemu configuration from public autobuilder webpage \n \tb. Goto https://autobuilder.yocto.io/pub/releases/<target_release>/machines/qemu/qemux86/ \n \tc. Download \n \t \ti. rootfs tar.bz2: core-image-sato-sdk-qemux86.tar.bz2 \n \t\tii. kernel: bzImage-qemux86.bin \n \t\tiii. qemu configuration: core-image-sato-sdk-qemux86.qemuboot.conf ",
+ "expected_results": "Download completes successfully."
+ },
+ "2": {
+ "action": "Download & install toolchain tarball matching your host from public autobuilder \n \ta. Goto https://autobuilder.yocto.io/pub/releases/<target_release>/toolchain/x86_64/ \n \tb. Download poky-glibc-x86_64-core-image-sato-<type-arch>-toolchain-<release-version>.sh \n \tc. Run command: poky-glibc-x86_64-core-image-sato-<type-arch>-toolchain-<release-version>.sh \n \td. After installation toolchain Run source command : source /toolchain-installed-path/environment-setup-<architecture name>-poky-linux",
+ "expected_results": "Toolchain gets installed successfully."
+ },
+ "3": {
+ "action": "Extract rootfs twice into two images \n \ta. Run 2 commands below: \n runqemu-extract-sdk core-image-sato-sdk-qemux86.tar.bz2 qemux86_rootfs_image1 \n runqemu-extract-sdk core-image-sato-sdk-qemux86.tar.bz2 qemux86_rootfs_image2",
+ "expected_results": "Both images build successfully."
+ },
+ "4": {
+ "action": " From the 2 terminals, start qemu to boot up both two images \n \ta. Run 2 commands below: \n runqemu <kernel-name> core-image-sato-sdk-qemux86.qemuboot.conf qemux86_rootfs_image1 \n runqemu <kernel-name> core-image-sato-sdk-qemux86.qemuboot.conf qemux86_rootfs_image2 ",
+ "expected_results": "Expect both qemu to boot up successfully."
+ }
+ },
+ "summary": "test_install_cross_toolchain_can_run_multiple_qemu_for_x86"
+ }
+ }
+] \ No newline at end of file
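A condensed sketch of the multi-QEMU case above, using the artifacts named in step 1 (bzImage-qemux86.bin stands in for <kernel-name>; the toolchain environment is assumed to already be sourced):

    # Extract the same rootfs tarball into two separate directories
    runqemu-extract-sdk core-image-sato-sdk-qemux86.tar.bz2 qemux86_rootfs_image1
    runqemu-extract-sdk core-image-sato-sdk-qemux86.tar.bz2 qemux86_rootfs_image2
    # In two separate terminals, boot one image each
    runqemu bzImage-qemux86.bin core-image-sato-sdk-qemux86.qemuboot.conf qemux86_rootfs_image1
    runqemu bzImage-qemux86.bin core-image-sato-sdk-qemux86.qemuboot.conf qemux86_rootfs_image2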
diff --git a/meta/lib/oeqa/manual/toaster-managed-mode.json b/meta/lib/oeqa/manual/toaster-managed-mode.json
new file mode 100644
index 0000000000..12374c7c64
--- /dev/null
+++ b/meta/lib/oeqa/manual/toaster-managed-mode.json
@@ -0,0 +1,2572 @@
+[
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.All_layers:_default_view",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table.",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "On the project page click on the \"View compatible layers\" link situated on the right-hand side, mid-page, under the \"Project configuration\" menu, in the \"Layers\" table.",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Check that the table is populated with the default layers (eg. meta-yocto-bsp, meta-yocto)",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check that by default the following columns are shown: Layer, Summary, Revision, Dependencies and Add/Delete",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that the \"Revision\" entries match the release entry from the main project page, in the project details section.",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Check that only one instance of the core layers (openembedded-core, meta-yocto and meta-yocto-bsp) shows in this table, and that instance has a branch that matches the selected project release from the main project page.",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Check that in the \"Dependencies\" column some of the layers should have a square box with a number in it. When clicking on it, a small popup should appear containing a list of other layers required for this layer to work. Every layer listed here should also be a link to the layer's detail page. \n \n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "From the \"Edit columns\" menu, activate the \"Git repository URL\" and the \"Subdirectory\" columns. In \"Git repository URL\": all the entries should have a link to the external site where the layer was downloaded from. Similarly, in \"Subdirectory\" links should exist, if a subdirectory entry is present.",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "All_layers:_default_view"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.All_layers:_Add/delete_layers",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "On the project page click on the \"View compatible layers\" link situated on the right-hand side, mid-page, under the \"Project configuration\" menus, in the \"Layers\" table. \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Check that the Add/delete column is enabled. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Add a new layer \nPick a layer that hasn't been added to the project. \n \nClick on the \"Add layer\" button present in the \"Add/delete\" column. \nIf the layer has unsatisfied dependencies a dialog will appear listing the dependencies (in alphabetical order), each of them with a checkbox so that you can select / deselect them. All checkboxes are checked by default. If you click the \"Cancel\" button the dialog closes. If you click the \"Add layers\" button, the layers are added to the project. \n\nMake sure to uncheck at least 1 of the dependencies so you can check that only the checked dependencies are added, and not the unchecked one(s). ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that the \"Add layer\" button fades out and is replaced temporarily by a message like \"1 layer added\" and then it is replaced by the \"Delete layer\" button. \nCheck that a confirmation message is displayed at the top of the page similar to \"You have added 1 layer to project_name_here: meta-yocto-bsp\". \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Delete an existing layer \nPick a layer that's already been added to the project. \nClick on the \"Delete layer\" button present in the \"Add/delete\" column. \nCheck that once the button is pressed, it fades out and is replaced temporarily by the message \"1 layer deleted\" and then it is replaced by the \"Add layer\" button. \nCheck that a confirmation is displayed at the top of the page similar to \"You have deleted 1 layer from project_name_here: meta-yocto-bsp\". ",
+ "expected_results": "All actions should complete successfully."
+ }
+ },
+ "summary": "All_layers:_Add/delete_layers"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.All_targets:_Default_view",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": " If no images exist in the project, build an image by inserting \"core-image-minimal\" in the \"Recipes\" field and press the \"Build\" button. Wait for the image to finish building. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "On the project page click on the \"Image Recipes\" link situated in the left-handed side of the page, under the \"Project configuration\" menus, in the \"COMPATIBLE METADATA\" table. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check that \"Compatible image recipes\" table is populated. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that the following columns are shown by default: \n\t\tImage recipe \n\t\tDescription \n \n\t\tLayer \n\t\tBuild \n\t\t Version ",
+ "expected_results": ""
+ }
+ },
+ "summary": "All_targets:_Default_view"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Configuration_variables:_default_view",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In the main project page, click on \"BitBake variables\" in the left-hand side of the page, under the \"CONFIGURATION\" menu. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Check that default values are as follows: \n\tDISTRO - poky \n\tIMAGE_FSTYPES - ext3 jffs2 tar.bz2 \n\tIMAGE_INSTALL_append - \"Not set\" \n\tPACKAGE_CLASES - package_rpm \n SSTATE_DIR - /homeDirectory/poky/sstate-cache \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check that under the \"Add variable\" section, the \"Variable\" field has the default text \"Type variable name\" present, the \"Value\" field has the default text \"Type variable value\" present and that the \"Add variable\" button is inactive. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that under the \"Add variable\" section, there is text present that describes the variables that Toaster cannot modify. ",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "Configuration_variables:_default_view"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Configuration_variables:_Test_UI_elements",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In the main project page, click on \"BitBake variables\" in the left-hand side of the page, under the \"CONFIGURATION\" menu. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "DISTRO: \n\t- check that the \"change\" icon is present (represented by a pen icon) \n\t- click on the \"change\" icon and check that the variable becomes an editable text field, populated with the current value of the variable \n\t- check that, if you delete the content of the text field, the save button is disabled \n\t- enter a distro name containing spaces (for example, \"poky tiny\") - check that an error message is shown explaining that the value entered cannot contain spaces \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "IMAGE_FSTYPES: \n\t- check that the \"change\" icon is present (represented by a pen icon) \n\t- click on the \"change\" icon and check that the variable becomes editable like so: the main input control is a set of checkboxes. There is a checkbox for each supported image type. The checkboxes are listed in ascending alphabetical order, broken down in 2 groups: \n\t\t",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "The selected types are checked and listed at the top \n\t\t",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "The other types are not checked and listed afterwards \n\t- check that all this is inside a scrollable div, and a text field is present above that filters out the content of the div as you type. \n\t- check that if there are no image types matching your typed string, a message is shown notifying you of this: \"No image types found\" \n\t- unselect all checkboxes and check that the save button is disabled and a message is shown: \"You must select at least one image type\" \n\t- select different checkboxes and hit save then make sure that the \tsaved value is consistent with the selected checkboxes \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "IMAGE_INSTALL_append: \n\t- check that the \"change\" icon is present (represented by a pen icon) \n\t- click on the \"change\" icon and check that the variable becomes a text field, populated with the current value of the variable. \n\n\t- check that the save button is disabled when the text field is empty \n\t- insert test in the text field (for example \"package1\") and hit save; be aware that there is no input validation for this variable \n\t- check that a new \"delete\" icon(a trashcan) has appeared next to the pen icon \n\t- check that clicking on the trashcan icon resets the value to \"Not set\" and makes the trashcan icon dissapear \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "PACKAGE_CLASSES: \n\t- check that the \"change\" icon is present (represented by a pen icon) \n\t- click on the \"change\" icon and check that the variable becomes editable with the following components: \n\t\t",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "A dropdown menu with values 'package_dev', 'package_ipk' and 'package_rpm' in this order. The value selected when you enter the editable state matches the first value of the variable (e.g. if the value is set to 'package_dev package_ipk' the value selected is 'package_dev'). \n \n\t\t",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "Two checkboxes, showing the 2 unselected values in the dropdown menu. \n\n\t- verify that the checkboxes are checked or unchecked to reflect the variable value (e.g. if the value is set to 'package_dev package_ipk', the 'package_ipk' checkbox is checked, and the 'package_rpm' checkbox is unchecked). \n\n\tBoth checkboxes can be unchecked. The value of the checkboxes changes dynamically as I change the selected value in the dropdown menu. This means that any changes to the dropdown menu should uncheck the checkboxes. \n\n\t- click on save and check that the value selected in the dropdown menu is the first value in the variable, followed by any checked checkboxes. \n\n\n",
+ "expected_results": ""
+ },
+ "11": {
+ "action": "Adding variables: \n\t- check that the \"add variable\" form has 2 text fields: one for the variable name and a second one for the variable value, plus an \"add\" button that is disabled until both text fields have some input in them. \n \n\t- check variable name validation: variable names cannot have spaces, and can only include letters, numbers, underscores and dashes; variable names entered cannot match the name of a variable already on the list; variable names cannot match the blacklisted variables mentioned in the text on the right-hand side of the page \n\n\t - check that an error message is shown indicating validation has failed and why once you try to put in the value or click on the \"Add variable\" button ",
+ "expected_results": "All mentioned elements should be present and functional."
+ },
+ "12": {
+ "action": "insert a valid combination and click on \"Add variable\"; check that a new variable/value pair is added at the bottom of the variable list and that the text fields in the \"add variable\" form are cleared and the \"add\" button is disabled \n\t- check that the added variable has a \"change\" icon present next to the variable value, and also that a \"delete\" icon is present next to the variable name \n\t- check that clicking the \"change\" icon makes the variable editable in a text field containing the value of the variable \n\t- check that, if you delete the content of the text field, the save button is\tdisabled\n\t- check that clicking on the \"delete\" button causes both the variable name and the variable value to be removed from the variables list",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "Configuration_variables:_Test_UI_elements"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Project_builds:_Default_view",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Click on the \"Builds\" , next to the \"Configuration\" Button. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Check that the page heading includes a counter with the number of builds run for the project(eg. \"Project builds (4)\"). \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check that the following table heads are visible by default: outcome, completed on, failed tasks, errors, warnings, image files. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that by default the table is sorted by \"Completed on\" in descending order",
+ "expected_results": "All mentioned elements should be present."
+ }
+ },
+ "summary": "Project_builds:_Default_view"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Project_builds:_Sorting_the_project_builds_table",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Click on the \"View all project builds\" link situated below the top-most \"Build\" button and text field, next to the \"View all targets\" link \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Verify that, by default, the table is sorted by \"Completed on\" in descending order. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Activate all columns from the \"Edit columns\" table. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that the following columns are sortable, both in ascending and descending order: outcome, target, machine, started on, completed on, warning, project \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Verify that hiding a column that is currently being used as the sorting criteria causes the sorting to reset to the default - i.e \"Completed on\" in descending order.",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "Project_builds:_Sorting_the_project_builds_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Project_builds:_customize_the_columns_of_the_table",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Click on the \"View all project builds\" link situated below the top-most \"Build\" button and text field, next to the \"View all targets\" link \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Click on the \"Edit column\" menu and check that the selected columns match the columns currently being shown. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check that the following columns cannot be removed from the shown columns: completed on, outcome, recipe \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that unchecked items changed to checked immediately appear in the table and that checked items changed to unchecked immediately disappear from the table.",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "Project_builds:_customize_the_columns_of_the_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Project_builds:_filter_the_contents_of_the_table",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Click on the \"View all project builds\" link situated below the top-most \"Build\" button and text field, next to the \"View all targets\" link. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Make sure the following columns have filters: outcome, started on, completed on, failed tasks. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Filters are mutually exclusive. Click a filter button of a one column and a filter dialogue occurs. Select a filter item. The filter result would be showed. Then select another filter item of another column and the previously applied filter is overridden by the newly selected filter when a filter from a different column is applied to the table. This filter will override the current filter.\" \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Filters are overridden by search. Run a search query and you can see previous filter results are overridden by the results of the search query.",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "Project_builds:_filter_the_contents_of_the_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Project_builds:_search_the_contents_of_the_table",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Click on the \"View all project builds\" link situated below the top-most \"Build\" button and text field, next to the \"View all targets\" link. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "When no search query has been entered, we have placeholder text saying: \"Search builds\". The placeholder text disappears when the first character is typed. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "When a search query has been submitted and results returned: \n- We keep the search string in the text input field. \n- We provide a \"Clear search\" icon (icon-remove-sign). Click it to clear the search and display all packages. \n- We change the page heading to indicate the number of results returned by the search query. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "If your search query returns no results, the page heading changes to \"No packages found\", and we show you an alert with a search form and an option to show all packages. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Searching does not change the state of the table: the same columns remain hidden and the same sorting applied. ",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "Project_builds:_search_the_contents_of_the_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Layer_details_page:_Default_view",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Click on the \"View compatible layers\" link situated in the \"Project configuration\" portion of the page, under \"Layers\" table. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Click on a layer (for example \"meta-aarch64\"). Notice that the page is divided into 2 columns: the left one is broken down into tabs; the right one provides information about the layer. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check that breadcrumbs exist at the top of the page. Check that they work by clicking on them, then hitting back to return to the layer detail page. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that the page heading includes the layer branch name - it should look something like meta-aarch64(dizzy) if the dizzy branch was selected. The branch name should also be present in the breadcrumbs. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "The \"About\" information: \nit shows summary, description in this order, if not empty. If an information item is empty (like the Summary in the example shown in this page), it does not display. \n \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "The tabs: \nCheck that there are 3 tabs: \"layer details\", \"recipes\", \"machines\" showing up in this order. \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "\"Layer details\" tab: \nCheck that the tab shows: \n- A button to add / remove the layer to / from the project. In this tab, the button labels are\"Add the $layer_name layer to your project\" \"Delete the $layer_name layer from your project\" \n- Some details about the layer: repository URL, repository subdirectory, revision (the branch) and the list of layer dependencies. If any of the above details is blank (most likely, the subdirectory) it does not display.The icons next to the repository and subdirectory information are links to their web instances. Those links should open in a new window. \n\n",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "The \"Recipes\" tab: \nCheck that it shows: \n\t",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "A counter in the tab label showing the total number of targets provided by the layer \n\t",
+ "expected_results": ""
+ },
+ "11": {
+ "action": "A button to add / remove the layer to / from the project. In this tab, the button labels are \"Add the $layer_name layer to your project to enable these targets\"/\"Delete the $layer_name layer from your project\" \n\t",
+ "expected_results": ""
+ },
+ "12": {
+ "action": "A recipes table with the following columns: \n \n- Recipe \n- Description: the value of the DESCRIPTION variable. If not set, then the value of the SUMMARY variable. \n- Build recipe, which shows a \"build recipe\" button. The \"build recipe\" button is disabled when the layer is not added to the project. \n\nThe recipes table is sorted by \"Recipe\" in ascending alphabetical order. \n\n1",
+ "expected_results": ""
+ },
+ "13": {
+ "action": "The \"Machines\" tab: \n\t",
+ "expected_results": ""
+ },
+ "14": {
+ "action": "A counter in the tab label showing the total number of machines provided by the layer \n\t",
+ "expected_results": ""
+ },
+ "15": {
+ "action": "A button to add/remove the layer to/from the project. In this tab, the button labels are \"Add the $layer_name layer to your project to enable these machines\"/\"Delete the $layer_name layer from your project\" \n\t",
+ "expected_results": ""
+ },
+ "16": {
+ "action": "A machines table with the following columns: \n \n- Machine. \n \n- Description: The value of the DESCRIPTION variable in the .conf file \n- Select machine, which shows a \"select\" button. The \"select\" button is disabled when the layer is not added to the project. \nThe machines table is sorted by \"Machine\" in ascending alphabetical order. \n",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "Layer_details_page:_Default_view"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Layer_details_page:_UI_functionality",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Click on the \"View compatible layers\" link situated in the \"Project configuration\" portion of the page, under \"Layers\" \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Click on a layer (for example \"meta-aarch64\"). \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Adding/removing a layer: \nClick on \"Add the $layer_name_here layer to your project\" and verify that the \"Add layer\" button turns into a red button with the label \"Delete the $layer_name_here layer from your project\" and that at the top of the page, below the header, you see a message \"You have added 1 layer to $project_name_here: $layer_name_here\". This message can be dismissed by clicking on the \"X\" at the top right side of the message. ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Click on the red \"Delete the $layer_name_here from your project\" button and verify that the \"Delete layer\" button turns back into a grey button with the label \"Add the $layer_name_here to your project\" and that at the top of the page, below the header, you see a message \"You have deleted 1 layer to $project_name_here: $layer_name_here\". This message can be dismissed by clicking on the \"X\" at the top right side of the message. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Dependencies window: \n For a layer that has dependencies( for example \"meta-ettus\"), once you click the \"Add layer\" button, verify that you get a message window that presents the dependencies for this layer with the message \"$layer_name_here depends on some layers that are not added to your project. Select the ones you want to add:\", a list with a checkbox for each one and 2 options: \"Add layers\" or \"Cancel\". \nClicking on \"Add layers\" adds all the dependencies and the current layer. Clicking on \"Cancel\" takes you back to the layer detail page without adding any of the layers. \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "\"Recipes\" table \nCheck that if the layer hasn't been added to the project, the \"Build recipe\" button(s) are disabled. After the project is added, check that the \"Build recipe\" button(s) become active and clicking on a button sends you to the main project page and starts a build. \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "\"Machines table\" \nCheck that if the layer hasn't been added to the project, the \"Select machine\" button(s) are disabled. After the project is added, check that the \"Build machine\" button(s) become active and clicking on a button sends you to the main project page and modifies the project machine to the one selected. ",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "Layer_details_page:_UI_functionality"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Importing_new_layers",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Click on the \"Import layer\" link situated in the \"Project configuration\" portion of the page, under \"Layers\" table. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Check that the import layer form is shown, with the following elements as text fields to be filled out: \nLayer name (example: meta-imported) \nGit repository URL (example: git://github.com/shr-distribution/meta-smartphone.git) \nRepository subdirectory (optional) (example: meta-acer) \nRevision (example: master) \n\nIn addition, a separate portion of the form will be the \"Layer dependencies\" portion, where you can add dependency layers for the layer you are importing. This portion will contain a list of dependencies already added, with a trashcan icon next to them that will delete them when pressed and a text field with a \"add layer\" button next to it for adding dependencies. (for example: meta-android, meta-oe) \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "At the bottom of the form, check that a button exists with the label \"Import and add to project\". Check that this button is inactive until the required fields are filled out. \nCheck that clicking on the \"Import button\" takes you back to the main project page and that the imported layer, along with any dependencies, were added in the project's layers. ",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "Importing_new_layers"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Layer_details_page:_UI_functionality_for_imported_layers",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Click on the \"View compatible layers\" link situated in the \"Project configuration\" portion of the page, under \"Layers\" table. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Select an imported layer (see in TC 1112 how to import a layer). \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Page heading \nCheck that the page heading includes the branch, tag or commit as entered when importing the layer.\nIf it's a commit, Check that only the first 10 characters are shown followed by an ellipsis character. The full commit shows on hover.The branch, tag or commit information also shows in the breadcrumb. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "The \"About\" information \nIt shows: \n- Summary \n- Description \nin this order. Those two items always show, independently of them being blank or not, since they can be edited by users. \n\nWhen an information item is empty, it shows as \"not set\" with a \"change\" icon. Click on the icon to add a value. \n\nFor \"Summary\" and \"Description\" clicking the \"change\" icon shows the selected information item in its editable state. It consists of a text area, set to 2 rows for the \"Summary\" and to 6 rows for the \"Description\", plus 'save' and 'cancel' buttons. \nThe 'save' buttons only activate when there is at least one character in the text area. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "The tabs \nCheck that the tabs shown are: \n- \"Layer details\" \n- \"Recipes\" \n- \"Machines\" \nThe tabs should show in the order in which they are listed above. \n\n\"Layer details\" tab: \nIn not-editable pages, the tab shows: \n- A button to add / remove the layer to / from the project. In this tab, the button labels are: \n\t- \"Add the $layer_name layer to your project\" \n\t- \"Delete the $layer_name layer from your project\" \n\n- Some details about the layer: repository URL, repository subdirectory, revision (branch / tag / commit) and the list of layer dependencies. This is the information required from users when importing a layer. The subdirectory and the layer dependencies can be blank.",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "If blank, they show as \"not set\". \n\nEditing the \"Repository URL\" \nThe \"Git repository URL\" cannot be blank. Therefore, we show only a \"change\" icon next to it. When you click the icon, the text input field is set to the current value. If you delete the value from the input field, we disable the \"save\" button. We enable it again when you type something in the field. \n\nEditing the \"Repository subdirectory\" \nThe \"Repository subdirectory\" can be blank. Therefore, we show both \"change\" and \"delete\" icons. When you click the \"delete\" icon, we \nshow the label \"Not set\".",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "\nWhen you click the \"change\" icon, the text input field is set \nto the current value. \nIf you delete the value from the input field, we disable the \"save\" button. We enable it again when you type something in the field. \n \n\nEditing the \"Revision\" \nThe \"Revision\" cannot be blank. Therefore, we show only a \"change\" icon next to it. When you click the icon, the text input field is set to the current value. If you delete the value from the input field, we disable the \"save\" button. We enable it again when you type something in the field. \n\nThe \"Recipes\" tab \nIt shows: \n1: A counter in the tab label showing the total number of targets provided by the layer \n2: A button to add/remove the layer to/from the project. ",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "In this tab, the button labels are \n \n\"Add the $layer_name layer to your project to enable \nthese targets\" \n\"Delete the $layer_name layer from your project\" \n\n3: A \"Recipes\" table with the following columns: \n \n- Recipe \n- Description: the value of the DESCRIPTION variable. If not set, then the value of the SUMMARY variable. \n- Build recipe, which shows a \"build recipe\" button. The \"build recipe\" button is disabled when the layer is not added to the project. \nThe recipes table is sorted by \"Recipe\" in ascending alphabetical order. \n\nThe \"Machines\" tab: \nIt shows: \n1: A counter in the tab label showing the total number of machines provided by the layer \n2: A button to add/remove the layer to/from the project.",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "In this tab, the button labels are \n \n\t- \"Add the $layer_name layer to your project to enable these machines\" \n\t- \"Delete the $layer_name layer from your project\" \n3: A \"machines\" table with the following columns: \n- Machine. \n \n- Description: The value of the DESCRIPTION variable in the .conf file \n- Select machine, which shows a \"select\" button. The \"select\" button is disabled when the layer is not added to the project. \nThe machines table is sorted by \"Machine\" in ascending alphabetical order. ",
+ "expected_results": "All mentioned elements should be present and functional."
+ }
+ },
+ "summary": "Layer_details_page:_UI_functionality_for_imported_layers"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Multiple_build_directories",
+ "author": [
+ {
+ "email": "stanciux.mihail@intel.com",
+ "name": "stanciux.mihail@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "after starting Toaster for the first time, go to http://[localhost]:8000/admin/ and login with the admin user you created during setup. Click on the Build environments section, and on the BuildEnvironment object. \n\n\n\nNote: you can create a superuser to enter as admin with ... poky/bitbake/lib/toaster/manage.py createsuperuser \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "make note of the \"sourcedir\" and \"builddir\" values. The build dir will be something like \"/home/user/path/build\" \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": " click \"back\", and click on the \"Add build environment\" button in the upper right corner. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "enter Address \"2\", Betype: \"local\", \"sourcedir\" is to be set to whatever the original build env is set, and \"builddir\" is ANOTHER path at the same level as the original builddir - e.g. \"/home/user/path/build2\" \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Click save \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Execute command : /poky$ source oe-init-build-env build2 \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Create new project \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "issue 2 build (e.g. core-image-minimal core-image-sato)\n\n\n\n\n\n",
+ "expected_results": "Both build commands should run simultaneously."
+ }
+ },
+ "summary": "Multiple_build_directories"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Run_again_button_from_all_builds_page_must_run_the_specified_task",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster.",
+ "expected_results": "Toaster starts. \n"
+ },
+ "2": {
+ "action": "Click on new project button.",
+ "expected_results": " Open create a new project page. \n"
+ },
+ "3": {
+ "action": "Enter a project name, select a release and click on create project or select an existing project.",
+ "expected_results": " Project Created. \n"
+ },
+ "4": {
+ "action": "Build a image task (ex: core-image-minimal:clean) and wait until build finish.\nfrom all build page.",
+ "expected_results": " Build task finishes successfully. \n"
+ },
+ "5": {
+ "action": "Click on rebuild button from all build page.",
+ "expected_results": "Specified task will run again. \n"
+ },
+ "6": {
+ "action": "Click on the build and verify if the number of tasks executed = 1.",
+ "expected_results": "Only the specified task is executed. \n"
+ },
+ "7": {
+ "action": "From project builds page click on run again button.",
+ "expected_results": "Specified task will run again.\n"
+ },
+ "8": {
+ "action": "Click on the build and verify if the number of tasks executed = 1.",
+ "expected_results": ""
+ }
+ },
+ "summary": "Run_again_button_from_all_builds_page_must_run_the_specified_task"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Intel_layers_builds",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster.",
+ "expected_results": "Toaster starts. \n"
+ },
+ "2": {
+ "action": "Click on new project button.",
+ "expected_results": "Open create a new project page. \n"
+ },
+ "3": {
+ "action": "Enter a project name, select a release and click on create project.",
+ "expected_results": "Project Created. \n"
+ },
+ "4": {
+ "action": "Click on layers tab.",
+ "expected_results": "Open compatible layers page. \n"
+ },
+ "5": {
+ "action": "Search for intel.",
+ "expected_results": "Return results \n"
+ },
+ "6": {
+ "action": "Add intel layers like: meta-intel, meta-intel-quark.",
+ "expected_results": "Layers added to project. \n"
+ },
+ "7": {
+ "action": "Click on the added layer.",
+ "expected_results": "Open layer page. \n"
+ },
+ "8": {
+ "action": "From machine tab, select a machine.",
+ "expected_results": "Machine has changed. \n"
+ },
+ "9": {
+ "action": "Build a recipe(core-image-minimal) or a recipe from recipe tab.",
+ "expected_results": "Build finishes successfully."
+ }
+ },
+ "summary": "Intel_layers_builds"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Download_other_artifacts",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Delete the build/tmp folder. (to make sure the rootfs task runs and other artifacts are generated for the build)",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Start Toaster.",
+ "expected_results": "Toaster starts. \n\t"
+ },
+ "3": {
+ "action": "Click on new project button.",
+ "expected_results": "Open create a new project page. \n\t"
+ },
+ "4": {
+ "action": "Enter a project name, select a release and click on create project.",
+ "expected_results": "Project Created. \n\t"
+ },
+ "5": {
+ "action": "Build an image recipe (ex: core-image-minimal) and wait until build finish.",
+ "expected_results": "Build finishes successfully. \n\t"
+ },
+ "6": {
+ "action": "Click on the built recipe.",
+ "expected_results": "Open build summary page. \n\t"
+ },
+ "7": {
+ "action": "From other artifacts tab click on a link.",
+ "expected_results": "You can download other artifacts."
+ }
+ },
+ "summary": "Download_other_artifacts"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Download_licence_manifest",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Delete the build/tmp folder. (to make sure license manifest is generated)",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Start Toaster \n\n",
+ "expected_results": "Toaster starts. \n"
+ },
+ "3": {
+ "action": "Click on new project button. \n\n",
+ "expected_results": "Open create a new project page. \n"
+ },
+ "4": {
+ "action": "Enter a project name, select a release and click on create project. \n\n",
+ "expected_results": "Project Created. \n"
+ },
+ "5": {
+ "action": "Build an image recipe (ex: core-image-minimal) and wait until build finish. \n\n",
+ "expected_results": "Build finishes successfully. \n"
+ },
+ "6": {
+ "action": "Click on the built recipe. \n\n",
+ "expected_results": "Open build summary page. \n"
+ },
+ "7": {
+ "action": "From Image tab click on \"Download\" button for License manifest.",
+ "expected_results": "You can download license manifest."
+ }
+ },
+ "summary": "Download_licence_manifest"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Test_dependencies_layers",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster.",
+ "expected_results": "Toaster starts. \n"
+ },
+ "2": {
+ "action": "Click on new project button.",
+ "expected_results": "Open create a new project page. \n"
+ },
+ "3": {
+ "action": "Enter a project name, select a release and click on create project.",
+ "expected_results": "Project Created. \n"
+ },
+ "4": {
+ "action": "Click on Layers.",
+ "expected_results": "Open compatible layers page. \n"
+ },
+ "5": {
+ "action": "Add a layer with multi-level dependencies. (ex: meta-acer) \nThis layer depends on meta-networking, which in turn depends on meta-android. \n \n",
+ "expected_results": "The selected layer and dependencies were added to project. \n"
+ },
+ "6": {
+ "action": "Check if meta-python appears in the dependencies list, and add the layers to project.",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Delete a dependency layer.",
+ "expected_results": "Layer removed from project. \n \n"
+ },
+ "8": {
+ "action": "Build a recipe (ex: core-image-minimal) and wait until build finish.\n",
+ "expected_results": "Build will fail with an error.\n\n\t"
+ }
+ },
+ "summary": "Test_dependencies_layers"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Test_build_recipe_button_from_recipes_page",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster.",
+ "expected_results": "Toaster starts. \n"
+ },
+ "2": {
+ "action": "Click on new project button.",
+ "expected_results": "Open create a new project page. \n"
+ },
+ "3": {
+ "action": "Enter a project name, select a release and click on create project.",
+ "expected_results": "Project Created. \n"
+ },
+ "4": {
+ "action": "Click on software recipes / image recipes.",
+ "expected_results": "Open compatible software recipes page. \n"
+ },
+ "5": {
+ "action": "Select a recipe and click on 'add layer' button.",
+ "expected_results": "Layer added to project and the 'add layer' button becomes 'build recipe'. \n"
+ },
+ "6": {
+ "action": "Click on \"Build recipe\" button for one recipe (ex : core-image-minimal / busybox).",
+ "expected_results": "Build finishes successfully."
+ },
+ "7": {
+ "action": "Test this for software and image recipes tables.",
+ "expected_results": ""
+ }
+ },
+ "summary": "Test_build_recipe_button_from_recipes_page"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Test_compatible_machines",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster.",
+ "expected_results": "Toaster starts. \n"
+ },
+ "2": {
+ "action": "Click on new project button.",
+ "expected_results": "Open create a new project page. \n"
+ },
+ "3": {
+ "action": "Enter a project name, select a master release and click on create project.",
+ "expected_results": "Project Created. \n"
+ },
+ "4": {
+ "action": "Go to machines page.",
+ "expected_results": "Open compatible machines page. \n"
+ },
+ "5": {
+ "action": "Choose a machine and click on add layer for it. (intel-core2-32)",
+ "expected_results": "Layers added to project and add layer button becomes select machine. \n"
+ },
+ "6": {
+ "action": "Click on select machine.",
+ "expected_results": "Machine has changed. \n"
+ },
+ "7": {
+ "action": "Go to layer page that generate the machine. (meta-intel)",
+ "expected_results": "Open layer page \n"
+ },
+ "8": {
+ "action": "Build a recipe generated by that layer.",
+ "expected_results": "Build finishes successfully."
+ }
+ },
+ "summary": "Test_compatible_machines"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Builds_with_different_machines",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster.",
+ "expected_results": "Toaster starts. \n"
+ },
+ "2": {
+ "action": "Click on new project button.",
+ "expected_results": "Open create a new project page. \n"
+ },
+ "3": {
+ "action": "Enter a project name, select a master release and click on create project.",
+ "expected_results": "Project Created. \n"
+ },
+ "4": {
+ "action": "Select a machine (ex: qemux86-64)",
+ "expected_results": "The machine has changed. \n"
+ },
+ "5": {
+ "action": "Build a recipe (ex: core-image-minimal) and wait until buid finish.",
+ "expected_results": "Build finishes successfully. \n"
+ },
+ "6": {
+ "action": "Go to project page and change the machine (ex: qemumips)",
+ "expected_results": "The machine has changed. \n"
+ },
+ "7": {
+ "action": "Build a recipe (ex: core-image-sato) and wait until build finish.",
+ "expected_results": "Build finishes successfully. \n\nYou can build recipes with different machines."
+ },
+ "8": {
+ "action": "Check on build summary page that the machine match the machine selected.",
+ "expected_results": ""
+ }
+ },
+ "summary": "Builds_with_different_machines"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Test_bitbake_variables_-_IMAGE_FSTYPES",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster.",
+ "expected_results": "Toaster starts. \n"
+ },
+ "2": {
+ "action": "Click on new project button.",
+ "expected_results": "Open create a new project page. \n"
+ },
+ "3": {
+ "action": "Enter a project name, select release and click on create project.",
+ "expected_results": "Project Created. \n"
+ },
+ "4": {
+ "action": "Go to Configuration --> BitBake variables",
+ "expected_results": "Open Bitbake variables page. \n"
+ },
+ "5": {
+ "action": "Change IMAGE_FSTYPES variable, add some image types like: hddimg, ext4, etc.",
+ "expected_results": "Image types were added. \n"
+ },
+ "6": {
+ "action": "Build a recipe (ex: core-image-minimal) and wait until build finish.",
+ "expected_results": "Build finishes successfully. \n"
+ },
+ "7": {
+ "action": "Verify in the build summary page if the image types selected were built.",
+ "expected_results": "All the image types selected appears in the build summary page."
+ }
+ },
+ "summary": "Test_bitbake_variables_-_IMAGE_FSTYPES"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Software_recipes:_default_view",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": " If no images exist in the project, build an image by inserting \"core-image-minimal\" in the \"Recipes\" field and press the \"Build\" button. Wait for the image to finish building. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "On the project page click on the \"Software Recipes\" link situated in the left-handed side of the page, under the \"Project configuration\" menus, in the \"COMPATIBLE METADATA\" table. \n\n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check that \"Compatible software recipes\" table is populated. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that the following columns are shown by default: \n\n\t\tSoftware recipe \n\t\tDescription \n \n\t\tLayer \n\t\tBuild \n\t\t Version ",
+ "expected_results": ""
+ }
+ },
+ "summary": "Software_recipes:_default_view"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Software_recipes:_sorting_the_content_of_the_software_recipes_table",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Navigate to the \"Software Recipes\" page. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Make sure the table is sorted on the \"Software Recipe\" column by default in ascending order. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Activate all columns from the \"Edit column\" drop-down menu. Check that \"Build\" and \"Software Recipe\" columns cannot be unchecked. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that \"Software Recipe\", \"Section\", \"License\", \"Layer\" are the only sortable table heads. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Sort the table by \"Layer\" and then navigate away by selecting a layer(such as meta-yocto). When you click \"back\" button in web-browser to go back to the \"Compatible image recipes\" table it should still be sorted by \"Layer\". \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Sorting and \"Edit columns\" \nIf you use the \"Edit columns\" menu to hide the column with the applied sorting, we revert the sorting to the default sorting (i.e. \"Recipe\"). The default sorting always uses one of the core columns, which cannot be hidden using the \"Edit columns\" menu. \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Sorting and search \nSearching should have no impact on the applied sorting. Any results returned should be sorted by the sorting criteria selected when the search query was submitted. \nSort recipes by \"Layer\" column heading. Input a string (such as \"meta\") in search box and click search button. Make sure results returned are sorted by \"Layer\".",
+ "expected_results": ""
+ }
+ },
+ "summary": "Software_recipes:_sorting_the_content_of_the_software_recipes_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Software_recipes:_Searching_the_content_of_the_software_recipes_table",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Navigate to the \"Software Recipes\" page. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Check that the search is made of a text input field and a \"Search\" button in a toolbar above the table. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "When no search query has been entered, we have placeholder text saying: \"Search compatible software recipes\". \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Input \"core\" in the text input field. The placeholder text disappears when the first character is typed. Click search button. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": " \n(1) returned results \nThe search string is kept in the text input field. The results returned occur. Click \"Clear search\" icon to clear the search and display the compatible recipes. \n(2) no results returned \nIf your search query returns no results, the page heading changes to \"No recipes found\", and we show you an alert with a search form and an option to show all targets. Check that \"show compatible recipes\" button is available. \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "When I run a search, the search happens against the following columns (independently of they being shown or hidden):\n- Software Recipe\n- Recipe version\n- Description\n- Recipe file\n- Section\n- License\n- Layer\n- Revision\nInput a string to search for the above column headings separately to make sure that the search happens against the columns. \n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Search, sorting and \"Edit columns\" \nSearching does not change the state of the table: the same columns remain hidden and the same sorting applied when search results are displayed, but filters are cleared by the search results.\nSearch a string and make sure that the same columns remain hidden and the same sorting applied. \n\n",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Search and filters \nThe scope of the filters is the content currently on the table (this means all table pages, not only the one displayed). The scope of the search is always the content of the database. \n\nIf I run a search query, any filter applied afterwards will filter the content returned by the search query. \n\nIf I run a search query while a filter is applied, the filter is cleared by the results of the search query (i.e. we display the results of the search query and clear the filter applied beforehand). The same happens if I click the \"Clear search\" icon when a filter is applied to a set of search results (both search results and applied filter are cleared, and the table shows all the targets). ",
+ "expected_results": ""
+ }
+ },
+ "summary": "Software_recipes:_Searching_the_content_of_the_software_recipes_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Software_recipes:_Filter_the_contents_of_the_software_recipes_table",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All builds\" table.",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Navigate to the \"Software Recipes\" page.",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Make sure the following table column has filters: \n- Build",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Filters are mutually exclusive. Click a filter button of a one column and a filter dialogue occurs. Select a filter item. The filter result would be showed. Then select another filter item of another column and the previously applied filter is overridden by the newly selected filter when a filter from a different column is applied to the table. In this state, we show some help text next to the \"Apply\" button, saying \"You can only apply one filter to the table. This filter will override the current filter.\"",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Filters are overridden by search. Run a search query and you can see previous filter results are overridden by the results of the search query.",
+ "expected_results": ""
+ }
+ },
+ "summary": "Software_recipes:_Filter_the_contents_of_the_software_recipes_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Test_the_packages_included_in_the_image",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster.",
+ "expected_results": "Toaster starts. \n"
+ },
+ "2": {
+ "action": " Click on new project button.",
+ "expected_results": " Open create a new project page. \n"
+ },
+ "3": {
+ "action": " Enter a project name, select a release and click on create project.",
+ "expected_results": " Project Created. \n"
+ },
+ "4": {
+ "action": " Build a recipe (ex: core-image-minimal) and wait until build finish.",
+ "expected_results": " Build finishes successfully. \n"
+ },
+ "5": {
+ "action": " Click on the built recipe.",
+ "expected_results": " Open build summary page. \n"
+ },
+ "6": {
+ "action": " Under IMAGES tab click on the recipe built.",
+ "expected_results": "Image page open. \n"
+ },
+ "7": {
+ "action": "Click on a package name.",
+ "expected_results": "Open the package page. \n"
+ },
+ "8": {
+ "action": "Under file title click on the link to file.",
+ "expected_results": "You are redirected to directory structure and you can see where the file is located."
+ }
+ },
+ "summary": "Test_the_packages_included_in_the_image"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Test_the_filters_from_a_image_page",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Start Toaster.",
+ "expected_results": "Toaster starts. \n"
+ },
+ "2": {
+ "action": " Click on new project button.",
+ "expected_results": " Open create a new project page. \n"
+ },
+ "3": {
+ "action": " Enter a project name, select a release and click on create project.",
+ "expected_results": " Project Created. \n"
+ },
+ "4": {
+ "action": " Build a recipe (ex: core-image-minimal) and wait until build finish.",
+ "expected_results": " Build finishes successfully. \n"
+ },
+ "5": {
+ "action": " Click on the built recipe.",
+ "expected_results": " Open build summary page. \n"
+ },
+ "6": {
+ "action": "Click on Configuration - Bitbake Variables.",
+ "expected_results": "Open bitbake variables page. \n"
+ },
+ "7": {
+ "action": "Test Description filter. ",
+ "expected_results": "Filter works ok. (filter returns only items that match the selected criteria) "
+ }
+ },
+ "summary": "Test_the_filters_from_a_image_page"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Test_dependencies_link",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Start Toaster.",
+ "expected_results": "Toaster starts. \n"
+ },
+ "2": {
+ "action": " Click on new project button.",
+ "expected_results": " Open create a new project page. \n"
+ },
+ "3": {
+ "action": " Enter a project name, select a release and click on create project.",
+ "expected_results": " Project Created. \n"
+ },
+ "4": {
+ "action": " Build a recipe (ex: core-image-minimal) and wait until build finish.",
+ "expected_results": " Build finishes successfully. \n"
+ },
+ "5": {
+ "action": " Click on the built recipe.",
+ "expected_results": " Open build summary page. \n"
+ },
+ "6": {
+ "action": "Click on recipes tab.",
+ "expected_results": "Open recipes page. \n"
+ },
+ "7": {
+ "action": "Click on edit columns and select Dependencies.",
+ "expected_results": "Dependencies column is shown in the table. \n"
+ },
+ "8": {
+ "action": "Click on a number of dependencies.",
+ "expected_results": "A pop up with dependencies will appear. \n"
+ },
+ "9": {
+ "action": "Click on a dependency. ",
+ "expected_results": "Open recipe dependency page. "
+ }
+ },
+ "summary": "Test_dependencies_link"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Test_recipe_file_link",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster. \n\n\t",
+ "expected_results": "Toaster starts. \n\n\t"
+ },
+ "2": {
+ "action": " Click on new project button. \n\n\t",
+ "expected_results": " Open create a new project page. \n\n\t"
+ },
+ "3": {
+ "action": " Enter a project name, select a release (ex: master) and click on create project. \n\n\t",
+ "expected_results": " Project Created. \n\n\t"
+ },
+ "4": {
+ "action": "Click on Image recipes tab. \n\n\t",
+ "expected_results": "Open Compatible image recipes table. \n\n\t"
+ },
+ "5": {
+ "action": "Click on edit columns and select recipe file. \n\n\t",
+ "expected_results": "Recipe file column appears in the table. \n\n\t"
+ },
+ "6": {
+ "action": "Click on the blue button near a recipe file. \n\n\t",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Repet steps - 4 to 6 for software recipes.",
+ "expected_results": ""
+ }
+ },
+ "summary": "Test_recipe_file_link"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.See_packages_size",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster. \n\n\t",
+ "expected_results": " Toaster starts. \n\n\t"
+ },
+ "2": {
+ "action": " Click on new project button. \n\n\t",
+ "expected_results": " Open create a new project page. \n\n\t"
+ },
+ "3": {
+ "action": " Enter a project name, select a release and click on create project. \n\n\t",
+ "expected_results": " Project Created. \n\n\t"
+ },
+ "4": {
+ "action": " Build a recipe (ex: core-image-minimal) and wait until build finish. \n\n\t",
+ "expected_results": " Build finishes successfully. \n\n\t"
+ },
+ "5": {
+ "action": " Click on the built recipe. \n\n\t",
+ "expected_results": " Open build summary page. \n\n\t"
+ },
+ "6": {
+ "action": "Click on packages tab. \n\n\t",
+ "expected_results": "Open packages page. \n\n\t"
+ },
+ "7": {
+ "action": "Click on size to sort the table. ",
+ "expected_results": "You can check the size of each package. \n\nWhen you click on 'Size' the first time, the correct sorting is the inverse one (biggest package on top). Clicking a second time will invert the sorting (you'll see packages with 0 B size on top)."
+ }
+ },
+ "summary": "See_packages_size"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Build_multiple_recipes",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Start Toaster. \n\n\t",
+ "expected_results": " Toaster starts. \n\n\t"
+ },
+ "2": {
+ "action": " Click on new project button. \n\n\t",
+ "expected_results": " Open create a new project page. \n\n\t"
+ },
+ "3": {
+ "action": " Enter a project name, select a release and click on create project. \n\n\t",
+ "expected_results": " Project Created. \n\n\t"
+ },
+ "4": {
+ "action": " Build a multiple recipes (ex: \"core-image-minimal core-image-sato\") and wait until build finish. ",
+ "expected_results": "Builds finishes successfully. \n\nYou can build multiple recipes with toaster"
+ }
+ },
+ "summary": "Build_multiple_recipes"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Build_a_recipe_with_different_distro",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Start Toaster. \n\n\t",
+ "expected_results": " Toaster starts. \n\n\t"
+ },
+ "2": {
+ "action": " Click on new project button. \n\n\t",
+ "expected_results": " Open create a new project page. \n\n\t"
+ },
+ "3": {
+ "action": " Enter a project name, select a release and click on create project. \n\n\t",
+ "expected_results": " Project Created. \n\n\t"
+ },
+ "4": {
+ "action": "From project page click on Bitbake variables tab. \n\n\t",
+ "expected_results": "Open Bitbake variables page. \n\n\t"
+ },
+ "5": {
+ "action": "Click on change button for distro. \n\n\t",
+ "expected_results": "A type in form appears. \n\n\t"
+ },
+ "6": {
+ "action": "Change distro (ex: poky-tiny). \n\n\t",
+ "expected_results": "Distro has changed. \n\n\t"
+ },
+ "7": {
+ "action": "Add specific layers for distro (meta-qt3, meta-qt4) \n\t\n\t",
+ "expected_results": "Layers added to the project \n\n\t"
+ },
+ "8": {
+ "action": " Build a recipe (ex: core-image-minimal) and wait until build finish.",
+ "expected_results": "Build finishes successfully. \n\nThe 'success' criteria for this one should be that the build is reported as using the poky-tiny distro in the build summary page, and that the DISTRO variable value in the bitbake variables table is set to the value specified in toaster (poky-tiny again)."
+ }
+ },
+ "summary": "Build_a_recipe_with_different_distro"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Test_package_format_-_ipk_rpm_deb",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Start Toaster. \n\n\t",
+ "expected_results": " Toaster starts. \n\n\t"
+ },
+ "2": {
+ "action": " Click on new project button. \n\n\t",
+ "expected_results": " Open create a new project page. \n\n\t"
+ },
+ "3": {
+ "action": " Enter a project name, select a release and click on create project. \n\n\t",
+ "expected_results": " Project Created. \n\n\t"
+ },
+ "4": {
+ "action": "From the project page click on bitbake variables tab. \n\n\t",
+ "expected_results": "Open bitbake variables page. \n\n\t"
+ },
+ "5": {
+ "action": "Click on change button near PACKAGE_CLASSES and select all the package formats (rpm, deb, ipk). \n\n\t",
+ "expected_results": "Package classes selected. \n\n\t"
+ },
+ "6": {
+ "action": "Build a recipe (ex: core-image-minimal) and wait until build finish.",
+ "expected_results": "Build finishes successfully.\nYou can see the package classes in the build summary page."
+ }
+ },
+ "summary": "Test_package_format_-_ipk_rpm_deb"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Test_IMAGE_INSTALL_append_variable",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster. \n\n",
+ "expected_results": " Toaster starts. \n\n\t"
+ },
+ "2": {
+ "action": " Click on new project button. \n\n",
+ "expected_results": " Open create a new project page. \n\n\t"
+ },
+ "3": {
+ "action": " Enter a project name, select a release and click on create project. \n\n",
+ "expected_results": " Project Created. \n\n\t"
+ },
+ "4": {
+ "action": "From the project page click on bitbake variables tab. \n\n",
+ "expected_results": "Open bitbake variables page. \n\n\t"
+ },
+ "5": {
+ "action": "Click on change button for IMAGE_INSTALL_append and add a variable (ex: acpid). \n\n",
+ "expected_results": "Variable added. \n\n\t"
+ },
+ "6": {
+ "action": "Build a recipe (ex: core-image-minimal) and wait until build finish. \n\n",
+ "expected_results": "Build finishes successfully. \n\n\t"
+ },
+ "7": {
+ "action": "After build finishes go to build page. \n\n",
+ "expected_results": "Open build summary page. \n\n\t"
+ },
+ "8": {
+ "action": "Go to package tab and search for acpid.",
+ "expected_results": "You should get results for ssh packages."
+ }
+ },
+ "summary": "Test_IMAGE_INSTALL_append_variable"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.New_custom_image:_default_view",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All projects\" table. \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "On the project page click on the \"New custom image\" link situated on the left-hand side, near Configuration, builds, import layer \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Check that the table is populated with the list of image recipes (eg. core-image minimal) \n\n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": " Check that by default the following columns are shown: Image recipe, Version, Description, Layer, Customise \n\n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "From the \"Edit columns\" menu, activate the: Recipe file, Section, License, Git revision \n\n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Check that the \"Git revision\" entries match the release entry from the main project page, in the project details section. \n\n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Check that the image recipes provided by layers added to the project show a 'customise' button, while image recipes provided by layers not added to the project show an 'add layer' button ",
+ "expected_results": ""
+ }
+ },
+ "summary": "New_custom_image:_default_view"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.New_custom_image:_sorting_the_content_of_new_custom_image_table",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All projects\" table. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "On the project page click on the \"New custom image\" link situated on the left-hand side, near Configuration, builds, import layer \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Make sure that the table is sorted on the ‘Image recipe’ column by default in ascending order. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Clicking on Image recipe should revert the sorting. (from 'a to z' changes to 'z to a') \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "From the \"Edit columns\" menu activate all the columns. Check that ‘Image recipe’ and ‘Customise’ columns cannot be unchecked. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Check that Image recipe, Section, Layer and License are the only sortable table heads. \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Sort the table by \"Layer\" and then navigate away by selecting an image. When you click the \"back\" button in the web-browser to go back, the \"New custom image\" table should still be sorted by \"Layer\". \nThis should apply also by navigating back to the page by any other means. \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Sorting and \"Edit columns\" menu: If you use the \"Edit columns\" menu to hide the column with the applied sorting, we revert the sorting to the default sorting (i.e. \"Image recipe\"). The default sorting always uses one of the core columns, which cannot be hidden using the \"Edit columns\" menu \n\n",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Sorting and search: Searching should have no impact on the applied sorting. Any results returned should be sorted by the sorting criteria selected when the search query was submitted. Sort recipes by \"Layer\" column heading. Input a string (such as \"core-image\") in search box and click search button. Make sure results returned are sorted by \"Layer\".",
+ "expected_results": "N/A"
+ }
+ },
+ "summary": "New_custom_image:_sorting_the_content_of_new_custom_image_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.New_custom_image:_searching_the_content_of_new_custom_image_table",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All projects\" table. \n \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "On the project page click on the \"New custom image\" link situated on the left-hand side, near Configuration, builds, import layer \n \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Check that the search is made of a text input field and a \"Search\" button in a toolbar above the table. When no search query has been entered, the text input field should show the following placeholder text: \"Search select the image recipe you want to customise\" \n \n\n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Input \"core\" in the text input field. The placeholder text disappears when the first character is typed. Click search button. \n \n\n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "The search string is kept in the text input field. The results returned occur. Click \"Clear search\" icon to clear the search and display the image recipes. If your search query returns no results, we show you an alert with a search form and an option to show all image recipes. Check that \"show all\" link is available. \n\n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Search, sorting and \"Edit columns\": Searching does not change the state of the table: the same columns remain hidden and the same sorting applied when search results are displayed, but filters are cleared by the search results. Search a string and make sure that the same columns remain hidden and the same sorting applied. \n\n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Search and filters \n \n\t•\tThe scope of the filters is the content currently on the table (this means all table pages, not only the one displayed). The scope of the search is always the content of the database. \n\n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "If I run a search query, any filter applied afterwards will filter the content returned by the search query. \tIf I run a search query while a filter is applied, the filter is cleared by the results of the search query (i.e. we display the results of the search query and clear the filter applied beforehand). The same happens if I click the \"Clear search\" icon when a filter is applied to a set of search results (both search results and applied filter are cleared, and the table shows all the targets). ",
+ "expected_results": "\n \n"
+ }
+ },
+ "summary": "New_custom_image:_searching_the_content_of_new_custom_image_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.New_custom_image:_Filter_the_contents_of_the_new_custom_image_table",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All projects\" table. \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "On the project page click on the \"New custom image\" link situated on the left-hand side, near Configuration, builds, import layer \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Make sure the following table column has filters: Customise \n\n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Click the filter button in the \"Customise\" column and a filter dialogue comes up. Select a filter option. The filter results should be showed. \n\n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Filters are overridden by search. Run a search query and you can see previous filter results are overridden by the results of the search query.",
+ "expected_results": "N/A"
+ }
+ },
+ "summary": "New_custom_image:_Filter_the_contents_of_the_new_custom_image_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Create_new_custom_image",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All projects\" table. \n \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "On the project page click on the \"New custom image\" link situated on the left-hand side, near Configuration, builds, import layer \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Search for rpi-basic-image, click on 'add layer' button. Make sure a \"layer added\" notification shows and a \"customise\" button is displayed. Click the \"customise\" button, type a name for you new custom image and click on create custom image. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Verify that image was created: when you create the custom image you will be redirected to the custom image details page, and a notification at the top of the page should tell you: ‘Your custom image X has been created. You can now add or remove packages as needed. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "If you select an image that has not been built beforehand you should not see the 'add / remove' packages table until you build the image. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "If you select an image that has been built beforehand you should see the 'add / remove' packages table when you create the custom image. ",
+ "expected_results": "N/A"
+ }
+ },
+ "summary": "Create_new_custom_image"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Custom_image_page_details",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "After you create a new custom image go to the custom image page, by clicking on the custom image. \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Breadcrumbs \n \n\t * Observe that the 3 breadcrumbs at the top left are: \n \n\t\ta live link that will take you back to the project page \n\t\tCustom images: a live link that will take you back to the custom images table \n\t\timage name(toaster-custom-images): the name of the current custom image (not a link) \n \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Observe the 2 buttons build Custom image and Download recipe file \n \n\ti.\tTest this 2 buttons \n\tii.\tYou should always be able to build the custom image, but you only should be able to download the recipe file when the package content of the custom image is known. When you cannot download the recipe file, the 'download' button at the top of the page is disabled, and the right hand column does not show information about the recipe file. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Observe that there is a right-hand box, with information about the images \n \n\ti.\tThere is a number of packages included in the custom image \n\tii.\tApprox package size \n \n\tiii.\tLayer \n \n\tiv.\tImage based on \n \n\tv.\tRecipe file (only when you can download it_ \n\tvi.\tVersion \n \n\tvii.\tLicense \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Observe that the page includes a table with packages and you can add or remove packages from the custom image. The packages table only appears when: \n\na) the image recipe you chose as your base image when creating the custom image has been built within the project \n\nb) the custom image itself has been built \n\nIf no packages table shows, you see a notification with a build button instead. ",
+ "expected_results": "N/A"
+ }
+ },
+ "summary": "Custom_image_page_details"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Custom_image_page_–_Add_|_Remove_packages_table",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "After you create a new custom image go to the custom image page, by clicking on the custom image. \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Make sure that the table is sorted on the ‘Package’ column by default in ascending order. \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Check that by default the following columns are shown: Package, Package Version, Approx Size \n\n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "From the \"Edit columns\" menu, activate the: License, Recipe, Recipe version and Reverse dependencies columns \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check that ‘Package’, Approx Size, License, Recipe are the only sortable table heads. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Sorting and \"Edit columns\": If you use the \"Edit columns\" menu to hide the column with the applied sorting, we revert the sorting to the default sorting (i.e. \"Package\"). The default sorting always uses one of the core columns, which cannot be hidden using the \"Edit columns\" menu \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Sorting and search: Searching should have no impact on the applied sorting. Any results returned should be sorted by the sorting criteria selected when the search query was submitted. Sort recipes by \"Recipe\" column heading. Input a string (such as \"acl\") in search box and click search button. Make sure results returned are still sorted by \"Recipe\". \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Click \"Clear search\" icon to clear the search and display the Packages. \n\n",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Make sure the following table column has filters: Add | Remove (Click on 'Edit custom image' in the left pane of the custom image) \n\n1",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "Click the filter button: a filter dialogue displays. Select a filter option. The filter results should be showed. \n\n1",
+ "expected_results": ""
+ },
+ "11": {
+ "action": "Filters are overridden by search. Run a search query and you can see previous filter results are overridden by the results of the search query. \n\n1",
+ "expected_results": ""
+ },
+ "12": {
+ "action": "This page needs a special no results message for the search. You can see in the doc attached to\nhttps://bugzilla.yoctoproject.org/show_bug.cgi?id=9154 \n\nTo test it, enter a random string in the search input field (something like \"bbb\") and click the 'search' button. The special no results message includes the following: \n\na) instructions about searching and building recipes in order to generate new packages \n\nb) a search text input field with the search string you typed, a 'clear' icon and a search button. Click the 'clear' icon: the search field should be cleared and the full list of packages should be shown. \n\nc) a 'show all packages' link. Click the link: the search field should be cleared and the full list of packages should be shown. ",
+ "expected_results": "N/A"
+ }
+ },
+ "summary": "Custom_image_page_–_Add_|_Remove_packages_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Adding_packages_without_dependencies_from_custom_images",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All projects\" table. \n\n ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "On the project page click on the \"New custom image\" link situated on the left-hand side, near Configuration, builds, import layer \n\n\n ",
+ "expected_results": " "
+ },
+ "3": {
+ "action": "Choose an image recipe example:(core-image-sato) and click on the customise button on the far right (note if that layer is not added the button will say +Add layer, add it and then customise) \n\n\n ",
+ "expected_results": " A pop out should appear and you should get to give the new image a customized name. Then you will be redirected to a page of Add|Remove Packages. If this image has not been build it will not have packages.\n\n"
+ },
+ "4": {
+ "action": "Build the new image if it has not been build, else start adding packages without dependencies example: attr-doc \n\n \n ",
+ "expected_results": "You should get a message in blue that says \"You have added 1 package to $image-custom-name: $package-name\" "
+ },
+ "5": {
+ "action": "Build the image again.\n",
+ "expected_results": "Expected Result on step 5: the packages you have added should be installed in the image."
+ }
+ },
+ "summary": "Adding_packages_without_dependencies_from_custom_images"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Removing_packages_without_and_with_dependencies__from_custom_images",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All projects\" table. \n\n ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "On the project page click on the \"New custom image\" link situated on the left-hand side, near Configuration, builds, import layer \n\n\n\n ",
+ "expected_results": " A pop out should appear and you should get to give the new image a customized name. Then you will be redirected to a page of Add|Remove Packages. If this image has not been build it will not have packages. \n\n"
+ },
+ "3": {
+ "action": "Choose an image recipe example:(core-image-sato) and click on the customise button on the far right (note if that layer is not added the button will say +Add layer, add it and then customise) \n\n\n\n ",
+ "expected_results": "A pop out should appear and you should get to give the new image a customized name. Then you will be redirected to a page of Add|Remove Packages. If this image has not been build it will not have packages.\n\n"
+ },
+ "4": {
+ "action": "Build the new image if it has not been build else start removing a packages (click on 'Edit custom image' in the left pane of the custom image) that have dependencies and packages that have no dependencies that are already included by clicking on the red button \"Remove Package\" \n\n\n \n ",
+ "expected_results": "You should get a message in blue that says \"You have removed 1 package to $image-custom-name: $package-name\" "
+ },
+ "5": {
+ "action": "Build the image again.",
+ "expected_results": "the packages you have removed should not be installed in the image."
+ }
+ },
+ "summary": "Removing_packages_without_and_with_dependencies__from_custom_images"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Adding_packages_with_dependencies",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Access a project page, either by creating a new project or accessing an existing project from the \"All projects\" table. \n\n ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "On the project page click on the \"New custom image\" link situated on the left-hand side, near Configuration, builds, import layer \n\n\n ",
+ "expected_results": " "
+ },
+ "3": {
+ "action": "Choose an image recipe example:(core-image-sato) and click on the customise button on the far right (note if that layer is not added the button will say +Add layer, add it and then customise) \n\n\n ",
+ "expected_results": "A pop out should appear and you should get to give the new image a customized name. Then you will be redirected to a page of Add|Remove Packages. If this image has not been build it will not have packages."
+ },
+ "4": {
+ "action": "Build the new image if it has not been build else start adding packages that have dependencies ( you will be able to see in the dependencies column a little square with a number, that tells you the dependencies it holds) example: libattr this holds 2 dependencies( bash and glibc) ",
+ "expected_results": " You should get a pop-out that that say \"$package_name dependencies\" then it should list the dependencies. Once clicked on the add packages button it should add the packages listed in the pop-out."
+ },
+ "5": {
+ "action": "Build the image again.",
+ "expected_results": ""
+ }
+ },
+ "summary": "Adding_packages_with_dependencies"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Create_Project",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project by issuing a name and selecting a release version.",
+ "expected_results": "Once project is created it should redirect you to a new project configuration page"
+ },
+ "3": {
+ "action": "Check that the h1 page title is set to the name the user typed in the new project form. ",
+ "expected_results": ""
+ }
+ },
+ "summary": "Create_Project"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_project_detail_page_left_bar_menu",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project \n \n",
+ "expected_results": "Once project is created it should redirect you to a new project configuration page. \n\n"
+ },
+ "3": {
+ "action": "Check the page contains the tabs \n Configuration ---> selected by default \n Compatible Metadata (separation label) \n Custom images \n Image recipes \n Software recipes \n Machines \n Layers \n Extra Configuration (separation label) \n BitBake variables \n\n",
+ "expected_results": " All elements are present. \n\n"
+ },
+ "4": {
+ "action": "Click on each element to see if the h2 title is changing to the respective link clicked. Example if clicked on \"Custom Images\" then the h2 title should change to \"Custom images\"",
+ "expected_results": "All elements are clickable and h2 title changes to the corresponding title.."
+ }
+ },
+ "summary": "Verify_project_detail_page_left_bar_menu"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Configuration_information_of_Project_Detail_page",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Clone Poky and start toaster \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a master toaster project \n \n",
+ "expected_results": "Once project is created it should redirect you to a new project configuration page \n\n"
+ },
+ "3": {
+ "action": "Check that the configuration button on the left side bar is selected by default \n\n",
+ "expected_results": " Expected result step 3 & 4: The configuration link next to the build link should be selected see attachment. \n\n\n\n"
+ },
+ "4": {
+ "action": "The configuration details should include canvas: \n Machine \n Most built recipes \n Layers \n Project Release ",
+ "expected_results": "Expected result step 4: A machine must always be set. \n\nThe default layers specified in the Toaster configuration must always be listed in the layer section (in our case, for the poky configuration we should have openembedded-core, meta-poky and meta-yocto-bsp)"
+ }
+ },
+ "summary": "Configuration_information_of_Project_Detail_page"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_Machine_information_of_project_detail_page",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project \n \n",
+ "expected_results": "Expected result step 2: Once project is created it should redirect you to a new project Configuration page \n\n"
+ },
+ "3": {
+ "action": "The configuration details should include a label in bold font that says: \n Machine: this canvas should have the machine label type under it and an editing button on the side ",
+ "expected_results": "Expected result step 3: Compare to the attached snapshot. The machine must always be set."
+ }
+ },
+ "summary": "Verify_Machine_information_of_project_detail_page"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_most_built_recipes_information_of_the_project_detail_page",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project \n \n",
+ "expected_results": "Expected result step 2: Once project is created it should redirect you to a new project configuration page "
+ },
+ "3": {
+ "action": "The configuration details should include a label in bold font that says: \n Most built recipes: In this canvas one of the following information should show: \n\n a) If there has been no built recipes it should have a label that says: \n \"You haven't built any recipes yet, choose a recipe to build\" \n\n b) Else it should have a list of built recipes and a check box in front of it. So that it could be selected and built again ",
+ "expected_results": "Expected result step a: See ProjectDetailPage2.png attachment.\n\nExpected result step b: See ProjectDetailPage.png attachment."
+ }
+ },
+ "summary": "Verify_most_built_recipes_information_of_the_project_detail_page"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_project_release_information_on_project_detail_page",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project \n \n",
+ "expected_results": "Expected result step 2: Once project is created it should redirect you to a new project configuration page. "
+ },
+ "3": {
+ "action": "The configuration details should include a label in bold font that says: \n Project release: this canvas should also have a label that show the release project you chose at the beginning.",
+ "expected_results": "Expected result step 3: See attachment ProjectDetailPage.jnp."
+ }
+ },
+ "summary": "Verify_project_release_information_on_project_detail_page"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_layer_information_of_the_project_detail_page",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Clone the poky environment git clone http://git.yoctoproject.org/git/poky",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Start toaster",
+ "expected_results": "Expected result step 2: Once project is created it should redirect you to a new project configuration page. \n\n"
+ },
+ "3": {
+ "action": "Create a toaster project \n \n",
+ "expected_results": "Expected result step 3: See attachment of layerCanvas.png\n"
+ },
+ "4": {
+ "action": "The configuration details should include a label with bold font that says: \n Layer: this canvas should have 3 layers listed by default (openembedded-core, meta-poky, and meta-yocto-bsp). \n Each layer should have a trashcan icon at the side that can be used to erase the label from the project. \n The layer canvas should have a text box with the text \"type a layer name\" a button \"Add layer\" next to it. \n just underneath a \"view compatible layer | import layer\" link. ",
+ "expected_results": ""
+ }
+ },
+ "summary": "Verify_layer_information_of_the_project_detail_page"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_project_detail_links",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project \n\n",
+ "expected_results": "Expected result step 2: Once project is created it should redirect you to a new project configuration page. \n\n"
+ },
+ "3": {
+ "action": "The configuration details should include \n 4 links, starting in the upper left side the Configuration link, Builds(#) link, Import layer link, and the New custom image link. \n\n ",
+ "expected_results": "Expected result step 3: All links should be clickable and should have information or tables, or forms. see attachment projectDetailLinks.png"
+ }
+ },
+ "summary": "Verify_project_detail_links"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_build_texbox_exists_and_works",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project \n \n",
+ "expected_results": " Once project is created it should redirect you to a new project configuration page. \n\n\n"
+ },
+ "3": {
+ "action": "The configuration details should include \n After the 4 links (Configuration, Build(#), Import layer & New custom image) at the far right there should be a textbox with the label that says....\"Type the recipe you want to build\" and a button \"Build\" \n\n",
+ "expected_results": "See attachment buildTXT.png, The build button should be disabled whenever the text input field is empty, so that you cannot start a build with a blank target \n\n"
+ },
+ "4": {
+ "action": "Type in the textbox an image you would like to build example (core-image-minimal) and click the build button.\n\n",
+ "expected_results": " Image starts building. Whenever there is information in the image recipes and software recipes tables, the text input field should present suggestions from the list of recipes provided by the layers in the \"layers\" list. The suggestions contain the string typed in the input field, and update as you type. They appear on typing the second character. A maximum of 8 suggestions can be shown. They are sorted as follows: first recipes starting with the string, in alphabetical order; then recipes containing the string, also in alphabetical order \n\n\nWhen you click the build button you are brought to the \"Builds\" tab, and a new build in progress appears at the top of the \"Latest project builds\" section"
+ }
+ },
+ "summary": "Verify_build_texbox_exists_and_works."
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Veryfing_the_builds_link_show_proper_information",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project \n \n",
+ "expected_results": " Once project is created it should redirect you to a new project configuration page. \n\n\n"
+ },
+ "3": {
+ "action": "The configuration details should include \n A builds tab that when clicking on, it should display one of the following: \n \n\n a) A label that says \"Latest project builds\" then a label with \"All project builds\" \n If you have a finished build or there is an ongoing builds then: \n You should see a progress bar of the ongoing builds in the project. \n You should see a table with the already done builds in the project. \n\n\n b) \"All project builds\" and a search textbox with a button nothing else only if there are no builds done in the project \n ",
+ "expected_results": "Expected result for step a): See ExistingBuilds.png attachment. \n\n\nExpected result for step b): See ZeroBuilds.png attachment. "
+ }
+ },
+ "summary": "Veryfing_the_builds_link_show_proper_information"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_that_the_Import_layer_link_shows_the_form",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project \n\n",
+ "expected_results": " Once project is created it should redirect you to a new project configuration page. \n\n\n"
+ },
+ "3": {
+ "action": "The configuration details should include \n An Import layer link that when clicking on, it should display: \n A label that says \"Layer repository information\" \n A label that says \"The layer you are importing must be compatible with Yocto Project master, which is the release you are using in this project.\" \n Form composed of the following elements: \n Layer name : textbox \n Git repository URL : textbox \n Repository subdirectory (optional) : textbox \n Git revision : textbox \n Layer dependencies (optional) : \"openembedded-core\" link and (trash icon), textbox and \"Add layer\" button \n Import and add to project : button",
+ "expected_results": " See attachment ImportLayerForm.png"
+ }
+ },
+ "summary": "Verify_that_the_Import_layer_link_shows_the_form"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_that_New_Custom_Image_link_works_and_shows_information",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project \n\n",
+ "expected_results": " Once project is created it should redirect you to a new project configuration page \n\n\n"
+ },
+ "3": {
+ "action": "The configuration details should include \n A \"New custom image\" tab that when clicking on, it should display: \n a Title label that says: \"Select the image recipe you want to customise(#number_or_recipes_available)\" \n A search textbox with the label of: \"Search and select the image recipe you want to customise\" \n A \"Search button\" \n A \"Edit columns\" button \n A table that will display the customise images available ",
+ "expected_results": "See attachment CustomImage.png"
+ }
+ },
+ "summary": "Verify_that_New_Custom_Image_link_works_and_shows_information"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_most_built_recipe_shows_a_maximum_of_5_recipes",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Build 6 recipes example (core-image-sato, core-image-minimal, core-image-base, core-image-clutter) to name a few. ",
+ "expected_results": " All recipes are built correctly \n\n"
+ },
+ "4": {
+ "action": "Wait for the recipes to finish ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Go to the configuration details.",
+ "expected_results": " You should see 5 of the 6 recipes that were build. If you order the recipes in alphabetical order you should see that the first 5 made the list. "
+ }
+ },
+ "summary": "Verify_most_built_recipe_shows_a_maximum_of_5_recipes"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_order_sequence_of_listing_in_Most_build_recipes",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Build 6 recipes example (core-image-sato, core-image-minimal, core-image-base, core-image-clutter) to name a few. \n\n",
+ "expected_results": "All recipes are built correctly \n\n"
+ },
+ "4": {
+ "action": "Wait for the recipes to finish \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Go to the configuration details. \n\n",
+ "expected_results": " If you order the recipes in alphabetical order you should see that the first 5 made the list. Only 5 out of the 6 should make the list. \n\n"
+ },
+ "6": {
+ "action": "Select the 6th recipe that did not make the list and build it again. ",
+ "expected_results": " Since the 6th recipe is now built twice it should make the list and the recipe in the 5th place should not appear."
+ }
+ },
+ "summary": "Verify_order_sequence_of_listing_in_Most_build_recipes"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_Most_build_recipes_multiple_selection",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start toaster",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create a toaster project",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Build 4 recipes example (core-image-sato, core-image-minimal, core-image-base, core-image-clutter) to name a few. \n\n",
+ "expected_results": " All recipes are built correctly \n\n"
+ },
+ "4": {
+ "action": "Wait for the recipes to finish. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Go to the configuration details. \n\n",
+ "expected_results": " You should see the 4 recipes in alphabetical order. You should also note that the Build button on the upper right hand corner is disabled since no recipe has been selected. \n\n"
+ },
+ "6": {
+ "action": "Select 1 of the recipes in the most built recipes section. \n\n",
+ "expected_results": "The build button is automatically enabled. \n\n"
+ },
+ "7": {
+ "action": "Select multiple (example 2 or 3) recipes in the most built recipes section. \n\n",
+ "expected_results": " The build button is enabled. \n\n"
+ },
+ "8": {
+ "action": "Click on the build button to start building the recipes.",
+ "expected_results": "One recipe start to build and the others are on queue."
+ }
+ },
+ "summary": "Verify_Most_build_recipes_multiple_selection"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_layer_addition_functionality",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Clone the poky environment git clone http://git.yoctoproject.org/git/poky",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Start toaster",
+ "expected_results": " Once project is created it should redirect you to a new project configuration page. \n\n"
+ },
+ "3": {
+ "action": "Create a toaster project with master release \n \n\n",
+ "expected_results": " See attachment of layerCanvas.png \n\n\n"
+ },
+ "4": {
+ "action": "Add layers by typing the name in the \"Type a layer name\" input box and adding by clicking the button. \n\n",
+ "expected_results": " A list of layers with similar name to the one you are typing should appear, giving you the choice to add it. The default text \"Type a layer name\" should disappear as soon as you start typing. \n\n"
+ },
+ "5": {
+ "action": "Add layers by clicking on the View compatible layers link just bellow the input text box. \n\n",
+ "expected_results": "This should redirect you to the compatible layers page where a list of compatible layers should appear and allow you to Add layers to the project. \n\n"
+ },
+ "6": {
+ "action": "Add a layer by importing a layer clicking in the Import layer",
+ "expected_results": "This link should redirect you to the import layer form where you will be able to add layers from git repository or a local directory. \n\n\n"
+ }
+ },
+ "summary": "Verify_layer_addition_functionality"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Verify_delete_layer_functionality",
+ "author": [
+ {
+ "email": "libertad.gonzalez.de.la.cruz@intel.com",
+ "name": "libertad.gonzalez.de.la.cruz@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Clone the poky environment git clone http://git.yoctoproject.org/git/poky\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Start toaster",
+ "expected_results": " Once project is created it should redirect you to a new project configuration page.\n\n"
+ },
+ "3": {
+ "action": "Create a toaster project master release \n \n",
+ "expected_results": "See attachment of layerCanvas.png \n\n"
+ },
+ "4": {
+ "action": "Remove openembedded-core layer from the project by clicking the trash icon next to it. \n\n",
+ "expected_results": "The layer should disappear from the list and a notification should appear at the top of the page saying: \"You have removed 1 layer from your project: \". The layer_name should be a link to the corresponding layer detail page. The layer counter next to the \"layers\" heading should decrease by one. \n"
+ },
+ "5": {
+ "action": "Remove all the layers from the project",
+ "expected_results": " you should see a message that reads: \n\nYou need to add some layers. For that you can: \n\n-View all layers compatible with this project \n\n-Import a layer \n\n-Read about layers in the documentation \n\nOr type a layer name below. \n\n\n The \"Choose from the layers compatible with this project\" link should go to the compatible layers page The \"Import a layer\" link should go to the import layer page The \"Read about layers in the documentation\" link should open in a new window and bring you to http://www.yoctoproject.org/docs/current/dev-manual/dev-manual.html#understanding-and-creating-layers"
+ }
+ },
+ "summary": "Verify_delete_layer_functionality"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-managed-mode.toaster-managed.Download_task_log",
+ "author": [
+ {
+ "email": "alexandru.costinx.roman@intel.com",
+ "name": "alexandru.costinx.roman@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start Toaster. ",
+ "expected_results": "Toaster starts."
+ },
+ "2": {
+ "action": "Click on new project button.\n\n",
+ "expected_results": "Open create a new project page."
+ },
+ "3": {
+ "action": " Enter a project name, select a release and click on create project or select an existing project. \n\n",
+ "expected_results": "Project Created."
+ },
+ "4": {
+ "action": "Build a recipe (ex: core-image-minimal). \n\n",
+ "expected_results": "Build finish."
+ },
+ "5": {
+ "action": "Click on the built recipe. \n\n",
+ "expected_results": "Open build summary page."
+ },
+ "6": {
+ "action": "Click on tasks tab. \n\n",
+ "expected_results": "Open tasks page."
+ },
+ "7": {
+ "action": "Click on a task executed successfully. \n\n",
+ "expected_results": "Open task page."
+ },
+ "8": {
+ "action": "Click on \"Download task log\" button. \n",
+ "expected_results": "You can download the task log. \n"
+ },
+ "9": {
+ "action": "Click on a failed task. \n",
+ "expected_results": "Open task page, not appear download task \n"
+ }
+ },
+ "summary": "Download_task_log"
+ }
+ }
+] \ No newline at end of file
diff --git a/meta/lib/oeqa/manual/toaster-unmanaged-mode.json b/meta/lib/oeqa/manual/toaster-unmanaged-mode.json
new file mode 100644
index 0000000000..29d11a87d5
--- /dev/null
+++ b/meta/lib/oeqa/manual/toaster-unmanaged-mode.json
@@ -0,0 +1,1170 @@
+[
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.Create_a_Yocto_project_and_start_the_Toaster",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Set up yocto project and toaster test environment. \ncd ${installdir} \ngit clone git://git.yoctoproject.org/poky \n\n",
+ "expected_results": "NA \n\n"
+ },
+ "2": {
+ "action": "Start up toaster. \ncd ${installdir} \nsource poky/oe-init-build-env \nsource toaster start \n\n",
+ "expected_results": " \nlog: \nThe system will start. \nSyncing... \nCreating tables ... \nCreating table south_migrationhistory \nInstalling custom SQL ... \nInstalling indexes ... \nInstalled 0 object(s) from 0 fixture(s) \n > south \n\nNot synced (use migrations): \n - orm \n(use ./manage.py migrate to migrate these) \nRunning migrations for orm: \n - Migrating forwards to 0004_auto__add_field_package_installed_name. \n > orm:0001_initial \n > orm:0002_auto__add_field_build_timespent \n > orm:0003_timespent \n - Migration 'orm:0003_timespent' is marked for no-dry-run. \n > orm:0004_auto__add_field_package_installed_name \n - Loading initial data for orm. \nInstalled 0 object(s) from 0 fixture(s) \nserver address: 127.0.0.1, server port: 8200 \nSuccessful start."
+ },
+ "3": {
+ "action": "Build the yocto project. \nbitbake core-image-minimal \n\n",
+ "expected_results": "Build successfully. \n"
+ },
+ "4": {
+ "action": "Use a default web brower to see project build process. \nxdg-open http://localhost:8000/ \nWait for build completion. \n",
+ "expected_results": "You can open http://localhost:8000/ in a default browser. The build process is showed. "
+ }
+ },
+ "summary": "Create_a_Yocto_project_and_start_the_Toaster."
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.Sort_the_content_of_the_builds_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start up toaster.",
+ "expected_results": "Succeed to start up toaster. "
+ },
+ "2": {
+ "action": "Create 2 builds, such as \"bitbake core-image-minimal\" and \"bitbake core-image-sato\". Wait for successful builds and then run: http://localhost:8000/",
+ "expected_results": "Succeed to build the targets. "
+ },
+ "3": {
+ "action": "Enter \"All build\" table in web browser.",
+ "expected_results": "NA "
+ },
+ "4": {
+ "action": "Click \"Completed on\" component to sort.",
+ "expected_results": "Build targets are sorted out by the \"Completed on\". "
+ },
+ "5": {
+ "action": "Click \"Completed on\" component again to invert the sorting .",
+ "expected_results": "The sorting is inverted. "
+ },
+ "6": {
+ "action": "Have a sort try in other columns. outcome, machine, started on, completed on, errors, warnings, project.",
+ "expected_results": "See item 4 and 5."
+ }
+ },
+ "summary": "Sort_the_content_of_the_builds_table."
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.Search_the_content_of_the_builds_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start up toaster.",
+ "expected_results": "NA "
+ },
+ "2": {
+ "action": "Create 2 builds, such as \"bitbake core-image-minimal\" and \"bitbake core-image-sato\". Wait for successful builds and then run: xdg-open http://localhost:8000/",
+ "expected_results": "NA "
+ },
+ "3": {
+ "action": "Enter \"All build\" table in web browser.",
+ "expected_results": "NA "
+ },
+ "4": {
+ "action": "Input a string in search component and click search.",
+ "expected_results": "Show returned search results. When no search query has been entered, we have placeholder text saying: \"Search builds\". The placeholder text disappears when the first character is typed. "
+ },
+ "5": {
+ "action": "See returned search results.",
+ "expected_results": "If your search query returns no results, the section heading changes to \"No builds found\", and we show you an alert with a search form and an option to show all builds. "
+ },
+ "6": {
+ "action": "Click \"Clear search\" icon (icon-remove-sign). Observe all builds are showed. ",
+ "expected_results": "Click it to clear the search and display all builds."
+ }
+ },
+ "summary": "Search_the_content_of_the_builds_table."
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.Filter_the_content_of_the_builds_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start up toaster.",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create 2 builds, such as \"bitbake core-image-minimal\" and \"bitbake core-image-sato\". Wait for successful builds and then run: xdg-open http://localhost:8000/.",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Enter \"All build\" table in web browser.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Make sure the following table columns have filters. \n- Outcome \n-- Started on \n- Completed on \n- Failed tasks",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Filters are mutually exclusive. Click a filter button of a one column and a filter dialogue occurs. Select a filter item. The filter result would be showed. Then select another filter item of another column and the previously applied filter is overridden by the newly selected filter when a filter from a different column is applied to the table. In this state, we show some help text next to the \"Apply\" button, saying \"You can only apply one filter to the table. This filter will override the current filter.\"",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Filters are overridden by search. Run a search query and you can see previous filter results are overridden by the results of the search query.",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Have a try in filters of the following table columns. \n- Outcome \n- Started on \n- Completed on \n- Failed tasks \n",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "Filter_the_content_of_the_builds_table."
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.Tasks_in_toaster_UI",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " === TOASTER: Test Instructions for \"Tasks\", \"Time\", \"CPU Usage\", and \"Disk I/O\" pages === \n \nNOTE TO TESTERS: The three pages \"Time\", \"CPU Usage\", and \"Disk I/O\" are simple variations on the \"Tasks\" page. Those test instructions will demonstrate the respective unique parts. \n \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n \n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "In Toaster, select the build, and select the \"Tasks\" link in the left sidebar \n \n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Breadcrumbs \n \n * Observe that the 4 breadcrumbs at the top left are: \n \n : a live link that will take you back to the project page \n \t: a live link that will take you back to the project Builds page \n : a live link that will take you back to the image dashboard page \n \"Tasks\" \n \n * Test the breadcrumb live links, return to this page \n \n \n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "General Layout \n \n * Observe the left-hand box with links to the other pages of the build \n \n * Observe the title of the table is \"Tasks\", in bold \n \n * Observe the search/filter bar above the table \n \n * Observe the number of table rows in each page matches the number selected in the \"Show rows\" dropdown menu \n \n * Observe at the bottom of the page the \"Showing XX to XX out of xxx entries\", the page selection links, and the \"Show Rows\" selection. \n \n \n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Columns \n \nNote: to restore the default columns in your browser, for example in Firefox, select \"Tools > Privacy > remove individual cookies\", find the cookies for the IP address of the Toaster engine (for example \"localhost\"), and remove each sub-cookie with the prefix \"_displaycols_*\" (or the whole cookie if those are the only sub-cookies). \n \n * Observe that the default columns are: \n \n \"Order, Recipe, Task, Executed, Outcome, Cache attempt\" \n \n * Click the \"Edit Columns\" button. ",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Observe that the fields are sorted as follows (with the indicated items greyed) \n \n [ ] CPU usage \n [x] Cache attempt \n [ ] Disk I/O (ms) \n [x] Executed \n [x] Order {greyed} \n [x] Outcome \n [x] Recipe {greyed} \n [ ] Recipe version \n [x] Task {greyed} \n [ ] Time (secs) \n \n * For each of the greyed items, attempt to click them. Observe that they do not change. \n \n * For each of the non-greyed items, attempt to click them. Observe that the respective column dynamically appears when checked and disappears when un-checked. ",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Search \n \n * Observe that the search text box background text is \"Search tasks\". \n \n * Set the search text to \"busybox\" and click \"Search\". Observe that only the busybox tasks are listed (about 16). \n \n * Click the \"X\" next to the search text box. Observe that all of the tasks re-appear. \n \n \n",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Column sorts \n \n * Enable all of the columns. \n \n * Observe that by default the \"Order\" column header is in bold, it has a down-arrow icon, and that the table is sorted by this column in ascending order. \n \n * Observe that the columns are in this order, and are sortable only if indicated: \n \n Order {sortable} \n Recipe {sortable} \n Recipe version \n Task {sortable} \n Executed {sortable} \n Outcome {sortable} \n Cache attempt {sortable} \n Time (secs) {sortable} \n CPU usage {sortable} \n Disk I/O (ms) {sortable} ",
+ "expected_results": ""
+ },
+ "10": {
+ "action": " Test that each of the sortable columns do sort, ascending and descending \n \n * Observe that each of the column headers have a question mark icon, and that hovering over it provides help text.",
+ "expected_results": ""
+ },
+ "11": {
+ "action": "\"Executed\" Filter \n \n * Observe that the \"Executed\" column has the filter icon. Click on it and observe these values, where \"All Tasks\" is the default \n \n (*) All Tasks \n ( ) Executed Tasks \n ( ) Not Executed Tasks \n \n * Click on \"Executed Tasks\" and observe that only rows with the value \"Executed\" are displayed. \n \n * Click on \"Not Executed Tasks\" and observe that only rows with the value \"Not Executed\" are displayed. \n \n * Click on \"All Tasks\" and observe that all rows are displayed. \n \n \n",
+ "expected_results": ""
+ },
+ "12": {
+ "action": "\"Outcome\" Filter \n \n * Observe that the \"Outcome\" column has the filter icon. Click on it and observe these values, where \"All Tasks\" is the default \n \n (*) All Tasks \n ( ) Succeeded Tasks \n ( ) Failed Tasks \n ( ) Cached Tasks \n ( ) Prebuilt Tasks \n ( ) Covered Tasks \n ( ) Empty Tasks \n \n * Click on each of the filter selections, and observe that the resulting row \"Outcome\" values match the selection. \n \n * Click on \"All Tasks\" and observe that all rows are displayed. \n \n \n1",
+ "expected_results": ""
+ },
+ "13": {
+ "action": "\"Cache attempt\" Filter \n \n * Observe that the \"Cache attempt\" column has the filter icon. Click on it and observe these values, where \"All Tasks\" is the default \n \n (*) All Tasks \n ( ) Tasks with cache attempts \n ( ) Tasks with 'File not in cache' attempts \n ( ) Tasks with 'Failed' cache attempts \n ( ) Tasks with 'Succeeded' cache attempts \n \n * Click on each of the filter selections, and observe that the resulting row \"Outcome\" values match the selection. \n \n Note the with a clean build, only the \"All Tasks\" and \"Tasks with cache attempts\" will return rows. \n \n * Click on \"All Tasks\" and observe that all rows are displayed. \n \n \n1",
+ "expected_results": ""
+ },
+ "14": {
+ "action": "Order, Task, Executed, Outcome, Cache attempt links \n \n * Observe that for a given row, the above values are live links that will both take you to the respective task detail page. Click the back button to return. \n\n * Observe that for a given row the values in \"Recipe\" and \"Recipe version\" are live links that will both take you to the respective recipe details page. Click the back button to return \n \n \n1",
+ "expected_results": ""
+ },
+ "15": {
+ "action": "Time Page \n \n * \"In Toaster, select the build, and select the \"Time\" link in the left sidebar \n \n * Observe that the default columns are: \n \n \"Recipe\", \"Task\", \"Executed\", \" Outcome\", \"Time (secs)\" \n \n * Observe that the default sort is \"Time (secs)\", in descending order. \n \n * In the \"Edit Columns\" button, turn on all of the columns. \n \n * Observe that the page now matches the \"Tasks\" page, and passes the same tests. \n \n \n1",
+ "expected_results": ""
+ },
+ "16": {
+ "action": "CPU Usage Page \n \n * \"In Toaster, select the build, and select the \"CPU Usage\" link in the left sidebar \n \n * Observe that the default columns are: \n \n \"Recipe\", \"Task\", \"Executed\", \"Outcome\", \"CPU Usage\" \n \n * Observe that the default sort is \"CPU Usage\", in descending order. \n \n * In the \"Edit Columns\" button, turn on all of the columns. \n \n * Observe that the page now matches the \"Tasks\" page, and passes the same tests. \n \n \n1",
+ "expected_results": ""
+ },
+ "17": {
+ "action": "Disk I/O Page \n \n * \"In Toaster, select the build, and select the \"Disk I/O\" link in the left sidebar \n \n * Observe that the default columns are: \n \n \"Recipe\", \"Task\", \"Executed\", \"Outcome\", \"Disk I/O (ms)\" \n \n * Observe that the default sort is \"Disk I/O (ms)\", in descending order. \n \n * In the \"Edit Columns\" button, turn on all of the columns. \n \n * Observe that the page now matches the \"Tasks\" page, and passes the same tests. ",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "Tasks_in_toaster_UI"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.package_detail_in_toaster_UI",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "TOASTER: Test Instructions for \"Package Detail\" page \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "In Toaster, select the build, and select the \"Packages\" link in the left sidebar.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Select a package from the \"Package\" column.",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Breadcrumbs \n * Observe that the 3 breadcrumbs at the top left are: \n : a live link that will take you back to the image dashboard page \n \"Packages\": a live link that will take you back to the Packages page \n \"bash\": the name of the current package (not a link) \n * Test the breadcrumb live links, return to this page ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "General Layout \n * Click on any package \n * Observe that there is no the left-hand box \n * Observe that there is a right-hand box, with information about the package \n * Observe the title is the package name and version, in bold \n * Observe that, if the package is installed in an image, there is a link to the image(s) the package appears in \n * Observe that, if the package is not installed in the image, there are two tab buttons below the title ",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Observe that the tab buttons are: \n Generated files (2) {highlighted} \n Runtime dependencies (4) \n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Generated files tab \n * Click the \"Generated files\" tab (which should be selected by default) \n * Observe that the number of files in the table matches the number in parenthesis after the \"Generated files\" tab title. \n * Observe that the columns in the table are \"File\" and \"Size\" \n * Observe that the table is sorted by \"File\" in ascending alphabetical order (A to Z). ",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Runtime dependencies tab \n * Click the \"Runtime dependencies\" tab \n * Observe that the number of dependencies in the table matches the number in parenthesis after the \"Runtime dependencies\" tab title. \n * Observe that the columns in the table are: \n \"Package, Version, Size\" \n * Observe that the table is sorted by Package in ascending alphabetical order (A to Z) \n * Observe that the package name values are live links to the respective package details page. ",
+ "expected_results": "The information icon was eliminated on the columns that where self explanatory. "
+ },
+ "10": {
+ "action": "Package information box \n * Observe that there is a right-hand box, with information about the package, including in this example the fields: \n \"Size, License, Recipe, Recipe version, Layer, Layer branch, Layer commit\" \n * Observe that each of the field has a question mark icon, and that hovering will provide help text. \n * Observe that none of the values in the right-hand box are blank \n * Observe that the \"Recipe\" value is a live link to the respective recipe detail page. ",
+ "expected_results": ""
+ }
+ },
+ "summary": "package_detail_in_toaster_UI"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.recipes:_Sort_the_content_of_the_recipes_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Recipes\" link in the left sidebar. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Make sure that by default the \"Recipes\" table is sorted by \"Recipe\" in ascending alphabetical order (A to Z). \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Sort my \"Recipes\" table by \"Section\" and then navigate away by selecting a recipe(such as click busybox recipe). When you click \"back\" button in web-browser to go back to the \"Recipes\" table it should still be sorted by \"Section\". \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Make sure all column headings are sortable, except \"Recipe version\", \"Dependencies\", \"Reverse dependencies\" and \"Layer commit\". \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Sorting and \"Edit columns\" \nIf you use the \"Edit columns\" menu to hide the column with the applied sorting, we revert the sorting to the default sorting (i.e. \"Recipe\"). The default sorting always uses one of the core columns, which cannot be hidden using the \"Edit columns\" menu. \nSort recipes by \"section\" column heading. Then hide \"Section\" column by \"Edit columns\". Make sure that the \"Recipes\" table is sorted by \"Recipe\" in ascending alphabetical order (A to Z). \nNOTE: Bug 5919 is filed against the issue. \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Sorting and search \nSearching should have no impact on the applied sorting. Any results returned should be sorted by the sorting criteria selected when the search query was submitted. \nSort recipes by \"section\" column heading. Input a string (such as \"lib\") in search box and click search button. Make sure results returned should be sorted by \"section\". \n",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "recipes:_Sort_the_content_of_the_recipes_table."
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.recipes:_Search_the_content_of_the_recipes_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "recipes: Search the content of the recipes table \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n \n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "In Toaster, select the build, and select the \"Recipes\" link in the left sidebar. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Observe the search is made of a text input field and a \"Search\" button in a toolbar above the table. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "When no search query has been entered, we have placeholder text saying: \"Search recipes\". \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Input \"lib\" in the text input field. The placeholder text disappears when the first character is typed. Click search button. \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": " \n(1) returned results \nThe search string is kept in the text input field. The results returned occur. Click \"Clear search\" icon to clear the search and display all recipes. \n(2) no results returned \nIf your search query returns no results, the page heading changes to \"No recipes found\", and we show you an alert with a search form and an option to show all recipes. Observe \"show all recipes\" button is available. \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "When I run a search, the search happens against the following columns (independently of they being shown or hidden): \n- Recipe \n- Recipe version \n- Recipe file \n- Section \n- License \n- Layer \n- Layer branch \n- Layer commit \n\nInput a string to search for the above 8 column headings separately to make sure that the search happens against the columns. \n\n \n\n",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Search, sorting and \"Edit columns\" \nSearching does not change the state of the table: the same columns remain hidden and the same sorting applied when search results are displayed, but filters are cleared by the search results.\nSearch a string and make sure that the same columns remain hidden and the same sorting applied. Since filter feature of recipes (4296) is obsolete, we don't have to test filter. \n\n",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "Search and filters \nThe scope of the filters is the content currently on the table (this means all table pages, not only the one displayed). The scope of the search is always the content of the database.\nSince filter feature of recipes (4296) is obsolete, we don't have to test filter. \n\nIf I run a search query, any filter applied afterwards will filter the content returned by the search query. \n\nIf I run a search query while a filter is applied, the filter is cleared by the results of the search query (i.e. we display the results of the search query and clear the filter applied beforehand).",
+ "expected_results": "NA"
+ },
+ "11": {
+ "action": " The same happens if I click the \"Clear search\" icon when a filter is applied to a set of search results (both search results and applied filter are cleared, and the table shows all the tasks). \nSince filter feature of recipes (4296) is obsolete, we don't have to test filter. \n\n",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "recipes:_Search_the_content_of_the_recipes_table."
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.recipes:_Customise_the_columns_of_the_recipes_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Recipes\" link in the left sidebar. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "* Observe that the default columns are: \n \"Recipe, Recipe version, Recipe file, Section , License, Layer\" \n \n * Click the \"Edit Columns\" button. \n * Observe that the fields are sorted as follows (with the indicated items greyed) \n [ ] Dependencies \n [x] Layer \n [ ] Layer branch \n [ ] Layer commit \n [x] License \n [x] Recipe {greyed} \n [x] Recipe file \n [x] Recipe version {greyed} \n [ ] Reverse dependencies \n [x] Section \n \n * For each of the greyed items, attempt to click them. Observe that they do not change. \n * For each of the non-greyed items, attempt to click them. Observe that the respective column dynamically appears when checked and disappears when un-checked.\n",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "recipes:_Customise_the_columns_of_the_recipes_table."
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.recipes:_View_a_table_with_all_the_recipes_included_in_an_image_recipe",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Recipes\" link in the left sidebar. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "By default, the all recipes table displays the following columns in this order: \n(1) Recipe (2) Recipe version: the target version and revision (3) Dependencies (4)Reverse Dependencies (5) License: the value of the target's LICENSE variable (6) Layer: the name of the layer providing the target \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "In the \"Edit columns\" menu, table columns appear listed alphabetically. \nIn the table itself, the default order of columns is as follows: \n(1) Dependencies (2) Layer (3) Layer branch (4) Layer commit (5) License (6) Recipe (7) Recipe file (8) Reverse dependencies (9) Section (10) Version \n\nThe minimum table is made of the 2 columns that provide the information needed to identify a target: Recipe and Recipe version.",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "recipes:_View_a_table_with_all_the_recipes_included_in_an_image_recipe"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.recipes:_View_detailed_information_about_a_recipe。",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Create a default Yocto project (qemux86), and start the Toaster. \n \n $ source poky/oe-init-build-env \n $ source toaster start \n $ bitbake core-image-minimal \n $ http://localhost:8000/ \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Select the \"core-image-minimal\" build link \n \n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Select the \"Recipes\" link in the left sidebar \n \nObserve that the recipes are listed in a table, and that each recipe name is a live URL link. \n \n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Observe that \"Packages\" link. It should have an appended value like \"(4)\". Now click on this link. \n \nObserve: \n a) The number of packages matches the previous number in parenthesis. \n \n b) Each package has a version and a size. The size may be zero. \n \n c) Note that if you hover on a package name, it will reveal a URL of the following form. This link should take you to the corresponding package detail page. \n \n localhost:8000/gui/build//package/ \n \n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Observe that recipes of \"Build Dependencies\" link has an appended value like \"(0)\". Now click on this link. \n \nObserve: \n a) No dependencies appear, and you get a message of the form: \n \n \"$RECIPE_NAME_VERSION has no build dependencies.\" \n \n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Observe that \"Reverse build dependencies\" link has an appended value like \"(1)\". Now click on this link. \n \nObserve: \n a) The number of packages matches the previous number in parenthesis. \n \n b) The recipe dependency should be \"packagegroup-core-boot\", \n \n c) There should be a respective version displayed, for example \"1.0-r11\" \n \n d) If you hover on the recipe name, it will reveal a URL of the following form. This link should take you to the corresponding recipe detail page. \n \n localhost:8000/gui/build//recipe/ \n \n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Click the breadcrumb \"Recipes\" at the top, locate the \"gdbm\" recipe, and select it. \n \n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Observe that \"Packages\" link. It should have an appended value like \"(0)\". Now click on this link. \n \nObserve: \n a) No packages appear, and you get a message of the form: \n \n \"$PACKAGE_NAME_VERSION does not build any packages.\" \n \n",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Observe that \"Build dependencies\" link. It should have an appended value like \"(2)\". Now click on this link. \n \nObserve: \n a) The number of build dependencies matches the previous number in parenthesis. \n \n b) The recipe dependency should have values like \"gettext-native\" and \"libtool-cross\". \n \n c) There should be a respective versions displayed for each dependency. \n \n d) If you hover on a recipe name, it will reveal a URL of the following form. This link should take you to the corresponding recipe detail page. \n \n localhost:8000/gui/build//recipe/ \n \n1",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "Observe that \"Reverse build dependencies\" link. It should have an appended value like \"(0)\". Now click on this link. \n \nObserve: \n a) No reverse dependencies appear, and you get a message of the form: \n \n \"$RECIPE_NAME_VERSION does not build any packages.",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "recipes:_View_detailed_information_about_a_recipe。"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.variables:_Search_the_content_of_the_bitbake_variables_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Configuration\" link in the left sidebar. Then click \"BitBake variables\" tab. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "When no search query has been entered, we have placeholder text saying: \"Search BitBake variables\". \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Input \"lib\" in text input field. The placeholder text disappears when the first character is typed. Click search button. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Observe \"4 variables found\" is showed. (It may be other number.) \n If your search query returns no results, we display an alert with: \n - A h3 heading saying: \"No variables found\" \n - A search box \n - The search query is showed in the text input shield. \n - A link to show all variables. we show the variables table. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "We provide a \"Clear search\" icon (icon-remove-sign). Click it to clear the search and display all variables. Check that the \"Clear search\" icon cannot be accessed using the tab key. \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Verify search scope. \nWhen I run a search, the search happens against the following columns (independently of they being shown or hidden): \n- Variable \n- Value \n- Set in file \n- Description \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Verify \"search, sorting and 'Edit columns'\" \nSearching does not change the state of the table: the same columns remain hidden and the same sorting applied \nwhen search results are displayed, but filters are cleared by the search results. \n\n",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Search and filters \nIf I run a search query, any filter applied afterwards will filter the content returned by the search query. \nIf I run a search query while a filter is applied, the filter is cleared by the results of the search query. \n",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "variables:_Search_the_content_of_the_bitbake_variables_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.variables:_Sort_the_content_of_the_bitbake_variables_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Configuration\" link in the left sidebar. Then click \"BitBake variables\" tab. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "By default, the \"variables\" table is sorted by \"Variable\" in ascending alphabetical order (A to Z). \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Make sure that \"Variable\" column is sortable (Developers have disabled sort function of all other columns to avoid bug 6004) \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Sorting and search \nSearching should have no impact on the applied sorting. Any results returned should be sorted by the sorting criteria selected when the search query was submitted.",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "variables:_Sort_the_content_of_the_bitbake_variables_table."
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.builds:_View_a_table_of_all_the_builds_run_for_a_certain_build_directory",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start up toaster and open localhost:8000. ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "You can see 'Latest builds' section lists. - Builds in progress, sorted by inverse start time (last one starting at the top). - 3 latest completed builds, as long as they are less than 24 hours old. If there are no builds in progress or builds completed within the last 24 hours we don't display it: the page shows only the 'All builds' section. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "You can see the following column headings. You can see the their description in https://bugzilla.yoctoproject.org/attachment.cgi?id=1617. outcome, recipe, machine, started on, completed on, failed tasks, errors, warnings, time, image files, project",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "builds:_View_a_table_of_all_the_builds_run_for_a_certain_build_directory"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.builds:_Customise_the_columns_of_the_builds_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start up toaster.",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Create 2 builds, such as \"bitbake core-image-minimal\" and \"bitbake core-image-sato\". Wait for successful builds and then run: xdg-open http://localhost:8000/",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Enter \"All build\" table in web browser.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Select a few item in \"Edit columns\" to show or hide them in all builds.",
+ "expected_results": "Unchecked items changed to checked should immediately appear in the table. Checked items changed to unchecked should immediately disappear from the table. If you uncheck the column with the applied sorting, when you close the \"Edit columns\" menu the applied sorting should revert to the table default sorting. Bug 5919 is filed for the function."
+ }
+ },
+ "summary": "builds:_Customise_the_columns_of_the_builds_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.packages:_View_a_table_with_all_the_packages_built_for_an_image_recipe",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Packages\" link in the left sidebar. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "By default, the built packages table displays the following columns in this order: Package, Package version, Size ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Apart from the columns shown by default, the following additional columns are also available to users via the \"Edit columns\" menu: Layer, Layer branch, Layer commit, License, Recipe version ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Enable these columns and observe that table columns appear listed alphabetically in the \"Edit columns\" menu (1)Package (2)Package version (3)Size (4)License (5)Recipe (6)Recipe version (7)Layer (8)Layer branch (9)Layer commit ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "The minimum table is made of the 2 columns that provide the information needed to identify a package: Package and Package version. Their corresponding checkboxes in the \"Edit columns\" menu appear always selected and are in an inactive state. ",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "packages:_View_a_table_with_all_the_packages_built_for_an_image_recipe"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.packages:_Sort_the_content_of_the_packages_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Packages\" link in the left sidebar. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Observe that by default the \"built packages\" table is sorted by \"Package\" in ascending alphabetical order (A to Z). ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Click \"Size\" column heading to sort my \"built packages\" table by \"Size\". Then navigate away by selecting a package and you can see the package page. After click \"go back\" button in browser to return to the \"built packages\" table it should still be sorted by \"Size\". ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Observe that all except \"Package version\", \"Recipe version\", \"Layer commit\" are sortable. ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "If you use the \"Edit columns\" menu to hide the column with the applied sorting, we revert the sorting to the default sorting (i.e. \"Package\"). The default sorting always uses one of the core columns, which cannot be hidden using the \"Edit columns\" menu. Bug 5919 is filed for the issue. ",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Searching should have no impact on the applied sorting. Any results returned should be sorted by the sorting criteria selected when the search query was submitted. Sort packages by size and search a string. Observe that results returned should be sorted by size. ",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "In Toaster, select the build, and select the \"core-image*\" link in the left sidebar. Observe that by default the \"built packages\" table is sorted by \"Package\" in ascending alphabetical order (A to Z). ",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "If choose to sort my \"included packages\" table by \"Size\" and then navigate away by selecting a package, when I go back to the \"included packages\" table it should still be sorted by \"Size\". ",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "Enable all columns by \"Edit columns\". All except \"Package version\", \"Recipe version\", \"Dependencies\", \"Reverse dependencies\", \"Layer commit\", should be sortable. ",
+ "expected_results": ""
+ },
+ "11": {
+ "action": "If you use the \"Edit columns\" menu to hide the column with the applied sorting, we revert the sorting to the default sorting (i.e. \"Package\"). The default sorting always uses one of the core columns, which cannot be hidden using the \"Edit columns\" menu. Bug 5919 is filed for the issue. ",
+ "expected_results": ""
+ },
+ "12": {
+ "action": "Searching should have no impact on the applied sorting. Any results returned should be sorted by the sorting criteria selected when the search query was submitted. Sort packages by size and search a string. Observe that results returned should be sorted by size.",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "packages:_Sort_the_content_of_the_packages_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.packages:_Customise_the_columns_of_the_packages_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Packages\" link in the left sidebar. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Observe that by default the \"built packages\" table is sorted by \"Package\" in ascending alphabetical order (A to Z). ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Select 1 column in \"Edit column\" to show the column. ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Deselect 1 column in \"Edit column\" to hide 1 column by \"Edit columns\".",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "packages:_Customise_the_columns_of_the_packages_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.packages:_Search_the_content_of_the_packages_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Packages\" link in the left sidebar. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Observe that by default the \"built packages\" table is sorted by \"Package\" in ascending alphabetical order (A to Z). ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "When no search query has been entered, we have placeholder text saying: \"Search packages built\". The placeholder text disappears when the first character is typed. ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "When a search query has been submitted and results returned: â–ª We keep the search string in the text input field. â–ª We provide a \"Clear search\" icon (icon-remove-sign). Click it to clear the search and display all packages. â–ª We change the page heading to indicate the number of results returned by the search query. ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "If your search query returns no results, the page heading changes to \"No packages found\", and we show you an alert with a search form and an option to show all packages. ",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Searching does not change the state of the table: the same columns remain hidden and the same sorting applied. ",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "When I run a search, the search happens against the following columns (independently of they being shown or hidden): - Package - Package version - License - Recipe - Recipe version - Layer - Layer branch - Layer commit ",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Hide all columns except \"Package\" and \"Package version\". Search a string which is included in other hidden columns, not the 2 columns. See if returned results occur. ",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "In Toaster, select the build, and select the \"core-image*\" link in the left sidebar. Observe that by default the \"included packages\" table is sorted by \"Package\" in ascending alphabetical order (A to Z). Rerun tests according to step 4~9.",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "packages:_Search_the_content_of_the_packages_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.View_detailed_information_about_a_layer",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Make sure that layer information is shown in: \n* recipes table \n All builds-> core-image-minimal-> recipes \n Select \"Layer\", \"Layer branch\", \"Layer commit\" in Edit columns and observe the 3 columns are showed. Note that the \"Layer branch\" column can be empty. \n\n* recipe details \n All builds-> core-image-minimal-> recipes \n Click a recipe and you can see \"Layer\", \"Layer branch\", \"Layer commit\", \"Recipe details\" information. Note that \"Layer branch\" is not required. If there is no layer branch, you should not see the \"Layer branch\" item on the list.",
+ "expected_results": "NA"
+ },
+ "3": {
+ "action": "built packages table \n All builds-> core-image-minimal-> packages \n Select \"Layer\", \"Layer branch\", \"Layer commit\" in Edit columns and observer the 3 columns is showed. Note that the \"Layer branch\" column can be empty. \n\n* built package details \n All builds-> core-image-minimal-> packages \n Click a package and you can see \"Layer\", \"Layer branch\", \"Layer commit\" in package information. Note that \"Layer branch\" is not required. If there is no layer branch, you should not see the \"Layer branch\" item on the list. \n\n* image information \n All builds-> core-image-minimal-> core-image-minimal(images) \n Select \"Layer\", \"Layer branch\", \"Layer commit\" in Edit columns and observer the 3 columns is showed.",
+ "expected_results": "NA"
+ },
+ "4": {
+ "action": "Note that the \"Layer branch\" column can be empty. \n\n* installed package details \n All builds-> core-image-minimal-> core-image-minimal(images) \n Click a package and you can see layer information in package information. Note that \"Layer branch\" is not required. If there is no layer branch, you should not see the \"Layer branch\" item on the list. \n\n* configuration \n All builds-> core-image-minimal-> Configuration \n You can see \"Layer\", \"Layer branch\", \"Layer commit\" in configuration summary. Note that the \"Layer branch\" column can be empty. \n\n* build dashboard \n All builds-> core-image-minimal \n You can see \"Layers\" in build summary. ",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "View_detailed_information_about_a_layer"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.Select_the_number_of_table_rows_displayed_per_page",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Packages\" link in the left sidebar. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Users can select the number of rows they want to see in a table using the \"Show rows\" dropdown menu, which displays above and below each table. The options of the \"Show rows\" dropdown are: 10, 25, 50, 100, 150. ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "The last selected option from the \"Show rows\" menu should be remembered: * Select one option, for example 10 * Click on a package name to navigate away from the built packages table * Click on the \"Packages\" link in the breadcrumb at the top of the page to go back to the packages table Note that 25 is still selected in the \"Show rows\" menu. ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Observe that the pagination widget is made of: - A \"Previous\" button - A \"Next\" button - A maximum of 5 page buttons. ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "See the pagination function of \"Tasks\", \"Recipes\" links according to steps 3~4.",
+ "expected_results": "Expected result for step 4: This widget has no previous or next button if testing in toaster 2.2. see Bug 9831"
+ }
+ },
+ "summary": "Select_the_number_of_table_rows_displayed_per_page"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.View_detailed_configuration_information_for_a_build",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Configuration\" link in the left sidebar. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Observe that the configuration page has 2 tabs: - Summary - BitBake variables The Summary tab is the default tab. ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "The following content is included in summary tab. (1) Build configuration (2) Layers (Layer, Layer branch - if any, Layer commit) ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Observe that \"BitBake variables\" tab includes \"Variable\", \"Value\", \" Set in file\", \"Description\" columns. ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Click filter button of \"Set in file\" column. Select \"Local configuration variables\" and click \"Apply\" button to see if filter function works well. ",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "In the \"Edit columns\" menu, table columns appear listed alphabetically: Description, Set in file, Value, Variable ",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "The minimum table is made of the \"Variable\" and \"Value\" columns. ",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Click a variable and a \"History of ${variable}\" dialog will occur. The modal dialog shows the variable value followed by the history table, which has the following columns: - Order: indicates the sequence in which the files set the variable - Configuration file: the location in disk of the file that set the variable - Operation: the value of the operation field as stored in the database. - Line number: the line number where the operation is performed in the configuration file. For such variables, whose value is an empty string, the Value cell in the variables table isempty, which is probably the right thing. In the modal, instead of the variable value, we show an alert (with the class .alert-info) saying: \"The value of is an empty string\" ",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "Click arrow links in description tab to see linking variables to the Yocto Project reference manual.",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "View_detailed_configuration_information_for_a_build"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.variables:_Customise_the_columns_of_the_bitbake_variables_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Configuration\" link in the left sidebar. Then click \"BitBake variables\" tab. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Show or hide columns by select or deselect options of \"Edit columns\".",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "variables:_Customise_the_columns_of_the_bitbake_variables_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.variables:_Filter_the_content_of_the_bitbake_variables_table",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"Configuration\" link in the left sidebar. Then click \"BitBake variables\" tab. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Observe \"Set in file\" and \"Description\" columns have filters. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Filters are mutually exclusive. Only one column filter can be applied to a table at any given time. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Filters are overridden by search. \nThe scope of the filters is the content currently on the table. \nThe scope of the search is always the full content of the database. \nSo: \n- if I run a search query, any filter applied afterwards will filter the content returned by the search query. \nSearch a string and apply a filter. Observe it would filter the content returned by the search query. \n\n- if I run a search query while a filter is applied, the filter is overridden by the results of the search query. \nApply a filter and search a string. Observe that the previous filtered result is overridden by the search. ",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "variables:_Filter_the_content_of_the_bitbake_variables_table"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.View_and_navigate_the_full_directory_structure_of_built_images",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build, and select the \"core-image-minimal\" link in the left sidebar. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Observe image information page has 2 tabs: \n- Packages included: this tab shows a table with all the packages installed in the image. The tab label includes the total number of packages listed in the table and their size. \n- Directory structure: this tab shows all the files included in the image. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "By default, the included packages table displays the following columns in this order: \nPackage, Package version, Size, Dependencies \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Apart from the columns shown by default, the following additional columns are also available to users via the \"Edit columns\" menu: \nLayer, Layer branch, Layer commit, License, Recipe, Recipe version. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "The minimum table is made of the 2 columns that provide the information needed to identify a package: Package and Package version. \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "By default, the directory structure table shows the top level directories and files in the file system.\nThe table includes the following columns: \nDirectory/File, Symbolic link to, Source package, Size, Permissions, Owner, Group \nOpen some directories and see files. ",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "View_and_navigate_the_full_directory_structure_of_built_images"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.View_a_summary_of_all_the_information_available_for_a_build",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and build \"bitbake core-image-minimal\". \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select the build. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "A 'Build dashboard' page is showed. \nThe 'Build dashboard' page has two main states: \n(1) Success state: when the build completes successfully. In the success state, if the build target(s) include an image recipe, the page displays an image content module, and the left navigation has an \"Images\" section at the top. \n(2) Fail state: when the build fails (shown in this page). In the fail state, the page always displays an errors content module, and the left navigation does not have an \"Images\" section at the top. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Observe that the page provides access to all information available for the selected build. \n(1) Images \n(2) Build: this group provides links to the following pages: \nConfiguration, Tasks, Recipes, Packages \n(3) Performance: this group provides links to the following pages: \nTime, CPU usage, Disk I/O \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "The page heading is made of the build target(s) and the machine, such as \"core-image-minimal qemux86\". If the build has more than one target, they show in ascending alphabetical order (A to Z) both in the page heading and in the \"Images\" section of the left navigation. If the build was successful, there is an image content module for each target that is an image recipe. The modules also show in ascending alphabetical order by target name. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Packages included, total package size, license manifest, image files are included in the images section. \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "The information of Machine, Distro, Layers is included in configuration content module of \"Build summary\" section. \nThe information of \"Total number of task\", \"Tasks executed\", \" Tasks not executed\", \"Reuse\" is included in the tasks content module. \nThe information of \"Recipes built\" and \"Packages built\" is included in the \"Recipes&Packages\" content module. ",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "View_a_summary_of_all_the_information_available_for_a_build"
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.Display_the_content_of_error_messages_and_warnings",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and create a successful build and a failed build. You can force a build to terminate by ctrl+c. ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Observe that the number of errors and warnings thrown by a build shows in both 'All builds' page and the 'Latest builds' section. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Observe that the number and content of errors and warnings thrown by a build shows in the 'Build dashboard' page. ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Click warning or error links to see warning and error details. ",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "Display_the_content_of_error_messages_and_warnings."
+ }
+ },
+ {
+ "test": {
+ "@alias": "toaster-unmanaged-mode.toaster-unmanaged.Build_summary_information_fully_implemented",
+ "author": [
+ {
+ "email": "Yuan.Sun2@windriver.com",
+ "name": "Yuan.Sun2@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start with a default Yocto project (qemux86), start the Toaster, and create 4 builds. \na) a successful build with images (bitbake core-image-minimal) \nb) a successful build without images (bitbake mtools-native) \nc) a failed build with errors and warnings (run \"bitbake core-image-sato\", then press control+c to terminate the build)",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "In Toaster, select a core-image-minimal build.",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Observe that there is a 'Build dashboard' page captured by Toaster. The page does not exist for builds in progress, only for finished builds. \nThe 'Build dashboard' page is made of: \na) Breadcrumb \nb) Navigation \n IMAGES: core-image-minimal \n BUILD: \n Configuration \n Tasks \n Recipes \n Packages \n PERFORMANCE: \n Time \n CPU usage \n Disk I/O \nc) Page heading (core-image-minimal qemux86) \nd) Section heading (Images, Build summary) \ne) Build status notification (Completed on xx/xx/xx... Build time:xx:xx:xx)",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Observe \"images\" section is made of the following \n.\na) Heading: core-image-minimal, which links to the \"Packages included\" tab of the image information page \nb) Number of packages installed: (packages included xx), which is a link to the \"Packages included\" tab of the image information page \nc) Total installed package size: xxMB \nd) License manifest (which is a link to the \"Packages included\" tab of the information page with the following columns showing: \"Package\", \"Package version\", \"License\" and \"Recipe\". We have bug 6079 open for this). Next to the license manifest is the path to the directory where you can find the license manifest file. \n \ne) Image files (rootfs file names and rootfs file sizes)",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Observe \"Build summary\" section is made of the following. \na) Configuration (which is a link to the configuration page): \n Machine \n Distro \n Layers (sorted in alphabetical order) \nb) Tasks (which is a link to the tasks page) \n Total number of tasks (which is a link to the tasks page) \n Tasks executed (which is a link to the tasks page with the tasks executed filter applied) \n Tasks not executed (which is a link to the tasks pages with the tasks not executed filter applied)\n Reuse \nc) Note that \"Total number of tasks\" should equal number of \"Tasks executed\" + number of \"Tasks not executed\" \nd) Recipes (which is a link to the recipes page) & Packages (which is a link to the packages built page) ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": " Number of recipes built (which is a link to the recipes page) \n \n Number of packages built (which is a link to the packages built page)",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Return to localhost:8000 and select a successful build without images (mtools-native) \n.\nObserve the build dashboard for a successful build of a target that is not an image recipe. There is no image content module, and no \"Images\" section in the left navigation.",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Return to localhost:8000 and select the failed build (core-image-sato). Observe the build dashboard for the failed build. \n \na) the errors content module: \nThis module exists for those builds that throw error(s). It appears immediately below the build status notification. \nThe module has 2 states: \nâ–ª Expanded (shows number of errors and error content) \nâ–ª Collapsed (shows only the number of errors) \nBy default, the errors module is in the expanded state. \nErrors content modules include the following information: \n(1.1) A heading, which indicates the number of errors thrown by the build, and toggles the module between its 2 states on click. ",
+ "expected_results": "NA"
+ },
+ "9": {
+ "action": "Transitions between states should use a slide up / slide down animation. \n(1.2) Error(s) content \n\nb) the warning content module: \nThis module exists for those builds that throw warning(s). \nIt is the last content module shown on the build dashboard. \nThe module has 2 states: \nâ–ª Expanded (shows number of warnings and warning content) \nâ–ª Collapsed (shows only the number of warnings) \nBy default, the warning module is in the collapsed state. Warnings content modules include the following information: \n(2.1) A heading, which indicates the number of warnings thrown by the build, and toggles the module between its 2 \nstates on click. Transitions between states should use a slide up / slide down animation. \n(2.2) Warning(s) content ",
+ "expected_results": "NA"
+ }
+ },
+ "summary": "Build_summary_information_fully_implemented"
+ }
+ }
+] \ No newline at end of file
diff --git a/meta/lib/oeqa/oetest.py b/meta/lib/oeqa/oetest.py
index 95d3bf72fc..9c84466dd0 100644
--- a/meta/lib/oeqa/oetest.py
+++ b/meta/lib/oeqa/oetest.py
@@ -1,13 +1,15 @@
+#
# Copyright (C) 2013 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
# Main unittest module used by testimage.bbclass
# This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime.
# It also has some helper functions and it's responsible for actually starting the tests
-import os, re, mmap, sys
+import os, re, sys
import unittest
import inspect
import subprocess
@@ -27,7 +29,6 @@ try:
except ImportError:
pass
from oeqa.utils.decorators import LogResults, gettag, getResults
-from oeqa.utils import avoid_paths_in_environ
logger = logging.getLogger("BitBake")
@@ -107,7 +108,7 @@ class oeRuntimeTest(oeTest):
pass
def tearDown(self):
- # Unistall packages in the DUT
+ # Uninstall packages in the DUT
self.tc.install_uninstall_packages(self.id(), False)
res = getResults()
@@ -129,48 +130,6 @@ class oeRuntimeTest(oeTest):
def tearDownLocal(self):
pass
- #TODO: use package_manager.py to install packages on any type of image
- def install_packages(self, packagelist):
- for package in packagelist:
- (status, result) = self.target.run("smart install -y "+package)
- if status != 0:
- return status
-
-class OETestCalledProcessError(subprocess.CalledProcessError):
- def __str__(self):
- if hasattr(self, "stderr"):
- return "Command '%s' returned non-zero exit status %d with output %s and stderr %s" % (self.cmd, self.returncode, self.output, self.stderr)
- else:
- return "Command '%s' returned non-zero exit status %d with output %s" % (self.cmd, self.returncode, self.output)
-
-subprocess.CalledProcessError = OETestCalledProcessError
-
-class oeSDKTest(oeTest):
- def __init__(self, methodName='runTest'):
- self.sdktestdir = oeSDKTest.tc.sdktestdir
- super(oeSDKTest, self).__init__(methodName)
-
- @classmethod
- def hasHostPackage(self, pkg):
- if re.search(pkg, oeTest.tc.hostpkgmanifest):
- return True
- return False
-
- def _run(self, cmd):
- return subprocess.check_output(". %s > /dev/null; %s;" % (self.tc.sdkenv, cmd), shell=True, stderr=subprocess.STDOUT).decode("utf-8")
-
-class oeSDKExtTest(oeSDKTest):
- def _run(self, cmd):
- # extensible sdk shows a warning if found bitbake in the path
- # because can cause contamination, i.e. use devtool from
- # poky/scripts instead of eSDK one.
- env = os.environ.copy()
- paths_to_avoid = ['bitbake/bin', 'poky/scripts']
- env['PATH'] = avoid_paths_in_environ(paths_to_avoid)
-
- return subprocess.check_output(". %s > /dev/null;"\
- " %s;" % (self.tc.sdkenv, cmd), stderr=subprocess.STDOUT, shell=True, env=env).decode("utf-8")
-
def getmodule(pos=2):
# stack returns a list of tuples containg frame information
# First element of the list the is current frame, caller is 1
@@ -221,15 +180,16 @@ class TestContext(object):
path = [os.path.dirname(os.path.abspath(__file__))]
extrapath = ""
else:
- path = d.getVar("BBPATH", True).split(':')
+ path = d.getVar("BBPATH").split(':')
extrapath = "lib/oeqa"
self.testslist = self._get_tests_list(path, extrapath)
self.testsrequired = self._get_test_suites_required()
self.filesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runtime/files")
- self.imagefeatures = d.getVar("IMAGE_FEATURES", True).split()
- self.distrofeatures = d.getVar("DISTRO_FEATURES", True).split()
+ self.corefilesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
+ self.imagefeatures = d.getVar("IMAGE_FEATURES").split()
+ self.distrofeatures = d.getVar("DISTRO_FEATURES").split()
# get testcase list from specified file
# if path is a relative path, then relative to build/conf/
@@ -406,9 +366,9 @@ class RuntimeTestContext(TestContext):
self.target = target
self.pkgmanifest = {}
- manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True),
- d.getVar("IMAGE_LINK_NAME", True) + ".manifest")
- nomanifest = d.getVar("IMAGE_NO_MANIFEST", True)
+ manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"),
+ d.getVar("IMAGE_LINK_NAME") + ".manifest")
+ nomanifest = d.getVar("IMAGE_NO_MANIFEST")
if nomanifest is None or nomanifest != "1":
try:
with open(manifest) as f:
@@ -424,19 +384,19 @@ class RuntimeTestContext(TestContext):
def _get_test_suites(self):
testsuites = []
- manifests = (self.d.getVar("TEST_SUITES_MANIFEST", True) or '').split()
+ manifests = (self.d.getVar("TEST_SUITES_MANIFEST") or '').split()
if manifests:
for manifest in manifests:
testsuites.extend(self._read_testlist(manifest,
- self.d.getVar("TOPDIR", True)).split())
+ self.d.getVar("TOPDIR")).split())
else:
- testsuites = self.d.getVar("TEST_SUITES", True).split()
+ testsuites = self.d.getVar("TEST_SUITES").split()
return testsuites
def _get_test_suites_required(self):
- return [t for t in self.d.getVar("TEST_SUITES", True).split() if t != "auto"]
+ return [t for t in self.d.getVar("TEST_SUITES").split() if t != "auto"]
def loadTests(self):
super(RuntimeTestContext, self).loadTests()
@@ -449,10 +409,10 @@ class RuntimeTestContext(TestContext):
"""
modules = self.getTestModules()
- bbpaths = self.d.getVar("BBPATH", True).split(":")
+ bbpaths = self.d.getVar("BBPATH").split(":")
- shutil.rmtree(self.d.getVar("TEST_EXTRACTED_DIR", True))
- shutil.rmtree(self.d.getVar("TEST_PACKAGED_DIR", True))
+ shutil.rmtree(self.d.getVar("TEST_EXTRACTED_DIR"))
+ shutil.rmtree(self.d.getVar("TEST_PACKAGED_DIR"))
for module in modules:
json_file = self._getJsonFile(module)
if json_file:
@@ -466,8 +426,8 @@ class RuntimeTestContext(TestContext):
import oe.path
- extracted_path = self.d.getVar("TEST_EXTRACTED_DIR", True)
- packaged_path = self.d.getVar("TEST_PACKAGED_DIR", True)
+ extracted_path = self.d.getVar("TEST_EXTRACTED_DIR")
+ packaged_path = self.d.getVar("TEST_PACKAGED_DIR")
for key,value in needed_packages.items():
packages = ()
@@ -548,7 +508,7 @@ class RuntimeTestContext(TestContext):
from oeqa.utils.package_manager import get_package_manager
- pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR", True), pkg)
+ pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
pm = get_package_manager(self.d, pkg_path)
extract_dir = pm.extract(pkg)
shutil.rmtree(pkg_path)
@@ -562,8 +522,8 @@ class RuntimeTestContext(TestContext):
from oeqa.utils.package_manager import get_package_manager
- pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR", True), pkg)
- dst_dir = self.d.getVar("TEST_PACKAGED_DIR", True)
+ pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
+ dst_dir = self.d.getVar("TEST_PACKAGED_DIR")
pm = get_package_manager(self.d, pkg_path)
pkg_info = pm.package_info(pkg)
file_path = pkg_info[pkg]["filepath"]
@@ -572,7 +532,7 @@ class RuntimeTestContext(TestContext):
def install_uninstall_packages(self, test_id, pkg_dir, install):
"""
- Check if the test requires a package and Install/Unistall it in the DUT
+ Check if the test requires a package and Install/Uninstall it in the DUT
"""
test = test_id.split(".")[4]
@@ -585,7 +545,7 @@ class RuntimeTestContext(TestContext):
def _install_uninstall_packages(self, needed_packages, pkg_dir, install=True):
"""
- Install/Unistall packages in the DUT without using a package manager
+ Install/Uninstall packages in the DUT without using a package manager
"""
if isinstance(needed_packages, dict):
@@ -603,7 +563,7 @@ class RuntimeTestContext(TestContext):
if install and extract:
self.target.connection.copy_dir_to(src_dir, "/")
- # Unistall package
+ # Uninstall package
elif not install and rm:
self.target.connection.delete_dir_structure(src_dir, "/")
@@ -611,7 +571,7 @@ class ImageTestContext(RuntimeTestContext):
def __init__(self, d, target, host_dumper):
super(ImageTestContext, self).__init__(d, target)
- self.tagexp = d.getVar("TEST_SUITES_TAGS", True)
+ self.tagexp = d.getVar("TEST_SUITES_TAGS")
self.host_dumper = host_dumper
@@ -626,10 +586,10 @@ class ImageTestContext(RuntimeTestContext):
def install_uninstall_packages(self, test_id, install=True):
"""
- Check if the test requires a package and Install/Unistall it in the DUT
+ Check if the test requires a package and Install/Uninstall it in the DUT
"""
- pkg_dir = self.d.getVar("TEST_EXTRACTED_DIR", True)
+ pkg_dir = self.d.getVar("TEST_EXTRACTED_DIR")
super(ImageTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
class ExportTestContext(RuntimeTestContext):
@@ -643,80 +603,16 @@ class ExportTestContext(RuntimeTestContext):
super(ExportTestContext, self).__init__(d, target, exported)
tag = parsedArgs.get("tag", None)
- self.tagexp = tag if tag != None else d.getVar("TEST_SUITES_TAGS", True)
+ self.tagexp = tag if tag != None else d.getVar("TEST_SUITES_TAGS")
self.sigterm = None
def install_uninstall_packages(self, test_id, install=True):
"""
- Check if the test requires a package and Install/Unistall it in the DUT
+ Check if the test requires a package and Install/Uninstall it in the DUT
"""
export_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- extracted_dir = self.d.getVar("TEST_EXPORT_EXTRACTED_DIR", True)
+ extracted_dir = self.d.getVar("TEST_EXPORT_EXTRACTED_DIR")
pkg_dir = os.path.join(export_dir, extracted_dir)
super(ExportTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
-
-class SDKTestContext(TestContext):
- def __init__(self, d, sdktestdir, sdkenv, tcname, *args):
- super(SDKTestContext, self).__init__(d)
-
- self.sdktestdir = sdktestdir
- self.sdkenv = sdkenv
- self.tcname = tcname
-
- if not hasattr(self, 'target_manifest'):
- self.target_manifest = d.getVar("SDK_TARGET_MANIFEST", True)
- try:
- self.pkgmanifest = {}
- with open(self.target_manifest) as f:
- for line in f:
- (pkg, arch, version) = line.strip().split()
- self.pkgmanifest[pkg] = (version, arch)
- except IOError as e:
- bb.fatal("No package manifest file found. Did you build the sdk image?\n%s" % e)
-
- if not hasattr(self, 'host_manifest'):
- self.host_manifest = d.getVar("SDK_HOST_MANIFEST", True)
- try:
- with open(self.host_manifest) as f:
- self.hostpkgmanifest = f.read()
- except IOError as e:
- bb.fatal("No host package manifest file found. Did you build the sdk image?\n%s" % e)
-
- def _get_test_namespace(self):
- return "sdk"
-
- def _get_test_suites(self):
- return (self.d.getVar("TEST_SUITES_SDK", True) or "auto").split()
-
- def _get_test_suites_required(self):
- return [t for t in (self.d.getVar("TEST_SUITES_SDK", True) or \
- "auto").split() if t != "auto"]
-
-class SDKExtTestContext(SDKTestContext):
- def __init__(self, d, sdktestdir, sdkenv, tcname, *args):
- self.target_manifest = d.getVar("SDK_EXT_TARGET_MANIFEST", True)
- self.host_manifest = d.getVar("SDK_EXT_HOST_MANIFEST", True)
- if args:
- self.cm = args[0] # Compatibility mode for run SDK tests
- else:
- self.cm = False
-
- super(SDKExtTestContext, self).__init__(d, sdktestdir, sdkenv, tcname)
-
- self.sdkextfilesdir = os.path.join(os.path.dirname(os.path.abspath(
- oeqa.sdkext.__file__)), "files")
-
- def _get_test_namespace(self):
- if self.cm:
- return "sdk"
- else:
- return "sdkext"
-
- def _get_test_suites(self):
- return (self.d.getVar("TEST_SUITES_SDK_EXT", True) or "auto").split()
-
- def _get_test_suites_required(self):
- return [t for t in (self.d.getVar("TEST_SUITES_SDK_EXT", True) or \
- "auto").split() if t != "auto"]
diff --git a/meta/lib/oeqa/runexported.py b/meta/lib/oeqa/runexported.py
index 7e245c4120..7e37213df6 100755
--- a/meta/lib/oeqa/runexported.py
+++ b/meta/lib/oeqa/runexported.py
@@ -1,9 +1,9 @@
#!/usr/bin/env python3
-
-
+#
# Copyright (C) 2013 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
# This script should be used outside of the build system to run image tests.
# It needs a json file as input as exported by the build.
@@ -43,8 +43,8 @@ class FakeTarget(object):
self.ip = None
self.server_ip = None
self.datetime = time.strftime('%Y%m%d%H%M%S',time.gmtime())
- self.testdir = d.getVar("TEST_LOG_DIR", True)
- self.pn = d.getVar("PN", True)
+ self.testdir = d.getVar("TEST_LOG_DIR")
+ self.pn = d.getVar("PN")
def exportStart(self):
self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
@@ -130,8 +130,8 @@ def extract_sdk(d):
"""
export_dir = os.path.dirname(os.path.realpath(__file__))
- tools_dir = d.getVar("TEST_EXPORT_SDK_DIR", True)
- tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME", True)
+ tools_dir = d.getVar("TEST_EXPORT_SDK_DIR")
+ tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME")
tarball_path = os.path.join(export_dir, tools_dir, tarball_name)
extract_path = os.path.join(export_dir, "sysroot")
if os.path.isfile(tarball_path):
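extract_sdk() only assembles paths at this point; with hypothetical values for the TEST_EXPORT variables, the assembly looks like:

    import os

    export_dir = '/home/user/testexport'  # dirname of runexported.py
    tools_dir = 'sdk'                     # TEST_EXPORT_SDK_DIR (assumed value)
    tarball_name = 'testexportsdk.sh'     # TEST_EXPORT_SDK_NAME + '.sh' (assumed value)
    tarball_path = os.path.join(export_dir, tools_dir, tarball_name)
    # -> /home/user/testexport/sdk/testexportsdk.sh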
diff --git a/meta/lib/oeqa/runtime/_ptest.py b/meta/lib/oeqa/runtime/_ptest.py
deleted file mode 100644
index 71324d3da2..0000000000
--- a/meta/lib/oeqa/runtime/_ptest.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import unittest, os, shutil
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-from oeqa.utils.logparser import *
-from oeqa.utils.httpserver import HTTPService
-import bb
-import glob
-from oe.package_manager import RpmPkgsList
-import subprocess
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("package-management"):
- skipModule("Image doesn't have package management feature")
- if not oeRuntimeTest.hasPackage("smartpm"):
- skipModule("Image doesn't have smart installed")
- if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
- skipModule("Rpm is not the primary package manager")
-
-class PtestRunnerTest(oeRuntimeTest):
-
- # a ptest log parser
- def parse_ptest(self, logfile):
- parser = Lparser(test_0_pass_regex="^PASS:(.+)", test_0_fail_regex="^FAIL:(.+)", section_0_begin_regex="^BEGIN: .*/(.+)/ptest", section_0_end_regex="^END: .*/(.+)/ptest")
- parser.init()
- result = Result()
-
- with open(logfile) as f:
- for line in f:
- result_tuple = parser.parse_line(line)
- if not result_tuple:
- continue
- result_tuple = line_type, category, status, name = parser.parse_line(line)
-
- if line_type == 'section' and status == 'begin':
- current_section = name
- continue
-
- if line_type == 'section' and status == 'end':
- current_section = None
- continue
-
- if line_type == 'test' and status == 'pass':
- result.store(current_section, name, status)
- continue
-
- if line_type == 'test' and status == 'fail':
- result.store(current_section, name, status)
- continue
-
- result.sort_tests()
- return result
-
- @classmethod
- def setUpClass(self):
- #note the existing channels that are on the board before creating new ones
-# self.existingchannels = set()
-# (status, result) = oeRuntimeTest.tc.target.run('smart channel --show | grep "\["', 0)
-# for x in result.split("\n"):
-# self.existingchannels.add(x)
- self.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), oeRuntimeTest.tc.target.server_ip)
- self.repo_server.start()
-
- @classmethod
- def tearDownClass(self):
- self.repo_server.stop()
- #remove created channels to be able to repeat the tests on same image
-# (status, result) = oeRuntimeTest.tc.target.run('smart channel --show | grep "\["', 0)
-# for x in result.split("\n"):
-# if x not in self.existingchannels:
-# oeRuntimeTest.tc.target.run('smart channel --remove '+x[1:-1]+' -y', 0)
-
- def add_smart_channel(self):
- image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
- deploy_url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, image_pkgtype)
- pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split()
- for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)):
- if arch in pkgarchs:
- self.target.run('smart channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url), 0)
- self.target.run('smart update', 0)
-
- def install_complementary(self, globs=None):
- installed_pkgs_file = os.path.join(oeRuntimeTest.tc.d.getVar('WORKDIR', True),
- "installed_pkgs.txt")
- self.pkgs_list = RpmPkgsList(oeRuntimeTest.tc.d, oeRuntimeTest.tc.d.getVar('IMAGE_ROOTFS', True), oeRuntimeTest.tc.d.getVar('arch_var', True), oeRuntimeTest.tc.d.getVar('os_var', True))
- with open(installed_pkgs_file, "w+") as installed_pkgs:
- installed_pkgs.write(self.pkgs_list.list("arch"))
-
- cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
- "-p", oeRuntimeTest.tc.d.getVar('PKGDATA_DIR', True), "glob", installed_pkgs_file,
- globs]
- try:
- bb.note("Installing complementary packages ...")
- complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not compute complementary packages list. Command "
- "'%s' returned %d:\n%s" %
- (' '.join(cmd), e.returncode, e.output))
-
- return complementary_pkgs.split()
-
- def setUpLocal(self):
- self.ptest_log = os.path.join(oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR",True), "ptest-%s.log" % oeRuntimeTest.tc.d.getVar('DATETIME', True))
-
- @skipUnlessPassed('test_ssh')
- def test_ptestrunner(self):
- self.add_smart_channel()
- (runnerstatus, result) = self.target.run('which ptest-runner', 0)
- cond = oeRuntimeTest.hasPackage("ptest-runner") and oeRuntimeTest.hasFeature("ptest") and oeRuntimeTest.hasPackageMatch("-ptest") and (runnerstatus != 0)
- if cond:
- self.install_packages(self.install_complementary("*-ptest"))
- self.install_packages(['ptest-runner'])
-
- (runnerstatus, result) = self.target.run('/usr/bin/ptest-runner > /tmp/ptest.log 2>&1', 0)
- #exit code is !=0 even if ptest-runner executes because some ptest tests fail.
- self.assertTrue(runnerstatus != 127, msg="Cannot execute ptest-runner!")
- self.target.copy_from('/tmp/ptest.log', self.ptest_log)
- shutil.copyfile(self.ptest_log, "ptest.log")
-
- result = self.parse_ptest("ptest.log")
- log_results_to_location = "./results"
- if os.path.exists(log_results_to_location):
- shutil.rmtree(log_results_to_location)
- os.makedirs(log_results_to_location)
-
- result.log_as_files(log_results_to_location, test_status = ['pass','fail'])
diff --git a/meta/lib/oeqa/runtime/_qemutiny.py b/meta/lib/oeqa/runtime/_qemutiny.py
deleted file mode 100644
index a3c29f3572..0000000000
--- a/meta/lib/oeqa/runtime/_qemutiny.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import unittest
-from oeqa.oetest import oeRuntimeTest
-from oeqa.utils.qemutinyrunner import *
-
-class QemuTinyTest(oeRuntimeTest):
-
- def test_boot_tiny(self):
- (status, output) = self.target.run_serial('uname -a')
- self.assertTrue("yocto-tiny" in output, msg="Cannot detect poky tiny boot!") \ No newline at end of file
diff --git a/meta/lib/oeqa/runtime/buildcvs.py b/meta/lib/oeqa/runtime/buildcvs.py
deleted file mode 100644
index fe6cbfbcd5..0000000000
--- a/meta/lib/oeqa/runtime/buildcvs.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-from oeqa.utils.targetbuild import TargetBuildProject
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("tools-sdk"):
- skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
-
-class BuildCvsTest(oeRuntimeTest):
-
- @classmethod
- def setUpClass(self):
- self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
- "http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2")
- self.project.download_archive()
-
- @testcase(205)
- @skipUnlessPassed("test_ssh")
- def test_cvs(self):
- self.assertEqual(self.project.run_configure(), 0,
- msg="Running configure failed")
-
- self.assertEqual(self.project.run_make(), 0,
- msg="Running make failed")
-
- self.assertEqual(self.project.run_install(), 0,
- msg="Running make install failed")
-
- @classmethod
- def tearDownClass(self):
- self.project.clean()
diff --git a/meta/lib/oeqa/runtime/buildgalculator.py b/meta/lib/oeqa/runtime/buildgalculator.py
deleted file mode 100644
index 28ba29e5c2..0000000000
--- a/meta/lib/oeqa/runtime/buildgalculator.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-from oeqa.utils.targetbuild import TargetBuildProject
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("tools-sdk"):
- skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
-
-class GalculatorTest(oeRuntimeTest):
- @skipUnlessPassed("test_ssh")
- def test_galculator(self):
- try:
- project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
- "http://galculator.mnim.org/downloads/galculator-2.1.4.tar.bz2")
- project.download_archive()
-
- self.assertEqual(project.run_configure(), 0,
- msg="Running configure failed")
-
- self.assertEqual(project.run_make(), 0,
- msg="Running make failed")
- finally:
- project.clean()
diff --git a/meta/lib/oeqa/runtime/buildiptables.py b/meta/lib/oeqa/runtime/buildiptables.py
deleted file mode 100644
index bc75d0a0c7..0000000000
--- a/meta/lib/oeqa/runtime/buildiptables.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-from oeqa.utils.targetbuild import TargetBuildProject
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("tools-sdk"):
- skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
-
-class BuildIptablesTest(oeRuntimeTest):
-
- @classmethod
- def setUpClass(self):
- self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
- "http://downloads.yoctoproject.org/mirror/sources/iptables-1.4.13.tar.bz2")
- self.project.download_archive()
-
- @testcase(206)
- @skipUnlessPassed("test_ssh")
- def test_iptables(self):
- self.assertEqual(self.project.run_configure(), 0,
- msg="Running configure failed")
-
- self.assertEqual(self.project.run_make(), 0,
- msg="Running make failed")
-
- self.assertEqual(self.project.run_install(), 0,
- msg="Running make install failed")
-
- @classmethod
- def tearDownClass(self):
- self.project.clean()
diff --git a/meta/lib/oeqa/runtime/case.py b/meta/lib/oeqa/runtime/case.py
new file mode 100644
index 0000000000..f036982e1f
--- /dev/null
+++ b/meta/lib/oeqa/runtime/case.py
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.core.case import OETestCase
+from oeqa.utils.package_manager import install_package, uninstall_package
+
+class OERuntimeTestCase(OETestCase):
+ # target instance set by OERuntimeTestLoader.
+ target = None
+
+ def setUp(self):
+ super(OERuntimeTestCase, self).setUp()
+ install_package(self)
+
+ def tearDown(self):
+ super(OERuntimeTestCase, self).tearDown()
+ uninstall_package(self)
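OERuntimeTestCase is the base class for all the runtime cases added below: the loader injects target, and setUp()/tearDown() give every test automatic package install/removal driven by its decorators. A minimal sketch of a case built on it (the echo command is illustrative only):

    from oeqa.runtime.case import OERuntimeTestCase
    from oeqa.core.decorator.depends import OETestDepends

    class ExampleTest(OERuntimeTestCase):

        @OETestDepends(['ssh.SSHTest.test_ssh'])
        def test_echo(self):
            # self.target is the device under test; run() returns (status, output).
            status, output = self.target.run('echo hello')
            self.assertEqual(status, 0, msg=output)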
diff --git a/meta/lib/oeqa/runtime/cases/_qemutiny.py b/meta/lib/oeqa/runtime/cases/_qemutiny.py
new file mode 100644
index 0000000000..6886e36502
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/_qemutiny.py
@@ -0,0 +1,12 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+
+class QemuTinyTest(OERuntimeTestCase):
+
+ def test_boot_tiny(self):
+ status, output = self.target.run_serial('uname -a')
+ msg = "Cannot detect poky tiny boot!"
+ self.assertTrue("yocto-tiny" in output, msg)
diff --git a/meta/lib/oeqa/runtime/cases/apt.py b/meta/lib/oeqa/runtime/cases/apt.py
new file mode 100644
index 0000000000..c5378d90c3
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/apt.py
@@ -0,0 +1,53 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+from oeqa.utils.httpserver import HTTPService
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class AptTest(OERuntimeTestCase):
+
+ def pkg(self, command, expected = 0):
+ command = 'apt-get %s' % command
+ status, output = self.target.run(command, 1500)
+ message = os.linesep.join([command, output])
+ self.assertEqual(status, expected, message)
+ return output
+
+class AptRepoTest(AptTest):
+
+ @classmethod
+ def setUpClass(cls):
+ service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_DEB'], 'all')
+ cls.repo_server = HTTPService(service_repo,
+ '0.0.0.0', port=cls.tc.target.server_port,
+ logger=cls.tc.logger)
+ cls.repo_server.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.repo_server.stop()
+
+ def setup_source_config_for_package_install(self):
+ apt_get_source_server = 'http://%s:%s/' % (self.tc.target.server_ip, self.repo_server.port)
+ apt_get_sourceslist_dir = '/etc/apt/'
+ self.target.run('cd %s; echo deb %s ./ > sources.list' % (apt_get_sourceslist_dir, apt_get_source_server))
+
+ def cleanup_source_config_for_package_install(self):
+ apt_get_sourceslist_dir = '/etc/apt/'
+ self.target.run('cd %s; rm sources.list' % (apt_get_sourceslist_dir))
+
+ @skipIfNotFeature('package-management',
+ 'Test requires package-management to be in IMAGE_FEATURES')
+ @skipIfNotDataVar('IMAGE_PKGTYPE', 'deb',
+ 'DEB is not the primary package manager')
+ @OEHasPackage(['apt'])
+ def test_apt_install_from_repo(self):
+ self.setup_source_config_for_package_install()
+ self.pkg('update')
+ self.pkg('remove --yes run-postinsts-dev')
+ self.pkg('install --yes --allow-unauthenticated run-postinsts-dev')
+ self.cleanup_source_config_for_package_install()
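setup_source_config_for_package_install() points apt at the deployed deb feed as a flat repository; assuming the HTTP server ends up on 192.168.7.1:38000, the generated /etc/apt/sources.list holds the single line:

    deb http://192.168.7.1:38000/ ./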
diff --git a/meta/lib/oeqa/runtime/cases/boot.py b/meta/lib/oeqa/runtime/cases/boot.py
new file mode 100644
index 0000000000..2142f400a0
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/boot.py
@@ -0,0 +1,33 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from subprocess import Popen, PIPE
+import time
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oetimeout import OETimeout
+from oeqa.core.decorator.data import skipIfQemu
+
+class BootTest(OERuntimeTestCase):
+
+ @OETimeout(120)
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_reboot(self):
+ output = ''
+ count = 0
+ (status, output) = self.target.run('reboot -h')
+ while count < 5:
+ time.sleep(5)
+ cmd = 'ping -c 1 %s' % self.target.ip
+ proc = Popen(cmd, shell=True, stdout=PIPE)
+ output += proc.communicate()[0].decode('utf-8')
+ if proc.poll() == 0:
+ count += 1
+ else:
+ count = 0
+ msg = ('Expected 5 consecutive successful pings, got %d.\n'
+ 'ping output is:\n%s' % (count, output))
+ self.assertEqual(count, 5, msg = msg)
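test_reboot declares the board back up only after five consecutive successful pings, resetting the counter on any failure; the OETimeout bounds the whole wait. The same loop as a standalone sketch (helper name and defaults are illustrative):

    import subprocess
    import time

    def wait_for_host(host, needed=5, delay=5, timeout=120):
        # Count consecutive successful pings; any failure resets the count.
        count = 0
        deadline = time.time() + timeout
        while count < needed:
            if time.time() > deadline:
                return False
            time.sleep(delay)
            ok = subprocess.call(['ping', '-c', '1', host],
                                 stdout=subprocess.DEVNULL) == 0
            count = count + 1 if ok else 0
        return True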
diff --git a/meta/lib/oeqa/runtime/cases/buildcpio.py b/meta/lib/oeqa/runtime/cases/buildcpio.py
new file mode 100644
index 0000000000..f4e871e421
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/buildcpio.py
@@ -0,0 +1,32 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+from oeqa.runtime.utils.targetbuildproject import TargetBuildProject
+
+class BuildCpioTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ uri = 'https://downloads.yoctoproject.org/mirror/sources/cpio-2.12.tar.gz'
+ cls.project = TargetBuildProject(cls.tc.target,
+ uri,
+ dl_dir = cls.tc.td['DL_DIR'])
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.project.clean()
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['gcc'])
+ @OEHasPackage(['make'])
+ @OEHasPackage(['autoconf'])
+ def test_cpio(self):
+ self.project.download_archive()
+ self.project.run_configure()
+ self.project.run_make()
+ self.project.run_install()
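buildcpio above and the buildgalculator/buildlzip cases below all follow the same shape: fetch a release tarball through TargetBuildProject, then configure/make(/install) on the target, gated on the toolchain packages. A template for adding another such case (the zlib URL is illustrative only):

    from oeqa.runtime.case import OERuntimeTestCase
    from oeqa.core.decorator.depends import OETestDepends
    from oeqa.runtime.decorator.package import OEHasPackage
    from oeqa.runtime.utils.targetbuildproject import TargetBuildProject

    class BuildZlibTest(OERuntimeTestCase):

        @classmethod
        def setUpClass(cls):
            uri = 'https://downloads.yoctoproject.org/mirror/sources/zlib-1.2.11.tar.gz'
            cls.project = TargetBuildProject(cls.tc.target, uri,
                                             dl_dir=cls.tc.td['DL_DIR'])

        @classmethod
        def tearDownClass(cls):
            cls.project.clean()

        @OETestDepends(['ssh.SSHTest.test_ssh'])
        @OEHasPackage(['gcc'])
        @OEHasPackage(['make'])
        def test_zlib(self):
            self.project.download_archive()
            self.project.run_configure()
            self.project.run_make()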
diff --git a/meta/lib/oeqa/runtime/cases/buildgalculator.py b/meta/lib/oeqa/runtime/cases/buildgalculator.py
new file mode 100644
index 0000000000..e5cc3e2888
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/buildgalculator.py
@@ -0,0 +1,32 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+from oeqa.runtime.utils.targetbuildproject import TargetBuildProject
+
+class GalculatorTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ uri = 'http://galculator.mnim.org/downloads/galculator-2.1.4.tar.bz2'
+ cls.project = TargetBuildProject(cls.tc.target,
+ uri,
+ dl_dir = cls.tc.td['DL_DIR'])
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.project.clean()
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['gcc'])
+ @OEHasPackage(['make'])
+ @OEHasPackage(['autoconf'])
+ @OEHasPackage(['gtk+3'])
+ def test_galculator(self):
+ self.project.download_archive()
+ self.project.run_configure()
+ self.project.run_make()
diff --git a/meta/lib/oeqa/runtime/cases/buildlzip.py b/meta/lib/oeqa/runtime/cases/buildlzip.py
new file mode 100644
index 0000000000..bc70b41461
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/buildlzip.py
@@ -0,0 +1,34 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+from oeqa.runtime.utils.targetbuildproject import TargetBuildProject
+
+class BuildLzipTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ uri = 'http://downloads.yoctoproject.org/mirror/sources'
+ uri = '%s/lzip-1.19.tar.gz' % uri
+ cls.project = TargetBuildProject(cls.tc.target,
+ uri,
+ dl_dir = cls.tc.td['DL_DIR'])
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.project.clean()
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['gcc'])
+ @OEHasPackage(['make'])
+ @OEHasPackage(['autoconf'])
+ def test_lzip(self):
+ self.project.download_archive()
+ self.project.run_configure()
+ self.project.run_make()
+ self.project.run_install()
+
diff --git a/meta/lib/oeqa/runtime/cases/connman.py b/meta/lib/oeqa/runtime/cases/connman.py
new file mode 100644
index 0000000000..f0d15fac9b
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/connman.py
@@ -0,0 +1,31 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class ConnmanTest(OERuntimeTestCase):
+
+ def service_status(self, service):
+ if 'systemd' in self.tc.td['DISTRO_FEATURES']:
+ (_, output) = self.target.run('systemctl status -l %s' % service)
+ return output
+ else:
+ return "Unable to get status or logs for %s" % service
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(["connman"])
+ def test_connmand_help(self):
+ (status, output) = self.target.run('/usr/sbin/connmand --help')
+ msg = 'Failed to get connman help. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['connman.ConnmanTest.test_connmand_help'])
+ def test_connmand_running(self):
+ cmd = '%s | grep [c]onnmand' % self.tc.target_cmds['ps']
+ (status, output) = self.target.run(cmd)
+ if status != 0:
+ self.logger.info(self.service_status("connman"))
+ self.fail("No connmand process running")
diff --git a/meta/lib/oeqa/runtime/cases/date.py b/meta/lib/oeqa/runtime/cases/date.py
new file mode 100644
index 0000000000..7750a7293f
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/date.py
@@ -0,0 +1,42 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import re
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class DateTest(OERuntimeTestCase):
+
+ def setUp(self):
+ if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
+ self.logger.debug('Stopping systemd-timesyncd daemon')
+ self.target.run('systemctl stop systemd-timesyncd')
+
+ def tearDown(self):
+ if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
+ self.logger.debug('Starting systemd-timesyncd daemon')
+ self.target.run('systemctl start systemd-timesyncd')
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['coreutils', 'busybox'])
+ def test_date(self):
+ (status, output) = self.target.run('date +"%Y-%m-%d %T"')
+ msg = 'Failed to get initial date, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+ oldDate = output
+
+ sampleDate = '"2016-08-09 10:00:00"'
+ (status, output) = self.target.run("date -s %s" % sampleDate)
+ self.assertEqual(status, 0, msg='Date set failed, output: %s' % output)
+
+ (status, output) = self.target.run("date -R")
+ p = re.match(r'Tue, 09 Aug 2016 10:00:.. \+0000', output)
+ msg = 'The date was not set correctly, output: %s' % output
+ self.assertTrue(p, msg=msg)
+
+ (status, output) = self.target.run('date -s "%s"' % oldDate)
+ msg = 'Failed to reset date, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/df.py b/meta/lib/oeqa/runtime/cases/df.py
new file mode 100644
index 0000000000..89fd0fb901
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/df.py
@@ -0,0 +1,17 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class DfTest(OERuntimeTestCase):
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['coreutils', 'busybox'])
+ def test_df(self):
+ cmd = "df -P / | sed -n '2p' | awk '{print $4}'"
+ (status,output) = self.target.run(cmd)
+ msg = 'Not enough space on image. Current size is %s' % output
+ self.assertTrue(int(output)>5120, msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/dnf.py b/meta/lib/oeqa/runtime/cases/dnf.py
new file mode 100644
index 0000000000..f40c63026e
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/dnf.py
@@ -0,0 +1,192 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import re
+import subprocess
+import bb
+from oeqa.utils.httpserver import HTTPService
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature, skipIfInDataVar, skipIfNotInDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class DnfTest(OERuntimeTestCase):
+
+ def dnf(self, command, expected = 0):
+ command = 'dnf %s' % command
+ status, output = self.target.run(command, 1500)
+ message = os.linesep.join([command, output])
+ self.assertEqual(status, expected, message)
+ return output
+
+class DnfBasicTest(DnfTest):
+
+ @skipIfNotFeature('package-management',
+ 'Test requires package-management to be in IMAGE_FEATURES')
+ @skipIfNotDataVar('IMAGE_PKGTYPE', 'rpm',
+ 'RPM is not the primary package manager')
+ @OEHasPackage(['dnf'])
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_dnf_help(self):
+ self.dnf('--help')
+
+ @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
+ def test_dnf_version(self):
+ self.dnf('--version')
+
+ @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
+ def test_dnf_info(self):
+ self.dnf('info dnf')
+
+ @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
+ def test_dnf_search(self):
+ self.dnf('search dnf')
+
+ @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
+ def test_dnf_history(self):
+ self.dnf('history')
+
+class DnfRepoTest(DnfTest):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.repo_server = HTTPService(os.path.join(cls.tc.td['WORKDIR'], 'oe-testimage-repo'),
+ '0.0.0.0', port=cls.tc.target.server_port,
+ logger=cls.tc.logger)
+ cls.repo_server.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.repo_server.stop()
+
+ def dnf_with_repo(self, command):
+ pkgarchs = os.listdir(os.path.join(self.tc.td['WORKDIR'], 'oe-testimage-repo'))
+ deploy_url = 'http://%s:%s/' %(self.target.server_ip, self.repo_server.port)
+ cmdlinerepoopts = ["--repofrompath=oe-testimage-repo-%s,%s%s" %(arch, deploy_url, arch) for arch in pkgarchs]
+
+ output = self.dnf(" ".join(cmdlinerepoopts) + " --nogpgcheck " + command)
+ return output
+
+ @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
+ def test_dnf_makecache(self):
+ self.dnf_with_repo('makecache')
+
+
+# Does not work when repo is specified on the command line
+# @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+# def test_dnf_repolist(self):
+# self.dnf_with_repo('repolist')
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+ def test_dnf_repoinfo(self):
+ self.dnf_with_repo('repoinfo')
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+ def test_dnf_install(self):
+ output = self.dnf_with_repo('list run-postinsts-dev')
+ if 'Installed Packages' in output:
+ self.dnf_with_repo('remove -y run-postinsts-dev')
+ self.dnf_with_repo('install -y run-postinsts-dev')
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_install'])
+ def test_dnf_install_dependency(self):
+ self.dnf_with_repo('remove -y run-postinsts')
+ self.dnf_with_repo('install -y run-postinsts-dev')
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_install_dependency'])
+ def test_dnf_install_from_disk(self):
+ self.dnf_with_repo('remove -y run-postinsts-dev')
+ self.dnf_with_repo('install -y --downloadonly run-postinsts-dev')
+ status, output = self.target.run('find /var/cache/dnf -name run-postinsts-dev*rpm', 1500)
+ self.assertEqual(status, 0, output)
+ self.dnf_with_repo('install -y %s' % output)
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_install_from_disk'])
+ def test_dnf_install_from_http(self):
+ output = subprocess.check_output('%s %s -name run-postinsts-dev*' % (bb.utils.which(os.getenv('PATH'), "find"),
+ os.path.join(self.tc.td['WORKDIR'], 'oe-testimage-repo')), shell=True).decode("utf-8")
+ rpm_path = output.split("/")[-2] + "/" + output.split("/")[-1]
+ url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, rpm_path)
+ self.dnf_with_repo('remove -y run-postinsts-dev')
+ self.dnf_with_repo('install -y %s' % url)
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_install'])
+ def test_dnf_reinstall(self):
+ self.dnf_with_repo('reinstall -y run-postinsts-dev')
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+ @skipIfInDataVar('DISTRO_FEATURES', 'usrmerge', 'Test runs only when usrmerge is not in DISTRO_FEATURES')
+ @OEHasPackage('busybox')
+ def test_dnf_installroot(self):
+ rootpath = '/home/root/chroot/test'
+ # Copy necessary files to avoid errors from tools not yet installed in the
+ # installroot directory.
+ self.target.run('mkdir -p %s/etc' % rootpath, 1500)
+ self.target.run('mkdir -p %s/bin %s/sbin %s/usr/bin %s/usr/sbin' % (rootpath, rootpath, rootpath, rootpath), 1500)
+ self.target.run('mkdir -p %s/dev' % rootpath, 1500)
+ #Handle different architectures lib dirs
+ self.target.run('mkdir -p %s/lib' % rootpath, 1500)
+ self.target.run('mkdir -p %s/libx32' % rootpath, 1500)
+ self.target.run('mkdir -p %s/lib64' % rootpath, 1500)
+ self.target.run('cp /lib/libtinfo.so.5 %s/lib' % rootpath, 1500)
+ self.target.run('cp /libx32/libtinfo.so.5 %s/libx32' % rootpath, 1500)
+ self.target.run('cp /lib64/libtinfo.so.5 %s/lib64' % rootpath, 1500)
+ self.target.run('cp -r /etc/rpm %s/etc' % rootpath, 1500)
+ self.target.run('cp -r /etc/dnf %s/etc' % rootpath, 1500)
+ self.target.run('cp /bin/sh %s/bin' % rootpath, 1500)
+ self.target.run('mount -o bind /dev %s/dev/' % rootpath, 1500)
+ self.dnf_with_repo('install --installroot=%s -v -y --rpmverbosity=debug busybox run-postinsts' % rootpath)
+ status, output = self.target.run('test -e %s/var/cache/dnf' % rootpath, 1500)
+ self.assertEqual(0, status, output)
+ status, output = self.target.run('test -e %s/bin/busybox' % rootpath, 1500)
+ self.assertEqual(0, status, output)
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+ @skipIfNotInDataVar('DISTRO_FEATURES', 'usrmerge', 'Test runs only when usrmerge is in DISTRO_FEATURES')
+ @OEHasPackage('busybox')
+ def test_dnf_installroot_usrmerge(self):
+ rootpath = '/home/root/chroot/test'
+ # Copy necessary files to avoid errors from tools not yet installed in the
+ # installroot directory.
+ self.target.run('mkdir -p %s/etc' % rootpath, 1500)
+ self.target.run('mkdir -p %s/usr/bin %s/usr/sbin' % (rootpath, rootpath), 1500)
+ self.target.run('ln -sf -r %s/usr/bin %s/bin' % (rootpath, rootpath), 1500)
+ self.target.run('ln -sf -r %s/usr/sbin %s/sbin' % (rootpath, rootpath), 1500)
+ self.target.run('mkdir -p %s/dev' % rootpath, 1500)
+ #Handle different architectures lib dirs
+ self.target.run('mkdir -p %s/usr/lib' % rootpath, 1500)
+ self.target.run('mkdir -p %s/usr/libx32' % rootpath, 1500)
+ self.target.run('mkdir -p %s/usr/lib64' % rootpath, 1500)
+ self.target.run('cp /lib/libtinfo.so.5 %s/usr/lib' % rootpath, 1500)
+ self.target.run('cp /libx32/libtinfo.so.5 %s/usr/libx32' % rootpath, 1500)
+ self.target.run('cp /lib64/libtinfo.so.5 %s/usr/lib64' % rootpath, 1500)
+ self.target.run('ln -sf -r %s/lib %s/usr/lib' % (rootpath,rootpath), 1500)
+ self.target.run('ln -sf -r %s/libx32 %s/usr/libx32' % (rootpath,rootpath), 1500)
+ self.target.run('ln -sf -r %s/lib64 %s/usr/lib64' % (rootpath,rootpath), 1500)
+ self.target.run('cp -r /etc/rpm %s/etc' % rootpath, 1500)
+ self.target.run('cp -r /etc/dnf %s/etc' % rootpath, 1500)
+ self.target.run('cp /bin/sh %s/bin' % rootpath, 1500)
+ self.target.run('mount -o bind /dev %s/dev/' % rootpath, 1500)
+ self.dnf_with_repo('install --installroot=%s -v -y --rpmverbosity=debug busybox run-postinsts' % rootpath)
+ status, output = self.target.run('test -e %s/var/cache/dnf' % rootpath, 1500)
+ self.assertEqual(0, status, output)
+ status, output = self.target.run('test -e %s/bin/busybox' % rootpath, 1500)
+ self.assertEqual(0, status, output)
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+ def test_dnf_exclude(self):
+ excludepkg = 'curl-dev'
+ self.dnf_with_repo('install -y curl*')
+ self.dnf('list %s' % excludepkg, 0)
+ # Avoid removing dependencies, to skip some errors on different archs and images
+ self.dnf_with_repo('remove --setopt=clean_requirements_on_remove=0 -y curl*')
+ # Check that curl-dev is not installed after removing all curl occurrences
+ status, output = self.target.run('dnf list --installed | grep %s'% excludepkg, 1500)
+ self.assertEqual(1, status, "%s was not removed, is listed as installed"%excludepkg)
+ self.dnf_with_repo('install -y --exclude=%s --exclude=curl-staticdev curl*' % excludepkg)
+ # Check that curl-dev is not installed after being excluded
+ status, output = self.target.run('dnf list --installed | grep %s'% excludepkg , 1500)
+ self.assertEqual(1, status, "%s was not excluded, is listed as installed"%excludepkg)
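dnf_with_repo() synthesizes one --repofrompath repo per deployed package-arch feed and disables GPG checking, so no repo files need to exist on the image. The option assembly in isolation, with hypothetical arch feeds and server address:

    pkgarchs = ['core2_64', 'qemux86_64', 'noarch']
    deploy_url = 'http://192.168.7.1:38000/'
    opts = ['--repofrompath=oe-testimage-repo-%s,%s%s' % (arch, deploy_url, arch)
            for arch in pkgarchs]
    cmd = ' '.join(opts) + ' --nogpgcheck makecache'
    # 'dnf ' + cmd then resolves everything from the HTTP feeds above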
diff --git a/meta/lib/oeqa/runtime/cases/gcc.py b/meta/lib/oeqa/runtime/cases/gcc.py
new file mode 100644
index 0000000000..1b6e431bf4
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/gcc.py
@@ -0,0 +1,69 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class GccCompileTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUp(cls):
+ dst = '/tmp/'
+ src = os.path.join(cls.tc.files_dir, 'test.c')
+ cls.tc.target.copyTo(src, dst)
+
+ src = os.path.join(cls.tc.runtime_files_dir, 'testmakefile')
+ cls.tc.target.copyTo(src, dst)
+
+ src = os.path.join(cls.tc.files_dir, 'test.cpp')
+ cls.tc.target.copyTo(src, dst)
+
+ @classmethod
+ def tearDown(cls):
+ files = '/tmp/test.c /tmp/test.o /tmp/test /tmp/testmakefile'
+ cls.tc.target.run('rm %s' % files)
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['gcc'])
+ def test_gcc_compile(self):
+ status, output = self.target.run('gcc /tmp/test.c -o /tmp/test -lm')
+ msg = 'gcc compile failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('/tmp/test')
+ msg = 'running compiled file failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['g++'])
+ def test_gpp_compile(self):
+ status, output = self.target.run('g++ /tmp/test.c -o /tmp/test -lm')
+ msg = 'g++ compile failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('/tmp/test')
+ msg = 'running compiled file failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['g++'])
+ def test_gpp2_compile(self):
+ status, output = self.target.run('g++ /tmp/test.cpp -o /tmp/test -lm')
+ msg = 'g++ compile failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('/tmp/test')
+ msg = 'running compiled file failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['gcc'])
+ @OEHasPackage(['make'])
+ def test_make(self):
+ status, output = self.target.run('cd /tmp; make -f testmakefile')
+ msg = 'running make failed, output %s' % output
+ self.assertEqual(status, 0, msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/gi.py b/meta/lib/oeqa/runtime/cases/gi.py
new file mode 100644
index 0000000000..42bd100a31
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/gi.py
@@ -0,0 +1,19 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class GObjectIntrospectionTest(OERuntimeTestCase):
+
+ @OETestDepends(["ssh.SSHTest.test_ssh"])
+ @OEHasPackage(["python3-pygobject"])
+ def test_python(self):
+ script = """from gi.repository import GLib; print(GLib.markup_escape_text("<testing&testing>"))"""
+ status, output = self.target.run("python3 -c '%s'" % script)
+ self.assertEqual(status, 0, msg="Python failed (%s)" % (output))
+ self.assertEqual(output, "&lt;testing&amp;testing&gt;", msg="Unexpected output (%s)" % output)
diff --git a/meta/lib/oeqa/runtime/cases/gstreamer.py b/meta/lib/oeqa/runtime/cases/gstreamer.py
new file mode 100644
index 0000000000..f735f82e3b
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/gstreamer.py
@@ -0,0 +1,18 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class GstreamerCliTest(OERuntimeTestCase):
+
+ @OEHasPackage(['gstreamer1.0'])
+ def test_gst_inspect_can_list_all_plugins(self):
+ status, output = self.target.run('gst-inspect-1.0')
+ self.assertEqual(status, 0, 'gst-inspect-1.0 failed to list the plugins.')
+
+ @OEHasPackage(['gstreamer1.0'])
+ def test_gst_launch_can_create_video_pipeline(self):
+ status, output = self.target.run('gst-launch-1.0 -v fakesrc silent=false num-buffers=3 ! fakesink silent=false')
+ self.assertEqual(status, 0, 'gst-launch-1.0 failed to run the fakesrc pipeline.')
diff --git a/meta/lib/oeqa/runtime/cases/kernelmodule.py b/meta/lib/oeqa/runtime/cases/kernelmodule.py
new file mode 100644
index 0000000000..47fd2f850c
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/kernelmodule.py
@@ -0,0 +1,46 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class KernelModuleTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUp(cls):
+ src = os.path.join(cls.tc.runtime_files_dir, 'hellomod.c')
+ dst = '/tmp/hellomod.c'
+ cls.tc.target.copyTo(src, dst)
+
+ src = os.path.join(cls.tc.runtime_files_dir, 'hellomod_makefile')
+ dst = '/tmp/Makefile'
+ cls.tc.target.copyTo(src, dst)
+
+ @classmethod
+ def tearDown(cls):
+ files = '/tmp/Makefile /tmp/hellomod.c'
+ cls.tc.target.run('rm %s' % files)
+
+ @skipIfNotFeature('tools-sdk',
+ 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OETestDepends(['gcc.GccCompileTest.test_gcc_compile'])
+ @OEHasPackage(['kernel-devsrc'])
+ @OEHasPackage(['make'])
+ @OEHasPackage(['gcc'])
+ def test_kernel_module(self):
+ cmds = [
+ 'cd /usr/src/kernel && make scripts prepare',
+ 'cd /tmp && make',
+ 'cd /tmp && insmod hellomod.ko',
+ 'lsmod | grep hellomod',
+ 'dmesg | grep Hello',
+ 'rmmod hellomod', 'dmesg | grep "Cleaning up hellomod"'
+ ]
+ for cmd in cmds:
+ status, output = self.target.run(cmd, 900)
+ self.assertEqual(status, 0, msg='\n'.join([cmd, output]))
diff --git a/meta/lib/oeqa/runtime/cases/ksample.py b/meta/lib/oeqa/runtime/cases/ksample.py
new file mode 100644
index 0000000000..a9a1620ebd
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/ksample.py
@@ -0,0 +1,231 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import time
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfNotFeature
+
+# need some kernel fragments
+# echo "KERNEL_FEATURES_append += \" features\/kernel\-sample\/kernel\-sample.scc\"" >> local.conf
+class KSample(OERuntimeTestCase):
+ def cmd_and_check(self, cmd='', match_string=''):
+ status, output = self.target.run(cmd)
+ if not match_string:
+ # send cmd
+ msg = '%s failed, %s' % (cmd, output)
+ self.assertEqual(status, 0, msg=msg)
+ else:
+ # check result
+ result = ("%s" % match_string) in output
+ msg = output
+ self.assertTrue(result, msg)
+ self.assertEqual(status, 0, cmd)
+
+ def check_arch(self, archset=''):
+ status, output = self.target.run("uname -m")
+ result = ("%s" % output) in archset
+ if not result:
+ self.skipTest("This case doesn't support %s" % output)
+
+ def check_config(self, config_opt=''):
+ cmd = "zcat /proc/config.gz | grep %s" % config_opt
+ status, output = self.target.run(cmd)
+ result = ("%s=y" % config_opt) in output
+ if not result:
+ self.skipTest("%s is not set" % config_opt)
+
+ def check_module_exist(self, path='', module_name=''):
+ status, output = self.target.run("uname -r")
+ cmd = "ls " + "/lib/modules/" + output + "/kernel/samples/" + path + module_name
+ status, output = self.target.run(cmd)
+ if status != 0:
+ error_info = module_name + " doesn't exist"
+ self.skipTest(error_info)
+
+ def kfifo_func(self, name=''):
+ module_prename = name + "-example"
+ module_name = name + "-example.ko"
+ symbol_name = name + "_example"
+
+ # make sure if module exists
+ self.check_module_exist("kfifo/", module_name)
+ # modprobe
+ self.cmd_and_check("modprobe %s" % module_prename)
+ # lsmod
+ self.cmd_and_check("lsmod | grep %s | cut -d\' \' -f1" % sysmbol_name, sysmbol_name)
+ # check result
+ self.cmd_and_check("dmesg | grep \"test passed\" ", "test passed")
+ # rmmod
+ self.cmd_and_check("rmmod %s" % module_prename)
+
+ def kprobe_func(self, name=''):
+ # check config
+ self.check_config("CONFIG_KPROBES")
+
+ module_prename = name + "_example"
+ module_name = name + "_example.ko"
+ symbol_name = module_prename
+
+ # make sure if module exists
+ self.check_module_exist("kprobes/", module_name)
+ # modprobe
+ self.cmd_and_check("modprobe %s" % module_prename)
+ # lsmod
+ self.cmd_and_check("lsmod | grep %s | cut -d\' \' -f1" % sysmbol_name, sysmbol_name)
+ # check result
+ self.cmd_and_check("dmesg | grep Planted | head -n10", "Planted")
+ # rmmod
+ self.cmd_and_check("rmmod %s" % module_prename)
+
+ def kobject_func(self, name=''):
+ module_prename = name + "_example"
+ module_name = name + "-example.ko"
+ symbol_name = module_prename
+
+ # make sure if module exists
+ self.check_module_exist("kobject/", module_name)
+ # modprobe
+ self.cmd_and_check("modprobe %s" % module_prename)
+ # lsmod
+ self.cmd_and_check("lsmod | grep %s | cut -d\' \' -f1" % sysmbol_name, sysmbol_name)
+ # check result
+ self.cmd_and_check("ls /sys/kernel/%s/" % sysmbol_name, "bar")
+ # rmmod
+ self.cmd_and_check("rmmod %s" % module_prename)
+
+class KSampleTest(KSample):
+ # kfifo
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_kfifo_test(self):
+ index = ["dma", "bytestream", "inttype", "record"]
+ for i in index:
+ self.kfifo_func(i)
+
+ # kprobe
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_kprobe_test(self):
+ self.check_arch("x86_64 i686 ppc")
+ index = ["kprobe", "kretprobe"]
+ for i in index:
+ self.kprobe_func(i)
+
+ # kobject
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_kobject_test(self):
+ index = ["kobject", "kset"]
+ for i in index:
+ self.kobject_func(i)
+
+ #trace
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_trace_events(self):
+ # check config
+ self.check_config("CONFIG_TRACING_SUPPORT")
+ # make sure if module exists
+ self.check_module_exist("trace_events/", "trace-events-sample.ko")
+ # modprobe
+ self.cmd_and_check("modprobe trace-events-sample")
+ # lsmod
+ self.cmd_and_check("lsmod | grep trace_events_sample | cut -d\' \' -f1", "trace_events_sample")
+ # check dir
+ self.cmd_and_check("ls /sys/kernel/debug/tracing/events/ | grep sample-trace", "sample-trace")
+ # enable trace
+ self.cmd_and_check("echo 1 > /sys/kernel/debug/tracing/events/sample-trace/enable")
+ self.cmd_and_check("cat /sys/kernel/debug/tracing/events/sample-trace/enable")
+ # check result
+ status = 1
+ count = 0
+ while status != 0:
+ time.sleep(1)
+ status, output = self.target.run('cat /sys/kernel/debug/tracing/trace | grep hello | head -n1 | cut -d\':\' -f2')
+ if " foo_bar" in output:
+ break
+ count = count + 1
+ if count > 5:
+ self.assertTrue(False, "Time out when check result")
+ # disable trace
+ self.cmd_and_check("echo 0 > /sys/kernel/debug/tracing/events/sample-trace/enable")
+ # clean up trace
+ self.cmd_and_check("echo > /sys/kernel/debug/tracing/trace")
+ # rmmod
+ self.cmd_and_check("rmmod trace-events-sample")
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_trace_printk(self):
+ # check config
+ self.check_config("CONFIG_TRACING_SUPPORT")
+ # make sure if module exists
+ self.check_module_exist("trace_printk/", "trace-printk.ko")
+ # modprobe
+ self.cmd_and_check("modprobe trace-printk")
+ # lsmod
+ self.cmd_and_check("lsmod | grep trace_printk | cut -d\' \' -f1", "trace_printk")
+ # check result
+ self.cmd_and_check("cat /sys/kernel/debug/tracing/trace | grep trace_printk_irq_work | head -n1 | cut -d\':\' -f2", " trace_printk_irq_work")
+ # clean up trace
+ self.cmd_and_check("echo > /sys/kernel/debug/tracing/trace")
+ # rmmod
+ self.cmd_and_check("rmmod trace-printk")
+
+ # hw breakpoint
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_hw_breakpoint_example(self):
+ # check arch
+ status, output = self.target.run("uname -m")
+ result = ("x86_64" in output) or ("aarch64" in output)
+ if not result:
+ self.skipTest("the arch %s doesn't support hw breakpoint" % output)
+ # check config
+ self.check_config("CONFIG_KALLSYMS_ALL")
+ # make sure if module exists
+ self.check_module_exist("hw_breakpoint/", "data_breakpoint.ko")
+ # modprobe
+ self.cmd_and_check("modprobe data_breakpoint")
+ # lsmod
+ self.cmd_and_check("lsmod | grep data_breakpoint | cut -d\' \' -f1", "data_breakpoint")
+ # check result
+ self.cmd_and_check("cat /var/log/messages | grep sample_hbp_handler", "sample_hbp_handler")
+ # rmmod
+ self.cmd_and_check("rmmod data_breakpoint")
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_configfs_sample(self):
+ # check config
+ status, ret = self.target.run('zcat /proc/config.gz | grep CONFIG_CONFIGFS_FS')
+ if not ["CONFIG_CONFIGFS_FS=m" in ret or "CONFIG_CONFIGFS_FS=y" in ret]:
+ self.skipTest("CONFIG error")
+ # make sure if module exists
+ self.check_module_exist("configfs/", "configfs_sample.ko")
+ # modprobe
+ self.cmd_and_check("modprobe configfs_sample")
+ # lsmod
+ self.cmd_and_check("lsmod | grep configfs_sample | cut -d\' \' -f1 | head -n1", "configfs_sample")
+
+ status = 1
+ count = 0
+ while status != 0:
+ time.sleep(1)
+ status, ret = self.target.run('cat /sys/kernel/config/01-childless/description')
+ count = count + 1
+ if count > 200:
+ self.skipTest("Time out for check dir")
+
+ # rmmod
+ self.cmd_and_check("rmmod configfs_sample")
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_cn_test(self):
+ # make sure if module exists
+ self.check_module_exist("connector/", "cn_test.ko")
+ # modprobe
+ self.cmd_and_check("modprobe cn_test")
+ # lsmod
+ self.cmd_and_check("lsmod | grep cn_test | cut -d\' \' -f1", "cn_test")
+ # check result
+ self.cmd_and_check("cat /proc/net/connector | grep cn_test | head -n1 | cut -d\' \' -f1", "cn_test")
+ # rmmod
+ self.cmd_and_check("rmmod cn_test")
diff --git a/meta/lib/oeqa/runtime/cases/ldd.py b/meta/lib/oeqa/runtime/cases/ldd.py
new file mode 100644
index 0000000000..9c2caa8f65
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/ldd.py
@@ -0,0 +1,26 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class LddTest(OERuntimeTestCase):
+
+ @OEHasPackage(["ldd"])
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_ldd(self):
+ status, output = self.target.run('which ldd')
+ msg = 'ldd does not exist in PATH: which ldd: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ cmd = ('for i in $(which ldd | xargs cat | grep "^RTLDLIST"| '
+ 'cut -d\'=\' -f2|tr -d \'"\'); '
+ 'do test -f $i && echo $i && break; done')
+ status, output = self.target.run(cmd)
+ self.assertEqual(status, 0, msg="ldd path not correct or RTLDLIST files don't exist.")
+
+ status, output = self.target.run("ldd /bin/true")
+ self.assertEqual(status, 0, msg="ldd failed to execute: %s" % output)
diff --git a/meta/lib/oeqa/runtime/cases/logrotate.py b/meta/lib/oeqa/runtime/cases/logrotate.py
new file mode 100644
index 0000000000..bfa57c534a
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/logrotate.py
@@ -0,0 +1,49 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289 testcase
+# Note that the image under test must have logrotate installed
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class LogrotateTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.tc.target.run('cp /etc/logrotate.d/wtmp $HOME/wtmp.oeqabak')
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.tc.target.run('mv -f $HOME/wtmp.oeqabak /etc/logrotate.d/wtmp && rm -rf $HOME/logrotate_dir')
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['logrotate'])
+ def test_1_logrotate_setup(self):
+ status, output = self.target.run('mkdir $HOME/logrotate_dir')
+ msg = 'Could not create logrotate_dir. Output: %s' % output
+ self.assertEqual(status, 0, msg = msg)
+
+ cmd = ('sed -i "s#wtmp {#wtmp {\\n olddir $HOME/logrotate_dir#"'
+ ' /etc/logrotate.d/wtmp')
+ status, output = self.target.run(cmd)
+ msg = ('Could not write to logrotate.d/wtmp file. Status and output: '
+ ' %s and %s' % (status, output))
+ self.assertEqual(status, 0, msg = msg)
+
+ @OETestDepends(['logrotate.LogrotateTest.test_1_logrotate_setup'])
+ def test_2_logrotate(self):
+ status, output = self.target.run('echo "create \n include /etc/logrotate.d" > /tmp/logrotate-test.conf')
+ status, output = self.target.run('logrotate -f /tmp/logrotate-test.conf')
+
+ msg = ('logrotate service could not be reloaded. Status and output: '
+ '%s and %s' % (status, output))
+ self.assertEqual(status, 0, msg = msg)
+
+ _, output = self.target.run('ls -la $HOME/logrotate_dir/ | wc -l')
+ msg = ('new logfile could not be created. List of files within log '
+ 'directory: %s' % (
+ self.target.run('ls -la $HOME/logrotate_dir')[1]))
+ self.assertTrue(int(output)>=3, msg = msg)
diff --git a/meta/lib/oeqa/runtime/cases/ltp.py b/meta/lib/oeqa/runtime/cases/ltp.py
new file mode 100644
index 0000000000..30548640bc
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/ltp.py
@@ -0,0 +1,119 @@
+# LTP runtime
+#
+# Copyright (c) 2019 MontaVista Software, LLC
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import time
+import datetime
+import pprint
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.utils.logparser import LtpParser
+
+class LtpTestBase(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.ltp_startup()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.ltp_finishup()
+
+ @classmethod
+ def ltp_startup(cls):
+ cls.sections = {}
+ cls.failmsg = ""
+ test_log_dir = os.path.join(cls.td.get('WORKDIR', ''), 'testimage')
+ timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+
+ cls.ltptest_log_dir_link = os.path.join(test_log_dir, 'ltp_log')
+ cls.ltptest_log_dir = '%s.%s' % (cls.ltptest_log_dir_link, timestamp)
+ os.makedirs(cls.ltptest_log_dir)
+
+ cls.tc.target.run("mkdir -p /opt/ltp/results")
+
+ if not hasattr(cls.tc, "extraresults"):
+ cls.tc.extraresults = {}
+ cls.extras = cls.tc.extraresults
+ cls.extras['ltpresult.rawlogs'] = {'log': ""}
+
+
+ @classmethod
+ def ltp_finishup(cls):
+ cls.extras['ltpresult.sections'] = cls.sections
+
+ # update symlink to ltp_log
+ if os.path.exists(cls.ltptest_log_dir_link):
+ os.remove(cls.ltptest_log_dir_link)
+ os.symlink(os.path.basename(cls.ltptest_log_dir), cls.ltptest_log_dir_link)
+
+ if cls.failmsg:
+ cls.fail(cls.failmsg)
+
+class LtpTest(LtpTestBase):
+
+ ltp_groups = ["math", "syscalls", "dio", "io", "mm", "ipc", "sched", "nptl", "pty", "containers", "controllers", "filecaps", "cap_bounds", "fcntl-locktests", "connectors","timers", "commands", "net.ipv6_lib", "input","fs_perms_simple"]
+
+ ltp_fs = ["fs", "fsx", "fs_bind", "fs_ext4"]
+ # skip kernel cpuhotplug
+ ltp_kernel = ["power_management_tests", "hyperthreading ", "kernel_misc", "hugetlb"]
+ ltp_groups += ltp_fs
+
+ def runltp(self, ltp_group):
+ cmd = '/opt/ltp/runltp -f %s -p -q -r /opt/ltp -l /opt/ltp/results/%s -I 1 -d /opt/ltp' % (ltp_group, ltp_group)
+ starttime = time.time()
+ (status, output) = self.target.run(cmd)
+ endtime = time.time()
+
+ with open(os.path.join(self.ltptest_log_dir, "%s-raw.log" % ltp_group), 'w') as f:
+ f.write(output)
+
+ self.extras['ltpresult.rawlogs']['log'] = self.extras['ltpresult.rawlogs']['log'] + output
+
+ # copy nice log from DUT
+ dst = os.path.join(self.ltptest_log_dir, "%s" % ltp_group )
+ remote_src = "/opt/ltp/results/%s" % ltp_group
+ (status, output) = self.target.copyFrom(remote_src, dst)
+ msg = 'File could not be copied. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ parser = LtpParser()
+ results, sections = parser.parse(dst)
+
+ runtime = int(endtime-starttime)
+ sections['duration'] = runtime
+ self.sections[ltp_group] = sections
+
+ failed_tests = {}
+ for test in results:
+ result = results[test]
+ testname = ("ltpresult." + ltp_group + "." + test)
+ self.extras[testname] = {'status': result}
+ if result == 'FAILED':
+ failed_tests[ltp_group] = test
+
+ if failed_tests:
+ self.failmsg = self.failmsg + "Failed ptests:\n%s" % pprint.pformat(failed_tests)
+
+ # LTP runtime tests
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(["ltp"])
+ def test_ltp_help(self):
+ (status, output) = self.target.run('/opt/ltp/runltp --help')
+ msg = 'Failed to get ltp help. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['ltp.LtpTest.test_ltp_help'])
+ def test_ltp_groups(self):
+ for ltp_group in self.ltp_groups:
+ self.runltp(ltp_group)
+
+ @OETestDepends(['ltp.LtpTest.test_ltp_groups'])
+ def test_ltp_runltp_cve(self):
+ self.runltp("cve")
diff --git a/meta/lib/oeqa/runtime/cases/ltp_compliance.py b/meta/lib/oeqa/runtime/cases/ltp_compliance.py
new file mode 100644
index 0000000000..ba47c78fd4
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/ltp_compliance.py
@@ -0,0 +1,98 @@
+# LTP compliance runtime
+#
+# Copyright (c) 2019 MontaVista Software, LLC
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import time
+import datetime
+import pprint
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.utils.logparser import LtpComplianceParser
+
+class LtpPosixBase(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.ltp_startup()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.ltp_finishup()
+
+ @classmethod
+ def ltp_startup(cls):
+ cls.sections = {}
+ cls.failmsg = ""
+ test_log_dir = os.path.join(cls.td.get('WORKDIR', ''), 'testimage')
+ timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+
+ cls.ltptest_log_dir_link = os.path.join(test_log_dir, 'ltpcomp_log')
+ cls.ltptest_log_dir = '%s.%s' % (cls.ltptest_log_dir_link, timestamp)
+ os.makedirs(cls.ltptest_log_dir)
+
+ cls.tc.target.run("mkdir -p /opt/ltp/results")
+
+ if not hasattr(cls.tc, "extraresults"):
+ cls.tc.extraresults = {}
+ cls.extras = cls.tc.extraresults
+ cls.extras['ltpposixresult.rawlogs'] = {'log': ""}
+
+ @classmethod
+ def ltp_finishup(cls):
+ cls.extras['ltpposixresult.sections'] = cls.sections
+
+ # update symlink to ltp_log
+ if os.path.exists(cls.ltptest_log_dir_link):
+ os.remove(cls.ltptest_log_dir_link)
+
+ os.symlink(os.path.basename(cls.ltptest_log_dir), cls.ltptest_log_dir_link)
+
+ if cls.failmsg:
+ raise cls.failureException(cls.failmsg)
+
+class LtpPosixTest(LtpPosixBase):
+ posix_groups = ["AIO", "MEM", "MSG", "SEM", "SIG", "THR", "TMR", "TPS"]
+
+ def runltp(self, posix_group):
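+ # 2>&1 | tee captures the combined output for parsing while leaving a copy under /opt/ltp/results on the target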
+ cmd = "/opt/ltp/bin/run-posix-option-group-test.sh %s 2>&1 | tee /opt/ltp/results/%s" % (posix_group, posix_group)
+ starttime = time.time()
+ (status, output) = self.target.run(cmd)
+ endtime = time.time()
+
+ with open(os.path.join(self.ltptest_log_dir, "%s" % posix_group), 'w') as f:
+ f.write(output)
+
+ self.extras['ltpposixresult.rawlogs']['log'] += output
+
+ parser = LtpComplianceParser()
+ results, sections = parser.parse(os.path.join(self.ltptest_log_dir, "%s" % posix_group))
+
+ runtime = int(endtime-starttime)
+ sections['duration'] = runtime
+ self.sections[posix_group] = sections
+
+ failed_tests = {}
+ for test in results:
+ result = results[test]
+ testname = ("ltpposixresult." + posix_group + "." + test)
+ self.extras[testname] = {'status': result}
+ if result == 'FAILED':
+ failed_tests.setdefault(posix_group, []).append(test)
+
+ if failed_tests:
+ self.failmsg = self.failmsg + "Failed POSIX compliance tests:\n%s" % pprint.pformat(failed_tests)
+
+ # LTP Posix compliance runtime tests
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(["ltp"])
+ def test_posix_groups(self):
+ for posix_group in self.posix_groups:
+ self.runltp(posix_group)
diff --git a/meta/lib/oeqa/runtime/cases/ltp_stress.py b/meta/lib/oeqa/runtime/cases/ltp_stress.py
new file mode 100644
index 0000000000..2445ffbc93
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/ltp_stress.py
@@ -0,0 +1,98 @@
+# LTP Stress runtime
+#
+# Copyright (c) 2019 MontaVista Software, LLC
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import time
+import datetime
+import pprint
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.core.decorator.data import skipIfQemu
+from oeqa.utils.logparser import LtpParser
+
+class LtpStressBase(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.ltp_startup()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.ltp_finishup()
+
+ @classmethod
+ def ltp_startup(cls):
+ cls.sections = {}
+ cls.failmsg = ""
+ test_log_dir = os.path.join(cls.td.get('WORKDIR', ''), 'testimage')
+ timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+
+ cls.ltptest_log_dir_link = os.path.join(test_log_dir, 'ltpstress_log')
+ cls.ltptest_log_dir = '%s.%s' % (cls.ltptest_log_dir_link, timestamp)
+ os.makedirs(cls.ltptest_log_dir)
+
+ cls.tc.target.run("mkdir -p /opt/ltp/results")
+
+ if not hasattr(cls.tc, "extraresults"):
+ cls.tc.extraresults = {}
+ cls.extras = cls.tc.extraresults
+ cls.extras['ltpstressresult.rawlogs'] = {'log': ""}
+
+ @classmethod
+ def ltp_finishup(cls):
+ cls.extras['ltpstressresult.sections'] = cls.sections
+
+ # update symlink to ltp_log
+ if os.path.exists(cls.ltptest_log_dir_link):
+ os.remove(cls.ltptest_log_dir_link)
+
+ os.symlink(os.path.basename(cls.ltptest_log_dir), cls.ltptest_log_dir_link)
+
+ if cls.failmsg:
+ raise cls.failureException(cls.failmsg)
+
+class LtpStressTest(LtpStressBase):
+
+ def runltp(self, stress_group):
+ cmd = '/opt/ltp/runltp -f %s -p -q 2>&1 | tee /opt/ltp/results/%s' % (stress_group, stress_group)
+ starttime = time.time()
+ (status, output) = self.target.run(cmd)
+ endtime = time.time()
+ with open(os.path.join(self.ltptest_log_dir, "%s" % stress_group), 'w') as f:
+ f.write(output)
+
+ self.extras['ltpstressresult.rawlogs']['log'] += output
+
+ parser = LtpParser()
+ results, sections = parser.parse(os.path.join(self.ltptest_log_dir, "%s" % stress_group))
+
+ runtime = int(endtime-starttime)
+ sections['duration'] = runtime
+ self.sections[stress_group] = sections
+
+ failed_tests = {}
+ for test in results:
+ result = results[test]
+ testname = ("ltpstressresult." + stress_group + "." + test)
+ self.extras[testname] = {'status': result}
+ if result == 'FAILED':
+ failed_tests.setdefault(stress_group, []).append(test)
+
+ if failed_tests:
+ self.failmsg = self.failmsg + "Failed LTP stress tests:\n%s" % pprint.pformat(failed_tests)
+
+ # LTP stress runtime tests
+ #
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(["ltp"])
+ def test_ltp_stress(self):
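+ # fork12 forks until resources are exhausted and can wedge small targets, so drop it from the crashme group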
+ self.tc.target.run("sed -i -r 's/^fork12.*//' /opt/ltp/runtest/crashme")
+ self.runltp('crashme')
diff --git a/meta/lib/oeqa/runtime/cases/multilib.py b/meta/lib/oeqa/runtime/cases/multilib.py
new file mode 100644
index 0000000000..62e662b01c
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/multilib.py
@@ -0,0 +1,45 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfNotInDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class MultilibTest(OERuntimeTestCase):
+
+ def archtest(self, binary, arch):
+ """
+ Check that ``binary`` has the ELF class ``arch`` (e.g. ELF32/ELF64).
+ """
+
+ status, output = self.target.run('readelf -h %s' % binary)
+ self.assertEqual(status, 0, 'Failed to readelf %s' % binary)
+
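+ # readelf -h prints a line like " Class: ELF64"; take its value column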
+ classes = [line.split()[1] for line in output.split('\n') if "Class:" in line]
+ if classes:
+ theclass = classes[0]
+ else:
+ self.fail('Cannot parse readelf. Output:\n%s' % output)
+
+ msg = "%s isn't %s (is %s)" % (binary, arch, theclass)
+ self.assertEqual(theclass, arch, msg=msg)
+
+ @skipIfNotInDataVar('MULTILIBS', 'multilib:lib32',
+ "This isn't a multilib:lib32 image")
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['binutils'])
+ @OEHasPackage(['lib32-libc6'])
+ def test_check_multilib_libc(self):
+ """
+ Check that a multilib image has both 32-bit and 64-bit libc installed.
+ """
+ self.archtest("/lib/libc.so.6", "ELF32")
+ self.archtest("/lib64/libc.so.6", "ELF64")
+
+ @OETestDepends(['multilib.MultilibTest.test_check_multilib_libc'])
+ @OEHasPackage(['lib32-connman', '!connman'])
+ def test_file_connman(self):
+ self.archtest("/usr/sbin/connmand", "ELF32")
diff --git a/meta/lib/oeqa/runtime/cases/oe_syslog.py b/meta/lib/oeqa/runtime/cases/oe_syslog.py
new file mode 100644
index 0000000000..f3c2bedbaf
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/oe_syslog.py
@@ -0,0 +1,134 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+import time
+
+class SyslogTest(OERuntimeTestCase):
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(["busybox-syslog", "sysklogd", "rsyslog", "syslog-ng"])
+ def test_syslog_running(self):
+ status, output = self.target.run(self.tc.target_cmds['ps'])
+ msg = "Failed to execute %s" % self.tc.target_cmds['ps']
+ self.assertEqual(status, 0, msg=msg)
+ msg = "No syslog daemon process; %s output:\n%s" % (self.tc.target_cmds['ps'], output)
+ hasdaemon = "syslogd" in output or "syslog-ng" in output or "svlogd" in output
+ self.assertTrue(hasdaemon, msg=msg)
+
+class SyslogTestConfig(OERuntimeTestCase):
+
+ def verify_not_running(self, pids):
+ for pid in pids:
+ status, err_output = self.target.run('kill -0 %s' % pid)
+ if not status:
+ self.logger.debug("previous %s is still running" % pid)
+ return 1
+ return 0
+
+ def verify_running(self, names):
+ pids = []
+ for name in names:
+ status, pid = self.target.run('pidof %s' % name)
+ if status:
+ self.logger.debug("%s is not running" % name)
+ return 1, pids
+ pids.append(pid)
+ return 0, pids
+
+
+ def restart_sanity(self, names, restart_cmd, pidchange=True):
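+ # Restart the given daemons with restart_cmd and, unless pidchange is False,
+ # wait up to 30 seconds for the old PIDs to die and new ones to appear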
+ status, original_pids = self.verify_running(names)
+ if status:
+ return False
+
+ status, output = self.target.run(restart_cmd)
+
+ msg = ('Could not restart %s service. Status and output: %s and %s' % (names, status, output))
+ self.assertEqual(status, 0, msg)
+
+ if not pidchange:
+ return True
+
+ # Watch for failures; the most likely cause is a race between the old daemon shutting down and the new one starting up
+ timeout = time.time() + 30
+
+ restarted = False
+ status = ""
+ while time.time() < timeout:
+ # Verify the previous ones are no longer running
+ status = self.verify_not_running(original_pids)
+ if status:
+ status = "Original syslog processes still running"
+ continue
+
+ status, pids = self.verify_running(names)
+ if status:
+ status = "New syslog processes not running"
+ continue
+
+ # Everything is fine now, so exit to continue the test
+ restarted = True
+ break
+
+ msg = ('%s didn\'t appear to restart: %s' % (names, status))
+ self.assertTrue(restarted, msg)
+
+ return True
+
+ @OETestDepends(['oe_syslog.SyslogTest.test_syslog_running'])
+ def test_syslog_logger(self):
+ status, output = self.target.run('logger foobar')
+ msg = "Can't log into syslog. Output: %s " % output
+ self.assertEqual(status, 0, msg=msg)
+
+ # There is no way to flush the logger to disk in all cases
+ time.sleep(1)
+
+ status, output = self.target.run('grep foobar /var/log/messages')
+ if status != 0:
+ if self.tc.td.get("VIRTUAL-RUNTIME_init_manager") == "systemd":
+ status, output = self.target.run('journalctl -o cat | grep foobar')
+ else:
+ status, output = self.target.run('logread | grep foobar')
+ msg = ('Test log string not found in /var/log/messages, journalctl or logread.'
+ ' Output: %s ' % output)
+ self.assertEqual(status, 0, msg=msg)
+
+
+ @OETestDepends(['oe_syslog.SyslogTest.test_syslog_running'])
+ def test_syslog_restart(self):
+ if self.restart_sanity(['systemd-journald'], 'systemctl restart syslog.service', pidchange=False):
+ pass
+ elif self.restart_sanity(['rsyslogd'], '/etc/init.d/rsyslog restart'):
+ pass
+ elif self.restart_sanity(['syslogd', 'klogd'], '/etc/init.d/syslog restart'):
+ pass
+ else:
+ self.logger.info("No syslog found to restart, ignoring")
+
+
+ @OETestDepends(['oe_syslog.SyslogTestConfig.test_syslog_logger'])
+ @OEHasPackage(["busybox-syslog"])
+ @skipIfDataVar('VIRTUAL-RUNTIME_init_manager', 'systemd',
+ 'Not appropriate for systemd image')
+ def test_syslog_startup_config(self):
+ cmd = 'echo "LOGFILE=/var/log/test" >> /etc/syslog-startup.conf'
+ self.target.run(cmd)
+
+ self.test_syslog_restart()
+
+ cmd = 'logger foobar && grep foobar /var/log/test'
+ status, output = self.target.run(cmd)
+ msg = 'Test log string not found. Output: %s ' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ cmd = "sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf"
+ self.target.run(cmd)
+ self.test_syslog_restart()
diff --git a/meta/lib/oeqa/runtime/cases/opkg.py b/meta/lib/oeqa/runtime/cases/opkg.py
new file mode 100644
index 0000000000..9cfee1cd88
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/opkg.py
@@ -0,0 +1,59 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+from oeqa.utils.httpserver import HTTPService
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature, skipIfFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class OpkgTest(OERuntimeTestCase):
+
+ def pkg(self, command, expected=0):
+ command = 'opkg %s' % command
+ status, output = self.target.run(command, 1500)
+ message = os.linesep.join([command, output])
+ self.assertEqual(status, expected, message)
+ return output
+
+class OpkgRepoTest(OpkgTest):
+
+ @classmethod
+ def setUp(cls):
+ allarchfeed = 'all'
+ if cls.tc.td["MULTILIB_VARIANTS"]:
+ allarchfeed = cls.tc.td["TUNE_PKGARCH"]
+ service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_IPK'], allarchfeed)
+ cls.repo_server = HTTPService(service_repo,
+ '0.0.0.0', port=cls.tc.target.server_port,
+ logger=cls.tc.logger)
+ cls.repo_server.start()
+
+ @classmethod
+ def tearDown(cls):
+ cls.repo_server.stop()
+
+ def setup_source_config_for_package_install(self):
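+ # register the feed by appending a "src/gz all <url>" source line to opkg.conf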
+ opkg_server = 'http://%s:%s/' % (self.tc.target.server_ip, self.repo_server.port)
+ opkg_conf_dir = '/etc/opkg/'
+ self.target.run('cd %s; echo src/gz all %s >> opkg.conf' % (opkg_conf_dir, opkg_server))
+
+ def cleanup_source_config_for_package_install(self):
+ opkg_conf_dir = '/etc/opkg/'
+ self.target.run('cd %s; sed -i "/^src/d" opkg.conf' % (opkg_conf_dir))
+
+ @skipIfNotFeature('package-management',
+ 'Test requires package-management to be in IMAGE_FEATURES')
+ @skipIfNotDataVar('IMAGE_PKGTYPE', 'ipk',
+ 'IPK is not the primary package manager')
+ @skipIfFeature('read-only-rootfs',
+ 'Test does not work with read-only-rootfs in IMAGE_FEATURES')
+ @OEHasPackage(['opkg'])
+ def test_opkg_install_from_repo(self):
+ self.setup_source_config_for_package_install()
+ self.pkg('update')
+ self.pkg('remove run-postinsts-dev')
+ self.pkg('install run-postinsts-dev')
+ self.cleanup_source_config_for_package_install()
diff --git a/meta/lib/oeqa/runtime/cases/pam.py b/meta/lib/oeqa/runtime/cases/pam.py
new file mode 100644
index 0000000000..271a1943e3
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/pam.py
@@ -0,0 +1,35 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+# This test covers the https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=287 testcase.
+# Note that the image under test must have "pam" in DISTRO_FEATURES
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfNotFeature
+
+class PamBasicTest(OERuntimeTestCase):
+
+ @skipIfNotFeature('pam', 'Test requires pam to be in DISTRO_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_pam(self):
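+ # login prints its usage text but exits 1 when given --help, so status 1 is the expected result here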
+ status, output = self.target.run('login --help')
+ msg = ('login command does not work as expected. '
+ 'Status and output:%s and %s' % (status, output))
+ self.assertEqual(status, 1, msg = msg)
+
+ status, output = self.target.run('passwd --help')
+ msg = ('passwd command does not work as expected. '
+ 'Status and output:%s and %s' % (status, output))
+ self.assertEqual(status, 0, msg = msg)
+
+ status, output = self.target.run('su --help')
+ msg = ('su command does not work as expected. '
+ 'Status and output:%s and %s' % (status, output))
+ self.assertEqual(status, 0, msg = msg)
+
+ status, output = self.target.run('useradd --help')
+ msg = ('useradd command does not work as expected. '
+ 'Status and output:%s and %s' % (status, output))
+ self.assertEqual(status, 0, msg = msg)
diff --git a/meta/lib/oeqa/runtime/parselogs.py b/meta/lib/oeqa/runtime/cases/parselogs.py
index 2d4a13375b..b7c7b30adf 100644
--- a/meta/lib/oeqa/runtime/parselogs.py
+++ b/meta/lib/oeqa/runtime/cases/parselogs.py
@@ -1,8 +1,15 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
-import unittest
-import subprocess
-from oeqa.oetest import oeRuntimeTest
-from oeqa.utils.decorators import *
+
+from subprocess import check_output
+from shutil import rmtree
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
#in the future these lists could be moved outside of module
errors = ["error", "cannot", "can\'t", "failed"]
@@ -43,6 +50,13 @@ common_errors = [
"controller can't do DEVSLP, turning off",
"stmmac_dvr_probe: warning: cannot get CSR clock",
"error: couldn\'t mount because of unsupported optional features",
+ "GPT: Use GNU Parted to correct GPT errors",
+ "Cannot set xattr user.Librepo.DownloadInProgress",
+ "Failed to read /var/lib/nfs/statd/state: Success",
+ "error retry time-out =",
+ "logind: cannot setup systemd-logind helper (-61), using legacy fallback",
+ "Error changing net interface name 'eth0' to ",
+ "Cannot find a map file"
]
video_related = [
@@ -57,6 +71,10 @@ x86_common = [
'pmd_set_huge: Cannot satisfy',
'failed to setup card detect gpio',
'amd_nb: Cannot enumerate AMD northbridges',
+ 'failed to retrieve link info, disabling eDP',
+ 'Direct firmware load for iwlwifi',
+ 'Direct firmware load for regulatory.db',
+ 'failed to load regulatory.db',
] + common_errors
qemux86_common = [
@@ -64,15 +82,15 @@ qemux86_common = [
"fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.",
"can't claim BAR ",
'amd_nb: Cannot enumerate AMD northbridges',
- 'uvesafb: 5000 ms task timeout error',
- 'detected fb_set_par error, error code: -22',
- 'Getting VBE info block failed',
- 'vbe_init() failed with -22',
- 'uvesafb: mode switch failed',
+ 'uvesafb: 5000 ms task timeout, infinitely waiting',
'tsc: HPET/PMTIMER calibration failed',
+ "modeset(0): Failed to initialize the DRI2 extension",
+ "uvesafb: cannot reserve video memory at",
+ "uvesafb: probe of uvesafb.0 failed with error",
+ "glamor initialization failed",
] + common_errors
-ignore_errors = {
+ignore_errors = {
'default' : common_errors,
'qemux86' : [
'Failed to access perfctr msr (MSR',
@@ -82,9 +100,11 @@ ignore_errors = {
'qemumips' : [
'Failed to load module "glx"',
'pci 0000:00:00.0: [Firmware Bug]: reg 0x..: invalid BAR (can\'t size)',
+ 'cacheinfo: Failed to find cpu0 device node',
] + common_errors,
'qemumips64' : [
'pci 0000:00:00.0: [Firmware Bug]: reg 0x..: invalid BAR (can\'t size)',
+ 'cacheinfo: Failed to find cpu0 device node',
] + common_errors,
'qemuppc' : [
'PCI 0000:00 Cannot reserve Legacy IO [io 0x0000-0x0fff]',
@@ -103,6 +123,7 @@ ignore_errors = {
'OF: amba_device_add() failed (-19) for /amba/sctl@101e0000',
'OF: amba_device_add() failed (-19) for /amba/watchdog@101e1000',
'OF: amba_device_add() failed (-19) for /amba/sci@101f0000',
+ 'OF: amba_device_add() failed (-19) for /amba/spi@101f4000',
'OF: amba_device_add() failed (-19) for /amba/ssp@101f4000',
'OF: amba_device_add() failed (-19) for /amba/fpga/sci@a000',
'Failed to initialize \'/amba/timer@101e3000\': -22',
@@ -114,15 +135,6 @@ ignore_errors = {
'dmi: Firmware registration failed.',
'irq: type mismatch, failed to map hwirq-27 for /intc',
] + common_errors,
- 'emenlow' : [
- '[Firmware Bug]: ACPI: No _BQC method, cannot determine initial brightness',
- '(EE) Failed to load module "psb"',
- '(EE) Failed to load module psb',
- '(EE) Failed to load module "psbdrv"',
- '(EE) Failed to load module psbdrv',
- '(EE) open /dev/fb0: No such file or directory',
- '(EE) AIGLX: reverting to software rendering',
- ] + x86_common,
'intel-core2-32' : [
'ACPI: No _BQC method, cannot determine initial brightness',
'[Firmware Bug]: ACPI: No _BQC method, cannot determine initial brightness',
@@ -135,165 +147,221 @@
'dmi: Firmware registration failed.',
'ioremap error for 0x78',
] + x86_common,
- 'intel-corei7-64' : x86_common,
- 'crownbay' : x86_common,
+ 'intel-corei7-64' : [
+ 'can\'t set Max Payload Size to 256',
+ 'intel_punit_ipc: can\'t request region for resource',
+ '[drm] parse error at position 4 in video mode \'efifb\'',
+ 'ACPI Error: Could not enable RealTimeClock event',
+ 'ACPI Warning: Could not enable fixed event - RealTimeClock',
+ 'hci_intel INT33E1:00: Unable to retrieve gpio',
+ 'hci_intel: probe of INT33E1:00 failed',
+ 'can\'t derive routing for PCI INT A',
+ 'failed to read out thermal zone',
+ 'Bluetooth: hci0: Setting Intel event mask failed',
+ 'ttyS2 - failed to request DMA',
+ 'Bluetooth: hci0: Failed to send firmware data (-38)',
+ 'atkbd serio0: Failed to enable keyboard on isa0060/serio0',
+ ] + x86_common,
'genericx86' : x86_common,
'genericx86-64' : [
'Direct firmware load for i915',
'Failed to load firmware i915',
'Failed to fetch GuC',
'Failed to initialize GuC',
+ 'Failed to load DMC firmware',
'The driver is built-in, so to load the firmware you need to',
] + x86_common,
'edgerouter' : [
+ 'not creating \'/sys/firmware/fdt\'',
+ 'Failed to find cpu0 device node',
'Fatal server error:',
+ 'Server terminated with error',
] + common_errors,
- 'jasperforest' : [
- 'Activated service \'org.bluez\' failed:',
- 'Unable to find NFC netlink family',
+ 'beaglebone-yocto' : [
+ 'Direct firmware load for regulatory.db',
+ 'failed to load regulatory.db',
+ 'l4_wkup_cm',
+ 'Failed to load module "glx"',
+ 'Failed to make EGL context current',
+ 'glamor initialization failed',
+ ] + common_errors,
+ 'mpc8315e-rdb' : [
+ 'of_irq_parse_pci: failed with',
+ 'Fatal server error:',
+ 'Server terminated with error',
] + common_errors,
}
log_locations = ["/var/log/","/var/log/dmesg", "/tmp/dmesg_output.log"]
-class ParseLogsTest(oeRuntimeTest):
+class ParseLogsTest(OERuntimeTestCase):
@classmethod
- def setUpClass(self):
- self.errors = errors
+ def setUpClass(cls):
+ cls.errors = errors
# When systemd is enabled we need to notice errors on
# circular dependencies in units.
- if self.hasFeature("systemd"):
- self.errors.extend([
+ if 'systemd' in cls.td.get('DISTRO_FEATURES', ''):
+ cls.errors.extend([
'Found ordering cycle on',
'Breaking ordering cycle by deleting job',
'deleted to break ordering cycle',
'Ordering cycle found, skipping',
])
- self.ignore_errors = ignore_errors
- self.log_locations = log_locations
- self.msg = ""
- (is_lsb, location) = oeRuntimeTest.tc.target.run("which LSB_Test.sh")
+ cls.ignore_errors = ignore_errors
+ cls.log_locations = log_locations
+ cls.msg = ''
+ is_lsb, _ = cls.tc.target.run("which LSB_Test.sh")
if is_lsb == 0:
- for machine in self.ignore_errors:
- self.ignore_errors[machine] = self.ignore_errors[machine] + video_related
+ for machine in cls.ignore_errors:
+ cls.ignore_errors[machine] = cls.ignore_errors[machine] \
+ + video_related
def getMachine(self):
- return oeRuntimeTest.tc.d.getVar("MACHINE", True)
+ return self.td.get('MACHINE', '')
def getWorkdir(self):
- return oeRuntimeTest.tc.d.getVar("WORKDIR", True)
+ return self.td.get('WORKDIR', '')
- #get some information on the CPU of the machine to display at the beginning of the output. This info might be useful in some cases.
+ # Get some information on the CPU of the machine to display at the
+ # beginning of the output. This info might be useful in some cases.
def getHardwareInfo(self):
hwi = ""
- (status, cpu_name) = self.target.run("cat /proc/cpuinfo | grep \"model name\" | head -n1 | awk 'BEGIN{FS=\":\"}{print $2}'")
- (status, cpu_physical_cores) = self.target.run("cat /proc/cpuinfo | grep \"cpu cores\" | head -n1 | awk {'print $4'}")
- (status, cpu_logical_cores) = self.target.run("cat /proc/cpuinfo | grep \"processor\" | wc -l")
- (status, cpu_arch) = self.target.run("uname -m")
- hwi += "Machine information: \n"
- hwi += "*******************************\n"
- hwi += "Machine name: "+self.getMachine()+"\n"
- hwi += "CPU: "+str(cpu_name)+"\n"
- hwi += "Arch: "+str(cpu_arch)+"\n"
- hwi += "Physical cores: "+str(cpu_physical_cores)+"\n"
- hwi += "Logical cores: "+str(cpu_logical_cores)+"\n"
- hwi += "*******************************\n"
+ cmd = ('cat /proc/cpuinfo | grep "model name" | head -n1 | '
+ " awk 'BEGIN{FS=\":\"}{print $2}'")
+ _, cpu_name = self.target.run(cmd)
+
+ cmd = ('cat /proc/cpuinfo | grep "cpu cores" | head -n1 | '
+ "awk {'print $4'}")
+ _, cpu_physical_cores = self.target.run(cmd)
+
+ cmd = 'cat /proc/cpuinfo | grep "processor" | wc -l'
+ _, cpu_logical_cores = self.target.run(cmd)
+
+ _, cpu_arch = self.target.run('uname -m')
+
+ hwi += 'Machine information: \n'
+ hwi += '*******************************\n'
+ hwi += 'Machine name: ' + self.getMachine() + '\n'
+ hwi += 'CPU: ' + str(cpu_name) + '\n'
+ hwi += 'Arch: ' + str(cpu_arch)+ '\n'
+ hwi += 'Physical cores: ' + str(cpu_physical_cores) + '\n'
+ hwi += 'Logical cores: ' + str(cpu_logical_cores) + '\n'
+ hwi += '*******************************\n'
+
return hwi
- #go through the log locations provided and if it's a folder create a list with all the .log files in it, if it's a file just add
- #it to that list
+ # Go through the log locations provided and if it's a folder
+ # create a list with all the .log files in it, if it's a file
+ # just add it to that list.
def getLogList(self, log_locations):
logs = []
for location in log_locations:
- (status, output) = self.target.run("test -f "+str(location))
- if (status == 0):
+ status, _ = self.target.run('test -f ' + str(location))
+ if status == 0:
logs.append(str(location))
else:
- (status, output) = self.target.run("test -d "+str(location))
- if (status == 0):
- (status, output) = self.target.run("find "+str(location)+"/*.log -maxdepth 1 -type f")
- if (status == 0):
+ status, _ = self.target.run('test -d ' + str(location))
+ if status == 0:
+ cmd = 'find ' + str(location) + '/*.log -maxdepth 1 -type f'
+ status, output = self.target.run(cmd)
+ if status == 0:
output = output.splitlines()
for logfile in output:
- logs.append(os.path.join(location,str(logfile)))
+ logs.append(os.path.join(location, str(logfile)))
return logs
- #copy the log files to be parsed locally
+ # Copy the log files to be parsed locally
def transfer_logs(self, log_list):
workdir = self.getWorkdir()
self.target_logs = workdir + '/' + 'target_logs'
target_logs = self.target_logs
- if not os.path.exists(target_logs):
- os.makedirs(target_logs)
- bb.utils.remove(self.target_logs + "/*")
+ if os.path.exists(target_logs):
+ rmtree(self.target_logs)
+ os.makedirs(target_logs)
for f in log_list:
- self.target.copy_from(f, target_logs)
+ self.target.copyFrom(str(f), target_logs)
- #get the local list of logs
+ # Get the local list of logs
def get_local_log_list(self, log_locations):
self.transfer_logs(self.getLogList(log_locations))
- logs = [ os.path.join(self.target_logs, f) for f in os.listdir(self.target_logs) if os.path.isfile(os.path.join(self.target_logs, f)) ]
+ list_dir = os.listdir(self.target_logs)
+ dir_files = [os.path.join(self.target_logs, f) for f in list_dir]
+ logs = [f for f in dir_files if os.path.isfile(f)]
return logs
- #build the grep command to be used with filters and exclusions
+ # Build the grep command to be used with filters and exclusions
def build_grepcmd(self, errors, ignore_errors, log):
- grepcmd = "grep "
- grepcmd +="-Ei \""
+ grepcmd = 'grep '
+ grepcmd += '-Ei "'
for error in errors:
- grepcmd += error+"|"
+ grepcmd += '\<' + error + '\>' + '|'
grepcmd = grepcmd[:-1]
- grepcmd += "\" "+str(log)+" | grep -Eiv \'"
+ grepcmd += '" ' + str(log) + " | grep -Eiv \'"
+
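+ # The assembled command is a two-stage pipeline: grep -Ei matches any of the
+ # error keywords, then grep -Eiv drops this machine's known false positives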
try:
errorlist = ignore_errors[self.getMachine()]
except KeyError:
- self.msg += "No ignore list found for this machine, using default\n"
+ self.msg += 'No ignore list found for this machine, using default\n'
errorlist = ignore_errors['default']
+
for ignore_error in errorlist:
- ignore_error = ignore_error.replace("(", "\(")
- ignore_error = ignore_error.replace(")", "\)")
- ignore_error = ignore_error.replace("'", ".")
- ignore_error = ignore_error.replace("?", "\?")
- ignore_error = ignore_error.replace("[", "\[")
- ignore_error = ignore_error.replace("]", "\]")
- ignore_error = ignore_error.replace("*", "\*")
- ignore_error = ignore_error.replace("0-9", "[0-9]")
- grepcmd += ignore_error+"|"
+ ignore_error = ignore_error.replace('(', '\(')
+ ignore_error = ignore_error.replace(')', '\)')
+ ignore_error = ignore_error.replace("'", '.')
+ ignore_error = ignore_error.replace('?', '\?')
+ ignore_error = ignore_error.replace('[', '\[')
+ ignore_error = ignore_error.replace(']', '\]')
+ ignore_error = ignore_error.replace('*', '\*')
+ ignore_error = ignore_error.replace('0-9', '[0-9]')
+ grepcmd += ignore_error + '|'
grepcmd = grepcmd[:-1]
grepcmd += "\'"
+
return grepcmd
- #grep only the errors so that their context could be collected. Default context is 10 lines before and after the error itself
- def parse_logs(self, errors, ignore_errors, logs, lines_before = 10, lines_after = 10):
+ # Grep only the errors so that their context could be collected.
+ # Default context is 10 lines before and after the error itself
+ def parse_logs(self, errors, ignore_errors, logs,
+ lines_before=10, lines_after=10):
results = {}
rez = []
grep_output = ''
+
for log in logs:
result = None
thegrep = self.build_grepcmd(errors, ignore_errors, log)
+
try:
- result = subprocess.check_output(thegrep, shell=True).decode("utf-8")
+ result = check_output(thegrep, shell=True).decode('utf-8')
except:
pass
- if (result is not None):
- results[log.replace('target_logs/','')] = {}
+
+ if result is not None:
+ results[log] = {}
rez = result.splitlines()
+
for xrez in rez:
try:
- grep_output = subprocess.check_output(['grep', '-F', xrez, '-B', str(lines_before), '-A', str(lines_after), log]).decode("utf-8")
+ cmd = ['grep', '-F', xrez, '-B', str(lines_before)]
+ cmd += ['-A', str(lines_after), log]
+ grep_output = check_output(cmd).decode('utf-8')
except:
pass
- results[log.replace('target_logs/','')][xrez]=grep_output
+ results[log][xrez] = grep_output
+
return results
- #get the output of dmesg and write it in a file. This file is added to log_locations.
+ # Get the output of dmesg and write it in a file.
+ # This file is added to log_locations.
def write_dmesg(self):
- (status, dmesg) = self.target.run("dmesg > /tmp/dmesg_output.log")
+ (status, dmesg) = self.target.run('dmesg > /tmp/dmesg_output.log')
- @testcase(1059)
- @skipUnlessPassed('test_ssh')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
def test_parselogs(self):
self.write_dmesg()
log_list = self.get_local_log_list(self.log_locations)
@@ -301,13 +367,13 @@ class ParseLogsTest(oeRuntimeTest):
print(self.getHardwareInfo())
errcount = 0
for log in result:
- self.msg += "Log: "+log+"\n"
- self.msg += "-----------------------\n"
+ self.msg += 'Log: ' + log + '\n'
+ self.msg += '-----------------------\n'
for error in result[log]:
errcount += 1
- self.msg += "Central error: "+str(error)+"\n"
- self.msg += "***********************\n"
- self.msg += result[str(log)][str(error)]+"\n"
- self.msg += "***********************\n"
- self.msg += "%s errors found in logs." % errcount
+ self.msg += 'Central error: ' + str(error) + '\n'
+ self.msg += '***********************\n'
+ self.msg += result[str(log)][str(error)] + '\n'
+ self.msg += '***********************\n'
+ self.msg += '%s errors found in logs.' % errcount
self.assertEqual(errcount, 0, msg=self.msg)
diff --git a/meta/lib/oeqa/runtime/cases/perl.py b/meta/lib/oeqa/runtime/cases/perl.py
new file mode 100644
index 0000000000..2c6b3b7846
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/perl.py
@@ -0,0 +1,18 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class PerlTest(OERuntimeTestCase):
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['perl'])
+ def test_perl_works(self):
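+ # tr/a-zA-Z/n-za-mN-ZA-M/ is rot13, so the scrambled string must decode to "Hello, world"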
+ status, output = self.target.run("perl -e '$_=\"Uryyb, jbeyq\"; tr/a-zA-Z/n-za-mN-ZA-M/;print'")
+ self.assertEqual(status, 0)
+ self.assertEqual(output, "Hello, world")
diff --git a/meta/lib/oeqa/runtime/cases/ping.py b/meta/lib/oeqa/runtime/cases/ping.py
new file mode 100644
index 0000000000..f6603f75ec
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/ping.py
@@ -0,0 +1,27 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from subprocess import Popen, PIPE
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.oetimeout import OETimeout
+
+class PingTest(OERuntimeTestCase):
+
+ @OETimeout(30)
+ def test_ping(self):
+ output = ''
+ count = 0
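+ # require 5 consecutive successful pings; a lost reply resets the streak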
+ while count < 5:
+ cmd = 'ping -c 1 %s' % self.target.ip
+ proc = Popen(cmd, shell=True, stdout=PIPE)
+ output += proc.communicate()[0].decode('utf-8')
+ if proc.poll() == 0:
+ count += 1
+ else:
+ count = 0
+ msg = ('Expected 5 consecutive replies, got %d.\n'
+ 'ping output is:\n%s' % (count, output))
+ self.assertEqual(count, 5, msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/ptest.py b/meta/lib/oeqa/runtime/cases/ptest.py
new file mode 100644
index 0000000000..d8d1e1b344
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/ptest.py
@@ -0,0 +1,86 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import unittest
+import pprint
+import datetime
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.utils.logparser import PtestParser
+
+class PtestRunnerTest(OERuntimeTestCase):
+
+ @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['ptest-runner'])
+ @unittest.expectedFailure
+ def test_ptestrunner(self):
+ status, output = self.target.run('which ptest-runner', 0)
+ if status != 0:
+ self.skipTest("No -ptest packages are installed in the image")
+
+ test_log_dir = self.td.get('TEST_LOG_DIR', '')
+ # TEST_LOG_DIR may be empty when testimage is added after
+ # testdata.json has been generated.
+ if not test_log_dir:
+ test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
+ # Don't use self.td.get('DATETIME'): it comes from testdata.json, may be
+ # stale, and can cause "File exists" errors on re-runs.
+ timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ ptest_log_dir_link = os.path.join(test_log_dir, 'ptest_log')
+ ptest_log_dir = '%s.%s' % (ptest_log_dir_link, timestamp)
+ ptest_runner_log = os.path.join(ptest_log_dir, 'ptest-runner.log')
+
+ status, output = self.target.run('ptest-runner', 0)
+ os.makedirs(ptest_log_dir)
+ with open(ptest_runner_log, 'w') as f:
+ f.write(output)
+
+ # status != 0 is OK since some ptest tests may fail
+ self.assertTrue(status != 127, msg="Cannot execute ptest-runner!")
+
+ if not hasattr(self.tc, "extraresults"):
+ self.tc.extraresults = {}
+ extras = self.tc.extraresults
+ extras['ptestresult.rawlogs'] = {'log': output}
+
+ # Parse and save results
+ parser = PtestParser()
+ results, sections = parser.parse(ptest_runner_log)
+ parser.results_as_files(ptest_log_dir)
+ if os.path.exists(ptest_log_dir_link):
+ # Remove the old link to create a new one
+ os.remove(ptest_log_dir_link)
+ os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)
+
+ extras['ptestresult.sections'] = sections
+
+ trans = str.maketrans("()", "__")
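+ # map "()" to "_" and collapse whitespace so each test name forms a clean result key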
+ for section in results:
+ for test in results[section]:
+ result = results[section][test]
+ testname = "ptestresult." + (section or "No-section") + "." + "_".join(test.translate(trans).split())
+ extras[testname] = {'status': result}
+
+ failed_tests = {}
+ for section in results:
+ failed_testcases = [ "_".join(test.translate(trans).split()) for test in results[section] if results[section][test] == 'fail' ]
+ if failed_testcases:
+ failed_tests[section] = failed_testcases
+
+ failmsg = ""
+ status, output = self.target.run('dmesg | grep "Killed process"', 0)
+ if output:
+ failmsg = "ERROR: Processes were killed by the OOM Killer:\n%s\n" % output
+
+ if failed_tests:
+ failmsg = failmsg + "Failed ptests:\n%s" % pprint.pformat(failed_tests)
+
+ if failmsg:
+ self.fail(failmsg)
diff --git a/meta/lib/oeqa/runtime/cases/python.py b/meta/lib/oeqa/runtime/cases/python.py
new file mode 100644
index 0000000000..ec54f1e1db
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/python.py
@@ -0,0 +1,19 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class PythonTest(OERuntimeTestCase):
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['python3-core'])
+ def test_python3(self):
+ cmd = "python3 -c \"import codecs; print(codecs.encode('Uryyb, jbeyq', 'rot13'))\""
+ status, output = self.target.run(cmd)
+ msg = 'Exit status was not 0. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ msg = 'Incorrect output: %s' % output
+ self.assertEqual(output, "Hello, world", msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/rpm.py b/meta/lib/oeqa/runtime/cases/rpm.py
new file mode 100644
index 0000000000..8e18b426f8
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/rpm.py
@@ -0,0 +1,154 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import fnmatch
+import time
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.core.utils.path import findFile
+
+class RpmBasicTest(OERuntimeTestCase):
+
+ @OEHasPackage(['rpm'])
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_rpm_help(self):
+ status, output = self.target.run('rpm --help')
+ msg = 'status and output: %s and %s' % (status, output)
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['rpm.RpmBasicTest.test_rpm_help'])
+ def test_rpm_query(self):
+ status, output = self.target.run('ls /var/lib/rpm/')
+ if status != 0:
+ self.skipTest('No /var/lib/rpm on target')
+ status, output = self.target.run('rpm -q rpm')
+ msg = 'status and output: %s and %s' % (status, output)
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['rpm.RpmBasicTest.test_rpm_query'])
+ def test_rpm_query_nonroot(self):
+
+ def set_up_test_user(u):
+ status, output = self.target.run('id -u %s' % u)
+ if status:
+ status, output = self.target.run('useradd %s' % u)
+ msg = 'Failed to create new user: %s' % output
+ self.assertTrue(status == 0, msg=msg)
+
+ def exec_as_test_user(u):
+ status, output = self.target.run('su -c id %s' % u)
+ msg = 'Failed to execute as new user'
+ self.assertTrue("({0})".format(u) in output, msg=msg)
+
+ status, output = self.target.run('su -c "rpm -qa" %s ' % u)
+ msg = 'status: %s. Cannot run rpm -qa: %s' % (status, output)
+ self.assertEqual(status, 0, msg=msg)
+
+ def check_no_process_for_user(u):
+ _, output = self.target.run(self.tc.target_cmds['ps'])
+ if u + ' ' in output:
+ return False
+ else:
+ return True
+
+ def unset_up_test_user(u):
+ # ensure no process of the test user is still running before removing it
+ timeout = time.time() + 30
+ while time.time() < timeout:
+ if check_no_process_for_user(u):
+ break
+ else:
+ time.sleep(1)
+ status, output = self.target.run('userdel -r %s' % u)
+ msg = 'Failed to erase user: %s' % output
+ self.assertTrue(status == 0, msg=msg)
+
+ tuser = 'test1'
+
+ try:
+ set_up_test_user(tuser)
+ exec_as_test_user(tuser)
+ finally:
+ unset_up_test_user(tuser)
+
+
+class RpmInstallRemoveTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ pkgarch = cls.td['TUNE_PKGARCH'].replace('-', '_')
+ rpmdir = os.path.join(cls.tc.td['DEPLOY_DIR'], 'rpm', pkgarch)
+ # Pick base-passwd-doc as a test file to get installed, because it's small
+ # and it will always be built for standard targets
+ rpm_doc = 'base-passwd-doc-*.%s.rpm' % pkgarch
+ if not os.path.exists(rpmdir):
+ return
+ for f in fnmatch.filter(os.listdir(rpmdir), rpm_doc):
+ cls.test_file = os.path.join(rpmdir, f)
+ cls.dst = '/tmp/base-passwd-doc.rpm'
+
+ @OETestDepends(['rpm.RpmBasicTest.test_rpm_query'])
+ def test_rpm_install(self):
+ self.tc.target.copyTo(self.test_file, self.dst)
+ status, output = self.target.run('rpm -ivh /tmp/base-passwd-doc.rpm')
+ msg = 'Failed to install base-passwd-doc package: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+ self.tc.target.run('rm -f %s' % self.dst)
+
+ @OETestDepends(['rpm.RpmInstallRemoveTest.test_rpm_install'])
+ def test_rpm_remove(self):
+ status,output = self.target.run('rpm -e base-passwd-doc')
+ msg = 'Failed to remove base-passwd-doc package: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['rpm.RpmInstallRemoveTest.test_rpm_remove'])
+ def test_check_rpm_install_removal_log_file_size(self):
+ """
+ Summary: Check that rpm writes into /var/log/messages
+ Expected: There should be some RPM prefixed entries in the above file.
+ Product: BSPs
+ Author: Alexandru Georgescu <alexandru.c.georgescu@intel.com>
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+ db_files_cmd = 'ls /var/lib/rpm/__db.*'
+ check_log_cmd = "grep RPM /var/log/messages | wc -l"
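+ # the 10 install/remove cycles below should leave well over 80 RPM-prefixed lines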
+
+ # Make sure that some database files are under /var/lib/rpm as '__db.xxx'
+ status, output = self.target.run(db_files_cmd)
+ msg = 'Failed to find database files under /var/lib/rpm/ as __db.xxx'
+ self.assertEqual(0, status, msg=msg)
+
+ self.tc.target.copyTo(self.test_file, self.dst)
+
+ # Remove the package just in case
+ self.target.run('rpm -e base-passwd-doc')
+
+ # Install/Remove a package 10 times
+ for i in range(10):
+ status, output = self.target.run('rpm -ivh /tmp/base-passwd-doc.rpm')
+ msg = 'Failed to install base-passwd-doc package. Reason: {}'.format(output)
+ self.assertEqual(0, status, msg=msg)
+
+ status, output = self.target.run('rpm -e base-passwd-doc')
+ msg = 'Failed to remove base-passwd-doc package. Reason: {}'.format(output)
+ self.assertEqual(0, status, msg=msg)
+
+ self.tc.target.run('rm -f %s' % self.dst)
+
+ # if using systemd this should ensure all entries are flushed to /var
+ status, output = self.target.run("journalctl --sync")
+ # Get the amount of entries in the log file
+ status, output = self.target.run(check_log_cmd)
+ msg = 'Failed to get the final size of the log file.'
+ self.assertEqual(0, status, msg=msg)
+
+ # Check that there's enough of them
+ self.assertGreaterEqual(int(output), 80,
+ 'Could not find a sufficient number of RPM entries in /var/log/messages, found {} entries'.format(output))
diff --git a/meta/lib/oeqa/runtime/cases/scons.py b/meta/lib/oeqa/runtime/cases/scons.py
new file mode 100644
index 0000000000..3c7c7f7270
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/scons.py
@@ -0,0 +1,38 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class SconsCompileTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUp(cls):
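+ # stage a minimal scons project (hello.c plus its SConstruct) in /tmp on the target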
+ dst = '/tmp/'
+ src = os.path.join(cls.tc.runtime_files_dir, 'hello.c')
+ cls.tc.target.copyTo(src, dst)
+
+ src = os.path.join(cls.tc.runtime_files_dir, 'SConstruct')
+ cls.tc.target.copyTo(src, dst)
+
+ @classmethod
+ def tearDown(cls):
+ files = '/tmp/hello.c /tmp/hello.o /tmp/hello /tmp/SConstruct'
+ cls.tc.target.run('rm %s' % files)
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['gcc'])
+ @OEHasPackage(['python3-scons'])
+ def test_scons_compile(self):
+ status, output = self.target.run('cd /tmp/ && scons')
+ msg = 'scons compile failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('/tmp/hello')
+ msg = 'running compiled file failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/scp.py b/meta/lib/oeqa/runtime/cases/scp.py
new file mode 100644
index 0000000000..3a5f292152
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/scp.py
@@ -0,0 +1,38 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+from tempfile import mkstemp
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class ScpTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.tmp_fd, cls.tmp_path = mkstemp()
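+ # create a sparse 4 MiB file so the transfer is non-trivial but cheap to set up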
+ with os.fdopen(cls.tmp_fd, 'w') as f:
+ f.seek(2 ** 22 - 1)
+ f.write(os.linesep)
+
+ @classmethod
+ def tearDownClass(cls):
+ os.remove(cls.tmp_path)
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['openssh-scp', 'dropbear'])
+ def test_scp_file(self):
+ dst = '/tmp/test_scp_file'
+
+ (status, output) = self.target.copyTo(self.tmp_path, dst)
+ msg = 'File could not be copied. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ (status, output) = self.target.run('ls -la %s' % dst)
+ self.assertEqual(status, 0, msg = 'SCP test failed')
+
+ self.target.run('rm %s' % dst)
diff --git a/meta/lib/oeqa/runtime/cases/skeletoninit.py b/meta/lib/oeqa/runtime/cases/skeletoninit.py
new file mode 100644
index 0000000000..4779cd6bb4
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/skeletoninit.py
@@ -0,0 +1,35 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+# This test covers the https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284
+# testcase. The image under test must have the meta-skeleton layer in BBLAYERS and
+# IMAGE_INSTALL_append = " service" in local.conf.
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class SkeletonBasicTest(OERuntimeTestCase):
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['service'])
+ @skipIfDataVar('VIRTUAL-RUNTIME_init_manager', 'systemd',
+ 'Not appropriate for systemd image')
+ def test_skeleton_availability(self):
+ status, output = self.target.run('ls /etc/init.d/skeleton')
+ msg = 'skeleton init script not found. Output:\n%s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('ls /usr/sbin/skeleton-test')
+ msg = 'skeleton-test not found. Output:\n%s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['skeletoninit.SkeletonBasicTest.test_skeleton_availability'])
+ def test_skeleton_script(self):
+ output1 = self.target.run("/etc/init.d/skeleton start")[1]
+ cmd = '%s | grep [s]keleton-test' % self.tc.target_cmds['ps']
+ status, output2 = self.target.run(cmd)
+ msg = ('Skeleton script could not be started:'
+ '\n%s\n%s' % (output1, output2))
+ self.assertEqual(status, 0, msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/ssh.py b/meta/lib/oeqa/runtime/cases/ssh.py
new file mode 100644
index 0000000000..60a5fbbfbf
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/ssh.py
@@ -0,0 +1,19 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class SSHTest(OERuntimeTestCase):
+
+ @OETestDepends(['ping.PingTest.test_ping'])
+ @OEHasPackage(['dropbear', 'openssh-sshd'])
+ def test_ssh(self):
+ (status, output) = self.target.run('uname -a')
+ self.assertEqual(status, 0, msg='SSH Test failed: %s' % output)
+ (status, output) = self.target.run('cat /etc/masterimage')
+ msg = "This isn't the right image - /etc/masterimage " \
+ "shouldn't be present. Output: %s" % output
+ self.assertEqual(status, 1, msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/stap.py b/meta/lib/oeqa/runtime/cases/stap.py
new file mode 100644
index 0000000000..5342f6ac34
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/stap.py
@@ -0,0 +1,38 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class StapTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUp(cls):
+ src = os.path.join(cls.tc.runtime_files_dir, 'hello.stp')
+ dst = '/tmp/hello.stp'
+ cls.tc.target.copyTo(src, dst)
+
+ @classmethod
+ def tearDown(cls):
+ files = '/tmp/hello.stp'
+ cls.tc.target.run('rm %s' % files)
+
+ @skipIfNotFeature('tools-profile',
+ 'Test requires tools-profile to be in IMAGE_FEATURES')
+ @OETestDepends(['kernelmodule.KernelModuleTest.test_kernel_module'])
+ @OEHasPackage(['systemtap'])
+ def test_stap(self):
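+ # prepare the kernel build scripts and a build symlink so stap can compile its module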
+ cmds = [
+ 'cd /usr/src/kernel && make scripts prepare',
+ 'cd /lib/modules/`uname -r` && (if [ ! -e build ]; then ln -s /usr/src/kernel build; fi)',
+ 'stap --disable-cache -DSTP_NO_VERREL_CHECK /tmp/hello.stp'
+ ]
+ for cmd in cmds:
+ status, output = self.target.run(cmd, 900)
+ self.assertEqual(status, 0, msg='\n'.join([cmd, output]))
diff --git a/meta/lib/oeqa/runtime/cases/storage.py b/meta/lib/oeqa/runtime/cases/storage.py
new file mode 100644
index 0000000000..166d26b252
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/storage.py
@@ -0,0 +1,148 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import re
+import time
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfQemu
+
+class StorageBase(OERuntimeTestCase):
+ def storage_mount(cls, tmo=1):
+
+ (status, output) = cls.target.run('mkdir -p %s' % cls.mount_point)
+ (status, output) = cls.target.run('mount %s %s' % (cls.device, cls.mount_point))
+ msg = ('Mount failed: %s.' % status)
+ cls.assertFalse(output, msg = msg)
+ time.sleep(tmo)
+ (status, output) = cls.target.run('cat /proc/mounts')
+ match = re.search('%s' % cls.device, output)
+ msg = ('Device %s not mounted.' % cls.device)
+ cls.assertTrue(match, msg = msg)
+
+ (status, output) = cls.target.run('mkdir -p %s' % cls.test_dir)
+
+ (status, output) = cls.target.run('rm -f %s/*' % cls.test_dir)
+ msg = ('Failed to cleanup files @ %s/*' % cls.test_dir)
+ cls.assertFalse(output, msg = msg)
+
+
+ def storage_basic(cls):
+ # create file on device
+ (status, output) = cls.target.run('touch %s/%s' % (cls.test_dir, cls.test_file))
+ msg = ('File %s not created on %s' % (cls.test_file, cls.device))
+ cls.assertFalse(status, msg = msg)
+ # move file
+ (status, output) = cls.target.run('mv %s/%s %s/%s1' %
+ (cls.test_dir, cls.test_file, cls.test_dir, cls.test_file))
+ msg = ('File %s not moved to %s' % (cls.test_file, cls.device))
+ cls.assertFalse(status, msg = msg)
+ # remove file
+ (status, output) = cls.target.run('rm %s/%s1' % (cls.test_dir, cls.test_file))
+ msg = ('File %s not removed on %s' % (cls.test_file, cls.device))
+ cls.assertFalse(status, msg = msg)
+
+ def storage_read(cls):
+ # check if message is in file
+ (status, output) = cls.target.run('cat %s/%s' %
+ (cls.test_dir, cls.test_file))
+
+ match = re.search('%s' % cls.test_msg, output)
+ msg = ('Test message %s not in file %s.' % (cls.test_msg, cls.test_file))
+ cls.assertTrue(match, msg = msg)
+
+ def storage_write(cls):
+ # create test message in file on device
+ (status, output) = cls.target.run('echo "%s" > %s/%s' %
+ (cls.test_msg, cls.test_dir, cls.test_file))
+ msg = ('Could not write test message to %s on %s' % (cls.test_file, cls.device))
+ cls.assertEqual(status, 0, msg = msg)
+
+ def storage_umount(cls, tmo=1):
+ time.sleep(tmo)
+ (status, output) = cls.target.run('umount %s' % cls.mount_point)
+
+ if status == 32:
+ # umount returns 32 when the device was not mounted; treat as already unmounted
+ return
+ else:
+ msg = ('Device not unmount %s' % cls.mount_point)
+ cls.assertEqual(status, 0, msg = msg)
+
+ (status, output) = cls.target.run('cat /proc/mounts')
+ match = re.search('%s' % cls.device, output)
+ msg = ('Device %s still mounted.' % cls.device)
+ cls.assertFalse(match, msg = msg)
+
+
+class UsbTest(StorageBase):
+ '''
+ This is to mimic the usb test previously done in manual bsp-hw.json
+ '''
+ @classmethod
+ def setUpClass(cls):
+ cls.test_msg = "Hello World - USB"
+ cls.mount_point = "/media/usb"
+ cls.device = "/dev/sda1"
+ cls.test_file = "usb.tst"
+ cls.test_dir = os.path.join(cls.mount_point, "oeqa")
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_usb_mount(self):
+ self.storage_umount(2)
+ self.storage_mount(5)
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['storage.UsbTest.test_usb_mount'])
+ def test_usb_basic_operations(self):
+ self.storage_basic()
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['storage.UsbTest.test_usb_basic_operations'])
+ def test_usb_basic_rw(self):
+ self.storage_write()
+ self.storage_read()
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['storage.UsbTest.test_usb_mount'])
+ def test_usb_umount(self):
+ self.storage_umount(2)
+
+
+class MMCTest(StorageBase):
+ '''
+ This is to mimic the MMC test previously done in manual bsp-hw.json
+ '''
+ @classmethod
+ def setUpClass(cls):
+ cls.test_msg = "Hello World - MMC"
+ cls.mount_point = "/media/mmc"
+ cls.device = "/dev/mmcblk1p1"
+ cls.test_file = "mmc.tst"
+ cls.test_dir = os.path.join(cls.mount_point, "oeqa")
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_mmc_mount(self):
+ self.storage_umount(2)
+ self.storage_mount()
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['storage.MMCTest.test_mmc_mount'])
+ def test_mmc_basic_operations(self):
+ self.storage_basic()
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['storage.MMCTest.test_mmc_basic_operations'])
+ def test_mmc_basic_rw(self):
+ self.storage_write()
+ self.storage_read()
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['storage.MMCTest.test_mmc_mount'])
+ def test_mmc_umount(self):
+ self.storage_umount(2)
diff --git a/meta/lib/oeqa/runtime/cases/systemd.py b/meta/lib/oeqa/runtime/cases/systemd.py
new file mode 100644
index 0000000000..7c44abe8ed
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/systemd.py
@@ -0,0 +1,195 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import re
+import time
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfDataVar, skipIfNotDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.core.decorator.data import skipIfNotFeature, skipIfFeature
+
+class SystemdTest(OERuntimeTestCase):
+
+ def systemctl(self, action='', target='', expected=0, verbose=False):
+ command = 'SYSTEMD_BUS_TIMEOUT=240s systemctl %s %s' % (action, target)
+ status, output = self.target.run(command)
+ message = '\n'.join([command, output])
+ if status != expected and verbose:
+ cmd = 'SYSTEMD_BUS_TIMEOUT=240s systemctl status --full %s' % target
+ message += self.target.run(cmd)[1]
+ self.assertEqual(status, expected, message)
+ return output
+
+ #TODO: use pyjournalctl instead
+ def journalctl(self, args='', l_match_units=None):
+ """
+ Request for the journalctl output to the current target system
+
+ Arguments:
+ -args, an optional argument pass through argument
+ -l_match_units, an optional list of units to filter the output
+ Returns:
+ -string output of the journalctl command
+ Raises:
+ -AssertionError, on remote commands that fail
+ -ValueError, on a journalctl call with filtering by l_match_units that
+ returned no entries
+ """
+
+ query_units = ''
+ if l_match_units:
+ query_units = ['_SYSTEMD_UNIT=' + unit for unit in l_match_units]
+ query_units = ' '.join(query_units)
+ command = 'journalctl %s %s' % (args, query_units)
+ status, output = self.target.run(command)
+ if status:
+ raise AssertionError("Command '%s' returned non-zero exit "
+ 'code %d:\n%s' % (command, status, output))
+ if len(output) == 1 and "-- No entries --" in output:
+ raise ValueError('List of units to match: %s, returned no entries'
+ % l_match_units)
+ return output
+
+class SystemdBasicTests(SystemdTest):
+
+ def settle(self):
+ """
+ Block until systemd has finished activating any units being activated,
+ or until two minutes have elapsed.
+
+ Returns a tuple, either (True, '') if all units have finished
+ activating, or (False, message string) if there are still units
+ activating (generally, failing units that restart).
+ """
+ endtime = time.time() + (60 * 2)
+ while True:
+ status, output = self.target.run('SYSTEMD_BUS_TIMEOUT=240s systemctl --state=activating')
+ if "0 loaded units listed" in output:
+ return (True, '')
+ if time.time() >= endtime:
+ return (False, output)
+ time.sleep(10)
+
+ @skipIfNotFeature('systemd',
+ 'Test requires systemd to be in DISTRO_FEATURES')
+ @skipIfNotDataVar('VIRTUAL-RUNTIME_init_manager', 'systemd',
+ 'systemd is not the init manager for this image')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_systemd_basic(self):
+ self.systemctl('--version')
+
+ @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
+ def test_systemd_list(self):
+ self.systemctl('list-unit-files')
+
+ @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
+ def test_systemd_failed(self):
+ settled, output = self.settle()
+ msg = "Timed out waiting for systemd to settle:\n%s" % output
+ self.assertTrue(settled, msg=msg)
+
+ output = self.systemctl('list-units', '--failed')
+ match = re.search('0 loaded units listed', output)
+ if not match:
+ output += self.systemctl('status --full --failed')
+ self.assertTrue(match, msg='Some systemd units failed:\n%s' % output)
+
+
+class SystemdServiceTests(SystemdTest):
+
+ @OEHasPackage(['avahi-daemon'])
+ @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
+ def test_systemd_status(self):
+ self.systemctl('status --full', 'avahi-daemon.service')
+
+ @OETestDepends(['systemd.SystemdServiceTests.test_systemd_status'])
+ def test_systemd_stop_start(self):
+ self.systemctl('stop', 'avahi-daemon.service')
+ self.systemctl('is-active', 'avahi-daemon.service',
+ expected=3, verbose=True)
+ self.systemctl('start', 'avahi-daemon.service')
+ self.systemctl('is-active', 'avahi-daemon.service', verbose=True)
+
+ @OETestDepends(['systemd.SystemdServiceTests.test_systemd_status'])
+ @skipIfFeature('read-only-rootfs',
+ 'Test is only meant to run without read-only-rootfs in IMAGE_FEATURES')
+ def test_systemd_disable_enable(self):
+ self.systemctl('disable', 'avahi-daemon.service')
+ self.systemctl('is-enabled', 'avahi-daemon.service', expected=1)
+ self.systemctl('enable', 'avahi-daemon.service')
+ self.systemctl('is-enabled', 'avahi-daemon.service')
+
+ @OETestDepends(['systemd.SystemdServiceTests.test_systemd_status'])
+ @skipIfNotFeature('read-only-rootfs',
+ 'Test is only meant to run with read-only-rootfs in IMAGE_FEATURES')
+ def test_systemd_disable_enable_ro(self):
+ status = self.target.run('mount -orw,remount /')[0]
+ self.assertTrue(status == 0, msg='Remounting / as r/w failed')
+ try:
+ self.test_systemd_disable_enable()
+ finally:
+ status = self.target.run('mount -oro,remount /')[0]
+ self.assertTrue(status == 0, msg='Remounting / as r/o failed')
+
+class SystemdJournalTests(SystemdTest):
+
+ @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
+ def test_systemd_journal(self):
+ status, output = self.target.run('journalctl')
+ self.assertEqual(status, 0, output)
+
+ @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
+ def test_systemd_boot_time(self, systemd_TimeoutStartSec=90):
+ """
+ Get the target boot time from journalctl and log it
+
+ Arguments:
+ -systemd_TimeoutStartSec, an optional argument containing systemd's
+ unit start timeout to compare against
+ """
+
+ # The expression chain that uniquely identifies the boot time message.
+ expr_items = ['Startup finished', 'kernel', 'userspace', r'\.$']
+ try:
+ output = self.journalctl(args='-o cat --reverse')
+ except AssertionError:
+ self.fail('Error occurred while calling journalctl')
+ if not output:
+ self.fail('Error, unable to get startup time from systemd journal')
+
+ # Check for the regular expression items that match the startup time.
+ for line in output.split('\n'):
+ check_match = ''.join(re.findall('.*'.join(expr_items), line))
+ if check_match:
+ break
+ # Put the startup time in the test log
+ if check_match:
+ self.tc.logger.info('%s' % check_match)
+ else:
+ self.skipTest('Error obtaining the boot time from journalctl')
+ boot_time_sec = 0
+
+ # Get the numeric values from the string and convert them to seconds
+ # same data will be placed in list and string for manipulation.
+ l_boot_time = check_match.split(' ')[-2:]
+ s_boot_time = ' '.join(l_boot_time)
+ try:
+ # Obtain the minutes it took to boot.
+ if l_boot_time[0].endswith('min') and l_boot_time[0][0].isdigit():
+ boot_time_min = s_boot_time.split('min')[0]
+ # Convert to seconds and accumulate it.
+ boot_time_sec += int(boot_time_min) * 60
+ # Obtain the seconds it took to boot and accumulate.
+ boot_time_sec += float(l_boot_time[1].split('s')[0])
+ except ValueError:
+ self.skipTest('Error when parsing time from boot string')
+
+ # Assert the target boot time against systemd's unit start timeout.
+ if boot_time_sec > systemd_TimeoutStartSec:
+ msg = ("Target boot time %s exceeds systemd's TimeoutStartSec %s"
+ % (boot_time_sec, systemd_TimeoutStartSec))
+ self.tc.logger.info(msg)
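The systemctl() and journalctl() helpers above are meant to be composed inside test methods. A minimal sketch of such a test follows (the unit name is illustrative, and the class is assumed to live in the same module so that SystemdTest and the decorators are already in scope):

    class SystemdExampleTests(SystemdTest):

        @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
        def test_journal_for_unit(self):
            # 'systemd-logind.service' is an illustrative unit name;
            # substitute any unit expected to be present in the image.
            self.systemctl('is-active', 'systemd-logind.service', verbose=True)

            # journalctl() raises ValueError if the filtered unit
            # produced no journal entries at all.
            output = self.journalctl(args='-o cat',
                                     l_match_units=['systemd-logind.service'])
            self.assertTrue(output)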
diff --git a/meta/lib/oeqa/runtime/cases/x32lib.py b/meta/lib/oeqa/runtime/cases/x32lib.py
new file mode 100644
index 0000000000..ddf220140e
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/x32lib.py
@@ -0,0 +1,21 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfNotInDataVar
+
+class X32libTest(OERuntimeTestCase):
+
+ @skipIfNotInDataVar('DEFAULTTUNE', 'x86-64-x32',
+ 'DEFAULTTUNE is not set to x86-64-x32')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_x32_file(self):
+ cmd = 'readelf -h /bin/ls | grep Class | grep ELF32'
+ status1 = self.target.run(cmd)[0]
+ cmd = 'readelf -h /bin/ls | grep Machine | grep X86-64'
+ status2 = self.target.run(cmd)[0]
+ msg = ("/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" %
+ self.target.run("readelf -h /bin/ls")[1])
+ self.assertTrue(status1 == 0 and status2 == 0, msg=msg)
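The same check could also be expressed with a single readelf invocation parsed host-side; a sketch under the same OERuntimeTestCase conventions (check_x32_binary is a hypothetical helper, not part of the suite):

    def check_x32_binary(self, path='/bin/ls'):
        # One readelf call; split the header into 'Field: value' pairs.
        status, output = self.target.run('readelf -h %s' % path)
        self.assertEqual(status, 0, msg='readelf failed: %s' % output)
        fields = {k.strip(): v.strip()
                  for k, v in (line.split(':', 1)
                               for line in output.splitlines() if ':' in line)}
        self.assertIn('ELF32', fields.get('Class', ''),
                      msg='%s is not an ELF32 binary' % path)
        self.assertIn('X86-64', fields.get('Machine', ''),
                      msg='%s is not an x86-64 binary' % path)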
diff --git a/meta/lib/oeqa/runtime/cases/xorg.py b/meta/lib/oeqa/runtime/cases/xorg.py
new file mode 100644
index 0000000000..d6845587c2
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/xorg.py
@@ -0,0 +1,21 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class XorgTest(OERuntimeTestCase):
+
+ @skipIfNotFeature('x11-base',
+ 'Test requires x11 to be in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['xserver-nodm-init'])
+ def test_xorg_running(self):
+ cmd = '%s | grep -v xinit | grep [X]org' % self.tc.target_cmds['ps']
+ status, output = self.target.run(cmd)
+ msg = ('Xorg does not appear to be running; ps output: %s' %
+ self.target.run(self.tc.target_cmds['ps'])[1])
+ self.assertEqual(status, 0, msg=msg)
diff --git a/meta/lib/oeqa/runtime/connman.py b/meta/lib/oeqa/runtime/connman.py
deleted file mode 100644
index 003fefe2ce..0000000000
--- a/meta/lib/oeqa/runtime/connman.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import unittest
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasPackage("connman"):
- skipModule("No connman package in image")
-
-
-class ConnmanTest(oeRuntimeTest):
-
- def service_status(self, service):
- if oeRuntimeTest.hasFeature("systemd"):
- (status, output) = self.target.run('systemctl status -l %s' % service)
- return output
- else:
- return "Unable to get status or logs for %s" % service
-
- @testcase(961)
- @skipUnlessPassed('test_ssh')
- def test_connmand_help(self):
- (status, output) = self.target.run('/usr/sbin/connmand --help')
- self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
-
- @testcase(221)
- @skipUnlessPassed('test_connmand_help')
- def test_connmand_running(self):
- (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep [c]onnmand')
- if status != 0:
- print(self.service_status("connman"))
- self.fail("No connmand process running")
diff --git a/meta/lib/oeqa/runtime/context.py b/meta/lib/oeqa/runtime/context.py
new file mode 100644
index 0000000000..2ecb1a8f01
--- /dev/null
+++ b/meta/lib/oeqa/runtime/context.py
@@ -0,0 +1,226 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+
+from oeqa.core.context import OETestContext, OETestContextExecutor
+from oeqa.core.target.ssh import OESSHTarget
+from oeqa.core.target.qemu import OEQemuTarget
+from oeqa.utils.dump import HostDumper
+
+from oeqa.runtime.loader import OERuntimeTestLoader
+
+class OERuntimeTestContext(OETestContext):
+ loaderClass = OERuntimeTestLoader
+ runtime_files_dir = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "files")
+
+ def __init__(self, td, logger, target,
+ host_dumper, image_packages, extract_dir):
+ super(OERuntimeTestContext, self).__init__(td, logger)
+
+ self.target = target
+ self.image_packages = image_packages
+ self.host_dumper = host_dumper
+ self.extract_dir = extract_dir
+ self._set_target_cmds()
+
+ def _set_target_cmds(self):
+ self.target_cmds = {}
+
+ self.target_cmds['ps'] = 'ps'
+ if 'procps' in self.image_packages:
+ self.target_cmds['ps'] = self.target_cmds['ps'] + ' -ef'
+
+class OERuntimeTestContextExecutor(OETestContextExecutor):
+ _context_class = OERuntimeTestContext
+
+ name = 'runtime'
+ help = 'runtime test component'
+ description = 'executes runtime tests over targets'
+
+ default_cases = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'cases')
+ default_data = None
+ default_test_data = 'data/testdata.json'
+ default_tests = ''
+
+ default_target_type = 'simpleremote'
+ default_manifest = 'data/manifest'
+ default_server_ip = '192.168.7.1'
+ default_target_ip = '192.168.7.2'
+ default_extract_dir = 'packages/extracted'
+
+ def register_commands(self, logger, subparsers):
+ super(OERuntimeTestContextExecutor, self).register_commands(logger, subparsers)
+
+ runtime_group = self.parser.add_argument_group('runtime options')
+
+ runtime_group.add_argument('--target-type', action='store',
+ default=self.default_target_type, choices=['simpleremote', 'qemu'],
+ help="Target type of device under test, default: %s" \
+ % self.default_target_type)
+ runtime_group.add_argument('--target-ip', action='store',
+ default=self.default_target_ip,
+ help="IP address of device under test, default: %s" \
+ % self.default_target_ip)
+ runtime_group.add_argument('--server-ip', action='store',
+ default=self.default_server_ip,
+ help="IP address of the test server/host, default: %s" \
+ % self.default_server_ip)
+
+ runtime_group.add_argument('--host-dumper-dir', action='store',
+ help="Directory where host status is dumped, if tests fails")
+
+ runtime_group.add_argument('--packages-manifest', action='store',
+ default=self.default_manifest,
+ help="Package manifest of the image under testi, default: %s" \
+ % self.default_manifest)
+
+ runtime_group.add_argument('--extract-dir', action='store',
+ default=self.default_extract_dir,
+ help='Directory where extracted packages reside, default: %s' \
+ % self.default_extract_dir)
+
+ runtime_group.add_argument('--qemu-boot', action='store',
+ help="Qemu boot configuration, only needed when target_type is QEMU.")
+
+ @staticmethod
+ def getTarget(target_type, logger, target_ip, server_ip, **kwargs):
+ target = None
+
+ if target_ip:
+ target_ip_port = target_ip.split(':')
+ if len(target_ip_port) == 2:
+ target_ip = target_ip_port[0]
+ kwargs['port'] = target_ip_port[1]
+
+ if server_ip:
+ server_ip_port = server_ip.split(':')
+ if len(server_ip_port) == 2:
+ server_ip = server_ip_port[0]
+ kwargs['server_port'] = int(server_ip_port[1])
+
+ if target_type == 'simpleremote':
+ target = OESSHTarget(logger, target_ip, server_ip, **kwargs)
+ elif target_type == 'qemu':
+ target = OEQemuTarget(logger, server_ip, **kwargs)
+ else:
+ # XXX: This code uses the old naming convention for controllers and
+ # targets; the idea is to keep just targets, as the controller was
+ # usually only a wrapper.
+ # XXX: This code imports modules from the lib/oeqa/controllers
+ # directory and treats them as controllers; it would be less
+ # error-prone to use introspection to load such modules.
+ # XXX: Don't base your targets on this code; it will be refactored
+ # in the near future.
+ # Custom target module loading
+ target_modules_path = kwargs.get('target_modules_path', '')
+ controller = OERuntimeTestContextExecutor.getControllerModule(target_type, target_modules_path)
+ target = controller(logger, target_ip, server_ip, **kwargs)
+
+ return target
+
+ # Search the oeqa.controllers module directory for, and return, a
+ # controller corresponding to the given target name.
+ # Raises AttributeError if not found.
+ # Raises ImportError if a provided module cannot be imported.
+ @staticmethod
+ def getControllerModule(target, target_modules_path):
+ controllerslist = OERuntimeTestContextExecutor._getControllerModulenames(target_modules_path)
+ controller = OERuntimeTestContextExecutor._loadControllerFromName(target, controllerslist)
+ return controller
+
+ # Return a list of all python modules in lib/oeqa/controllers for each
+ # layer in bbpath
+ @staticmethod
+ def _getControllerModulenames(target_modules_path):
+
+ controllerslist = []
+
+ def add_controller_list(path):
+ if not os.path.exists(os.path.join(path, '__init__.py')):
+ raise OSError('Controllers directory %s exists but is missing __init__.py' % path)
+ files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_') and not f.startswith('.#')])
+ for f in files:
+ module = 'oeqa.controllers.' + f[:-3]
+ if module not in controllerslist:
+ controllerslist.append(module)
+ else:
+ raise RuntimeError("Duplicate controller module found for %s. Layers should create unique controller module names" % module)
+
+ extpath = target_modules_path.split(':')
+ for p in extpath:
+ controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers')
+ if os.path.exists(controllerpath):
+ add_controller_list(controllerpath)
+ return controllerslist
+
+ # Search for and return a controller from the given target name and
+ # set of module names.
+ # Raises AttributeError if not found.
+ # Raises ImportError if a provided module cannot be imported.
+ @staticmethod
+ def _loadControllerFromName(target, modulenames):
+ for name in modulenames:
+ obj = OERuntimeTestContextExecutor._loadControllerFromModule(target, name)
+ if obj:
+ return obj
+ raise AttributeError("Unable to load {0} from available modules: {1}".format(target, str(modulenames)))
+
+ # Search for and return a controller or None from given module name
+ @staticmethod
+ def _loadControllerFromModule(target, modulename):
+ obj = None
+ # import module, allowing it to raise import exception
+ module = __import__(modulename, globals(), locals(), [target])
+ # Look for the target class in the module, tolerating its absence:
+ # it is valid for a module not to define the target class.
+ try:
+ obj = getattr(module, target)
+ except AttributeError:
+ obj = None
+ return obj
+
+ @staticmethod
+ def readPackagesManifest(manifest):
+ if not manifest or not os.path.exists(manifest):
+ raise OSError("Manifest file not exists: %s" % manifest)
+
+ image_packages = set()
+ with open(manifest, 'r') as f:
+ for line in f.readlines():
+ line = line.strip()
+ if line and not line.startswith("#"):
+ image_packages.add(line.split()[0])
+
+ return image_packages
+
+ @staticmethod
+ def getHostDumper(cmds, directory):
+ return HostDumper(cmds, directory)
+
+ def _process_args(self, logger, args):
+ if not args.packages_manifest:
+ raise TypeError('Manifest file not provided')
+
+ super(OERuntimeTestContextExecutor, self)._process_args(logger, args)
+
+ target_kwargs = {}
+ target_kwargs['qemuboot'] = args.qemu_boot
+
+ self.tc_kwargs['init']['target'] = \
+ OERuntimeTestContextExecutor.getTarget(args.target_type,
+ None, args.target_ip, args.server_ip, **target_kwargs)
+ self.tc_kwargs['init']['host_dumper'] = \
+ OERuntimeTestContextExecutor.getHostDumper(None,
+ args.host_dumper_dir)
+ self.tc_kwargs['init']['image_packages'] = \
+ OERuntimeTestContextExecutor.readPackagesManifest(
+ args.packages_manifest)
+ self.tc_kwargs['init']['extract_dir'] = args.extract_dir
+
+_executor_class = OERuntimeTestContextExecutor
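getTarget() accepts both addresses either bare or in ip:port form, splitting off the port before constructing the target. A standalone sketch of that entry point (the addresses and logger name are illustrative values; assumes meta/lib is on sys.path):

    import logging

    logger = logging.getLogger('runtime-example')

    # The ':2222' suffix is split off and forwarded as the ssh port;
    # a 'qemu' target would additionally need a qemuboot config.
    target = OERuntimeTestContextExecutor.getTarget(
        'simpleremote', logger, '192.168.7.2:2222', '192.168.7.1')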
diff --git a/meta/lib/oeqa/runtime/date.py b/meta/lib/oeqa/runtime/date.py
deleted file mode 100644
index 447987e075..0000000000
--- a/meta/lib/oeqa/runtime/date.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from oeqa.oetest import oeRuntimeTest
-from oeqa.utils.decorators import *
-import re
-
-class DateTest(oeRuntimeTest):
-
- def setUpLocal(self):
- if oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True) == "systemd":
- self.target.run('systemctl stop systemd-timesyncd')
-
- def tearDownLocal(self):
- if oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True) == "systemd":
- self.target.run('systemctl start systemd-timesyncd')
-
- @testcase(211)
- @skipUnlessPassed("test_ssh")
- def test_date(self):
- (status, output) = self.target.run('date +"%Y-%m-%d %T"')
- self.assertEqual(status, 0, msg="Failed to get initial date, output: %s" % output)
- oldDate = output
-
- sampleDate = '"2016-08-09 10:00:00"'
- (status, output) = self.target.run("date -s %s" % sampleDate)
- self.assertEqual(status, 0, msg="Date set failed, output: %s" % output)
-
- (status, output) = self.target.run("date -R")
- p = re.match('Tue, 09 Aug 2016 10:00:.. \+0000', output)
- self.assertTrue(p, msg="The date was not set correctly, output: %s" % output)
-
- (status, output) = self.target.run('date -s "%s"' % oldDate)
- self.assertEqual(status, 0, msg="Failed to reset date, output: %s" % output)
diff --git a/meta/lib/oeqa/runtime/decorator/package.py b/meta/lib/oeqa/runtime/decorator/package.py
new file mode 100644
index 0000000000..4c5ca198b0
--- /dev/null
+++ b/meta/lib/oeqa/runtime/decorator/package.py
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.core.decorator import OETestDecorator, registerDecorator
+from oeqa.core.utils.misc import strToSet
+
+@registerDecorator
+class OEHasPackage(OETestDecorator):
+ """
+ Checks if image has packages (un)installed.
+
+ The argument must be a string, set, or list of packages that must be
+ installed or not present in the image.
+
+ To indicate that a package must not be in the image, prefix its
+ name with an exclamation point ('!').
+
+ If a test depends on pkg1 or pkg2, use:
+ @OEHasPackage({'pkg1', 'pkg2'})
+
+ If a test depends on both pkg1 and pkg2, use:
+ @OEHasPackage('pkg1')
+ @OEHasPackage('pkg2')
+
+ If a test depends on pkg1 but pkg2 must not be present, use:
+ @OEHasPackage({'pkg1', '!pkg2'})
+ """
+
+ attrs = ('need_pkgs',)
+
+ def setUpDecorator(self):
+ need_pkgs = set()
+ unneed_pkgs = set()
+ pkgs = strToSet(self.need_pkgs)
+ for pkg in pkgs:
+ if pkg.startswith('!'):
+ unneed_pkgs.add(pkg[1:])
+ else:
+ need_pkgs.add(pkg)
+
+ if unneed_pkgs:
+ msg = 'Checking if %s is not installed' % ', '.join(unneed_pkgs)
+ self.logger.debug(msg)
+ if not self.case.tc.image_packages.isdisjoint(unneed_pkgs):
+ msg = "Test can't run with %s installed" % ', or'.join(unneed_pkgs)
+ self.case.skipTest(msg)
+
+ if need_pkgs:
+ msg = 'Checking if at least one of %s is installed' % ', '.join(need_pkgs)
+ self.logger.debug(msg)
+ if self.case.tc.image_packages.isdisjoint(need_pkgs):
+ msg = "Test requires %s to be installed" % ', or'.join(need_pkgs)
+ self.case.skipTest(msg)
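The skip decision above reduces to two set-disjointness checks against the image manifest; the same logic in isolation (package names are illustrative):

    image_packages = {'avahi-daemon', 'busybox'}  # illustrative manifest
    need_pkgs = {'avahi-daemon'}                  # at least one must be present
    unneed_pkgs = {'connman'}                     # none may be present

    skip = (not image_packages.isdisjoint(unneed_pkgs)
            or image_packages.isdisjoint(need_pkgs))
    assert not skip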
diff --git a/meta/lib/oeqa/runtime/df.py b/meta/lib/oeqa/runtime/df.py
deleted file mode 100644
index 09569d5ff6..0000000000
--- a/meta/lib/oeqa/runtime/df.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import unittest
-from oeqa.oetest import oeRuntimeTest
-from oeqa.utils.decorators import *
-
-
-class DfTest(oeRuntimeTest):
-
- @testcase(234)
- @skipUnlessPassed("test_ssh")
- def test_df(self):
- (status,output) = self.target.run("df / | sed -n '2p' | awk '{print $4}'")
- self.assertTrue(int(output)>5120, msg="Not enough space on image. Current size is %s" % output)
diff --git a/meta/lib/oeqa/runtime/files/SConstruct b/meta/lib/oeqa/runtime/files/SConstruct
new file mode 100644
index 0000000000..d2cb6dd122
--- /dev/null
+++ b/meta/lib/oeqa/runtime/files/SConstruct
@@ -0,0 +1 @@
+Program('hello.c')
diff --git a/meta/lib/oeqa/runtime/files/hello.c b/meta/lib/oeqa/runtime/files/hello.c
new file mode 100644
index 0000000000..b0697a3304
--- /dev/null
+++ b/meta/lib/oeqa/runtime/files/hello.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int
+main()
+{
+    printf("Hello, world!\n");
+}
diff --git a/meta/lib/oeqa/runtime/files/hello.stp b/meta/lib/oeqa/runtime/files/hello.stp
new file mode 100644
index 0000000000..3677147162
--- /dev/null
+++ b/meta/lib/oeqa/runtime/files/hello.stp
@@ -0,0 +1 @@
+probe oneshot { println("hello world") }
diff --git a/meta/lib/oeqa/runtime/files/test.pl b/meta/lib/oeqa/runtime/files/test.pl
deleted file mode 100644
index 689c8f1635..0000000000
--- a/meta/lib/oeqa/runtime/files/test.pl
+++ /dev/null
@@ -1,2 +0,0 @@
-$a = 9.01e+21 - 9.01e+21 + 0.01;
-print ("the value of a is ", $a, "\n");
diff --git a/meta/lib/oeqa/runtime/files/test.py b/meta/lib/oeqa/runtime/files/test.py
deleted file mode 100644
index f389225d72..0000000000
--- a/meta/lib/oeqa/runtime/files/test.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import os
-
-os.system('touch /tmp/testfile.python')
-
-a = 9.01e+21 - 9.01e+21 + 0.01
-print("the value of a is %s" % a)
diff --git a/meta/lib/oeqa/runtime/gcc.py b/meta/lib/oeqa/runtime/gcc.py
deleted file mode 100644
index d90cd1799a..0000000000
--- a/meta/lib/oeqa/runtime/gcc.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import unittest
-import os
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("tools-sdk"):
- skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
-
-
-class GccCompileTest(oeRuntimeTest):
-
- @classmethod
- def setUpClass(self):
- oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.c"), "/tmp/test.c")
- oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "testmakefile"), "/tmp/testmakefile")
- oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.cpp"), "/tmp/test.cpp")
-
- @testcase(203)
- def test_gcc_compile(self):
- (status, output) = self.target.run('gcc /tmp/test.c -o /tmp/test -lm')
- self.assertEqual(status, 0, msg="gcc compile failed, output: %s" % output)
- (status, output) = self.target.run('/tmp/test')
- self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)
-
- @testcase(200)
- def test_gpp_compile(self):
- (status, output) = self.target.run('g++ /tmp/test.c -o /tmp/test -lm')
- self.assertEqual(status, 0, msg="g++ compile failed, output: %s" % output)
- (status, output) = self.target.run('/tmp/test')
- self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)
-
- @testcase(1142)
- def test_gpp2_compile(self):
- (status, output) = self.target.run('g++ /tmp/test.cpp -o /tmp/test -lm')
- self.assertEqual(status, 0, msg="g++ compile failed, output: %s" % output)
- (status, output) = self.target.run('/tmp/test')
- self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)
-
- @testcase(204)
- def test_make(self):
- (status, output) = self.target.run('cd /tmp; make -f testmakefile')
- self.assertEqual(status, 0, msg="running make failed, output %s" % output)
-
- @classmethod
- def tearDownClass(self):
- oeRuntimeTest.tc.target.run("rm /tmp/test.c /tmp/test.o /tmp/test /tmp/testmakefile")
diff --git a/meta/lib/oeqa/runtime/kernelmodule.py b/meta/lib/oeqa/runtime/kernelmodule.py
deleted file mode 100644
index 38ca184540..0000000000
--- a/meta/lib/oeqa/runtime/kernelmodule.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import unittest
-import os
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("tools-sdk"):
- skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
-
-
-class KernelModuleTest(oeRuntimeTest):
-
- def setUpLocal(self):
- self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod.c"), "/tmp/hellomod.c")
- self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod_makefile"), "/tmp/Makefile")
-
- @testcase('316')
- @skipUnlessPassed('test_ssh')
- @skipUnlessPassed('test_gcc_compile')
- def test_kernel_module(self):
- cmds = [
- 'cd /usr/src/kernel && make scripts',
- 'cd /tmp && make',
- 'cd /tmp && insmod hellomod.ko',
- 'lsmod | grep hellomod',
- 'dmesg | grep Hello',
- 'rmmod hellomod', 'dmesg | grep "Cleaning up hellomod"'
- ]
- for cmd in cmds:
- (status, output) = self.target.run(cmd, 900)
- self.assertEqual(status, 0, msg="\n".join([cmd, output]))
-
- def tearDownLocal(self):
- self.target.run('rm -f /tmp/Makefile /tmp/hellomod.c')
diff --git a/meta/lib/oeqa/runtime/ldd.py b/meta/lib/oeqa/runtime/ldd.py
deleted file mode 100644
index 47b3885df2..0000000000
--- a/meta/lib/oeqa/runtime/ldd.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import unittest
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("tools-sdk"):
- skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
-
-class LddTest(oeRuntimeTest):
-
- @testcase(962)
- @skipUnlessPassed('test_ssh')
- def test_ldd_exists(self):
- (status, output) = self.target.run('which ldd')
- self.assertEqual(status, 0, msg = "ldd does not exist in PATH: which ldd: %s" % output)
-
- @testcase(239)
- @skipUnlessPassed('test_ldd_exists')
- def test_ldd_rtldlist_check(self):
- (status, output) = self.target.run('for i in $(which ldd | xargs cat | grep "^RTLDLIST"|cut -d\'=\' -f2|tr -d \'"\'); do test -f $i && echo $i && break; done')
- self.assertEqual(status, 0, msg = "ldd path not correct or RTLDLIST files don't exist. ")
diff --git a/meta/lib/oeqa/runtime/loader.py b/meta/lib/oeqa/runtime/loader.py
new file mode 100644
index 0000000000..7041ddfde8
--- /dev/null
+++ b/meta/lib/oeqa/runtime/loader.py
@@ -0,0 +1,19 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.core.loader import OETestLoader
+from oeqa.runtime.case import OERuntimeTestCase
+
+class OERuntimeTestLoader(OETestLoader):
+ caseClass = OERuntimeTestCase
+
+ def _getTestCase(self, testCaseClass, tcName):
+ case = super(OERuntimeTestLoader, self)._getTestCase(testCaseClass, tcName)
+
+ # Adds custom attributes to the OERuntimeTestCase
+ setattr(case, 'target', self.tc.target)
+
+ return case
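Because the loader attaches the target to every case it creates, test methods can use self.target directly; a minimal sketch of a case relying on that (hypothetical test, following the same conventions as the cases above):

    from oeqa.runtime.case import OERuntimeTestCase

    class ExampleTest(OERuntimeTestCase):

        def test_uname(self):
            # self.target was set by OERuntimeTestLoader._getTestCase().
            status, output = self.target.run('uname -a')
            self.assertEqual(status, 0, msg=output)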
diff --git a/meta/lib/oeqa/runtime/logrotate.py b/meta/lib/oeqa/runtime/logrotate.py
deleted file mode 100644
index de300bf55f..0000000000
--- a/meta/lib/oeqa/runtime/logrotate.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289 testcase
-# Note that the image under test must have logrotate installed
-
-import unittest
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasPackage("logrotate"):
- skipModule("No logrotate package in image")
-
-
-class LogrotateTest(oeRuntimeTest):
-
- @skipUnlessPassed("test_ssh")
- def test_1_logrotate_setup(self):
- (status, output) = self.target.run('mkdir $HOME/logrotate_dir')
- self.assertEqual(status, 0, msg = "Could not create logrotate_dir. Output: %s" % output)
- (status, output) = self.target.run("sed -i \"s#wtmp {#wtmp {\\n olddir $HOME/logrotate_dir#\" /etc/logrotate.conf")
- self.assertEqual(status, 0, msg = "Could not write to logrotate.conf file. Status and output: %s and %s)" % (status, output))
-
- @testcase(289)
- @skipUnlessPassed("test_1_logrotate_setup")
- def test_2_logrotate(self):
- (status, output) = self.target.run('logrotate -f /etc/logrotate.conf')
- self.assertEqual(status, 0, msg = "logrotate service could not be reloaded. Status and output: %s and %s" % (status, output))
- output = self.target.run('ls -la $HOME/logrotate_dir/ | wc -l')[1]
- self.assertTrue(int(output)>=3, msg = "new logfile could not be created. List of files within log directory: %s" %(self.target.run('ls -la $HOME/logrotate_dir')[1]))
diff --git a/meta/lib/oeqa/runtime/multilib.py b/meta/lib/oeqa/runtime/multilib.py
deleted file mode 100644
index 593d385021..0000000000
--- a/meta/lib/oeqa/runtime/multilib.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import unittest
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- multilibs = oeRuntimeTest.tc.d.getVar("MULTILIBS", True) or ""
- if "multilib:lib32" not in multilibs:
- skipModule("this isn't a multilib:lib32 image")
-
-
-class MultilibTest(oeRuntimeTest):
-
- def archtest(self, binary, arch):
- """
- Check that ``binary`` has the ELF class ``arch`` (e.g. ELF32/ELF64).
- """
-
- (status, output) = self.target.run("readelf -h %s" % binary)
- self.assertEqual(status, 0, "Failed to readelf %s" % binary)
-
- l = [l.split()[1] for l in output.split('\n') if "Class:" in l]
- if l:
- theclass = l[0]
- else:
- self.fail("Cannot parse readelf output\n" + s)
-
- self.assertEqual(theclass, arch, msg="%s isn't %s (is %s)" % (binary, arch, theclass))
-
- @skipUnlessPassed('test_ssh')
- def test_check_multilib_libc(self):
- """
- Check that a multilib image has both 32-bit and 64-bit libc in.
- """
- self.archtest("/lib/libc.so.6", "ELF32")
- self.archtest("/lib64/libc.so.6", "ELF64")
-
- @testcase('279')
- @skipUnlessPassed('test_check_multilib_libc')
- def test_file_connman(self):
- self.assertTrue(oeRuntimeTest.hasPackage('lib32-connman'), msg="This test assumes lib32-connman is installed")
-
- self.archtest("/usr/sbin/connmand", "ELF32")
diff --git a/meta/lib/oeqa/runtime/pam.py b/meta/lib/oeqa/runtime/pam.py
deleted file mode 100644
index c8205c9abc..0000000000
--- a/meta/lib/oeqa/runtime/pam.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=287 testcase
-# Note that the image under test must have "pam" in DISTRO_FEATURES
-
-import unittest
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("pam"):
- skipModule("target doesn't have 'pam' in DISTRO_FEATURES")
-
-
-class PamBasicTest(oeRuntimeTest):
-
- @testcase(287)
- @skipUnlessPassed('test_ssh')
- def test_pam(self):
- (status, output) = self.target.run('login --help')
- self.assertEqual(status, 1, msg = "login command does not work as expected. Status and output:%s and %s" %(status, output))
- (status, output) = self.target.run('passwd --help')
- self.assertEqual(status, 0, msg = "passwd command does not work as expected. Status and output:%s and %s" %(status, output))
- (status, output) = self.target.run('su --help')
- self.assertEqual(status, 0, msg = "su command does not work as expected. Status and output:%s and %s" %(status, output))
- (status, output) = self.target.run('useradd --help')
- self.assertEqual(status, 0, msg = "useradd command does not work as expected. Status and output:%s and %s" %(status, output))
diff --git a/meta/lib/oeqa/runtime/perl.py b/meta/lib/oeqa/runtime/perl.py
deleted file mode 100644
index e044d0a5fe..0000000000
--- a/meta/lib/oeqa/runtime/perl.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import unittest
-import os
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasPackage("perl"):
- skipModule("No perl package in the image")
-
-
-class PerlTest(oeRuntimeTest):
-
- @classmethod
- def setUpClass(self):
- oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.pl"), "/tmp/test.pl")
-
- @testcase(1141)
- def test_perl_exists(self):
- (status, output) = self.target.run('which perl')
- self.assertEqual(status, 0, msg="Perl binary not in PATH or not on target.")
-
- @testcase(208)
- def test_perl_works(self):
- (status, output) = self.target.run('perl /tmp/test.pl')
- self.assertEqual(status, 0, msg="Exit status was not 0. Output: %s" % output)
- self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output)
-
- @classmethod
- def tearDownClass(self):
- oeRuntimeTest.tc.target.run("rm /tmp/test.pl")
diff --git a/meta/lib/oeqa/runtime/ping.py b/meta/lib/oeqa/runtime/ping.py
deleted file mode 100644
index 0f27447926..0000000000
--- a/meta/lib/oeqa/runtime/ping.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import subprocess
-import unittest
-import sys
-import time
-from oeqa.oetest import oeRuntimeTest
-from oeqa.utils.decorators import *
-
-class PingTest(oeRuntimeTest):
-
- @testcase(964)
- def test_ping(self):
- output = ''
- count = 0
- endtime = time.time() + 60
- while count < 5 and time.time() < endtime:
- proc = subprocess.Popen("ping -c 1 %s" % self.target.ip, shell=True, stdout=subprocess.PIPE)
- output += proc.communicate()[0].decode("utf-8")
- if proc.poll() == 0:
- count += 1
- else:
- count = 0
- self.assertEqual(count, 5, msg = "Expected 5 consecutive replies, got %d.\nping output is:\n%s" % (count,output))
diff --git a/meta/lib/oeqa/runtime/python.py b/meta/lib/oeqa/runtime/python.py
deleted file mode 100644
index 29a231c7c3..0000000000
--- a/meta/lib/oeqa/runtime/python.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import unittest
-import os
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasPackage("python-core"):
- skipModule("No python package in the image")
-
-
-class PythonTest(oeRuntimeTest):
-
- @classmethod
- def setUpClass(self):
- oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.py"), "/tmp/test.py")
-
- @testcase(1145)
- def test_python_exists(self):
- (status, output) = self.target.run('which python')
- self.assertEqual(status, 0, msg="Python binary not in PATH or not on target.")
-
- @testcase(965)
- def test_python_stdout(self):
- (status, output) = self.target.run('python /tmp/test.py')
- self.assertEqual(status, 0, msg="Exit status was not 0. Output: %s" % output)
- self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output)
-
- @testcase(1146)
- def test_python_testfile(self):
- (status, output) = self.target.run('ls /tmp/testfile.python')
- self.assertEqual(status, 0, msg="Python test file generate failed.")
-
- @classmethod
- def tearDownClass(self):
- oeRuntimeTest.tc.target.run("rm /tmp/test.py /tmp/testfile.python")
diff --git a/meta/lib/oeqa/runtime/rpm.py b/meta/lib/oeqa/runtime/rpm.py
deleted file mode 100644
index 7f514ca00c..0000000000
--- a/meta/lib/oeqa/runtime/rpm.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import unittest
-import os
-import fnmatch
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("package-management"):
- skipModule("rpm module skipped: target doesn't have package-management in IMAGE_FEATURES")
- if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
- skipModule("rpm module skipped: target doesn't have rpm as primary package manager")
-
-
-class RpmBasicTest(oeRuntimeTest):
-
- @testcase(960)
- @skipUnlessPassed('test_ssh')
- def test_rpm_help(self):
- (status, output) = self.target.run('rpm --help')
- self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
-
- @testcase(191)
- @skipUnlessPassed('test_rpm_help')
- def test_rpm_query(self):
- (status, output) = self.target.run('rpm -q rpm')
- self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
-
-class RpmInstallRemoveTest(oeRuntimeTest):
-
- @classmethod
- def setUpClass(self):
- pkgarch = oeRuntimeTest.tc.d.getVar('TUNE_PKGARCH', True).replace("-", "_")
- rpmdir = os.path.join(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), "rpm", pkgarch)
- # pick rpm-doc as a test file to get installed, because it's small and it will always be built for standard targets
- for f in fnmatch.filter(os.listdir(rpmdir), "rpm-doc-*.%s.rpm" % pkgarch):
- testrpmfile = f
- oeRuntimeTest.tc.target.copy_to(os.path.join(rpmdir,testrpmfile), "/tmp/rpm-doc.rpm")
-
- @testcase(192)
- @skipUnlessPassed('test_rpm_help')
- def test_rpm_install(self):
- (status, output) = self.target.run('rpm -ivh /tmp/rpm-doc.rpm')
- self.assertEqual(status, 0, msg="Failed to install rpm-doc package: %s" % output)
-
- @testcase(194)
- @skipUnlessPassed('test_rpm_install')
- def test_rpm_remove(self):
- (status,output) = self.target.run('rpm -e rpm-doc')
- self.assertEqual(status, 0, msg="Failed to remove rpm-doc package: %s" % output)
-
- @testcase(1096)
- @skipUnlessPassed('test_ssh')
- def test_rpm_query_nonroot(self):
-
- def set_up_test_user(u):
- (status, output) = self.target.run("id -u %s" % u)
- if status == 0:
- pass
- else:
- (status, output) = self.target.run("useradd %s" % u)
- self.assertTrue(status == 0, msg="Failed to create new user: " + output)
-
- def exec_as_test_user(u):
- (status, output) = self.target.run("su -c id %s" % u)
- self.assertTrue("({0})".format(u) in output, msg="Failed to execute as new user")
- (status, output) = self.target.run("su -c \"rpm -qa\" %s " % u)
- self.assertEqual(status, 0, msg="status: %s. Cannot run rpm -qa: %s" % (status, output))
-
- def unset_up_test_user(u):
- (status, output) = self.target.run("userdel -r %s" % u)
- self.assertTrue(status == 0, msg="Failed to erase user: %s" % output)
-
- tuser = 'test1'
-
- try:
- set_up_test_user(tuser)
- exec_as_test_user(tuser)
- finally:
- unset_up_test_user(tuser)
-
- @testcase(195)
- @skipUnlessPassed('test_rpm_install')
- def test_check_rpm_install_removal_log_file_size(self):
- """
- Summary: Check rpm install/removal log file size
- Expected: There should be some method to keep rpm log in a small size .
- Product: BSPs
- Author: Alexandru Georgescu <alexandru.c.georgescu@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
- db_files_cmd = 'ls /var/lib/rpm/__db.*'
- get_log_size_cmd = "du /var/lib/rpm/log/log.* | awk '{print $1}'"
-
- # Make sure that some database files are under /var/lib/rpm as '__db.xxx'
- (status, output) = self.target.run(db_files_cmd)
- self.assertEqual(0, status, 'Failed to find database files under /var/lib/rpm/ as __db.xxx')
-
- # Remove the package just in case
- self.target.run('rpm -e rpm-doc')
-
- # Install/Remove a package 10 times
- for i in range(10):
- (status, output) = self.target.run('rpm -ivh /tmp/rpm-doc.rpm')
- self.assertEqual(0, status, "Failed to install rpm-doc package. Reason: {}".format(output))
-
- (status, output) = self.target.run('rpm -e rpm-doc')
- self.assertEqual(0, status, "Failed to remove rpm-doc package. Reason: {}".format(output))
-
- # Get the size of log file
- (status, output) = self.target.run(get_log_size_cmd)
- self.assertEqual(0, status, 'Failed to get the final size of the log file.')
-
- # Compare each log size
- for log_file_size in output:
- self.assertLessEqual(int(log_file_size), 11264,
- 'Log file size is greater that expected (~10MB), found {} bytes'.format(log_file_size))
-
- @classmethod
- def tearDownClass(self):
- oeRuntimeTest.tc.target.run('rm -f /tmp/rpm-doc.rpm')
diff --git a/meta/lib/oeqa/runtime/scanelf.py b/meta/lib/oeqa/runtime/scanelf.py
deleted file mode 100644
index 67e02ff455..0000000000
--- a/meta/lib/oeqa/runtime/scanelf.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import unittest
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasPackage("pax-utils"):
- skipModule("pax-utils package not installed")
-
-class ScanelfTest(oeRuntimeTest):
-
- def setUpLocal(self):
- self.scancmd = 'scanelf --quiet --recursive --mount --ldpath --path'
-
- @testcase(966)
- @skipUnlessPassed('test_ssh')
- def test_scanelf_textrel(self):
- # print TEXTREL information
- self.scancmd += " --textrel"
- (status, output) = self.target.run(self.scancmd)
- self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output]))
-
- @testcase(967)
- @skipUnlessPassed('test_ssh')
- def test_scanelf_rpath(self):
- # print RPATH information
- self.scancmd += " --rpath"
- (status, output) = self.target.run(self.scancmd)
- self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output]))
diff --git a/meta/lib/oeqa/runtime/scp.py b/meta/lib/oeqa/runtime/scp.py
deleted file mode 100644
index 48e87d2d0b..0000000000
--- a/meta/lib/oeqa/runtime/scp.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import os
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import skipUnlessPassed, testcase
-
-def setUpModule():
- if not (oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh-sshd")):
- skipModule("No ssh package in image")
-
-class ScpTest(oeRuntimeTest):
-
- @testcase(220)
- @skipUnlessPassed('test_ssh')
- def test_scp_file(self):
- test_log_dir = oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR", True)
- test_file_path = os.path.join(test_log_dir, 'test_scp_file')
- with open(test_file_path, 'w') as test_scp_file:
- test_scp_file.seek(2 ** 22 - 1)
- test_scp_file.write(os.linesep)
- (status, output) = self.target.copy_to(test_file_path, '/tmp/test_scp_file')
- self.assertEqual(status, 0, msg = "File could not be copied. Output: %s" % output)
- (status, output) = self.target.run("ls -la /tmp/test_scp_file")
- self.assertEqual(status, 0, msg = "SCP test failed")
diff --git a/meta/lib/oeqa/runtime/skeletoninit.py b/meta/lib/oeqa/runtime/skeletoninit.py
deleted file mode 100644
index cb0cb9b4cf..0000000000
--- a/meta/lib/oeqa/runtime/skeletoninit.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284 testcase
-# Note that the image under test must have meta-skeleton layer in bblayers and IMAGE_INSTALL_append = " service" in local.conf
-
-import unittest
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasPackage("service"):
- skipModule("No service package in image")
-
-
-class SkeletonBasicTest(oeRuntimeTest):
-
- @skipUnlessPassed('test_ssh')
- @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", False), "Not appropiate for systemd image")
- def test_skeleton_availability(self):
- (status, output) = self.target.run('ls /etc/init.d/skeleton')
- self.assertEqual(status, 0, msg = "skeleton init script not found. Output:\n%s " % output)
- (status, output) = self.target.run('ls /usr/sbin/skeleton-test')
- self.assertEqual(status, 0, msg = "skeleton-test not found. Output:\n%s" % output)
-
- @testcase(284)
- @skipUnlessPassed('test_skeleton_availability')
- @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", False), "Not appropiate for systemd image")
- def test_skeleton_script(self):
- output1 = self.target.run("/etc/init.d/skeleton start")[1]
- (status, output2) = self.target.run(oeRuntimeTest.pscmd + ' | grep [s]keleton-test')
- self.assertEqual(status, 0, msg = "Skeleton script could not be started:\n%s\n%s" % (output1, output2))
diff --git a/meta/lib/oeqa/runtime/smart.py b/meta/lib/oeqa/runtime/smart.py
deleted file mode 100644
index 6cdb10d631..0000000000
--- a/meta/lib/oeqa/runtime/smart.py
+++ /dev/null
@@ -1,218 +0,0 @@
-import unittest
-import re
-import oe
-import subprocess
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-from oeqa.utils.httpserver import HTTPService
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("package-management"):
- skipModule("Image doesn't have package management feature")
- if not oeRuntimeTest.hasPackage("smartpm"):
- skipModule("Image doesn't have smart installed")
- if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
- skipModule("Rpm is not the primary package manager")
-
-class SmartTest(oeRuntimeTest):
-
- @skipUnlessPassed('test_smart_help')
- def smart(self, command, expected = 0):
- command = 'smart %s' % command
- status, output = self.target.run(command, 1500)
- message = os.linesep.join([command, output])
- self.assertEqual(status, expected, message)
- self.assertFalse("Cannot allocate memory" in output, message)
- return output
-
-class SmartBasicTest(SmartTest):
-
- @testcase(716)
- @skipUnlessPassed('test_ssh')
- def test_smart_help(self):
- self.smart('--help')
-
- @testcase(968)
- def test_smart_version(self):
- self.smart('--version')
-
- @testcase(721)
- def test_smart_info(self):
- self.smart('info python-smartpm')
-
- @testcase(421)
- def test_smart_query(self):
- self.smart('query python-smartpm')
-
- @testcase(720)
- def test_smart_search(self):
- self.smart('search python-smartpm')
-
- @testcase(722)
- def test_smart_stats(self):
- self.smart('stats')
-
-class SmartRepoTest(SmartTest):
-
- @classmethod
- def create_index(self, arg):
- index_cmd = arg
- try:
- bb.note("Executing '%s' ..." % index_cmd)
- result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
- return("Index creation command '%s' failed with return code %d:\n%s" %
- (e.cmd, e.returncode, e.output.decode("utf-8")))
- if result:
- bb.note(result)
- return None
-
- @classmethod
- def setUpClass(self):
- self.repolist = []
-
- # Index RPMs
- rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
- index_cmds = []
- rpm_dirs_found = False
- archs = (oeRuntimeTest.tc.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
- for arch in archs:
- rpm_dir = os.path.join(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR_RPM', True), arch)
- idx_path = os.path.join(oeRuntimeTest.tc.d.getVar('WORKDIR', True), 'rpm', arch)
- db_path = os.path.join(oeRuntimeTest.tc.d.getVar('WORKDIR', True), 'rpmdb', arch)
- if not os.path.isdir(rpm_dir):
- continue
- if os.path.exists(db_path):
- bb.utils.remove(dbpath, True)
- lockfilename = oeRuntimeTest.tc.d.getVar('DEPLOY_DIR_RPM', True) + "/rpm.lock"
- lf = bb.utils.lockfile(lockfilename, False)
- oe.path.copyhardlinktree(rpm_dir, idx_path)
- # Full indexes overload a 256MB image so reduce the number of rpms
- # in the feed. Filter to p* since we use the psplash packages and
- # this leaves some allarch and machine arch packages too.
- bb.utils.remove(idx_path + "*/[a-oq-z]*.rpm")
- bb.utils.unlockfile(lf)
- index_cmds.append("%s --dbpath %s --update -q %s" % (rpm_createrepo, db_path, idx_path))
- rpm_dirs_found = True
- # Create repodata¬
- result = oe.utils.multiprocess_exec(index_cmds, self.create_index)
- if result:
- bb.fatal('%s' % ('\n'.join(result)))
- self.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('WORKDIR', True), oeRuntimeTest.tc.target.server_ip)
- self.repo_server.start()
-
- @classmethod
- def tearDownClass(self):
- self.repo_server.stop()
- for i in self.repolist:
- oeRuntimeTest.tc.target.run('smart channel -y --remove '+str(i))
-
- @testcase(1143)
- def test_smart_channel(self):
- self.smart('channel', 1)
-
- @testcase(719)
- def test_smart_channel_add(self):
- image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
- deploy_url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, image_pkgtype)
- pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split()
- for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)):
- if arch in pkgarchs:
- self.smart('channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url))
- self.repolist.append(arch)
- self.smart('update')
-
- @testcase(969)
- def test_smart_channel_help(self):
- self.smart('channel --help')
-
- @testcase(970)
- def test_smart_channel_list(self):
- self.smart('channel --list')
-
- @testcase(971)
- def test_smart_channel_show(self):
- self.smart('channel --show')
-
- @testcase(717)
- def test_smart_channel_rpmsys(self):
- self.smart('channel --show rpmsys')
- self.smart('channel --disable rpmsys')
- self.smart('channel --enable rpmsys')
-
- @testcase(1144)
- @skipUnlessPassed('test_smart_channel_add')
- def test_smart_install(self):
- self.smart('remove -y psplash-default')
- self.smart('install -y psplash-default')
-
- @testcase(728)
- @skipUnlessPassed('test_smart_install')
- def test_smart_install_dependency(self):
- self.smart('remove -y psplash')
- self.smart('install -y psplash-default')
-
- @testcase(723)
- @skipUnlessPassed('test_smart_channel_add')
- def test_smart_install_from_disk(self):
- self.smart('remove -y psplash-default')
- self.smart('download psplash-default')
- self.smart('install -y ./psplash-default*')
-
- @testcase(725)
- @skipUnlessPassed('test_smart_channel_add')
- def test_smart_install_from_http(self):
- output = self.smart('download --urls psplash-default')
- url = re.search('(http://.*/psplash-default.*\.rpm)', output)
- self.assertTrue(url, msg="Couln't find download url in %s" % output)
- self.smart('remove -y psplash-default')
- self.smart('install -y %s' % url.group(0))
-
- @testcase(729)
- @skipUnlessPassed('test_smart_install')
- def test_smart_reinstall(self):
- self.smart('reinstall -y psplash-default')
-
- @testcase(727)
- @skipUnlessPassed('test_smart_channel_add')
- def test_smart_remote_repo(self):
- self.smart('update')
- self.smart('install -y psplash')
- self.smart('remove -y psplash')
-
- @testcase(726)
- def test_smart_local_dir(self):
- self.target.run('mkdir /tmp/myrpmdir')
- self.smart('channel --add myrpmdir type=rpm-dir path=/tmp/myrpmdir -y')
- self.target.run('cd /tmp/myrpmdir')
- self.smart('download psplash')
- output = self.smart('channel --list')
- for i in output.split("\n"):
- if ("rpmsys" != str(i)) and ("myrpmdir" != str(i)):
- self.smart('channel --disable '+str(i))
- self.target.run('cd $HOME')
- self.smart('install psplash')
- for i in output.split("\n"):
- if ("rpmsys" != str(i)) and ("myrpmdir" != str(i)):
- self.smart('channel --enable '+str(i))
- self.smart('channel --remove myrpmdir -y')
- self.target.run("rm -rf /tmp/myrpmdir")
-
- @testcase(718)
- def test_smart_add_rpmdir(self):
- self.target.run('mkdir /tmp/myrpmdir')
- self.smart('channel --add myrpmdir type=rpm-dir path=/tmp/myrpmdir -y')
- self.smart('channel --disable myrpmdir -y')
- output = self.smart('channel --show myrpmdir')
- self.assertTrue("disabled = yes" in output, msg="Failed to disable rpm dir")
- self.smart('channel --enable myrpmdir -y')
- output = self.smart('channel --show myrpmdir')
- self.assertFalse("disabled = yes" in output, msg="Failed to enable rpm dir")
- self.smart('channel --remove myrpmdir -y')
- self.target.run("rm -rf /tmp/myrpmdir")
-
- @testcase(731)
- @skipUnlessPassed('test_smart_channel_add')
- def test_smart_remove_package(self):
- self.smart('install -y psplash')
- self.smart('remove -y psplash')
diff --git a/meta/lib/oeqa/runtime/ssh.py b/meta/lib/oeqa/runtime/ssh.py
deleted file mode 100644
index 0e76d5d512..0000000000
--- a/meta/lib/oeqa/runtime/ssh.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import subprocess
-import unittest
-import sys
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not (oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh")):
- skipModule("No ssh package in image")
-
-class SshTest(oeRuntimeTest):
-
- @testcase(224)
- @skipUnlessPassed('test_ping')
- def test_ssh(self):
- (status, output) = self.target.run('uname -a')
- self.assertEqual(status, 0, msg="SSH Test failed: %s" % output)
- (status, output) = self.target.run('cat /etc/masterimage')
- self.assertEqual(status, 1, msg="This isn't the right image - /etc/masterimage shouldn't be here %s" % output)
diff --git a/meta/lib/oeqa/runtime/syslog.py b/meta/lib/oeqa/runtime/syslog.py
deleted file mode 100644
index 8f550329e7..0000000000
--- a/meta/lib/oeqa/runtime/syslog.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import unittest
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not (oeRuntimeTest.hasPackage("busybox-syslog") or oeRuntimeTest.hasPackage("sysklogd")):
- skipModule("No syslog package in image")
-
-class SyslogTest(oeRuntimeTest):
-
- @testcase(201)
- def test_syslog_running(self):
- (status,output) = self.target.run(oeRuntimeTest.pscmd + ' | grep -i [s]yslogd')
- self.assertEqual(status, 0, msg="no syslogd process, ps output: %s" % self.target.run(oeRuntimeTest.pscmd)[1])
-
-class SyslogTestConfig(oeRuntimeTest):
-
- @testcase(1149)
- @skipUnlessPassed("test_syslog_running")
- def test_syslog_logger(self):
- (status, output) = self.target.run('logger foobar')
- self.assertEqual(status, 0, msg="Can't log into syslog. Output: %s " % output)
-
- (status, output) = self.target.run('grep foobar /var/log/messages')
- if status != 0:
- if oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", "") == "systemd":
- (status, output) = self.target.run('journalctl -o cat | grep foobar')
- else:
- (status, output) = self.target.run('logread | grep foobar')
- self.assertEqual(status, 0, msg="Test log string not found in /var/log/messages or logread. Output: %s " % output)
-
- @testcase(1150)
- @skipUnlessPassed("test_syslog_running")
- def test_syslog_restart(self):
- if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", False):
- (status,output) = self.target.run('/etc/init.d/syslog restart')
- else:
- (status,output) = self.target.run('systemctl restart syslog.service')
-
- @testcase(202)
- @skipUnlessPassed("test_syslog_restart")
- @skipUnlessPassed("test_syslog_logger")
- @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", False), "Not appropiate for systemd image")
- @unittest.skipIf(oeRuntimeTest.hasPackage("sysklogd") or not oeRuntimeTest.hasPackage("busybox"), "Non-busybox syslog")
- def test_syslog_startup_config(self):
- self.target.run('echo "LOGFILE=/var/log/test" >> /etc/syslog-startup.conf')
- (status,output) = self.target.run('/etc/init.d/syslog restart')
- self.assertEqual(status, 0, msg="Could not restart syslog service. Status and output: %s and %s" % (status,output))
- (status,output) = self.target.run('logger foobar && grep foobar /var/log/test')
- self.assertEqual(status, 0, msg="Test log string not found. Output: %s " % output)
- self.target.run("sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf")
- self.target.run('/etc/init.d/syslog restart')
diff --git a/meta/lib/oeqa/runtime/systemd.py b/meta/lib/oeqa/runtime/systemd.py
deleted file mode 100644
index 8de799cd63..0000000000
--- a/meta/lib/oeqa/runtime/systemd.py
+++ /dev/null
@@ -1,178 +0,0 @@
-import unittest
-import re
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("systemd"):
- skipModule("target doesn't have systemd in DISTRO_FEATURES")
- if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True):
- skipModule("systemd is not the init manager for this image")
-
-
-class SystemdTest(oeRuntimeTest):
-
- def systemctl(self, action = '', target = '', expected = 0, verbose = False):
- command = 'systemctl %s %s' % (action, target)
- status, output = self.target.run(command)
- message = '\n'.join([command, output])
- if status != expected and verbose:
- message += self.target.run('systemctl status --full %s' % target)[1]
- self.assertEqual(status, expected, message)
- return output
-
- #TODO: use pyjournalctl instead
- def journalctl(self, args='',l_match_units=[]):
- """
- Request for the journalctl output to the current target system
-
- Arguments:
- -args, an optional argument pass through argument
- -l_match_units, an optional list of units to filter the output
- Returns:
- -string output of the journalctl command
- Raises:
- -AssertionError, on remote commands that fail
- -ValueError, on a journalctl call with filtering by l_match_units that
- returned no entries
- """
- query_units=""
- if len(l_match_units):
- query_units = ['_SYSTEMD_UNIT='+unit for unit in l_match_units]
- query_units = " ".join(query_units)
- command = 'journalctl %s %s' %(args, query_units)
- status, output = self.target.run(command)
- if status:
- raise AssertionError("Command '%s' returned non-zero exit \
- code %d:\n%s" % (command, status, output))
- if len(output) == 1 and "-- No entries --" in output:
- raise ValueError("List of units to match: %s, returned no entries"
- % l_match_units)
- return output
-
-class SystemdBasicTests(SystemdTest):
-
- @skipUnlessPassed('test_ssh')
- def test_systemd_basic(self):
- self.systemctl('--version')
-
- @testcase(551)
- @skipUnlessPassed('test_systemd_basic')
- def test_systemd_list(self):
- self.systemctl('list-unit-files')
-
- def settle(self):
- """
- Block until systemd has finished activating any units being activated,
- or until two minutes has elapsed.
-
- Returns a tuple, either (True, '') if all units have finished
- activating, or (False, message string) if there are still units
- activating (generally, failing units that restart).
- """
- import time
- endtime = time.time() + (60 * 2)
- while True:
- status, output = self.target.run('systemctl --state=activating')
- if "0 loaded units listed" in output:
- return (True, '')
- if time.time() >= endtime:
- return (False, output)
- time.sleep(10)
-
- @testcase(550)
- @skipUnlessPassed('test_systemd_basic')
- def test_systemd_failed(self):
- settled, output = self.settle()
- self.assertTrue(settled, msg="Timed out waiting for systemd to settle:\n" + output)
-
- output = self.systemctl('list-units', '--failed')
- match = re.search("0 loaded units listed", output)
- if not match:
- output += self.systemctl('status --full --failed')
- self.assertTrue(match, msg="Some systemd units failed:\n%s" % output)
-
-
-class SystemdServiceTests(SystemdTest):
-
- def check_for_avahi(self):
- if not self.hasPackage('avahi-daemon'):
- raise unittest.SkipTest("Testcase dependency not met: need avahi-daemon installed on target")
-
- @skipUnlessPassed('test_systemd_basic')
- def test_systemd_status(self):
- self.check_for_avahi()
- self.systemctl('status --full', 'avahi-daemon.service')
-
- @testcase(695)
- @skipUnlessPassed('test_systemd_status')
- def test_systemd_stop_start(self):
- self.check_for_avahi()
- self.systemctl('stop', 'avahi-daemon.service')
- self.systemctl('is-active', 'avahi-daemon.service', expected=3, verbose=True)
- self.systemctl('start','avahi-daemon.service')
- self.systemctl('is-active', 'avahi-daemon.service', verbose=True)
-
- @testcase(696)
- @skipUnlessPassed('test_systemd_basic')
- def test_systemd_disable_enable(self):
- self.check_for_avahi()
- self.systemctl('disable', 'avahi-daemon.service')
- self.systemctl('is-enabled', 'avahi-daemon.service', expected=1)
- self.systemctl('enable', 'avahi-daemon.service')
- self.systemctl('is-enabled', 'avahi-daemon.service')
-
-class SystemdJournalTests(SystemdTest):
- @skipUnlessPassed('test_ssh')
- def test_systemd_journal(self):
- (status, output) = self.target.run('journalctl')
- self.assertEqual(status, 0, output)
-
- @skipUnlessPassed('test_systemd_basic')
- def test_systemd_boot_time(self, systemd_TimeoutStartSec=90):
- """
- Get the target boot time from journalctl and log it
-
- Arguments:
- -systemd_TimeoutStartSec, an optional argument containing systemd's
- unit start timeout to compare against
- """
-
- # the expression chain that uniquely identifies the time boot message
- expr_items=["Startup finished","kernel", "userspace","\.$"]
- try:
- output = self.journalctl(args="-o cat --reverse")
- except AssertionError:
- self.fail("Error occurred while calling journalctl")
- if not len(output):
- self.fail("Error, unable to get startup time from systemd journal")
-
- # check for the regular expression items that match the startup time
- for line in output.split('\n'):
- check_match = "".join(re.findall(".*".join(expr_items), line))
- if check_match: break
- # put the startup time in the test log
- if check_match:
- print("%s" % check_match)
- else:
- self.skipTest("Error at obtaining the boot time from journalctl")
- boot_time_sec = 0
-
- # get the numeric values from the string and convert them to seconds
- # same data will be placed in list and string for manipulation
- l_boot_time = check_match.split(" ")[-2:]
- s_boot_time = " ".join(l_boot_time)
- try:
- # Obtain the minutes it took to boot
- if l_boot_time[0].endswith('min') and l_boot_time[0][0].isdigit():
- boot_time_min = s_boot_time.split("min")[0]
- # convert to seconds and accumulate it
- boot_time_sec += int(boot_time_min) * 60
- # Obtain the seconds it took to boot and accumulate
- boot_time_sec += float(l_boot_time[1].split("s")[0])
- except ValueError:
- self.skipTest("Error when parsing time from boot string")
- #Assert the target boot time against systemd's unit start timeout
- if boot_time_sec > systemd_TimeoutStartSec:
- print("Target boot time %s exceeds systemd's TimeoutStartSec %s"\
- %(boot_time_sec, systemd_TimeoutStartSec))
diff --git a/meta/lib/oeqa/runtime/utils/__init__.py b/meta/lib/oeqa/runtime/utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/meta/lib/oeqa/runtime/utils/__init__.py
diff --git a/meta/lib/oeqa/runtime/utils/targetbuildproject.py b/meta/lib/oeqa/runtime/utils/targetbuildproject.py
new file mode 100644
index 0000000000..f4f4816a9b
--- /dev/null
+++ b/meta/lib/oeqa/runtime/utils/targetbuildproject.py
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.utils.buildproject import BuildProject
+
+class TargetBuildProject(BuildProject):
+
+ def __init__(self, target, uri, foldername=None, dl_dir=None):
+ self.target = target
+ self.targetdir = "~/buildtest/"
+ BuildProject.__init__(self, uri, foldername, dl_dir=dl_dir)
+
+ def download_archive(self):
+ self.target.run("mkdir " + self.targetdir + " || true")
+
+ self._download_archive()
+
+ status, output = self.target.copyTo(self.localarchive, self.targetdir)
+ if status:
+ raise Exception('Failed to copy archive to target, '
+ 'output: %s' % output)
+
+ cmd = 'tar xf %s%s -C %s' % (self.targetdir,
+ self.archive,
+ self.targetdir)
+ status, output = self.target.run(cmd)
+ if status:
+ raise Exception('Failed to extract archive, '
+ 'output: %s' % output)
+
+ # Change targetdir to project folder
+ self.targetdir = self.targetdir + self.fname
+
+ # The timeout parameter of target.run is set to 0
+ # to make the ssh command run with no timeout.
+ def _run(self, cmd):
+ ret = self.target.run(cmd, 0)
+ msg = "Command %s failed with exit code %s: %s" % (cmd, ret[0], ret[1])
+ if ret[0] != 0:
+ raise Exception(msg)
+ return ret[0]
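
For illustration, a minimal sketch of how TargetBuildProject might be driven from a
runtime test case. The URL and the build command are placeholders; 'target' is an
oeqa target object providing run() and copyTo(), as used above.

    from oeqa.runtime.utils.targetbuildproject import TargetBuildProject

    def build_on_target(target, dl_dir=None):
        # The URL is illustrative only
        project = TargetBuildProject(target,
                                     "https://example.com/src/hello-1.0.tar.gz",
                                     dl_dir=dl_dir)
        project.download_archive()            # copy + unpack on the target
        # after download_archive(), targetdir points at the unpacked tree
        project._run("cd %s && make" % project.targetdir)  # raises on failure
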
diff --git a/meta/lib/oeqa/runtime/x32lib.py b/meta/lib/oeqa/runtime/x32lib.py
deleted file mode 100644
index ce5e214035..0000000000
--- a/meta/lib/oeqa/runtime/x32lib.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import unittest
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- #check if DEFAULTTUNE is set and it's value is: x86-64-x32
- defaulttune = oeRuntimeTest.tc.d.getVar("DEFAULTTUNE", True)
- if "x86-64-x32" not in defaulttune:
- skipModule("DEFAULTTUNE is not set to x86-64-x32")
-
-class X32libTest(oeRuntimeTest):
-
- @testcase(281)
- @skipUnlessPassed("test_ssh")
- def test_x32_file(self):
- status1 = self.target.run("readelf -h /bin/ls | grep Class | grep ELF32")[0]
- status2 = self.target.run("readelf -h /bin/ls | grep Machine | grep X86-64")[0]
- self.assertTrue(status1 == 0 and status2 == 0, msg="/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" % self.target.run("readelf -h /bin/ls")[1])
diff --git a/meta/lib/oeqa/runtime/xorg.py b/meta/lib/oeqa/runtime/xorg.py
deleted file mode 100644
index 12bcd371af..0000000000
--- a/meta/lib/oeqa/runtime/xorg.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import unittest
-from oeqa.oetest import oeRuntimeTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeRuntimeTest.hasFeature("x11-base"):
- skipModule("target doesn't have x11 in IMAGE_FEATURES")
-
-
-class XorgTest(oeRuntimeTest):
-
- @testcase(1151)
- @skipUnlessPassed('test_ssh')
- def test_xorg_running(self):
- (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep -v xinit | grep [X]org')
- self.assertEqual(status, 0, msg="Xorg does not appear to be running %s" % self.target.run(oeRuntimeTest.pscmd)[1])
diff --git a/meta/lib/oeqa/sdk/__init__.py b/meta/lib/oeqa/sdk/__init__.py
index 4cf3fa76b6..e69de29bb2 100644
--- a/meta/lib/oeqa/sdk/__init__.py
+++ b/meta/lib/oeqa/sdk/__init__.py
@@ -1,3 +0,0 @@
-# Enable other layers to have tests in the same named directory
-from pkgutil import extend_path
-__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oeqa/sdk/buildcvs.py b/meta/lib/oeqa/sdk/buildcvs.py
deleted file mode 100644
index c7146fa4af..0000000000
--- a/meta/lib/oeqa/sdk/buildcvs.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from oeqa.oetest import oeSDKTest, skipModule
-from oeqa.utils.decorators import *
-from oeqa.utils.targetbuild import SDKBuildProject
-
-class BuildCvsTest(oeSDKTest):
-
- @classmethod
- def setUpClass(self):
- self.project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/cvs/", oeSDKTest.tc.sdkenv, oeSDKTest.tc.d,
- "http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2")
- self.project.download_archive()
-
- def test_cvs(self):
- self.assertEqual(self.project.run_configure(), 0,
- msg="Running configure failed")
-
- self.assertEqual(self.project.run_make(), 0,
- msg="Running make failed")
-
- self.assertEqual(self.project.run_install(), 0,
- msg="Running make install failed")
-
- @classmethod
- def tearDownClass(self):
- self.project.clean()
diff --git a/meta/lib/oeqa/sdk/buildgalculator.py b/meta/lib/oeqa/sdk/buildgalculator.py
deleted file mode 100644
index dc2fa9ce19..0000000000
--- a/meta/lib/oeqa/sdk/buildgalculator.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from oeqa.oetest import oeSDKTest, skipModule
-from oeqa.utils.decorators import *
-from oeqa.utils.targetbuild import SDKBuildProject
-
-def setUpModule():
- if not (oeSDKTest.hasPackage("gtk+3") or oeSDKTest.hasPackage("libgtk-3.0")):
- skipModule("Image doesn't have gtk+3 in manifest")
-
-class GalculatorTest(oeSDKTest):
- def test_galculator(self):
- try:
- project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/galculator/",
- oeSDKTest.tc.sdkenv, oeSDKTest.tc.d,
- "http://galculator.mnim.org/downloads/galculator-2.1.4.tar.bz2")
-
- project.download_archive()
-
- # regenerate configure to get support for --with-libtool-sysroot
- legacy_preconf=("autoreconf -i -f -I ${OECORE_TARGET_SYSROOT}/usr/share/aclocal -I m4;")
-
- self.assertEqual(project.run_configure(extra_cmds=legacy_preconf),
- 0, msg="Running configure failed")
-
- self.assertEqual(project.run_make(), 0,
- msg="Running make failed")
- finally:
- project.clean()
diff --git a/meta/lib/oeqa/sdk/buildiptables.py b/meta/lib/oeqa/sdk/buildiptables.py
deleted file mode 100644
index f0cb8a4287..0000000000
--- a/meta/lib/oeqa/sdk/buildiptables.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from oeqa.oetest import oeSDKTest
-from oeqa.utils.decorators import *
-from oeqa.utils.targetbuild import SDKBuildProject
-
-
-class BuildIptablesTest(oeSDKTest):
-
- @classmethod
- def setUpClass(self):
- self.project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/iptables/", oeSDKTest.tc.sdkenv, oeSDKTest.tc.d,
- "http://downloads.yoctoproject.org/mirror/sources/iptables-1.4.13.tar.bz2")
- self.project.download_archive()
-
- def test_iptables(self):
- self.assertEqual(self.project.run_configure(), 0,
- msg="Running configure failed")
-
- self.assertEqual(self.project.run_make(), 0,
- msg="Running make failed")
-
- self.assertEqual(self.project.run_install(), 0,
- msg="Running make install failed")
-
- @classmethod
- def tearDownClass(self):
- self.project.clean()
diff --git a/meta/lib/oeqa/sdk/case.py b/meta/lib/oeqa/sdk/case.py
new file mode 100644
index 0000000000..ebb03af9eb
--- /dev/null
+++ b/meta/lib/oeqa/sdk/case.py
@@ -0,0 +1,54 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import subprocess
+
+from oeqa.core.case import OETestCase
+
+class OESDKTestCase(OETestCase):
+ def _run(self, cmd):
+ return subprocess.check_output(". %s > /dev/null; %s;" % \
+ (self.tc.sdk_env, cmd), shell=True, executable="/bin/bash",
+ stderr=subprocess.STDOUT, universal_newlines=True)
+
+ def fetch(self, workdir, dl_dir, url, archive=None):
+ if not archive:
+ from urllib.parse import urlparse
+ archive = os.path.basename(urlparse(url).path)
+
+ if dl_dir:
+ tarball = os.path.join(dl_dir, archive)
+ if os.path.exists(tarball):
+ return tarball
+
+ tarball = os.path.join(workdir, archive)
+ subprocess.check_output(["wget", "-O", tarball, url])
+ return tarball
+
+ def check_elf(self, path, target_os=None, target_arch=None):
+ """
+ Verify that the ELF binary $path matches the specified target
+ OS/architecture or, if not specified, the OS/architecture of the
+ currently configured MACHINE.
+ """
+ import oe.qa, oe.elf
+
+ if not target_os or not target_arch:
+ output = self._run("echo $OECORE_TARGET_OS:$OECORE_TARGET_ARCH")
+ target_os, target_arch = output.strip().split(":")
+
+ machine_data = oe.elf.machine_dict(None)[target_os][target_arch]
+ (machine, osabi, abiversion, endian, bits) = machine_data
+
+ elf = oe.qa.ELFFile(path)
+ elf.open()
+
+ self.assertEqual(machine, elf.machine(),
+ "Binary was %s but expected %s" %
+ (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine)))
+ self.assertEqual(bits, elf.abiSize())
+ self.assertEqual(endian, elf.isLittleEndian())
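
For illustration, a sketch of the lookup check_elf() performs, assuming oe.elf and
oe.qa are importable (i.e. running inside a build environment); the default os/arch
values here are placeholders.

    import oe.elf, oe.qa

    def describe_binary(path, target_os="linux", target_arch="x86_64"):
        # machine_dict maps (os, arch) to (machine, osabi, abiversion, endian, bits)
        machine, osabi, abiversion, endian, bits = \
            oe.elf.machine_dict(None)[target_os][target_arch]
        elf = oe.qa.ELFFile(path)
        elf.open()
        # Compare the ELF header fields against the expected tuple
        return (elf.machine() == machine,
                elf.abiSize() == bits,
                elf.isLittleEndian() == endian)
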
diff --git a/meta/lib/oeqa/sdk/cases/assimp.py b/meta/lib/oeqa/sdk/cases/assimp.py
new file mode 100644
index 0000000000..f26b17f2e9
--- /dev/null
+++ b/meta/lib/oeqa/sdk/cases/assimp.py
@@ -0,0 +1,40 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import subprocess
+import tempfile
+import unittest
+from oeqa.sdk.case import OESDKTestCase
+
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class BuildAssimp(OESDKTestCase):
+ """
+ Test case to build a project using cmake.
+ """
+
+ def setUp(self):
+ if not (self.tc.hasHostPackage("nativesdk-cmake") or
+ self.tc.hasHostPackage("cmake-native")):
+ raise unittest.SkipTest("Needs cmake")
+
+ def test_assimp(self):
+ with tempfile.TemporaryDirectory(prefix="assimp", dir=self.tc.sdk_dir) as testdir:
+ tarball = self.fetch(testdir, self.td["DL_DIR"], "https://github.com/assimp/assimp/archive/v4.1.0.tar.gz")
+
+ dirs = {}
+ dirs["source"] = os.path.join(testdir, "assimp-4.1.0")
+ dirs["build"] = os.path.join(testdir, "build")
+ dirs["install"] = os.path.join(testdir, "install")
+
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+ self.assertTrue(os.path.isdir(dirs["source"]))
+ os.makedirs(dirs["build"])
+
+ self._run("cd {build} && cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON {source}".format(**dirs))
+ self._run("cmake --build {build} -- -j".format(**dirs))
+ self._run("cmake --build {build} --target install -- DESTDIR={install}".format(**dirs))
+ self.check_elf(os.path.join(dirs["install"], "usr", "local", "lib", "libassimp.so.4.1.0"))
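
The autotools-based cases that follow (cpio, galculator, lzip) repeat essentially the
same fetch/extract/configure/build/verify flow. A condensed sketch of that shared
skeleton; the helper and its parameters are illustrative, not part of the patch.

    import os, subprocess, tempfile

    def build_and_check(case, url, srcname, binary):
        # 'case' is an OESDKTestCase; url/srcname/binary vary per project
        with tempfile.TemporaryDirectory(dir=case.tc.sdk_dir) as testdir:
            tarball = case.fetch(testdir, case.td["DL_DIR"], url)
            subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
            dirs = {"source": os.path.join(testdir, srcname),
                    "build": os.path.join(testdir, "build"),
                    "install": os.path.join(testdir, "install")}
            os.makedirs(dirs["build"])
            case._run("cd {build} && {source}/configure $CONFIGURE_FLAGS".format(**dirs))
            case._run("cd {build} && make -j && make install DESTDIR={install}".format(**dirs))
            case.check_elf(os.path.join(dirs["install"], binary))
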
diff --git a/meta/lib/oeqa/sdk/cases/buildcpio.py b/meta/lib/oeqa/sdk/cases/buildcpio.py
new file mode 100644
index 0000000000..0a5e68d5fd
--- /dev/null
+++ b/meta/lib/oeqa/sdk/cases/buildcpio.py
@@ -0,0 +1,35 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import tempfile
+import subprocess
+import unittest
+
+from oeqa.sdk.case import OESDKTestCase
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class BuildCpioTest(OESDKTestCase):
+ """
+ Check that autotools will cross-compile correctly.
+ """
+ def test_cpio(self):
+ with tempfile.TemporaryDirectory(prefix="cpio-", dir=self.tc.sdk_dir) as testdir:
+ tarball = self.fetch(testdir, self.td["DL_DIR"], "https://ftp.gnu.org/gnu/cpio/cpio-2.12.tar.gz")
+
+ dirs = {}
+ dirs["source"] = os.path.join(testdir, "cpio-2.12")
+ dirs["build"] = os.path.join(testdir, "build")
+ dirs["install"] = os.path.join(testdir, "install")
+
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+ self.assertTrue(os.path.isdir(dirs["source"]))
+ os.makedirs(dirs["build"])
+
+ self._run("cd {build} && {source}/configure $CONFIGURE_FLAGS".format(**dirs))
+ self._run("cd {build} && make -j".format(**dirs))
+ self._run("cd {build} && make install DESTDIR={install}".format(**dirs))
+
+ self.check_elf(os.path.join(dirs["install"], "usr", "local", "bin", "cpio"))
diff --git a/meta/lib/oeqa/sdk/cases/buildepoxy.py b/meta/lib/oeqa/sdk/cases/buildepoxy.py
new file mode 100644
index 0000000000..4211955f8d
--- /dev/null
+++ b/meta/lib/oeqa/sdk/cases/buildepoxy.py
@@ -0,0 +1,41 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import subprocess
+import tempfile
+import unittest
+
+from oeqa.sdk.case import OESDKTestCase
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class EpoxyTest(OESDKTestCase):
+ """
+ Test that Meson builds correctly.
+ """
+ def setUp(self):
+ if not (self.tc.hasHostPackage("nativesdk-meson")):
+ raise unittest.SkipTest("GalculatorTest class: SDK doesn't contain Meson")
+
+ def test_epoxy(self):
+ with tempfile.TemporaryDirectory(prefix="epoxy", dir=self.tc.sdk_dir) as testdir:
+ tarball = self.fetch(testdir, self.td["DL_DIR"], "https://github.com/anholt/libepoxy/releases/download/1.5.3/libepoxy-1.5.3.tar.xz")
+
+ dirs = {}
+ dirs["source"] = os.path.join(testdir, "libepoxy-1.5.3")
+ dirs["build"] = os.path.join(testdir, "build")
+ dirs["install"] = os.path.join(testdir, "install")
+
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+ self.assertTrue(os.path.isdir(dirs["source"]))
+ os.makedirs(dirs["build"])
+
+ log = self._run("meson -Degl=no -Dglx=no -Dx11=false {build} {source}".format(**dirs))
+ # Check that Meson thinks we're doing a cross build and not a native
+ self.assertIn("Build type: cross build", log)
+ self._run("ninja -C {build} -v".format(**dirs))
+ self._run("DESTDIR={install} ninja -C {build} -v install".format(**dirs))
+
+ self.check_elf(os.path.join(dirs["install"], "usr", "local", "lib", "libepoxy.so"))
diff --git a/meta/lib/oeqa/sdk/cases/buildgalculator.py b/meta/lib/oeqa/sdk/cases/buildgalculator.py
new file mode 100644
index 0000000000..bbaa5c55c9
--- /dev/null
+++ b/meta/lib/oeqa/sdk/cases/buildgalculator.py
@@ -0,0 +1,43 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import subprocess
+import tempfile
+import unittest
+
+from oeqa.sdk.case import OESDKTestCase
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class GalculatorTest(OESDKTestCase):
+ """
+ Test that autotools and GTK+ 3 compile correctly.
+ """
+ def setUp(self):
+ if not (self.tc.hasTargetPackage("gtk+3", multilib=True) or \
+ self.tc.hasTargetPackage("libgtk-3.0", multilib=True)):
+ raise unittest.SkipTest("GalculatorTest class: SDK don't support gtk+3")
+ if not (self.tc.hasHostPackage("nativesdk-gettext-dev")):
+ raise unittest.SkipTest("GalculatorTest class: SDK doesn't contain gettext")
+
+ def test_galculator(self):
+ with tempfile.TemporaryDirectory(prefix="galculator", dir=self.tc.sdk_dir) as testdir:
+ tarball = self.fetch(testdir, self.td["DL_DIR"], "http://galculator.mnim.org/downloads/galculator-2.1.4.tar.bz2")
+
+ dirs = {}
+ dirs["source"] = os.path.join(testdir, "galculator-2.1.4")
+ dirs["build"] = os.path.join(testdir, "build")
+ dirs["install"] = os.path.join(testdir, "install")
+
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+ self.assertTrue(os.path.isdir(dirs["source"]))
+ os.makedirs(dirs["build"])
+
+ self._run("cd {source} && autoreconf -i -f -I $OECORE_TARGET_SYSROOT/usr/share/aclocal -I m4".format(**dirs))
+ self._run("cd {build} && {source}/configure $CONFIGURE_FLAGS".format(**dirs))
+ self._run("cd {build} && make -j".format(**dirs))
+ self._run("cd {build} && make install DESTDIR={install}".format(**dirs))
+
+ self.check_elf(os.path.join(dirs["install"], "usr", "local", "bin", "galculator"))
diff --git a/meta/lib/oeqa/sdk/cases/buildlzip.py b/meta/lib/oeqa/sdk/cases/buildlzip.py
new file mode 100644
index 0000000000..515acd2891
--- /dev/null
+++ b/meta/lib/oeqa/sdk/cases/buildlzip.py
@@ -0,0 +1,37 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os, tempfile, subprocess, unittest
+from oeqa.sdk.case import OESDKTestCase
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class BuildLzipTest(OESDKTestCase):
+ """
+ Test that "plain" compilation works, using just $CC $CFLAGS etc.
+ """
+ def test_lzip(self):
+ with tempfile.TemporaryDirectory(prefix="lzip", dir=self.tc.sdk_dir) as testdir:
+ tarball = self.fetch(testdir, self.td["DL_DIR"], "http://downloads.yoctoproject.org/mirror/sources/lzip-1.19.tar.gz")
+
+ dirs = {}
+ dirs["source"] = os.path.join(testdir, "lzip-1.19")
+ dirs["build"] = os.path.join(testdir, "build")
+ dirs["install"] = os.path.join(testdir, "install")
+
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+ self.assertTrue(os.path.isdir(dirs["source"]))
+ os.makedirs(dirs["build"])
+
+ cmd = """cd {build} && \
+ {source}/configure --srcdir {source} \
+ CXX="$CXX" \
+ CPPFLAGS="$CPPFLAGS" \
+ CXXFLAGS="$CXXFLAGS" \
+ LDFLAGS="$LDFLAGS" \
+ """
+ self._run(cmd.format(**dirs))
+ self._run("cd {build} && make -j".format(**dirs))
+ self._run("cd {build} && make install DESTDIR={install}".format(**dirs))
+ self.check_elf(os.path.join(dirs["install"], "usr", "local", "bin", "lzip"))
diff --git a/meta/lib/oeqa/sdk/cases/gcc.py b/meta/lib/oeqa/sdk/cases/gcc.py
new file mode 100644
index 0000000000..eb08eadd28
--- /dev/null
+++ b/meta/lib/oeqa/sdk/cases/gcc.py
@@ -0,0 +1,50 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import shutil
+import unittest
+
+from oeqa.core.utils.path import remove_safe
+from oeqa.sdk.case import OESDKTestCase
+
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class GccCompileTest(OESDKTestCase):
+ td_vars = ['MACHINE']
+
+ @classmethod
+ def setUpClass(self):
+ files = {'test.c' : self.tc.files_dir, 'test.cpp' : self.tc.files_dir,
+ 'testsdkmakefile' : self.tc.sdk_files_dir}
+ for f in files:
+ shutil.copyfile(os.path.join(files[f], f),
+ os.path.join(self.tc.sdk_dir, f))
+
+ def setUp(self):
+ machine = self.td.get("MACHINE")
+ if not (self.tc.hasHostPackage("packagegroup-cross-canadian-%s" % machine) or
+ self.tc.hasHostPackage("^gcc-", regex=True)):
+ raise unittest.SkipTest("GccCompileTest class: SDK doesn't contain a cross-canadian toolchain")
+
+ def test_gcc_compile(self):
+ self._run('$CC %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))
+
+ def test_gpp_compile(self):
+ self._run('$CXX %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))
+
+ def test_gpp2_compile(self):
+ self._run('$CXX %s/test.cpp -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))
+
+ def test_make(self):
+ self._run('cd %s; make -f testsdkmakefile' % self.tc.sdk_dir)
+
+ @classmethod
+ def tearDownClass(self):
+ files = [os.path.join(self.tc.sdk_dir, f) \
+ for f in ['test.c', 'test.cpp', 'test.o', 'test',
+ 'testsdkmakefile']]
+ for f in files:
+ remove_safe(f)
diff --git a/meta/lib/oeqa/sdk/cases/perl.py b/meta/lib/oeqa/sdk/cases/perl.py
new file mode 100644
index 0000000000..14d76d820f
--- /dev/null
+++ b/meta/lib/oeqa/sdk/cases/perl.py
@@ -0,0 +1,20 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import unittest
+from oeqa.sdk.case import OESDKTestCase
+
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class PerlTest(OESDKTestCase):
+ def setUp(self):
+ if not (self.tc.hasHostPackage("nativesdk-perl") or
+ self.tc.hasHostPackage("perl-native")):
+ raise unittest.SkipTest("No perl package in the SDK")
+
+ def test_perl(self):
+ cmd = "perl -e '$_=\"Uryyb, jbeyq\"; tr/a-zA-Z/n-za-mN-ZA-M/;print'"
+ output = self._run(cmd)
+ self.assertEqual(output, "Hello, world")
diff --git a/meta/lib/oeqa/sdk/cases/python.py b/meta/lib/oeqa/sdk/cases/python.py
new file mode 100644
index 0000000000..a334abce5f
--- /dev/null
+++ b/meta/lib/oeqa/sdk/cases/python.py
@@ -0,0 +1,31 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import subprocess, unittest
+from oeqa.sdk.case import OESDKTestCase
+
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class Python2Test(OESDKTestCase):
+ def setUp(self):
+ if not (self.tc.hasHostPackage("nativesdk-python-core") or
+ self.tc.hasHostPackage("python-core-native")):
+ raise unittest.SkipTest("No python package in the SDK")
+
+ def test_python2(self):
+ cmd = "python -c \"import codecs; print(codecs.encode('Uryyb, jbeyq', 'rot13'))\""
+ output = self._run(cmd)
+ self.assertEqual(output, "Hello, world\n")
+
+class Python3Test(OESDKTestCase):
+ def setUp(self):
+ if not (self.tc.hasHostPackage("nativesdk-python3-core") or
+ self.tc.hasHostPackage("python3-core-native")):
+ raise unittest.SkipTest("No python3 package in the SDK")
+
+ def test_python3(self):
+ cmd = "python3 -c \"import codecs; print(codecs.encode('Uryyb, jbeyq', 'rot13'))\""
+ output = self._run(cmd)
+ self.assertEqual(output, "Hello, world\n")
diff --git a/meta/lib/oeqa/sdk/context.py b/meta/lib/oeqa/sdk/context.py
new file mode 100644
index 0000000000..09e77c19fe
--- /dev/null
+++ b/meta/lib/oeqa/sdk/context.py
@@ -0,0 +1,153 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
+import glob
+import re
+
+from oeqa.core.context import OETestContext, OETestContextExecutor
+
+class OESDKTestContext(OETestContext):
+ sdk_files_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
+
+ def __init__(self, td=None, logger=None, sdk_dir=None, sdk_env=None,
+ target_pkg_manifest=None, host_pkg_manifest=None):
+ super(OESDKTestContext, self).__init__(td, logger)
+
+ self.sdk_dir = sdk_dir
+ self.sdk_env = sdk_env
+ self.target_pkg_manifest = target_pkg_manifest
+ self.host_pkg_manifest = host_pkg_manifest
+
+ def _hasPackage(self, manifest, pkg, regex=False):
+ if regex:
+ # do regex match
+ pat = re.compile(pkg)
+ for p in manifest.keys():
+ if pat.search(p):
+ return True
+ else:
+ # do exact match
+ if pkg in manifest.keys():
+ return True
+ return False
+
+ def hasHostPackage(self, pkg, regex=False):
+ return self._hasPackage(self.host_pkg_manifest, pkg, regex=regex)
+
+ def hasTargetPackage(self, pkg, multilib=False, regex=False):
+ if multilib:
+ # match multilib according to sdk_env
+ mls = self.td.get('MULTILIB_VARIANTS', '').split()
+ for ml in mls:
+ if ('ml'+ml) in self.sdk_env:
+ pkg = ml + '-' + pkg
+ return self._hasPackage(self.target_pkg_manifest, pkg, regex=regex)
+
+class OESDKTestContextExecutor(OETestContextExecutor):
+ _context_class = OESDKTestContext
+
+ name = 'sdk'
+ help = 'sdk test component'
+ description = 'executes sdk tests'
+
+ default_cases = [os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'cases')]
+ default_test_data = None
+
+ def register_commands(self, logger, subparsers):
+ super(OESDKTestContextExecutor, self).register_commands(logger, subparsers)
+
+ sdk_group = self.parser.add_argument_group('sdk options')
+ sdk_group.add_argument('--sdk-env', action='store',
+ help='sdk environment')
+ sdk_group.add_argument('--target-manifest', action='store',
+ help='sdk target manifest')
+ sdk_group.add_argument('--host-manifest', action='store',
+ help='sdk host manifest')
+
+ sdk_dgroup = self.parser.add_argument_group('sdk display options')
+ sdk_dgroup.add_argument('--list-sdk-env', action='store_true',
+ default=False, help='list available SDK environments')
+
+ # XXX this option is required, but argparse_oe has a bug handling
+ # required options: it doesn't seem to keep track of already parsed
+ # options
+ sdk_rgroup = self.parser.add_argument_group('sdk required options')
+ sdk_rgroup.add_argument('--sdk-dir', required=False, action='store',
+ help='sdk installed directory')
+
+ self.parser.add_argument('-j', '--num-processes', dest='processes', action='store',
+ type=int, help="number of processes to execute in parallel with")
+
+ @staticmethod
+ def _load_manifest(manifest):
+ pkg_manifest = {}
+ if manifest:
+ with open(manifest) as f:
+ for line in f:
+ (pkg, arch, version) = line.strip().split()
+ pkg_manifest[pkg] = (version, arch)
+
+ return pkg_manifest
+
+ def _process_args(self, logger, args):
+ super(OESDKTestContextExecutor, self)._process_args(logger, args)
+
+ self.tc_kwargs['init']['sdk_dir'] = args.sdk_dir
+ self.tc_kwargs['init']['sdk_env'] = self.sdk_env
+ self.tc_kwargs['init']['target_pkg_manifest'] = \
+ OESDKTestContextExecutor._load_manifest(args.target_manifest)
+ self.tc_kwargs['init']['host_pkg_manifest'] = \
+ OESDKTestContextExecutor._load_manifest(args.host_manifest)
+ self.tc_kwargs['run']['processes'] = args.processes
+
+ @staticmethod
+ def _get_sdk_environs(sdk_dir):
+ sdk_env = {}
+
+ environ_pattern = sdk_dir + '/environment-setup-*'
+ full_sdk_env = glob.glob(environ_pattern)
+ for env in full_sdk_env:
+ m = re.search('environment-setup-(.*)', env)
+ if m:
+ sdk_env[m.group(1)] = env
+
+ return sdk_env
+
+ def _display_sdk_envs(self, log, args, sdk_envs):
+ log("Available SDK environments at directory %s:" \
+ % args.sdk_dir)
+ log("")
+ for env in sdk_envs:
+ log(env)
+
+ def run(self, logger, args):
+ import argparse_oe
+
+ if not args.sdk_dir:
+ raise argparse_oe.ArgumentUsageError("No SDK directory "\
+ "specified please do, --sdk-dir SDK_DIR", self.name)
+
+ sdk_envs = OESDKTestContextExecutor._get_sdk_environs(args.sdk_dir)
+ if not sdk_envs:
+ raise argparse_oe.ArgumentUsageError("No available SDK "\
+ "enviroments found at %s" % args.sdk_dir, self.name)
+
+ if args.list_sdk_env:
+ self._display_sdk_envs(logger.info, args, sdk_envs)
+ sys.exit(0)
+
+ if args.sdk_env not in sdk_envs:
+ self._display_sdk_envs(logger.error, args, sdk_envs)
+ raise argparse_oe.ArgumentUsageError("No valid SDK "\
+ "environment (%s) specified" % args.sdk_env, self.name)
+
+ self.sdk_env = sdk_envs[args.sdk_env]
+ return super(OESDKTestContextExecutor, self).run(logger, args)
+
+_executor_class = OESDKTestContextExecutor
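
For reference, the manifests consumed by _load_manifest() are the plain-text package
lists written alongside the SDK (see the ${TOOLCHAIN_OUTPUTNAME}.target.manifest and
.host.manifest files used in testsdk.py below), one "name arch version" triple per
line. A small sketch of how the parsed result is queried; the file name, package name
and values are illustrative.

    from oeqa.sdk.context import OESDKTestContextExecutor

    # e.g. a line in the manifest might read:  gtk+3 core2-64 3.24.8-r0
    manifest = OESDKTestContextExecutor._load_manifest("target.manifest")
    # _load_manifest() returns {package: (version, arch)}
    if "gtk+3" in manifest:
        version, arch = manifest["gtk+3"]
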
diff --git a/meta/lib/oeqa/runtime/files/testsdkmakefile b/meta/lib/oeqa/sdk/files/testsdkmakefile
index fb05f822f3..fb05f822f3 100644
--- a/meta/lib/oeqa/runtime/files/testsdkmakefile
+++ b/meta/lib/oeqa/sdk/files/testsdkmakefile
diff --git a/meta/lib/oeqa/sdk/gcc.py b/meta/lib/oeqa/sdk/gcc.py
deleted file mode 100644
index 8395b9b908..0000000000
--- a/meta/lib/oeqa/sdk/gcc.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import unittest
-import os
-import shutil
-from oeqa.oetest import oeSDKTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- machine = oeSDKTest.tc.d.getVar("MACHINE", True)
- if not oeSDKTest.hasHostPackage("packagegroup-cross-canadian-" + machine):
- skipModule("SDK doesn't contain a cross-canadian toolchain")
-
-
-class GccCompileTest(oeSDKTest):
-
- @classmethod
- def setUpClass(self):
- for f in ['test.c', 'test.cpp', 'testsdkmakefile']:
- shutil.copyfile(os.path.join(self.tc.filesdir, f), self.tc.sdktestdir + f)
-
- def test_gcc_compile(self):
- self._run('$CC %s/test.c -o %s/test -lm' % (self.tc.sdktestdir, self.tc.sdktestdir))
-
- def test_gpp_compile(self):
- self._run('$CXX %s/test.c -o %s/test -lm' % (self.tc.sdktestdir, self.tc.sdktestdir))
-
- def test_gpp2_compile(self):
- self._run('$CXX %s/test.cpp -o %s/test -lm' % (self.tc.sdktestdir, self.tc.sdktestdir))
-
- def test_make(self):
- self._run('cd %s; make -f testsdkmakefile' % self.tc.sdktestdir)
-
- @classmethod
- def tearDownClass(self):
- files = [self.tc.sdktestdir + f for f in ['test.c', 'test.cpp', 'test.o', 'test', 'testsdkmakefile']]
- for f in files:
- bb.utils.remove(f)
diff --git a/meta/lib/oeqa/sdk/perl.py b/meta/lib/oeqa/sdk/perl.py
deleted file mode 100644
index 45f422ef0b..0000000000
--- a/meta/lib/oeqa/sdk/perl.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import unittest
-import os
-import shutil
-from oeqa.oetest import oeSDKTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeSDKTest.hasHostPackage("nativesdk-perl"):
- skipModule("No perl package in the SDK")
-
-
-class PerlTest(oeSDKTest):
-
- @classmethod
- def setUpClass(self):
- for f in ['test.pl']:
- shutil.copyfile(os.path.join(self.tc.filesdir, f), self.tc.sdktestdir + f)
- self.testfile = self.tc.sdktestdir + "test.pl"
-
- def test_perl_exists(self):
- self._run('which perl')
-
- def test_perl_works(self):
- self._run('perl %s/test.pl' % self.tc.sdktestdir)
-
- @classmethod
- def tearDownClass(self):
- bb.utils.remove("%s/test.pl" % self.tc.sdktestdir)
diff --git a/meta/lib/oeqa/sdk/python.py b/meta/lib/oeqa/sdk/python.py
deleted file mode 100644
index 896fab4dfb..0000000000
--- a/meta/lib/oeqa/sdk/python.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import unittest
-import os
-import shutil
-from oeqa.oetest import oeSDKTest, skipModule
-from oeqa.utils.decorators import *
-
-def setUpModule():
- if not oeSDKTest.hasHostPackage("nativesdk-python"):
- skipModule("No python package in the SDK")
-
-
-class PythonTest(oeSDKTest):
-
- @classmethod
- def setUpClass(self):
- for f in ['test.py']:
- shutil.copyfile(os.path.join(self.tc.filesdir, f), self.tc.sdktestdir + f)
-
- def test_python_exists(self):
- self._run('which python')
-
- def test_python_stdout(self):
- output = self._run('python %s/test.py' % self.tc.sdktestdir)
- self.assertEqual(output.strip(), "the value of a is 0.01", msg="Incorrect output: %s" % output)
-
- def test_python_testfile(self):
- self._run('ls /tmp/testfile.python')
-
- @classmethod
- def tearDownClass(self):
- bb.utils.remove("%s/test.py" % self.tc.sdktestdir)
- bb.utils.remove("/tmp/testfile.python")
diff --git a/meta/lib/oeqa/sdk/testsdk.py b/meta/lib/oeqa/sdk/testsdk.py
new file mode 100644
index 0000000000..35e40187bc
--- /dev/null
+++ b/meta/lib/oeqa/sdk/testsdk.py
@@ -0,0 +1,145 @@
+#
+# Copyright 2018 by Garmin Ltd. or its subsidiaries
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.sdk.context import OESDKTestContext, OESDKTestContextExecutor
+
+class TestSDKBase(object):
+ @staticmethod
+ def get_sdk_configuration(d, test_type):
+ import platform
+ import oe.lsb
+ from oeqa.utils.metadata import get_layers
+ configuration = {'TEST_TYPE': test_type,
+ 'MACHINE': d.getVar("MACHINE"),
+ 'SDKMACHINE': d.getVar("SDKMACHINE"),
+ 'IMAGE_BASENAME': d.getVar("IMAGE_BASENAME"),
+ 'IMAGE_PKGTYPE': d.getVar("IMAGE_PKGTYPE"),
+ 'STARTTIME': d.getVar("DATETIME"),
+ 'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
+ 'LAYERS': get_layers(d.getVar("BBLAYERS"))}
+ return configuration
+
+ @staticmethod
+ def get_sdk_json_result_dir(d):
+ json_result_dir = os.path.join(d.getVar("LOG_DIR"), 'oeqa')
+ custom_json_result_dir = d.getVar("OEQA_JSON_RESULT_DIR")
+ if custom_json_result_dir:
+ json_result_dir = custom_json_result_dir
+ return json_result_dir
+
+ @staticmethod
+ def get_sdk_result_id(configuration):
+ return '%s_%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['SDKMACHINE'], configuration['MACHINE'], configuration['STARTTIME'])
+
+class TestSDK(TestSDKBase):
+ context_executor_class = OESDKTestContextExecutor
+ context_class = OESDKTestContext
+ test_type = 'sdk'
+
+ def get_tcname(self, d):
+ """
+ Get the name of the SDK file
+ """
+ return d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh")
+
+ def extract_sdk(self, tcname, sdk_dir, d):
+ """
+ Extract the SDK to the specified location
+ """
+ import subprocess
+
+ try:
+ subprocess.check_output("cd %s; %s <<EOF\n./\nY\nEOF" % (sdk_dir, tcname), shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Couldn't install the SDK:\n%s" % e.output.decode("utf-8"))
+
+ def setup_context(self, d):
+ """
+ Return a dictionary of additional arguments that should be passed to
+ the context_class on construction
+ """
+ return dict()
+
+ def run(self, d):
+
+ import os
+ import subprocess
+ import json
+ import logging
+
+ from bb.utils import export_proxies
+ from oeqa.utils import make_logger_bitbake_compatible
+
+ pn = d.getVar("PN")
+ logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
+
+ # SDK tests use the network to download the projects they build
+ export_proxies(d)
+
+ tcname = self.get_tcname(d)
+
+ if not os.path.exists(tcname):
+ bb.fatal("The toolchain %s is not built. Build it before running the tests: 'bitbake <image> -c populate_sdk' ." % tcname)
+
+ tdname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.testdata.json")
+ test_data = json.load(open(tdname, "r"))
+
+ target_pkg_manifest = self.context_executor_class._load_manifest(
+ d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"))
+ host_pkg_manifest = self.context_executor_class._load_manifest(
+ d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"))
+
+ processes = d.getVar("TESTIMAGE_NUMBER_THREADS") or d.getVar("BB_NUMBER_THREADS")
+ if processes:
+ try:
+ import testtools, subunit
+ except ImportError:
+ bb.warn("Failed to import testtools or subunit, the testcases will run serially")
+ processes = None
+
+ sdk_dir = d.expand("${WORKDIR}/testimage-sdk/")
+ bb.utils.remove(sdk_dir, True)
+ bb.utils.mkdirhier(sdk_dir)
+
+ context_args = self.setup_context(d)
+
+ self.extract_sdk(tcname, sdk_dir, d)
+
+ fail = False
+ sdk_envs = self.context_executor_class._get_sdk_environs(sdk_dir)
+ for s in sdk_envs:
+ sdk_env = sdk_envs[s]
+ bb.plain("SDK testing environment: %s" % s)
+ tc = self.context_class(td=test_data, logger=logger, sdk_dir=sdk_dir,
+ sdk_env=sdk_env, target_pkg_manifest=target_pkg_manifest,
+ host_pkg_manifest=host_pkg_manifest, **context_args)
+
+ try:
+ tc.loadTests(self.context_executor_class.default_cases)
+ except Exception as e:
+ import traceback
+ bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+
+ if processes:
+ result = tc.runTests(processes=int(processes))
+ else:
+ result = tc.runTests()
+
+ component = "%s %s" % (pn, self.context_executor_class.name)
+ context_msg = "%s:%s" % (os.path.basename(tcname), os.path.basename(sdk_env))
+ configuration = self.get_sdk_configuration(d, self.test_type)
+ result.logDetails(self.get_sdk_json_result_dir(d),
+ configuration,
+ self.get_sdk_result_id(configuration))
+ result.logSummary(component, context_msg)
+
+ if not result.wasSuccessful():
+ fail = True
+
+ if fail:
+ bb.fatal("%s - FAILED - check the task log and the commands log" % pn)
+
+
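
A sketch of how a layer might drive or extend this class; the subclass and the task
wrapper are illustrative (the actual wiring lives in the testsdk bbclass, outside
this patch).

    from oeqa.sdk.testsdk import TestSDK

    class MyLayerTestSDK(TestSDK):
        # Hook points provided above: context/executor classes, test_type,
        # and setup_context() for extra context_class kwargs.
        test_type = 'sdk-mylayer'

        def setup_context(self, d):
            return {}

    def do_testsdk(d):
        # 'd' is the BitBake datastore; run() calls bb.fatal() on failure
        MyLayerTestSDK().run(d)
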
diff --git a/meta/lib/oeqa/sdk/utils/__init__.py b/meta/lib/oeqa/sdk/utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/meta/lib/oeqa/sdk/utils/__init__.py
diff --git a/meta/lib/oeqa/sdk/utils/sdkbuildproject.py b/meta/lib/oeqa/sdk/utils/sdkbuildproject.py
new file mode 100644
index 0000000000..32f5e3310d
--- /dev/null
+++ b/meta/lib/oeqa/sdk/utils/sdkbuildproject.py
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import subprocess
+
+from oeqa.utils.buildproject import BuildProject
+
+class SDKBuildProject(BuildProject):
+ def __init__(self, testpath, sdkenv, uri, testlogdir, builddatetime,
+ foldername=None, dl_dir=None):
+ self.sdkenv = sdkenv
+ self.testdir = testpath
+ self.targetdir = testpath
+ os.makedirs(testpath, exist_ok=True)
+ self.datetime = builddatetime
+ self.testlogdir = testlogdir
+ os.makedirs(self.testlogdir, exist_ok=True)
+ self.logfile = os.path.join(self.testlogdir, "sdk_target_log.%s" % self.datetime)
+ BuildProject.__init__(self, uri, foldername, tmpdir=testpath, dl_dir=dl_dir)
+
+ def download_archive(self):
+ self._download_archive()
+
+ cmd = 'tar xf %s -C %s' % (os.path.join(self.targetdir, self.archive), self.targetdir)
+ subprocess.check_output(cmd, shell=True)
+
+ # Change targetdir to project folder
+ self.targetdir = os.path.join(self.targetdir, self.fname)
+
+ def run_configure(self, configure_args='', extra_cmds=''):
+ return super(SDKBuildProject, self).run_configure(configure_args=(configure_args or '$CONFIGURE_FLAGS'), extra_cmds=extra_cmds)
+
+ def run_install(self, install_args=''):
+ return super(SDKBuildProject, self).run_install(install_args=(install_args or "DESTDIR=%s/../install" % self.targetdir))
+
+ def log(self, msg):
+ if self.logfile:
+ with open(self.logfile, "a") as f:
+ f.write("%s\n" % msg)
+
+ def _run(self, cmd):
+ self.log("Running . %s; " % self.sdkenv + cmd)
+ try:
+ output = subprocess.check_output(". %s; " % self.sdkenv + cmd, shell=True,
+ executable='/bin/bash', stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ print(exc.output.decode('utf-8'))
+ return exc.returncode
+ return 0
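
A minimal usage sketch, assuming an installed SDK environment script; all paths, the
URL and the timestamp are placeholders.

    from oeqa.sdk.utils.sdkbuildproject import SDKBuildProject

    project = SDKBuildProject("/tmp/sdktest/cpio/",                  # testpath
                              "/opt/sdk/environment-setup-core2-64", # sdkenv
                              "https://example.com/src/cpio-2.12.tar.gz",
                              "/tmp/sdktest/logs", "20190101120000")
    project.download_archive()
    # each step returns 0 on success, the failing command's exit code otherwise
    assert project.run_configure() == 0
    assert project.run_make() == 0     # run_make() is inherited from BuildProject
    assert project.run_install() == 0
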
diff --git a/meta/lib/oeqa/sdkext/__init__.py b/meta/lib/oeqa/sdkext/__init__.py
index 4cf3fa76b6..e69de29bb2 100644
--- a/meta/lib/oeqa/sdkext/__init__.py
+++ b/meta/lib/oeqa/sdkext/__init__.py
@@ -1,3 +0,0 @@
-# Enable other layers to have tests in the same named directory
-from pkgutil import extend_path
-__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oeqa/sdkext/case.py b/meta/lib/oeqa/sdkext/case.py
new file mode 100644
index 0000000000..668faec9b9
--- /dev/null
+++ b/meta/lib/oeqa/sdkext/case.py
@@ -0,0 +1,24 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import subprocess
+
+from oeqa.utils import avoid_paths_in_environ
+from oeqa.sdk.case import OESDKTestCase
+
+class OESDKExtTestCase(OESDKTestCase):
+ def _run(self, cmd):
+ # The extensible SDK shows a warning if bitbake is found in PATH,
+ # because it can cause contamination, e.g. using devtool from
+ # poky/scripts instead of the eSDK one.
+ env = os.environ.copy()
+ paths_to_avoid = ['bitbake/bin', 'poky/scripts']
+ env['PATH'] = avoid_paths_in_environ(paths_to_avoid)
+
+ return subprocess.check_output(". %s > /dev/null;"\
+ " %s;" % (self.tc.sdk_env, cmd), stderr=subprocess.STDOUT,
+ shell=True, env=env, universal_newlines=True)
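
For clarity, an illustrative re-implementation of the PATH filtering relied on above,
assuming avoid_paths_in_environ() drops every PATH entry containing one of the given
fragments and returns the filtered value (behaviour inferred from its use here, not a
copy of the real helper).

    import os

    def avoid_paths_in_environ_sketch(paths_to_avoid):
        # keep only PATH entries that contain none of the fragments
        kept = [p for p in os.environ['PATH'].split(':')
                if not any(fragment in p for fragment in paths_to_avoid)]
        return ':'.join(kept)

    # e.g. strip a poky checkout's tools from the search path
    print(avoid_paths_in_environ_sketch(['bitbake/bin', 'poky/scripts']))
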
diff --git a/meta/lib/oeqa/sdkext/cases/devtool.py b/meta/lib/oeqa/sdkext/cases/devtool.py
new file mode 100644
index 0000000000..8e92bf8064
--- /dev/null
+++ b/meta/lib/oeqa/sdkext/cases/devtool.py
@@ -0,0 +1,120 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import shutil
+import subprocess
+
+from oeqa.sdkext.case import OESDKExtTestCase
+from oeqa.utils.httpserver import HTTPService
+
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class DevtoolTest(OESDKExtTestCase):
+ @classmethod
+ def setUpClass(cls):
+ myapp_src = os.path.join(cls.tc.esdk_files_dir, "myapp")
+ cls.myapp_dst = os.path.join(cls.tc.sdk_dir, "myapp")
+ shutil.copytree(myapp_src, cls.myapp_dst)
+
+ myapp_cmake_src = os.path.join(cls.tc.esdk_files_dir, "myapp_cmake")
+ cls.myapp_cmake_dst = os.path.join(cls.tc.sdk_dir, "myapp_cmake")
+ shutil.copytree(myapp_cmake_src, cls.myapp_cmake_dst)
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.myapp_dst)
+ shutil.rmtree(cls.myapp_cmake_dst)
+
+ def _test_devtool_build(self, directory):
+ self._run('devtool add myapp %s' % directory)
+ try:
+ self._run('devtool build myapp')
+ finally:
+ self._run('devtool reset myapp')
+
+ def _test_devtool_build_package(self, directory):
+ self._run('devtool add myapp %s' % directory)
+ try:
+ self._run('devtool package myapp')
+ finally:
+ self._run('devtool reset myapp')
+
+ def test_devtool_location(self):
+ output = self._run('which devtool')
+ self.assertEqual(output.startswith(self.tc.sdk_dir), True, \
+ msg="Seems that devtool isn't the eSDK one: %s" % output)
+
+ def test_devtool_add_reset(self):
+ self._run('devtool add myapp %s' % self.myapp_dst)
+ self._run('devtool reset myapp')
+
+ def test_devtool_build_make(self):
+ self._test_devtool_build(self.myapp_dst)
+
+ def test_devtool_build_esdk_package(self):
+ self._test_devtool_build_package(self.myapp_dst)
+
+ def test_devtool_build_cmake(self):
+ self._test_devtool_build(self.myapp_cmake_dst)
+
+ def test_extend_autotools_recipe_creation(self):
+ req = 'https://github.com/rdfa/librdfa'
+ recipe = "librdfa"
+ self._run('devtool sdk-install libxml2')
+ self._run('devtool add %s %s' % (recipe, req) )
+ try:
+ self._run('devtool build %s' % recipe)
+ finally:
+ self._run('devtool reset %s' % recipe)
+
+ def test_devtool_kernelmodule(self):
+ docfile = 'https://git.yoctoproject.org/git/kernel-module-hello-world'
+ recipe = 'kernel-module-hello-world'
+ self._run('devtool add %s %s' % (recipe, docfile) )
+ try:
+ self._run('devtool build %s' % recipe)
+ finally:
+ self._run('devtool reset %s' % recipe)
+
+ def test_recipes_for_nodejs(self):
+ package_nodejs = "npm://registry.npmjs.org;name=winston;version=2.2.0"
+ self._run('devtool add %s ' % package_nodejs)
+ try:
+ self._run('devtool build %s ' % package_nodejs)
+ finally:
+ self._run('devtool reset %s '% package_nodejs)
+
+class SdkUpdateTest(OESDKExtTestCase):
+ @classmethod
+ def setUpClass(self):
+ self.publish_dir = os.path.join(self.tc.sdk_dir, 'esdk_publish')
+ if os.path.exists(self.publish_dir):
+ shutil.rmtree(self.publish_dir)
+ os.mkdir(self.publish_dir)
+
+ base_tcname = "%s/%s" % (self.td.get("SDK_DEPLOY", ''),
+ self.td.get("TOOLCHAINEXT_OUTPUTNAME", ''))
+ tcname_new = "%s-new.sh" % base_tcname
+ if not os.path.exists(tcname_new):
+ tcname_new = "%s.sh" % base_tcname
+
+ cmd = 'oe-publish-sdk %s %s' % (tcname_new, self.publish_dir)
+ subprocess.check_output(cmd, shell=True)
+
+ self.http_service = HTTPService(self.publish_dir)
+ self.http_service.start()
+
+ self.http_url = "http://127.0.0.1:%d" % self.http_service.port
+
+ def test_sdk_update_http(self):
+ output = self._run("devtool sdk-update \"%s\"" % self.http_url)
+
+ @classmethod
+ def tearDownClass(self):
+ self.http_service.stop()
+ shutil.rmtree(self.publish_dir)
diff --git a/meta/lib/oeqa/sdkext/context.py b/meta/lib/oeqa/sdkext/context.py
new file mode 100644
index 0000000000..2ac2bf6ff7
--- /dev/null
+++ b/meta/lib/oeqa/sdkext/context.py
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+from oeqa.sdk.context import OESDKTestContext, OESDKTestContextExecutor
+
+class OESDKExtTestContext(OESDKTestContext):
+ esdk_files_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
+
+ # FIXME - We really need to do better mapping of names here; this at
+ # least allows some tests to run
+ def hasHostPackage(self, pkg):
+ # We force a toolchain to be installed into the eSDK even if it's minimal
+ if pkg.startswith("packagegroup-cross-canadian-"):
+ return True
+ return self._hasPackage(self.host_pkg_manifest, pkg)
+
+class OESDKExtTestContextExecutor(OESDKTestContextExecutor):
+ _context_class = OESDKExtTestContext
+
+ name = 'esdk'
+ help = 'esdk test component'
+ description = 'executes esdk tests'
+
+ default_cases = OESDKTestContextExecutor.default_cases + \
+ [os.path.join(os.path.abspath(os.path.dirname(__file__)), 'cases')]
+ default_test_data = None
+
+_executor_class = OESDKExtTestContextExecutor
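
Because default_cases concatenates the sdk and esdk case directories, the esdk
component runs all plain SDK cases plus its own. A layer could extend the chain the
same way; the subclass below is illustrative.

    import os
    from oeqa.sdkext.context import OESDKExtTestContextExecutor

    class MyExtTestContextExecutor(OESDKExtTestContextExecutor):
        name = 'esdk-mylayer'
        help = 'esdk test component for mylayer'
        description = 'executes mylayer esdk tests'
        # inherit sdk + esdk cases, then append this layer's own directory
        default_cases = OESDKExtTestContextExecutor.default_cases + \
            [os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cases')]
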
diff --git a/meta/lib/oeqa/sdkext/devtool.py b/meta/lib/oeqa/sdkext/devtool.py
deleted file mode 100644
index 65f41f6875..0000000000
--- a/meta/lib/oeqa/sdkext/devtool.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import shutil
-import subprocess
-import urllib.request
-from oeqa.oetest import oeSDKExtTest
-from oeqa.utils.decorators import *
-
-class DevtoolTest(oeSDKExtTest):
- @classmethod
- def setUpClass(self):
- self.myapp_src = os.path.join(self.tc.sdkextfilesdir, "myapp")
- self.myapp_dst = os.path.join(self.tc.sdktestdir, "myapp")
- shutil.copytree(self.myapp_src, self.myapp_dst)
-
- self.myapp_cmake_src = os.path.join(self.tc.sdkextfilesdir, "myapp_cmake")
- self.myapp_cmake_dst = os.path.join(self.tc.sdktestdir, "myapp_cmake")
- shutil.copytree(self.myapp_cmake_src, self.myapp_cmake_dst)
-
- def _test_devtool_build(self, directory):
- self._run('devtool add myapp %s' % directory)
- try:
- self._run('devtool build myapp')
- except Exception as e:
- print(e.output)
- self._run('devtool reset myapp')
- raise e
- self._run('devtool reset myapp')
-
- def _test_devtool_build_package(self, directory):
- self._run('devtool add myapp %s' % directory)
- try:
- self._run('devtool package myapp')
- except Exception as e:
- print(e.output)
- self._run('devtool reset myapp')
- raise e
- self._run('devtool reset myapp')
-
- def test_devtool_location(self):
- output = self._run('which devtool')
- self.assertEqual(output.startswith(self.tc.sdktestdir), True, \
- msg="Seems that devtool isn't the eSDK one: %s" % output)
-
- @skipUnlessPassed('test_devtool_location')
- def test_devtool_add_reset(self):
- self._run('devtool add myapp %s' % self.myapp_dst)
- self._run('devtool reset myapp')
-
- @testcase(1473)
- @skipUnlessPassed('test_devtool_location')
- def test_devtool_build_make(self):
- self._test_devtool_build(self.myapp_dst)
-
- @testcase(1474)
- @skipUnlessPassed('test_devtool_location')
- def test_devtool_build_esdk_package(self):
- self._test_devtool_build_package(self.myapp_dst)
-
- @testcase(1479)
- @skipUnlessPassed('test_devtool_location')
- def test_devtool_build_cmake(self):
- self._test_devtool_build(self.myapp_cmake_dst)
-
- @testcase(1482)
- @skipUnlessPassed('test_devtool_location')
- def test_extend_autotools_recipe_creation(self):
- req = 'https://github.com/rdfa/librdfa'
- recipe = "bbexample"
- self._run('devtool add %s %s' % (recipe, req) )
- try:
- self._run('devtool build %s' % recipe)
- except Exception as e:
- print(e.output)
- self._run('devtool reset %s' % recipe)
- raise e
- self._run('devtool reset %s' % recipe)
-
- @testcase(1484)
- @skipUnlessPassed('test_devtool_location')
- def test_devtool_kernelmodule(self):
- docfile = 'https://github.com/umlaeute/v4l2loopback.git'
- recipe = 'v4l2loopback-driver'
- self._run('devtool add %s %s' % (recipe, docfile) )
- try:
- self._run('devtool build %s' % recipe)
- except Exception as e:
- print(e.output)
- self._run('devtool reset %s' % recipe)
- raise e
- self._run('devtool reset %s' % recipe)
-
- @testcase(1478)
- @skipUnlessPassed('test_devtool_location')
- def test_recipes_for_nodejs(self):
- package_nodejs = "npm://registry.npmjs.org;name=winston;version=2.2.0"
- self._run('devtool add %s ' % package_nodejs)
- try:
- self._run('devtool build %s ' % package_nodejs)
- except Exception as e:
- print(e.output)
- self._run('devtool reset %s' % package_nodejs)
- raise e
- self._run('devtool reset %s '% package_nodejs)
-
-
- @classmethod
- def tearDownClass(self):
- shutil.rmtree(self.myapp_dst)
- shutil.rmtree(self.myapp_cmake_dst)
diff --git a/meta/lib/oeqa/sdkext/sdk_update.py b/meta/lib/oeqa/sdkext/sdk_update.py
deleted file mode 100644
index 2ade839c05..0000000000
--- a/meta/lib/oeqa/sdkext/sdk_update.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import os
-import shutil
-import subprocess
-
-from oeqa.oetest import oeSDKExtTest
-from oeqa.utils.httpserver import HTTPService
-
-class SdkUpdateTest(oeSDKExtTest):
-
- @classmethod
- def setUpClass(self):
- self.publish_dir = os.path.join(self.tc.sdktestdir, 'esdk_publish')
- if os.path.exists(self.publish_dir):
- shutil.rmtree(self.publish_dir)
- os.mkdir(self.publish_dir)
-
- tcname_new = self.tc.d.expand(
- "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}-new.sh")
- if not os.path.exists(tcname_new):
- tcname_new = self.tc.tcname
-
- cmd = 'oe-publish-sdk %s %s' % (tcname_new, self.publish_dir)
- subprocess.check_output(cmd, shell=True)
-
- self.http_service = HTTPService(self.publish_dir)
- self.http_service.start()
-
- self.http_url = "http://127.0.0.1:%d" % self.http_service.port
-
- def test_sdk_update_http(self):
- output = self._run("devtool sdk-update \"%s\"" % self.http_url)
-
- @classmethod
- def tearDownClass(self):
- self.http_service.stop()
- shutil.rmtree(self.publish_dir)
diff --git a/meta/lib/oeqa/sdkext/testsdk.py b/meta/lib/oeqa/sdkext/testsdk.py
new file mode 100644
index 0000000000..785b5dda53
--- /dev/null
+++ b/meta/lib/oeqa/sdkext/testsdk.py
@@ -0,0 +1,107 @@
+#
+# Copyright 2018 by Garmin Ltd. or its subsidiaries
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.sdk.testsdk import TestSDKBase
+
+class TestSDKExt(TestSDKBase):
+ def run(self, d):
+ import os
+ import json
+ import subprocess
+ import logging
+
+ from bb.utils import export_proxies
+ from oeqa.utils import avoid_paths_in_environ, make_logger_bitbake_compatible, subprocesstweak
+ from oeqa.sdkext.context import OESDKExtTestContext, OESDKExtTestContextExecutor
+
+ pn = d.getVar("PN")
+ logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
+
+ # The extensible SDK uses the network
+ export_proxies(d)
+
+ subprocesstweak.errors_have_output()
+
+ # The extensible SDK can be contaminated if native programs are
+ # in PATH, e.g. using perl-native instead of the eSDK one.
+ paths_to_avoid = [d.getVar('STAGING_DIR'),
+ d.getVar('BASE_WORKDIR')]
+ os.environ['PATH'] = avoid_paths_in_environ(paths_to_avoid)
+
+ tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.sh")
+ if not os.path.exists(tcname):
+ bb.fatal("The toolchain ext %s is not built. Build it before running the" \
+ " tests: 'bitbake <image> -c populate_sdk_ext' ." % tcname)
+
+ tdname = d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.testdata.json")
+ test_data = json.load(open(tdname, "r"))
+
+ target_pkg_manifest = OESDKExtTestContextExecutor._load_manifest(
+ d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"))
+ host_pkg_manifest = OESDKExtTestContextExecutor._load_manifest(
+ d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"))
+
+ sdk_dir = d.expand("${WORKDIR}/testsdkext/")
+ bb.utils.remove(sdk_dir, True)
+ bb.utils.mkdirhier(sdk_dir)
+ try:
+ subprocess.check_output("%s -y -d %s" % (tcname, sdk_dir), shell=True)
+ except subprocess.CalledProcessError as e:
+ msg = "Couldn't install the extensible SDK:\n%s" % e.output.decode("utf-8")
+ logfn = os.path.join(sdk_dir, 'preparing_build_system.log')
+ if os.path.exists(logfn):
+ msg += '\n\nContents of preparing_build_system.log:\n'
+ with open(logfn, 'r') as f:
+ for line in f:
+ msg += line
+ bb.fatal(msg)
+
+ fail = False
+ sdk_envs = OESDKExtTestContextExecutor._get_sdk_environs(sdk_dir)
+ for s in sdk_envs:
+ bb.plain("Extensible SDK testing environment: %s" % s)
+
+ sdk_env = sdk_envs[s]
+
+ # Use our own SSTATE_DIR and DL_DIR so that updates to the eSDK come from our sstate cache
+ # and we don't spend hours downloading kernels for the kernel module test
+ # Abuse auto.conf since local.conf would be overwritten by the SDK
+ with open(os.path.join(sdk_dir, 'conf', 'auto.conf'), 'a+') as f:
+ f.write('SSTATE_MIRRORS += " \\n file://.* file://%s/PATH"\n' % test_data.get('SSTATE_DIR'))
+ f.write('SOURCE_MIRROR_URL = "file://%s"\n' % test_data.get('DL_DIR'))
+ f.write('INHERIT += "own-mirrors"\n')
+ f.write('PREMIRRORS_prepend = " git://git.yoctoproject.org/.* git://%s/git2/git.yoctoproject.org.BASENAME \\n "\n' % test_data.get('DL_DIR'))
+
+ # We need to do this in case we have a minimal SDK
+ subprocess.check_output(". %s > /dev/null; devtool sdk-install meta-extsdk-toolchain" % \
+ sdk_env, cwd=sdk_dir, shell=True, stderr=subprocess.STDOUT)
+
+ tc = OESDKExtTestContext(td=test_data, logger=logger, sdk_dir=sdk_dir,
+ sdk_env=sdk_env, target_pkg_manifest=target_pkg_manifest,
+ host_pkg_manifest=host_pkg_manifest)
+
+ try:
+ tc.loadTests(OESDKExtTestContextExecutor.default_cases)
+ except Exception as e:
+ import traceback
+ bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+
+ result = tc.runTests()
+
+ component = "%s %s" % (pn, OESDKExtTestContextExecutor.name)
+ context_msg = "%s:%s" % (os.path.basename(tcname), os.path.basename(sdk_env))
+ configuration = self.get_sdk_configuration(d, 'sdkext')
+ result.logDetails(self.get_sdk_json_result_dir(d),
+ configuration,
+ self.get_sdk_result_id(configuration))
+ result.logSummary(component, context_msg)
+
+ if not result.wasSuccessful():
+ fail = True
+
+ if fail:
+ bb.fatal("%s - FAILED - check the task log and the commands log" % pn)
+
diff --git a/meta/lib/oeqa/selftest/__init__.py b/meta/lib/oeqa/selftest/__init__.py
deleted file mode 100644
index 3ad9513f40..0000000000
--- a/meta/lib/oeqa/selftest/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from pkgutil import extend_path
-__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oeqa/selftest/_toaster.py b/meta/lib/oeqa/selftest/_toaster.py
deleted file mode 100644
index 15ea9df9ef..0000000000
--- a/meta/lib/oeqa/selftest/_toaster.py
+++ /dev/null
@@ -1,320 +0,0 @@
-import unittest
-import os
-import sys
-import shlex, subprocess
-import urllib.request, urllib.parse, urllib.error, subprocess, time, getpass, re, json, shlex
-
-import oeqa.utils.ftools as ftools
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd
-
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../', 'bitbake/lib/toaster')))
-os.environ.setdefault("DJANGO_SETTINGS_MODULE", "toastermain.settings")
-
-import toastermain.settings
-from django.db.models import Q
-from orm.models import *
-from oeqa.utils.decorators import testcase
-
-class ToasterSetup(oeSelfTest):
-
- def recipe_parse(self, file_path, var):
- for line in open(file_path,'r'):
- if line.find(var) > -1:
- val = line.split(" = ")[1].replace("\"", "").strip()
- return val
-
- def fix_file_path(self, file_path):
- if ":" in file_path:
- file_path=file_path.split(":")[2]
- return file_path
-
-class Toaster_DB_Tests(ToasterSetup):
-
- # Check if build name is unique - tc_id=795
- @testcase(795)
- def test_Build_Unique_Name(self):
- all_builds = Build.objects.all().count()
- distinct_builds = Build.objects.values('id').distinct().count()
- self.assertEqual(distinct_builds, all_builds, msg = 'Build name is not unique')
-
- # Check if build coocker log path is unique - tc_id=819
- @testcase(819)
- def test_Build_Unique_Cooker_Log_Path(self):
- distinct_path = Build.objects.values('cooker_log_path').distinct().count()
- total_builds = Build.objects.values('id').count()
- self.assertEqual(distinct_path, total_builds, msg = 'Build coocker log path is not unique')
-
- # Check if task order is unique for one build - tc=824
- @testcase(824)
- def test_Task_Unique_Order(self):
- builds = Build.objects.values('id')
- cnt_err = []
- for build in builds:
- total_task_order = Task.objects.filter(build = build['id']).values('order').count()
- distinct_task_order = Task.objects.filter(build = build['id']).values('order').distinct().count()
- if (total_task_order != distinct_task_order):
- cnt_err.append(build['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err)
-
- # Check task order sequence for one build - tc=825
- @testcase(825)
- def test_Task_Order_Sequence(self):
- builds = builds = Build.objects.values('id')
- cnt_err = []
- for build in builds:
- tasks = Task.objects.filter(Q(build = build['id']), ~Q(order = None), ~Q(task_name__contains = '_setscene')).values('id', 'order').order_by("order")
- cnt_tasks = 0
- for task in tasks:
- cnt_tasks += 1
- if (task['order'] != cnt_tasks):
- cnt_err.append(task['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
-
- # Check if disk_io matches the difference between EndTimeIO and StartTimeIO in build stats - tc=828
- ### this needs to be updated ###
- #def test_Task_Disk_IO_TC828(self):
-
- # Check if outcome = 2 (SSTATE) then sstate_result must be 3 (RESTORED) - tc=832
- @testcase(832)
- def test_Task_If_Outcome_2_Sstate_Result_Must_Be_3(self):
- tasks = Task.objects.filter(outcome = 2).values('id', 'sstate_result')
- cnt_err = []
- for task in tasks:
- if (task['sstate_result'] != 3):
- cnt_err.append(task['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
-
- # Check if outcome = 1 (COVERED) or 3 (EXISTING) then sstate_result must be 0 (SSTATE_NA) - tc=833
- @testcase(833)
- def test_Task_If_Outcome_1_3_Sstate_Result_Must_Be_0(self):
- tasks = Task.objects.filter(outcome__in = (1, 3)).values('id', 'sstate_result')
- cnt_err = []
- for task in tasks:
- if (task['sstate_result'] != 0):
- cnt_err.append(task['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
-
- # Check if outcome is 0 (SUCCESS) or 4 (FAILED) then sstate_result must be 0 (NA), 1 (MISS) or 2 (FAILED) - tc=834
- @testcase(834)
- def test_Task_If_Outcome_0_4_Sstate_Result_Must_Be_0_1_2(self):
- tasks = Task.objects.filter(outcome__in = (0, 4)).values('id', 'sstate_result')
- cnt_err = []
- for task in tasks:
- if (task['sstate_result'] not in [0, 1, 2]):
- cnt_err.append(task['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
-
- # Check if task_executed = TRUE (1), script_type must be 0 (CODING_NA), 2 (CODING_PYTHON), 3 (CODING_SHELL) - tc=891
- @testcase(891)
- def test_Task_If_Task_Executed_True_Script_Type_0_2_3(self):
- tasks = Task.objects.filter(task_executed = 1).values('id', 'script_type')
- cnt_err = []
- for task in tasks:
- if (task['script_type'] not in [0, 2, 3]):
- cnt_err.append(task['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
-
- # Check if task_executed = TRUE (1), outcome must be 0 (SUCCESS) or 4 (FAILED) - tc=836
- @testcase(836)
- def test_Task_If_Task_Executed_True_Outcome_0_4(self):
- tasks = Task.objects.filter(task_executed = 1).values('id', 'outcome')
- cnt_err = []
- for task in tasks:
- if (task['outcome'] not in [0, 4]):
- cnt_err.append(task['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
-
- # Check if task_executed = FALSE (0), script_type must be 0 - tc=890
- @testcase(890)
- def test_Task_If_Task_Executed_False_Script_Type_0(self):
- tasks = Task.objects.filter(task_executed = 0).values('id', 'script_type')
- cnt_err = []
- for task in tasks:
- if (task['script_type'] != 0):
- cnt_err.append(task['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
-
- # Check if task_executed = FALSE (0) and build outcome = SUCCEEDED (0), task outcome must be 1 (COVERED), 2 (CACHED), 3 (PREBUILT), 5 (EMPTY) - tc=837
- @testcase(837)
- def test_Task_If_Task_Executed_False_Outcome_1_2_3_5(self):
- builds = Build.objects.filter(outcome = 0).values('id')
- cnt_err = []
- for build in builds:
- tasks = Task.objects.filter(build = build['id'], task_executed = 0).values('id', 'outcome')
- for task in tasks:
- if (task['outcome'] not in [1, 2, 3, 5]):
- cnt_err.append(task['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
-
- # Key verification - tc=888
- @testcase(888)
- def test_Target_Installed_Package(self):
- rows = Target_Installed_Package.objects.values('id', 'target_id', 'package_id')
- cnt_err = []
- for row in rows:
- target = Target.objects.filter(id = row['target_id']).values('id')
- package = Package.objects.filter(id = row['package_id']).values('id')
- if (not target or not package):
- cnt_err.append(row['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for target installed package id: %s' % cnt_err)
-
- # Key verification - tc=889
- @testcase(889)
- def test_Task_Dependency(self):
- rows = Task_Dependency.objects.values('id', 'task_id', 'depends_on_id')
- cnt_err = []
- for row in rows:
- task_id = Task.objects.filter(id = row['task_id']).values('id')
- depends_on_id = Task.objects.filter(id = row['depends_on_id']).values('id')
- if (not task_id or not depends_on_id):
- cnt_err.append(row['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for task dependency id: %s' % cnt_err)
-
- # Check that when is_image=true and orm_build.outcome=0, the build target file_name is populated, the file exists and its size matches the file_size value
- ### Need to add the tc in the test run
- @testcase(1037)
- def test_Target_File_Name_Populated(self):
- builds = Build.objects.filter(outcome = 0).values('id')
- for build in builds:
- targets = Target.objects.filter(build_id = build['id'], is_image = 1).values('id')
- for target in targets:
- target_files = Target_Image_File.objects.filter(target_id = target['id']).values('id', 'file_name', 'file_size')
- cnt_err = []
- for file_info in target_files:
- target_id = file_info['id']
- target_file_name = file_info['file_name']
- target_file_size = file_info['file_size']
- if (not target_file_name or not target_file_size):
- cnt_err.append(target_id)
- else:
- if (not os.path.exists(target_file_name)):
- cnt_err.append(target_id)
- else:
- if (os.path.getsize(target_file_name) != target_file_size):
- cnt_err.append(target_id)
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for target image file id: %s' % cnt_err)
-
- # Key verification - tc=884
- @testcase(884)
- def test_Package_Dependency(self):
- cnt_err = []
- deps = Package_Dependency.objects.values('id', 'package_id', 'depends_on_id')
- for dep in deps:
- if (dep['package_id'] == dep['depends_on_id']):
- cnt_err.append(dep['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for package dependency id: %s' % cnt_err)
-
- # Recipe key verification: a recipe must not depend on a recipe having the same name - tc=883
- @testcase(883)
- def test_Recipe_Dependency(self):
- deps = Recipe_Dependency.objects.values('id', 'recipe_id', 'depends_on_id')
- cnt_err = []
- for dep in deps:
- if (not dep['recipe_id'] or not dep['depends_on_id']):
- cnt_err.append(dep['id'])
- else:
- name = Recipe.objects.filter(id = dep['recipe_id']).values('name')
- dep_name = Recipe.objects.filter(id = dep['depends_on_id']).values('name')
- if (name == dep_name):
- cnt_err.append(dep['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe dependency id: %s' % cnt_err)
-
- # Check if package name does not start with a number (0-9) - tc=846
- @testcase(846)
- def test_Package_Name_For_Number(self):
- packages = Package.objects.filter(~Q(size = -1)).values('id', 'name')
- cnt_err = []
- for package in packages:
- if (package['name'][0].isdigit() is True):
- cnt_err.append(package['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
-
- # Check if package version starts with a number (0-9) - tc=847
- @testcase(847)
- def test_Package_Version_Starts_With_Number(self):
- packages = Package.objects.filter(~Q(size = -1)).values('id', 'version')
- cnt_err = []
- for package in packages:
- if (package['version'][0].isdigit() is False):
- cnt_err.append(package['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
-
- # Check if package revision starts with 'r' - tc=848
- @testcase(848)
- def test_Package_Revision_Starts_With_r(self):
- packages = Package.objects.filter(~Q(size = -1)).values('id', 'revision')
- cnt_err = []
- for package in packages:
- if (package['revision'][0].startswith("r") is False):
- cnt_err.append(package['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
-
- # Check the validity of the package build_id
- ### TC must be added in test run
- @testcase(1038)
- def test_Package_Build_Id(self):
- packages = Package.objects.filter(~Q(size = -1)).values('id', 'build_id')
- cnt_err = []
- for package in packages:
- build_id = Build.objects.filter(id = package['build_id']).values('id')
- if (not build_id):
- cnt_err.append(package['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
-
- # Check the validity of package recipe_id
- ### TC must be added in test run
- @testcase(1039)
- def test_Package_Recipe_Id(self):
- packages = Package.objects.filter(~Q(size = -1)).values('id', 'recipe_id')
- cnt_err = []
- for package in packages:
- recipe_id = Recipe.objects.filter(id = package['recipe_id']).values('id')
- if (not recipe_id):
- cnt_err.append(package['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
-
- # Check if package installed_size field is not null
- ### TC must be added in test run
- @testcase(1040)
- def test_Package_Installed_Size_Not_NULL(self):
- packages = Package.objects.filter(installed_size__isnull = True).values('id')
- cnt_err = []
- for package in packages:
- cnt_err.append(package['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
-
- # Check that all layer index URL requests return exit code 200 - tc=843
- @testcase(843)
- def test_Layers_Requests_Exit_Code(self):
- layers = Layer.objects.values('id', 'layer_index_url')
- cnt_err = []
- for layer in layers:
- resp = urllib.request.urlopen(layer['layer_index_url'])
- if (resp.getcode() != 200):
- cnt_err.append(layer['id'])
- self.assertEqual(len(cnt_err), 0, msg = 'Errors for layer id: %s' % cnt_err)
-
- # Check if django server starts regardless of the timezone set on the machine - tc=905
- @testcase(905)
- def test_Start_Django_Timezone(self):
- current_path = os.getcwd()
- zonefilelist = []
- ZONEINFOPATH = '/usr/share/zoneinfo/'
- os.chdir("../bitbake/lib/toaster/")
- cnt_err = []
- for filename in os.listdir(ZONEINFOPATH):
- if os.path.isfile(os.path.join(ZONEINFOPATH, filename)):
- zonefilelist.append(filename)
- for k in range(len(zonefilelist)):
- if k <= 5:
- files = zonefilelist[k]
- os.system("export TZ="+str(files)+"; python manage.py runserver > /dev/null 2>&1 &")
- time.sleep(3)
- pid = subprocess.check_output("ps aux | grep '[/u]sr/bin/python manage.py runserver' | awk '{print $2}'", shell = True)
- if pid:
- os.system("kill -9 "+str(pid))
- else:
- cnt_err.append(zonefilelist[k])
- self.assertEqual(len(cnt_err), 0, msg = 'Django server did not start with timezone(s): %s' % cnt_err)
- os.chdir(current_path)
diff --git a/meta/lib/oeqa/selftest/archiver.py b/meta/lib/oeqa/selftest/archiver.py
deleted file mode 100644
index f2030c446d..0000000000
--- a/meta/lib/oeqa/selftest/archiver.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import bitbake, get_bb_var
-from oeqa.utils.decorators import testcase
-import glob
-import os
-import shutil
-
-
-class Archiver(oeSelfTest):
-
- @testcase(1345)
- def test_archiver_allows_to_filter_on_recipe_name(self):
- """
- Summary: The archiver should offer the possibility to filter on the recipe. (#6929)
- Expected: 1. Included recipe (busybox) should be included
- 2. Excluded recipe (zlib) should be excluded
- Product: oe-core
- Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
-
- include_recipe = 'busybox'
- exclude_recipe = 'zlib'
-
- features = 'INHERIT += "archiver"\n'
- features += 'ARCHIVER_MODE[src] = "original"\n'
- features += 'COPYLEFT_PN_INCLUDE = "%s"\n' % include_recipe
- features += 'COPYLEFT_PN_EXCLUDE = "%s"\n' % exclude_recipe
-
- # Update local.conf
- self.write_config(features)
-
- tmp_dir = get_bb_var('TMPDIR')
- deploy_dir_src = get_bb_var('DEPLOY_DIR_SRC')
- target_sys = get_bb_var('TARGET_SYS')
- src_path = os.path.join(deploy_dir_src, target_sys)
-
- # Delete tmp directory
- shutil.rmtree(tmp_dir)
-
- # Build core-image-minimal
- bitbake('core-image-minimal')
-
- # Check that include_recipe was included
- is_included = len(glob.glob(src_path + '/%s*' % include_recipe))
- self.assertEqual(1, is_included, 'Recipe %s was not included.' % include_recipe)
-
- # Check that exclude_recipe was excluded
- is_excluded = len(glob.glob(src_path + '/%s*' % exclude_recipe))
- self.assertEqual(0, is_excluded, 'Recipe %s was not excluded.' % exclude_recipe)
diff --git a/meta/lib/oeqa/selftest/base.py b/meta/lib/oeqa/selftest/base.py
deleted file mode 100644
index 26c93f905a..0000000000
--- a/meta/lib/oeqa/selftest/base.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# Copyright (c) 2013 Intel Corporation
-#
-# Released under the MIT license (see COPYING.MIT)
-
-
-# DESCRIPTION
-# Base class inherited by test classes in meta/lib/oeqa/selftest
-
-import unittest
-import os
-import sys
-import shutil
-import logging
-import errno
-
-import oeqa.utils.ftools as ftools
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
-from oeqa.utils.decorators import LogResults
-from random import choice
-import glob
-
-@LogResults
-class oeSelfTest(unittest.TestCase):
-
- log = logging.getLogger("selftest.base")
- longMessage = True
-
- def __init__(self, methodName="runTest"):
- self.builddir = os.environ.get("BUILDDIR")
- self.localconf_path = os.path.join(self.builddir, "conf/local.conf")
- self.localconf_backup = os.path.join(self.builddir, "conf/local.bk")
- self.testinc_path = os.path.join(self.builddir, "conf/selftest.inc")
- self.local_bblayers_path = os.path.join(self.builddir, "conf/bblayers.conf")
- self.local_bblayers_backup = os.path.join(self.builddir,
- "conf/bblayers.bk")
- self.testinc_bblayers_path = os.path.join(self.builddir, "conf/bblayers.inc")
- self.machineinc_path = os.path.join(self.builddir, "conf/machine.inc")
- self.testlayer_path = oeSelfTest.testlayer_path
- self._extra_tear_down_commands = []
- self._track_for_cleanup = [
- self.testinc_path, self.testinc_bblayers_path,
- self.machineinc_path, self.localconf_backup,
- self.local_bblayers_backup]
- super(oeSelfTest, self).__init__(methodName)
-
- def setUp(self):
- os.chdir(self.builddir)
- # Check if local.conf or bblayers.conf files backup exists
- # from a previous failed test and restore them
- if os.path.isfile(self.localconf_backup) or os.path.isfile(
- self.local_bblayers_backup):
- self.log.debug("Found a local.conf and/or bblayers.conf backup \
-from a previously aborted test. Restoring these files now, but tests should \
-be re-executed from a clean environment to ensure accurate results.")
- try:
- shutil.copyfile(self.localconf_backup, self.localconf_path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- try:
- shutil.copyfile(self.local_bblayers_backup,
- self.local_bblayers_path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- else:
- # backup local.conf and bblayers.conf
- shutil.copyfile(self.localconf_path, self.localconf_backup)
- shutil.copyfile(self.local_bblayers_path,
- self.local_bblayers_backup)
- self.log.debug("Creating local.conf and bblayers.conf backups.")
- # we don't know what the previous test left around in config or inc files
- # if it failed so we need a fresh start
- try:
- os.remove(self.testinc_path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- for root, _, files in os.walk(self.testlayer_path):
- for f in files:
- if f == 'test_recipe.inc':
- os.remove(os.path.join(root, f))
-
- for incl_file in [self.testinc_bblayers_path, self.machineinc_path]:
- try:
- os.remove(incl_file)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
-
- # Get CUSTOMMACHINE from env (set by --machine argument to oe-selftest)
- custommachine = os.getenv('CUSTOMMACHINE')
- if custommachine:
- if custommachine == 'random':
- machine = get_random_machine()
- else:
- machine = custommachine
- machine_conf = 'MACHINE ??= "%s"\n' % machine
- self.set_machine_config(machine_conf)
- print('MACHINE: %s' % machine)
-
- # tests might need their own setup
- # but if they overwrite this one they have to call
- # super each time, so let's give them an alternative
- self.setUpLocal()
-
- def setUpLocal(self):
- pass
-
- def tearDown(self):
- if self._extra_tear_down_commands:
- failed_extra_commands = []
- for command in self._extra_tear_down_commands:
- result = runCmd(command, ignore_status=True)
- if not result.status == 0:
- failed_extra_commands.append(command)
- if failed_extra_commands:
- self.log.warning("tearDown commands have failed: %s" % ', '.join(map(str, failed_extra_commands)))
- self.log.debug("Trying to move on.")
- self._extra_tear_down_commands = []
-
- if self._track_for_cleanup:
- for path in self._track_for_cleanup:
- if os.path.isdir(path):
- shutil.rmtree(path)
- if os.path.isfile(path):
- os.remove(path)
- self._track_for_cleanup = []
-
- self.tearDownLocal()
-
- def tearDownLocal(self):
- pass
-
- # add test specific commands to the tearDown method.
- def add_command_to_tearDown(self, command):
- self.log.debug("Adding command '%s' to tearDown for this test." % command)
- self._extra_tear_down_commands.append(command)
- # add test specific files or directories to be removed in the tearDown method
- def track_for_cleanup(self, path):
- self.log.debug("Adding path '%s' to be cleaned up when test is over" % path)
- self._track_for_cleanup.append(path)
-
- # write to <builddir>/conf/selftest.inc
- def write_config(self, data):
- self.log.debug("Writing to: %s\n%s\n" % (self.testinc_path, data))
- ftools.write_file(self.testinc_path, data)
-
- custommachine = os.getenv('CUSTOMMACHINE')
- if custommachine and 'MACHINE' in data:
- machine = get_bb_var('MACHINE')
- self.log.warning('MACHINE overridden: %s' % machine)
-
- # append to <builddir>/conf/selftest.inc
- def append_config(self, data):
- self.log.debug("Appending to: %s\n%s\n" % (self.testinc_path, data))
- ftools.append_file(self.testinc_path, data)
-
- custommachine = os.getenv('CUSTOMMACHINE')
- if custommachine and 'MACHINE' in data:
- machine = get_bb_var('MACHINE')
- self.log.warning('MACHINE overridden: %s' % machine)
-
- # remove data from <builddir>/conf/selftest.inc
- def remove_config(self, data):
- self.log.debug("Removing from: %s\n\%s\n" % (self.testinc_path, data))
- ftools.remove_from_file(self.testinc_path, data)
-
- # write to meta-selftest/recipes-test/<recipe>/test_recipe.inc
- def write_recipeinc(self, recipe, data):
- inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
- self.log.debug("Writing to: %s\n%s\n" % (inc_file, data))
- ftools.write_file(inc_file, data)
-
- # append data to meta-selftest/recipes-test/<recipe>/test_recipe.inc
- def append_recipeinc(self, recipe, data):
- inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
- self.log.debug("Appending to: %s\n%s\n" % (inc_file, data))
- ftools.append_file(inc_file, data)
-
- # remove data from meta-selftest/recipes-test/<recipe>/test_recipe.inc
- def remove_recipeinc(self, recipe, data):
- inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
- self.log.debug("Removing from: %s\n%s\n" % (inc_file, data))
- ftools.remove_from_file(inc_file, data)
-
- # delete meta-selftest/recipes-test/<recipe>/test_recipe.inc file
- def delete_recipeinc(self, recipe):
- inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
- self.log.debug("Deleting file: %s" % inc_file)
- try:
- os.remove(inc_file)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
-
- # write to <builddir>/conf/bblayers.inc
- def write_bblayers_config(self, data):
- self.log.debug("Writing to: %s\n%s\n" % (self.testinc_bblayers_path, data))
- ftools.write_file(self.testinc_bblayers_path, data)
-
- # append to <builddir>/conf/bblayers.inc
- def append_bblayers_config(self, data):
- self.log.debug("Appending to: %s\n%s\n" % (self.testinc_bblayers_path, data))
- ftools.append_file(self.testinc_bblayers_path, data)
-
- # remove data from <builddir>/conf/bblayers.inc
- def remove_bblayers_config(self, data):
- self.log.debug("Removing from: %s\n\%s\n" % (self.testinc_bblayers_path, data))
- ftools.remove_from_file(self.testinc_bblayers_path, data)
-
- # write to <builddir>/conf/machine.inc
- def set_machine_config(self, data):
- self.log.debug("Writing to: %s\n%s\n" % (self.machineinc_path, data))
- ftools.write_file(self.machineinc_path, data)
-
-
-def get_available_machines():
- # Get a list of all available machines
- bbpath = get_bb_var('BBPATH').split(':')
- machines = []
-
- for path in bbpath:
- found_machines = glob.glob(os.path.join(path, 'conf', 'machine', '*.conf'))
- if found_machines:
- for i in found_machines:
- # eg: '/home/<user>/poky/meta-intel/conf/machine/intel-core2-32.conf'
- machines.append(os.path.splitext(os.path.basename(i))[0])
-
- return machines
-
-
-def get_random_machine():
- # Get a random machine
- return choice(get_available_machines())
diff --git a/meta/lib/oeqa/selftest/case.py b/meta/lib/oeqa/selftest/case.py
new file mode 100644
index 0000000000..ac3308d8a4
--- /dev/null
+++ b/meta/lib/oeqa/selftest/case.py
@@ -0,0 +1,290 @@
+#
+# Copyright (C) 2013-2017 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import sys
+import os
+import shutil
+import glob
+import errno
+from unittest.util import safe_repr
+
+import oeqa.utils.ftools as ftools
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var
+from oeqa.core.case import OETestCase
+
+import bb.utils
+
+class OESelftestTestCase(OETestCase):
+ def __init__(self, methodName="runTest"):
+ self._extra_tear_down_commands = []
+ super(OESelftestTestCase, self).__init__(methodName)
+
+ @classmethod
+ def setUpClass(cls):
+ super(OESelftestTestCase, cls).setUpClass()
+
+ cls.testlayer_path = cls.tc.config_paths['testlayer_path']
+ cls.builddir = cls.tc.config_paths['builddir']
+
+ cls.localconf_path = cls.tc.config_paths['localconf']
+ cls.localconf_backup = cls.tc.config_paths['localconf_class_backup']
+ cls.local_bblayers_path = cls.tc.config_paths['bblayers']
+ cls.local_bblayers_backup = cls.tc.config_paths['bblayers_class_backup']
+
+ cls.testinc_path = os.path.join(cls.tc.config_paths['builddir'],
+ "conf/selftest.inc")
+ cls.testinc_bblayers_path = os.path.join(cls.tc.config_paths['builddir'],
+ "conf/bblayers.inc")
+ cls.machineinc_path = os.path.join(cls.tc.config_paths['builddir'],
+ "conf/machine.inc")
+
+ cls._track_for_cleanup = [
+ cls.testinc_path, cls.testinc_bblayers_path,
+ cls.machineinc_path, cls.localconf_backup,
+ cls.local_bblayers_backup]
+
+ cls.add_include()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.remove_include()
+ cls.remove_inc_files()
+ super(OESelftestTestCase, cls).tearDownClass()
+
+ @classmethod
+ def add_include(cls):
+ if "#include added by oe-selftest" \
+ not in ftools.read_file(os.path.join(cls.builddir, "conf/local.conf")):
+ cls.logger.info("Adding: \"include selftest.inc\" in %s" % os.path.join(cls.builddir, "conf/local.conf"))
+ ftools.append_file(os.path.join(cls.builddir, "conf/local.conf"), \
+ "\n#include added by oe-selftest\ninclude machine.inc\ninclude selftest.inc")
+
+ if "#include added by oe-selftest" \
+ not in ftools.read_file(os.path.join(cls.builddir, "conf/bblayers.conf")):
+ cls.logger.info("Adding: \"include bblayers.inc\" in bblayers.conf")
+ ftools.append_file(os.path.join(cls.builddir, "conf/bblayers.conf"), \
+ "\n#include added by oe-selftest\ninclude bblayers.inc")
+
+ @classmethod
+ def remove_include(cls):
+ if "#include added by oe-selftest.py" \
+ in ftools.read_file(os.path.join(cls.builddir, "conf/local.conf")):
+ cls.logger.info("Removing the include from local.conf")
+ ftools.remove_from_file(os.path.join(cls.builddir, "conf/local.conf"), \
+ "\n#include added by oe-selftest.py\ninclude machine.inc\ninclude selftest.inc")
+
+ if "#include added by oe-selftest.py" \
+ in ftools.read_file(os.path.join(cls.builddir, "conf/bblayers.conf")):
+ cls.logger.info("Removing the include from bblayers.conf")
+ ftools.remove_from_file(os.path.join(cls.builddir, "conf/bblayers.conf"), \
+ "\n#include added by oe-selftest.py\ninclude bblayers.inc")
+
+ @classmethod
+ def remove_inc_files(cls):
+ try:
+ os.remove(os.path.join(cls.builddir, "conf/selftest.inc"))
+ for root, _, files in os.walk(cls.testlayer_path):
+ for f in files:
+ if f == 'test_recipe.inc':
+ os.remove(os.path.join(root, f))
+        except OSError:
+            pass
+
+ for incl_file in ['conf/bblayers.inc', 'conf/machine.inc']:
+ try:
+ os.remove(os.path.join(cls.builddir, incl_file))
+        except OSError:
+ pass
+
+ def setUp(self):
+ super(OESelftestTestCase, self).setUp()
+ os.chdir(self.builddir)
+ # Check if local.conf or bblayers.conf files backup exists
+ # from a previous failed test and restore them
+ if os.path.isfile(self.localconf_backup) or os.path.isfile(
+ self.local_bblayers_backup):
+            self.logger.debug("\
+Found a local.conf and/or bblayers.conf backup from a previously aborted test. \
+Restoring these files now, but tests should be re-executed from a clean environment \
+to ensure accurate results.")
+ try:
+ shutil.copyfile(self.localconf_backup, self.localconf_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ try:
+ shutil.copyfile(self.local_bblayers_backup,
+ self.local_bblayers_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ # backup local.conf and bblayers.conf
+ shutil.copyfile(self.localconf_path, self.localconf_backup)
+ shutil.copyfile(self.local_bblayers_path, self.local_bblayers_backup)
+ self.logger.debug("Creating local.conf and bblayers.conf backups.")
+ # we don't know what the previous test left around in config or inc files
+ # if it failed so we need a fresh start
+ try:
+ os.remove(self.testinc_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ for root, _, files in os.walk(self.testlayer_path):
+ for f in files:
+ if f == 'test_recipe.inc':
+ os.remove(os.path.join(root, f))
+
+ for incl_file in [self.testinc_bblayers_path, self.machineinc_path]:
+ try:
+ os.remove(incl_file)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ if self.tc.custommachine:
+ machine_conf = 'MACHINE ??= "%s"\n' % self.tc.custommachine
+ self.set_machine_config(machine_conf)
+
+ # tests might need their own setup
+ # but if they overwrite this one they have to call
+ # super each time, so let's give them an alternative
+ self.setUpLocal()
+
+ def setUpLocal(self):
+ pass
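+        # Override sketch (hypothetical subclass, names illustrative only):
+        # tests needing their own setup override setUpLocal() rather than
+        # setUp(), so the base configuration handling above still runs, e.g.
+        #   class MyFeatureTest(OESelftestTestCase):
+        #       def setUpLocal(self):
+        #           self.write_config('INHERIT += "archiver"')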
+
+ def tearDown(self):
+ if self._extra_tear_down_commands:
+ failed_extra_commands = []
+ for command in self._extra_tear_down_commands:
+ result = runCmd(command, ignore_status=True)
+ if not result.status == 0:
+ failed_extra_commands.append(command)
+ if failed_extra_commands:
+ self.logger.warning("tearDown commands have failed: %s" % ', '.join(map(str, failed_extra_commands)))
+ self.logger.debug("Trying to move on.")
+ self._extra_tear_down_commands = []
+
+ if self._track_for_cleanup:
+ for path in self._track_for_cleanup:
+ if os.path.isdir(path):
+ bb.utils.remove(path, recurse=True)
+ if os.path.isfile(path):
+ os.remove(path)
+ self._track_for_cleanup = []
+
+ self.tearDownLocal()
+ super(OESelftestTestCase, self).tearDown()
+
+ def tearDownLocal(self):
+ pass
+
+ def add_command_to_tearDown(self, command):
+ """Add test specific commands to the tearDown method"""
+ self.logger.debug("Adding command '%s' to tearDown for this test." % command)
+ self._extra_tear_down_commands.append(command)
+
+ def track_for_cleanup(self, path):
+ """Add test specific files or directories to be removed in the tearDown method"""
+ self.logger.debug("Adding path '%s' to be cleaned up when test is over" % path)
+ self._track_for_cleanup.append(path)
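+        # Usage sketch (calls of this shape appear in the selftest suite):
+        #   self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
+        #   self.add_command_to_tearDown('bitbake -c clean zlib')
+        # tearDown() then removes the tracked path and runs the command.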
+
+ def write_config(self, data, multiconfig=None):
+ """Write to config file"""
+ if multiconfig:
+ multiconfigdir = "%s/conf/multiconfig" % self.builddir
+ os.makedirs(multiconfigdir, exist_ok=True)
+ dest_path = '%s/%s.conf' % (multiconfigdir, multiconfig)
+ self.track_for_cleanup(dest_path)
+ else:
+ dest_path = self.testinc_path
+
+ self.logger.debug("Writing to: %s\n%s\n" % (dest_path, data))
+ ftools.write_file(dest_path, data)
+
+ if not multiconfig and self.tc.custommachine and 'MACHINE' in data:
+ machine = get_bb_var('MACHINE')
+ self.logger.warning('MACHINE overridden: %s' % machine)
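+        # Usage sketch (hypothetical values; "mc1" is an illustrative name):
+        #   self.write_config('TEST_ENV = "localconf"')   # -> conf/selftest.inc
+        #   self.write_config('MACHINE = "qemux86"', multiconfig='mc1')
+        # The second call writes conf/multiconfig/mc1.conf and tracks it for
+        # cleanup automatically.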
+
+ def append_config(self, data):
+ """Append to <builddir>/conf/selftest.inc"""
+ self.logger.debug("Appending to: %s\n%s\n" % (self.testinc_path, data))
+ ftools.append_file(self.testinc_path, data)
+
+ if self.tc.custommachine and 'MACHINE' in data:
+ machine = get_bb_var('MACHINE')
+ self.logger.warning('MACHINE overridden: %s' % machine)
+
+ def remove_config(self, data):
+ """Remove data from <builddir>/conf/selftest.inc"""
+ self.logger.debug("Removing from: %s\n%s\n" % (self.testinc_path, data))
+ ftools.remove_from_file(self.testinc_path, data)
+
+ def recipeinc(self, recipe):
+ """Return absolute path of meta-selftest/recipes-test/<recipe>/test_recipe.inc"""
+ return os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
+
+ def write_recipeinc(self, recipe, data):
+ """Write to meta-selftest/recipes-test/<recipe>/test_recipe.inc"""
+ inc_file = self.recipeinc(recipe)
+ self.logger.debug("Writing to: %s\n%s\n" % (inc_file, data))
+ ftools.write_file(inc_file, data)
+ return inc_file
+
+ def append_recipeinc(self, recipe, data):
+ """Append data to meta-selftest/recipes-test/<recipe>/test_recipe.inc"""
+ inc_file = self.recipeinc(recipe)
+ self.logger.debug("Appending to: %s\n%s\n" % (inc_file, data))
+ ftools.append_file(inc_file, data)
+ return inc_file
+
+ def remove_recipeinc(self, recipe, data):
+ """Remove data from meta-selftest/recipes-test/<recipe>/test_recipe.inc"""
+ inc_file = self.recipeinc(recipe)
+ self.logger.debug("Removing from: %s\n%s\n" % (inc_file, data))
+ ftools.remove_from_file(inc_file, data)
+
+ def delete_recipeinc(self, recipe):
+ """Delete meta-selftest/recipes-test/<recipe>/test_recipe.inc file"""
+ inc_file = self.recipeinc(recipe)
+ self.logger.debug("Deleting file: %s" % inc_file)
+ try:
+ os.remove(inc_file)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+    def write_bblayers_config(self, data):
+ """Write to <builddir>/conf/bblayers.inc"""
+ self.logger.debug("Writing to: %s\n%s\n" % (self.testinc_bblayers_path, data))
+ ftools.write_file(self.testinc_bblayers_path, data)
+
+ def append_bblayers_config(self, data):
+ """Append to <builddir>/conf/bblayers.inc"""
+ self.logger.debug("Appending to: %s\n%s\n" % (self.testinc_bblayers_path, data))
+ ftools.append_file(self.testinc_bblayers_path, data)
+
+ def remove_bblayers_config(self, data):
+ """Remove data from <builddir>/conf/bblayers.inc"""
+ self.logger.debug("Removing from: %s\n%s\n" % (self.testinc_bblayers_path, data))
+ ftools.remove_from_file(self.testinc_bblayers_path, data)
+
+ def set_machine_config(self, data):
+ """Write to <builddir>/conf/machine.inc"""
+ self.logger.debug("Writing to: %s\n%s\n" % (self.machineinc_path, data))
+ ftools.write_file(self.machineinc_path, data)
+
+    # Check that a path exists
+ def assertExists(self, expr, msg=None):
+ if not os.path.exists(expr):
+ msg = self._formatMessage(msg, "%s does not exist" % safe_repr(expr))
+ raise self.failureException(msg)
+
+    # Check that a path does not exist
+ def assertNotExists(self, expr, msg=None):
+ if os.path.exists(expr):
+ msg = self._formatMessage(msg, "%s exists when it should not" % safe_repr(expr))
+ raise self.failureException(msg)
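+
+# Usage sketch (hypothetical assertions inside a test method):
+#   self.assertExists(os.path.join(self.builddir, 'conf/local.conf'))
+#   self.assertNotExists(os.path.join(self.builddir, 'conf/no-such-file.inc'))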
diff --git a/meta/lib/oeqa/selftest/_sstatetests_noauto.py b/meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py
index fc9ae7efb9..f7c356ad09 100644
--- a/meta/lib/oeqa/selftest/_sstatetests_noauto.py
+++ b/meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py
@@ -1,19 +1,20 @@
-import datetime
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
-import re
import shutil
import oeqa.utils.ftools as ftools
-from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
-from oeqa.selftest.sstate import SStateBase
+from oeqa.selftest.cases.sstate import SStateBase
class RebuildFromSState(SStateBase):
@classmethod
def setUpClass(self):
+ super(RebuildFromSState, self).setUpClass()
self.builddir = os.environ.get('BUILDDIR')
def get_dep_targets(self, primary_targets):
diff --git a/meta/lib/oeqa/selftest/cases/archiver.py b/meta/lib/oeqa/selftest/cases/archiver.py
new file mode 100644
index 0000000000..6bd0e06ec4
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/archiver.py
@@ -0,0 +1,197 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import glob
+from oeqa.utils.commands import bitbake, get_bb_vars
+from oeqa.selftest.case import OESelftestTestCase
+
+class Archiver(OESelftestTestCase):
+
+ def test_archiver_allows_to_filter_on_recipe_name(self):
+ """
+ Summary: The archiver should offer the possibility to filter on the recipe. (#6929)
+ Expected: 1. Included recipe (busybox) should be included
+ 2. Excluded recipe (zlib) should be excluded
+ Product: oe-core
+ Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ include_recipe = 'busybox'
+ exclude_recipe = 'zlib'
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[src] = "original"\n'
+ features += 'COPYLEFT_PN_INCLUDE = "%s"\n' % include_recipe
+ features += 'COPYLEFT_PN_EXCLUDE = "%s"\n' % exclude_recipe
+ self.write_config(features)
+
+ bitbake('-c clean %s %s' % (include_recipe, exclude_recipe))
+ bitbake("-c deploy_archives %s %s" % (include_recipe, exclude_recipe))
+
+ bb_vars = get_bb_vars(['DEPLOY_DIR_SRC', 'TARGET_SYS'])
+ src_path = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['TARGET_SYS'])
+
+ # Check that include_recipe was included
+ included_present = len(glob.glob(src_path + '/%s-*' % include_recipe))
+ self.assertTrue(included_present, 'Recipe %s was not included.' % include_recipe)
+
+ # Check that exclude_recipe was excluded
+ excluded_present = len(glob.glob(src_path + '/%s-*' % exclude_recipe))
+ self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % exclude_recipe)
+
+ def test_archiver_filters_by_type(self):
+ """
+ Summary: The archiver is documented to filter on the recipe type.
+ Expected: 1. included recipe type (target) should be included
+ 2. other types should be excluded
+ Product: oe-core
+ Author: André Draszik <adraszik@tycoint.com>
+ """
+
+ target_recipe = 'initscripts'
+ native_recipe = 'zlib-native'
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[src] = "original"\n'
+ features += 'COPYLEFT_RECIPE_TYPES = "target"\n'
+ self.write_config(features)
+
+ bitbake('-c clean %s %s' % (target_recipe, native_recipe))
+ bitbake("%s -c deploy_archives %s" % (target_recipe, native_recipe))
+
+ bb_vars = get_bb_vars(['DEPLOY_DIR_SRC', 'TARGET_SYS', 'BUILD_SYS'])
+ src_path_target = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['TARGET_SYS'])
+ src_path_native = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['BUILD_SYS'])
+
+ # Check that target_recipe was included
+ included_present = len(glob.glob(src_path_target + '/%s-*' % target_recipe))
+ self.assertTrue(included_present, 'Recipe %s was not included.' % target_recipe)
+
+ # Check that native_recipe was excluded
+ excluded_present = len(glob.glob(src_path_native + '/%s-*' % native_recipe))
+ self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % native_recipe)
+
+ def test_archiver_filters_by_type_and_name(self):
+ """
+ Summary: Test that the archiver archives by recipe type, taking the
+ recipe name into account.
+ Expected: 1. included recipe type (target) should be included
+ 2. other types should be excluded
+ 3. recipe by name should be included / excluded,
+ overriding previous decision by type
+ Product: oe-core
+ Author: André Draszik <adraszik@tycoint.com>
+ """
+
+ target_recipes = [ 'initscripts', 'zlib' ]
+ native_recipes = [ 'update-rc.d-native', 'zlib-native' ]
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[src] = "original"\n'
+ features += 'COPYLEFT_RECIPE_TYPES = "target"\n'
+ features += 'COPYLEFT_PN_INCLUDE = "%s"\n' % native_recipes[1]
+ features += 'COPYLEFT_PN_EXCLUDE = "%s"\n' % target_recipes[1]
+ self.write_config(features)
+
+ bitbake('-c clean %s %s' % (' '.join(target_recipes), ' '.join(native_recipes)))
+ bitbake('-c deploy_archives %s %s' % (' '.join(target_recipes), ' '.join(native_recipes)))
+
+ bb_vars = get_bb_vars(['DEPLOY_DIR_SRC', 'TARGET_SYS', 'BUILD_SYS'])
+ src_path_target = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['TARGET_SYS'])
+ src_path_native = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['BUILD_SYS'])
+
+    # Check that target_recipes[0] and native_recipes[1] were included
+ included_present = len(glob.glob(src_path_target + '/%s-*' % target_recipes[0]))
+ self.assertTrue(included_present, 'Recipe %s was not included.' % target_recipes[0])
+
+ included_present = len(glob.glob(src_path_native + '/%s-*' % native_recipes[1]))
+ self.assertTrue(included_present, 'Recipe %s was not included.' % native_recipes[1])
+
+ # Check that native_recipes[0] and target_recipes[1] were excluded
+ excluded_present = len(glob.glob(src_path_native + '/%s-*' % native_recipes[0]))
+ self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % native_recipes[0])
+
+ excluded_present = len(glob.glob(src_path_target + '/%s-*' % target_recipes[1]))
+ self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % target_recipes[1])
+
+ def test_archiver_srpm_mode(self):
+ """
+ Test that in srpm mode, the added recipe dependencies at least exist/work [YOCTO #11121]
+ """
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[srpm] = "1"\n'
+ self.write_config(features)
+
+ bitbake('-n core-image-sato')
+
+ def _test_archiver_mode(self, mode, target_file_name, extra_config=None):
+ target = "selftest-ed"
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[src] = "%s"\n' % (mode)
+ if extra_config:
+ features += extra_config
+ self.write_config(features)
+
+ bitbake('-c clean %s' % (target))
+ bitbake('-c deploy_archives %s' % (target))
+
+ bb_vars = get_bb_vars(['DEPLOY_DIR_SRC', 'TARGET_SYS'])
+ glob_str = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['TARGET_SYS'], '%s-*' % (target))
+ glob_result = glob.glob(glob_str)
+ self.assertTrue(glob_result, 'Missing archiver directory for %s' % (target))
+
+ archive_path = os.path.join(glob_result[0], target_file_name)
+ self.assertTrue(os.path.exists(archive_path), 'Missing archive file %s' % (target_file_name))
+
+ def test_archiver_mode_original(self):
+ """
+        Test that the archiver works with `ARCHIVER_MODE[src] = "original"`.
+ """
+
+ self._test_archiver_mode('original', 'ed-1.14.1.tar.lz')
+
+ def test_archiver_mode_patched(self):
+ """
+        Test that the archiver works with `ARCHIVER_MODE[src] = "patched"`.
+ """
+
+ self._test_archiver_mode('patched', 'selftest-ed-1.14.1-r0-patched.tar.gz')
+
+ def test_archiver_mode_configured(self):
+ """
+        Test that the archiver works with `ARCHIVER_MODE[src] = "configured"`.
+ """
+
+ self._test_archiver_mode('configured', 'selftest-ed-1.14.1-r0-configured.tar.gz')
+
+ def test_archiver_mode_recipe(self):
+ """
+        Test that the archiver works with `ARCHIVER_MODE[recipe] = "1"`.
+ """
+
+ self._test_archiver_mode('patched', 'selftest-ed-1.14.1-r0-recipe.tar.gz',
+ 'ARCHIVER_MODE[recipe] = "1"\n')
+
+ def test_archiver_mode_diff(self):
+ """
+        Test that the archiver works with `ARCHIVER_MODE[diff] = "1"`.
+ Exclusions controlled by `ARCHIVER_MODE[diff-exclude]` are not yet tested.
+ """
+
+ self._test_archiver_mode('patched', 'selftest-ed-1.14.1-r0-diff.gz',
+ 'ARCHIVER_MODE[diff] = "1"\n')
+
+ def test_archiver_mode_dumpdata(self):
+ """
+        Test that the archiver works with `ARCHIVER_MODE[dumpdata] = "1"`.
+ """
+
+ self._test_archiver_mode('patched', 'selftest-ed-1.14.1-r0-showdata.dump',
+ 'ARCHIVER_MODE[dumpdata] = "1"\n')
diff --git a/meta/lib/oeqa/selftest/bblayers.py b/meta/lib/oeqa/selftest/cases/bblayers.py
index d23675e84a..f131d9856c 100644
--- a/meta/lib/oeqa/selftest/bblayers.py
+++ b/meta/lib/oeqa/selftest/cases/bblayers.py
@@ -1,39 +1,35 @@
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
-import logging
import re
-import shutil
import oeqa.utils.ftools as ftools
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, get_bb_var
-from oeqa.utils.decorators import testcase
+from oeqa.utils.commands import runCmd, get_bb_var, get_bb_vars
+
+from oeqa.selftest.case import OESelftestTestCase
-class BitbakeLayers(oeSelfTest):
+class BitbakeLayers(OESelftestTestCase):
- @testcase(756)
def test_bitbakelayers_showcrossdepends(self):
result = runCmd('bitbake-layers show-cross-depends')
- self.assertTrue('aspell' in result.output, msg = "No dependencies were shown. bitbake-layers show-cross-depends output: %s" % result.output)
+ self.assertIn('aspell', result.output)
- @testcase(83)
def test_bitbakelayers_showlayers(self):
result = runCmd('bitbake-layers show-layers')
- self.assertTrue('meta-selftest' in result.output, msg = "No layers were shown. bitbake-layers show-layers output: %s" % result.output)
+ self.assertIn('meta-selftest', result.output)
- @testcase(93)
def test_bitbakelayers_showappends(self):
recipe = "xcursor-transparent-theme"
bb_file = self.get_recipe_basename(recipe)
result = runCmd('bitbake-layers show-appends')
- self.assertTrue(bb_file in result.output, msg="%s file was not recognised. bitbake-layers show-appends output: %s" % (bb_file, result.output))
+ self.assertIn(bb_file, result.output)
- @testcase(90)
def test_bitbakelayers_showoverlayed(self):
result = runCmd('bitbake-layers show-overlayed')
- self.assertTrue('aspell' in result.output, msg="aspell overlayed recipe was not recognised bitbake-layers show-overlayed %s" % result.output)
+ self.assertIn('aspell', result.output)
- @testcase(95)
def test_bitbakelayers_flatten(self):
recipe = "xcursor-transparent-theme"
recipe_path = "recipes-graphics/xcursor-transparent-theme"
@@ -48,7 +44,6 @@ class BitbakeLayers(oeSelfTest):
find_in_contents = re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents)
self.assertTrue(find_in_contents, msg = "Flattening layers did not work. bitbake-layers flatten output: %s" % result.output)
- @testcase(1195)
def test_bitbakelayers_add_remove(self):
test_layer = os.path.join(get_bb_var('COREBASE'), 'meta-skeleton')
result = runCmd('bitbake-layers show-layers')
@@ -66,22 +61,16 @@ class BitbakeLayers(oeSelfTest):
result = runCmd('bitbake-layers show-layers')
self.assertNotIn('meta-skeleton', result.output, msg = "meta-skeleton should have been removed at this step. bitbake-layers show-layers output: %s" % result.output)
- @testcase(1384)
def test_bitbakelayers_showrecipes(self):
result = runCmd('bitbake-layers show-recipes')
self.assertIn('aspell:', result.output)
self.assertIn('mtd-utils:', result.output)
- self.assertIn('linux-yocto:', result.output)
self.assertIn('core-image-minimal:', result.output)
result = runCmd('bitbake-layers show-recipes mtd-utils')
self.assertIn('mtd-utils:', result.output)
self.assertNotIn('aspell:', result.output)
- result = runCmd('bitbake-layers show-recipes -i kernel')
- self.assertIn('linux-yocto:', result.output)
- self.assertNotIn('mtd-utils:', result.output)
result = runCmd('bitbake-layers show-recipes -i image')
self.assertIn('core-image-minimal', result.output)
- self.assertNotIn('linux-yocto:', result.output)
self.assertNotIn('mtd-utils:', result.output)
result = runCmd('bitbake-layers show-recipes -i cmake,pkgconfig')
self.assertIn('libproxy:', result.output)
@@ -92,6 +81,31 @@ class BitbakeLayers(oeSelfTest):
self.assertNotEqual(result.status, 0, 'bitbake-layers show-recipes -i nonexistentclass should have failed')
self.assertIn('ERROR:', result.output)
+ def test_bitbakelayers_createlayer(self):
+ priority = 10
+ layername = 'test-bitbakelayer-layercreate'
+ layerpath = os.path.join(self.builddir, layername)
+ self.assertFalse(os.path.exists(layerpath), '%s should not exist at this point in time' % layerpath)
+ result = runCmd('bitbake-layers create-layer --priority=%d %s' % (priority, layerpath))
+ self.track_for_cleanup(layerpath)
+ result = runCmd('bitbake-layers add-layer %s' % layerpath)
+ self.add_command_to_tearDown('bitbake-layers remove-layer %s' % layerpath)
+ result = runCmd('bitbake-layers show-layers')
+ find_in_contents = re.search(re.escape(layername) + r'\s+' + re.escape(layerpath) + r'\s+' + re.escape(str(priority)), result.output)
+ self.assertTrue(find_in_contents, "%s not found in layers\n%s" % (layername, result.output))
+
+ layervars = ['BBFILE_PRIORITY', 'BBFILE_PATTERN', 'LAYERDEPENDS', 'LAYERSERIES_COMPAT']
+ bb_vars = get_bb_vars(['BBFILE_COLLECTIONS'] + ['%s_%s' % (v, layername) for v in layervars])
+
+ for v in layervars:
+ varname = '%s_%s' % (v, layername)
+ self.assertIsNotNone(bb_vars[varname], "%s not found" % varname)
+
+ find_in_contents = re.search(r'(^|\s)' + re.escape(layername) + r'($|\s)', bb_vars['BBFILE_COLLECTIONS'])
+ self.assertTrue(find_in_contents, "%s not in BBFILE_COLLECTIONS" % layername)
+
+ self.assertEqual(bb_vars['BBFILE_PRIORITY_%s' % layername], str(priority), 'BBFILE_PRIORITY_%s != %d' % (layername, priority))
+
def get_recipe_basename(self, recipe):
recipe_file = ""
result = runCmd("bitbake-layers show-recipes -f %s" % recipe)
diff --git a/meta/lib/oeqa/selftest/bbtests.py b/meta/lib/oeqa/selftest/cases/bbtests.py
index d8cb835829..dc423ec439 100644
--- a/meta/lib/oeqa/selftest/bbtests.py
+++ b/meta/lib/oeqa/selftest/cases/bbtests.py
@@ -1,60 +1,66 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
import re
import oeqa.utils.ftools as ftools
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var
-from oeqa.utils.decorators import testcase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+
+from oeqa.selftest.case import OESelftestTestCase
-class BitbakeTests(oeSelfTest):
+class BitbakeTests(OESelftestTestCase):
def getline(self, res, line):
for l in res.output.split('\n'):
if line in l:
return l
- @testcase(789)
+ # Test bitbake can run from the <builddir>/conf directory
def test_run_bitbake_from_dir_1(self):
os.chdir(os.path.join(self.builddir, 'conf'))
self.assertEqual(bitbake('-e').status, 0, msg = "bitbake couldn't run from \"conf\" dir")
- @testcase(790)
+    # Test bitbake can run from the parent directory of <builddir>
def test_run_bitbake_from_dir_2(self):
my_env = os.environ.copy()
my_env['BBPATH'] = my_env['BUILDDIR']
os.chdir(os.path.dirname(os.environ['BUILDDIR']))
- self.assertEqual(bitbake('-e', env=my_env).status, 0, msg = "bitbake couldn't run from builddir")
+ self.assertEqual(bitbake('-e', env=my_env).status, 0, msg = "bitbake couldn't run from builddir's parent directory")
+
+ # Test bitbake can run from some other random system location (we use /tmp/)
+ def test_run_bitbake_from_dir_3(self):
+ my_env = os.environ.copy()
+ my_env['BBPATH'] = my_env['BUILDDIR']
+ os.chdir("/tmp/")
+ self.assertEqual(bitbake('-e', env=my_env).status, 0, msg = "bitbake couldn't run from /tmp/")
+
- @testcase(806)
def test_event_handler(self):
self.write_config("INHERIT += \"test_events\"")
result = bitbake('m4-native')
- find_build_started = re.search("NOTE: Test for bb\.event\.BuildStarted(\n.*)*NOTE: Executing RunQueue Tasks", result.output)
- find_build_completed = re.search("Tasks Summary:.*(\n.*)*NOTE: Test for bb\.event\.BuildCompleted", result.output)
+ find_build_started = re.search(r"NOTE: Test for bb\.event\.BuildStarted(\n.*)*NOTE: Executing.*Tasks", result.output)
+ find_build_completed = re.search(r"Tasks Summary:.*(\n.*)*NOTE: Test for bb\.event\.BuildCompleted", result.output)
self.assertTrue(find_build_started, msg = "Match failed in:\n%s" % result.output)
self.assertTrue(find_build_completed, msg = "Match failed in:\n%s" % result.output)
- self.assertFalse('Test for bb.event.InvalidEvent' in result.output, msg = "\"Test for bb.event.InvalidEvent\" message found during bitbake process. bitbake output: %s" % result.output)
+ self.assertNotIn('Test for bb.event.InvalidEvent', result.output)
- @testcase(103)
def test_local_sstate(self):
- bitbake('m4-native -ccleansstate')
bitbake('m4-native')
bitbake('m4-native -cclean')
result = bitbake('m4-native')
find_setscene = re.search("m4-native.*do_.*_setscene", result.output)
self.assertTrue(find_setscene, msg = "No \"m4-native.*do_.*_setscene\" message found during bitbake m4-native. bitbake output: %s" % result.output )
- @testcase(105)
def test_bitbake_invalid_recipe(self):
result = bitbake('-b asdf', ignore_status=True)
self.assertTrue("ERROR: Unable to find any recipe file matching 'asdf'" in result.output, msg = "Though asdf recipe doesn't exist, bitbake didn't output any err. message. bitbake output: %s" % result.output)
- @testcase(107)
def test_bitbake_invalid_target(self):
result = bitbake('asdf', ignore_status=True)
- self.assertTrue("ERROR: Nothing PROVIDES 'asdf'" in result.output, msg = "Though no 'asdf' target exists, bitbake didn't output any err. message. bitbake output: %s" % result.output)
+ self.assertIn("ERROR: Nothing PROVIDES 'asdf'", result.output)
- @testcase(106)
def test_warnings_errors(self):
result = bitbake('-b asdf', ignore_status=True)
find_warnings = re.search("Summary: There w.{2,3}? [1-9][0-9]* WARNING messages* shown", result.output)
@@ -62,29 +68,30 @@ class BitbakeTests(oeSelfTest):
self.assertTrue(find_warnings, msg="Did not find the mumber of warnings at the end of the build:\n" + result.output)
self.assertTrue(find_errors, msg="Did not find the mumber of errors at the end of the build:\n" + result.output)
- @testcase(108)
def test_invalid_patch(self):
- # This patch already exists in SRC_URI so adding it again will cause the
- # patch to fail.
- self.write_recipeinc('man', 'SRC_URI += "file://man-1.5h1-make.patch"')
+ # This patch should fail to apply.
+ self.write_recipeinc('man-db', 'FILESEXTRAPATHS_prepend := "${THISDIR}/files:"\nSRC_URI += "file://0001-Test-patch-here.patch"')
self.write_config("INHERIT_remove = \"report-error\"")
- result = bitbake('man -c patch', ignore_status=True)
- self.delete_recipeinc('man')
- bitbake('-cclean man')
- line = self.getline(result, "Function failed: patch_do_patch")
- self.assertTrue(line and line.startswith("ERROR:"), msg = "Repeated patch application didn't fail. bitbake output: %s" % result.output)
+ result = bitbake('man-db -c patch', ignore_status=True)
+ self.delete_recipeinc('man-db')
+ bitbake('-cclean man-db')
+ found = False
+ for l in result.output.split('\n'):
+ if l.startswith("ERROR:") and "failed" in l and "do_patch" in l:
+ found = l
+ self.assertTrue(found and found.startswith("ERROR:"), msg = "Incorrectly formed patch application didn't fail. bitbake output: %s" % result.output)
- @testcase(1354)
def test_force_task_1(self):
# test 1 from bug 5875
test_recipe = 'zlib'
test_data = "Microsoft Made No Profit From Anyone's Zunes Yo"
- image_dir = get_bb_var('D', test_recipe)
- pkgsplit_dir = get_bb_var('PKGDEST', test_recipe)
- man_dir = get_bb_var('mandir', test_recipe)
+ bb_vars = get_bb_vars(['D', 'PKGDEST', 'mandir'], test_recipe)
+ image_dir = bb_vars['D']
+ pkgsplit_dir = bb_vars['PKGDEST']
+ man_dir = bb_vars['mandir']
- bitbake('-c cleansstate %s' % test_recipe)
- bitbake(test_recipe)
+ bitbake('-c clean %s' % test_recipe)
+ bitbake('-c package -f %s' % test_recipe)
self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
man_file = os.path.join(image_dir + man_dir, 'man3/zlib.3')
@@ -98,12 +105,10 @@ class BitbakeTests(oeSelfTest):
ret = bitbake(test_recipe)
self.assertIn('task do_package_write_rpm:', ret.output, 'Task do_package_write_rpm did not re-execute.')
- @testcase(163)
def test_force_task_2(self):
# test 2 from bug 5875
test_recipe = 'zlib'
- bitbake('-c cleansstate %s' % test_recipe)
bitbake(test_recipe)
self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
@@ -112,133 +117,120 @@ class BitbakeTests(oeSelfTest):
for task in look_for_tasks:
self.assertIn(task, result.output, msg="Couldn't find %s task." % task)
- @testcase(167)
def test_bitbake_g(self):
- result = bitbake('-g core-image-full-cmdline')
- for f in ['pn-buildlist', 'pn-depends.dot', 'package-depends.dot', 'task-depends.dot']:
+ recipe = 'base-files'
+ result = bitbake('-g %s' % recipe)
+ for f in ['pn-buildlist', 'task-depends.dot']:
self.addCleanup(os.remove, f)
- self.assertTrue('NOTE: PN build list saved to \'pn-buildlist\'' in result.output, msg = "No dependency \"pn-buildlist\" file was generated for the given task target. bitbake output: %s" % result.output)
- self.assertTrue('openssh' in ftools.read_file(os.path.join(self.builddir, 'pn-buildlist')), msg = "No \"openssh\" dependency found in pn-buildlist file.")
+ self.assertTrue('Task dependencies saved to \'task-depends.dot\'' in result.output, msg = "No task dependency \"task-depends.dot\" file was generated for the given task target. bitbake output: %s" % result.output)
+ self.assertIn(recipe, ftools.read_file(os.path.join(self.builddir, 'task-depends.dot')))
- @testcase(899)
def test_image_manifest(self):
bitbake('core-image-minimal')
- deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal")
- imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal")
+ bb_vars = get_bb_vars(["DEPLOY_DIR_IMAGE", "IMAGE_LINK_NAME"], "core-image-minimal")
+ deploydir = bb_vars["DEPLOY_DIR_IMAGE"]
+ imagename = bb_vars["IMAGE_LINK_NAME"]
manifest = os.path.join(deploydir, imagename + ".manifest")
self.assertTrue(os.path.islink(manifest), msg="No manifest file created for image. It should have been created in %s" % manifest)
- @testcase(168)
def test_invalid_recipe_src_uri(self):
data = 'SRC_URI = "file://invalid"'
- self.write_recipeinc('man', data)
+ self.write_recipeinc('man-db', data)
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
INHERIT_remove = \"report-error\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
- bitbake('-ccleanall man')
- result = bitbake('-c fetch man', ignore_status=True)
- bitbake('-ccleanall man')
- self.delete_recipeinc('man')
+ bitbake('-ccleanall man-db')
+ result = bitbake('-c fetch man-db', ignore_status=True)
+ bitbake('-ccleanall man-db')
+ self.delete_recipeinc('man-db')
self.assertEqual(result.status, 1, msg="Command succeded when it should have failed. bitbake output: %s" % result.output)
- self.assertTrue('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:' in result.output, msg = "\"invalid\" file \
-doesn't exist, yet no error message encountered. bitbake output: %s" % result.output)
- line = self.getline(result, 'Function failed: Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.')
+ self.assertIn('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:', result.output)
+ line = self.getline(result, 'Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.')
self.assertTrue(line and line.startswith("ERROR:"), msg = "\"invalid\" file \
doesn't exist, yet fetcher didn't report any error. bitbake output: %s" % result.output)
- @testcase(171)
def test_rename_downloaded_file(self):
+ # TODO unique dldir instead of using cleanall
+ # TODO: need to set sstatedir?
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
- data = 'SRC_URI_append = ";downloadfilename=test-aspell.tar.gz"'
+ data = 'SRC_URI = "${GNU_MIRROR}/aspell/aspell-${PV}.tar.gz;downloadfilename=test-aspell.tar.gz"'
self.write_recipeinc('aspell', data)
- bitbake('-ccleanall aspell')
- result = bitbake('-c fetch aspell', ignore_status=True)
+ result = bitbake('-f -c fetch aspell', ignore_status=True)
self.delete_recipeinc('aspell')
self.assertEqual(result.status, 0, msg = "Couldn't fetch aspell. %s" % result.output)
- self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz')), msg = "File rename failed. No corresponding test-aspell.tar.gz file found under %s" % str(get_bb_var("DL_DIR")))
- self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz.done')), "File rename failed. No corresponding test-aspell.tar.gz.done file found under %s" % str(get_bb_var("DL_DIR")))
+ dl_dir = get_bb_var("DL_DIR")
+ self.assertTrue(os.path.isfile(os.path.join(dl_dir, 'test-aspell.tar.gz')), msg = "File rename failed. No corresponding test-aspell.tar.gz file found under %s" % dl_dir)
+ self.assertTrue(os.path.isfile(os.path.join(dl_dir, 'test-aspell.tar.gz.done')), "File rename failed. No corresponding test-aspell.tar.gz.done file found under %s" % dl_dir)
- @testcase(1028)
def test_environment(self):
self.write_config("TEST_ENV=\"localconf\"")
result = runCmd('bitbake -e | grep TEST_ENV=')
- self.assertTrue('localconf' in result.output, msg = "bitbake didn't report any value for TEST_ENV variable. To test, run 'bitbake -e | grep TEST_ENV='")
+ self.assertIn('localconf', result.output)
- @testcase(1029)
def test_dry_run(self):
result = runCmd('bitbake -n m4-native')
self.assertEqual(0, result.status, "bitbake dry run didn't run as expected. %s" % result.output)
- @testcase(1030)
def test_just_parse(self):
result = runCmd('bitbake -p')
self.assertEqual(0, result.status, "errors encountered when parsing recipes. %s" % result.output)
- @testcase(1031)
def test_version(self):
result = runCmd('bitbake -s | grep wget')
- find = re.search("wget *:([0-9a-zA-Z\.\-]+)", result.output)
+ find = re.search(r"wget *:([0-9a-zA-Z\.\-]+)", result.output)
self.assertTrue(find, "No version returned for searched recipe. bitbake output: %s" % result.output)
- @testcase(1032)
def test_prefile(self):
preconf = os.path.join(self.builddir, 'conf/prefile.conf')
self.track_for_cleanup(preconf)
 ftools.write_file(preconf, "TEST_PREFILE=\"prefile\"")
result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
- self.assertTrue('prefile' in result.output, "Preconfigure file \"prefile.conf\"was not taken into consideration. ")
+ self.assertIn('prefile', result.output)
self.write_config("TEST_PREFILE=\"localconf\"")
result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
- self.assertTrue('localconf' in result.output, "Preconfigure file \"prefile.conf\"was not taken into consideration.")
+ self.assertIn('localconf', result.output)
- @testcase(1033)
def test_postfile(self):
postconf = os.path.join(self.builddir, 'conf/postfile.conf')
self.track_for_cleanup(postconf)
 ftools.write_file(postconf, "TEST_POSTFILE=\"postfile\"")
self.write_config("TEST_POSTFILE=\"localconf\"")
result = runCmd('bitbake -R conf/postfile.conf -e | grep TEST_POSTFILE=')
- self.assertTrue('postfile' in result.output, "Postconfigure file \"postfile.conf\"was not taken into consideration.")
+ self.assertIn('postfile', result.output)
- @testcase(1034)
def test_checkuri(self):
result = runCmd('bitbake -c checkuri m4')
self.assertEqual(0, result.status, msg = "\"checkuri\" task was not executed. bitbake output: %s" % result.output)
- @testcase(1035)
def test_continue(self):
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
INHERIT_remove = \"report-error\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
- self.write_recipeinc('man',"\ndo_fail_task () {\nexit 1 \n}\n\naddtask do_fail_task before do_fetch\n" )
- runCmd('bitbake -c cleanall man xcursor-transparent-theme')
- result = runCmd('bitbake man xcursor-transparent-theme -k', ignore_status=True)
+ self.write_recipeinc('man-db',"\ndo_fail_task () {\nexit 1 \n}\n\naddtask do_fail_task before do_fetch\n" )
+ runCmd('bitbake -c cleanall man-db xcursor-transparent-theme')
+ result = runCmd('bitbake -c unpack -k man-db xcursor-transparent-theme', ignore_status=True)
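+ # With -k, bitbake should log the man-db failure and still carry on to run
+ # xcursor-transparent-theme's do_unpack; the position checks below verify this.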
errorpos = result.output.find('ERROR: Function failed: do_fail_task')
manver = re.search("NOTE: recipe xcursor-transparent-theme-(.*?): task do_unpack: Started", result.output)
continuepos = result.output.find('NOTE: recipe xcursor-transparent-theme-%s: task do_unpack: Started' % manver.group(1))
self.assertLess(errorpos,continuepos, msg = "bitbake didn't pass do_fail_task. bitbake output: %s" % result.output)
- @testcase(1119)
def test_non_gplv3(self):
- data = 'INCOMPATIBLE_LICENSE = "GPLv3"'
- conf = os.path.join(self.builddir, 'conf/local.conf')
- ftools.append_file(conf ,data)
- self.addCleanup(ftools.remove_from_file, conf ,data)
- result = bitbake('readline', ignore_status=True)
+ self.write_config('INCOMPATIBLE_LICENSE = "GPLv3"')
+ result = bitbake('selftest-ed', ignore_status=True)
self.assertEqual(result.status, 0, "Bitbake failed, exit code %s, output %s" % (result.status, result.output))
- self.assertFalse(os.path.isfile(os.path.join(self.builddir, 'tmp/deploy/licenses/readline/generic_GPLv3')))
- self.assertTrue(os.path.isfile(os.path.join(self.builddir, 'tmp/deploy/licenses/readline/generic_GPLv2')))
+ lic_dir = get_bb_var('LICENSE_DIRECTORY')
+ self.assertFalse(os.path.isfile(os.path.join(lic_dir, 'selftest-ed/generic_GPLv3')))
+ self.assertTrue(os.path.isfile(os.path.join(lic_dir, 'selftest-ed/generic_GPLv2')))
- @testcase(1422)
def test_setscene_only(self):
""" Bitbake option to restore from sstate only within a build (i.e. execute no real tasks, only setscene)"""
test_recipe = 'ed'
@@ -253,12 +245,42 @@ INHERIT_remove = \"report-error\"
self.assertIn('_setscene', task, 'A task different from _setscene ran: %s.\n'
'Executed tasks were: %s' % (task, str(tasks)))
- @testcase(1425)
+ def test_skip_setscene(self):
+ test_recipe = 'ed'
+
+ bitbake(test_recipe)
+ bitbake('-c clean %s' % test_recipe)
+
+ ret = bitbake('--setscene-only %s' % test_recipe)
+ tasks = re.findall(r'task\s+(do_\S+):', ret.output)
+
+ for task in tasks:
+ self.assertIn('_setscene', task, 'A task different from _setscene ran: %s.\n'
+ 'Executed tasks were: %s' % (task, str(tasks)))
+
+ # Run without setscene. Should do nothing
+ ret = bitbake('--skip-setscene %s' % test_recipe)
+ tasks = re.findall(r'task\s+(do_\S+):', ret.output)
+
+ self.assertFalse(tasks, 'Tasks %s ran when they should not have' % (str(tasks)))
+
+ # Clean (leave sstate cache) and run with --skip-setscene. No setscene
+ # tasks should run
+ bitbake('-c clean %s' % test_recipe)
+
+ ret = bitbake('--skip-setscene %s' % test_recipe)
+ tasks = re.findall(r'task\s+(do_\S+):', ret.output)
+
+ for task in tasks:
+ self.assertNotIn('_setscene', task, 'A _setscene task ran: %s.\n'
+ 'Executed tasks were: %s' % (task, str(tasks)))
+
def test_bbappend_order(self):
""" Bitbake should bbappend to recipe in a predictable order """
test_recipe = 'ed'
- test_recipe_summary_before = get_bb_var('SUMMARY', test_recipe)
- test_recipe_pv = get_bb_var('PV', test_recipe)
+ bb_vars = get_bb_vars(['SUMMARY', 'PV'], test_recipe)
+ test_recipe_summary_before = bb_vars['SUMMARY']
+ test_recipe_pv = bb_vars['PV']
recipe_append_file = test_recipe + '_' + test_recipe_pv + '.bbappend'
expected_recipe_summary = test_recipe_summary_before
diff --git a/meta/lib/oeqa/selftest/cases/binutils.py b/meta/lib/oeqa/selftest/cases/binutils.py
new file mode 100644
index 0000000000..821f52f5a8
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/binutils.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: MIT
+import os
+import sys
+import re
+import logging
+from oeqa.core.decorator import OETestTag
+from oeqa.core.case import OEPTestResultTestCase
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_var, get_bb_vars
+
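+# Scan DejaGnu-style .sum output line by line, yielding a (test name, result)
+# pair for every line that starts with a recognised result keyword.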
+def parse_values(content):
+ for i in content:
+ for v in ["PASS", "FAIL", "XPASS", "XFAIL", "UNRESOLVED", "UNSUPPORTED", "UNTESTED", "ERROR", "WARNING"]:
+ if i.startswith(v + ": "):
+ yield i[len(v) + 2:].strip(), v
+ break
+
+@OETestTag("toolchain-user", "toolchain-system")
+class BinutilsCrossSelfTest(OESelftestTestCase, OEPTestResultTestCase):
+ def test_binutils(self):
+ self.run_binutils("binutils")
+
+ def test_gas(self):
+ self.run_binutils("gas")
+
+ def test_ld(self):
+ self.run_binutils("ld")
+
+ def run_binutils(self, suite):
+ features = []
+ features.append('CHECK_TARGETS = "{0}"'.format(suite))
+ self.write_config("\n".join(features))
+
+ recipe = "binutils-cross-testsuite"
+ bb_vars = get_bb_vars(["B", "TARGET_SYS", "T"], recipe)
+ builddir, target_sys, tdir = bb_vars["B"], bb_vars["TARGET_SYS"], bb_vars["T"]
+
+ bitbake("{0} -c check".format(recipe))
+
+ sumspath = os.path.join(builddir, suite, "{0}.sum".format(suite))
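+ # Fall back to the testsuite/ subdirectory if the .sum file is not
+ # directly under B.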
+ if not os.path.exists(sumspath):
+ sumspath = os.path.join(builddir, suite, "testsuite", "{0}.sum".format(suite))
+ logpath = os.path.splitext(sumspath)[0] + ".log"
+
+ ptestsuite = "binutils-{}".format(suite) if suite != "binutils" else suite
+ self.ptest_section(ptestsuite, logfile = logpath)
+ with open(sumspath, "r") as f:
+ for test, result in parse_values(f):
+ self.ptest_result(ptestsuite, test, result)
+
diff --git a/meta/lib/oeqa/selftest/buildhistory.py b/meta/lib/oeqa/selftest/cases/buildhistory.py
index 674da6205a..d865da6252 100644
--- a/meta/lib/oeqa/selftest/buildhistory.py
+++ b/meta/lib/oeqa/selftest/cases/buildhistory.py
@@ -1,16 +1,20 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
import re
import datetime
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import bitbake, get_bb_var
-from oeqa.utils.decorators import testcase
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_vars
-class BuildhistoryBase(oeSelfTest):
+class BuildhistoryBase(OESelftestTestCase):
def config_buildhistory(self, tmp_bh_location=False):
- if (not 'buildhistory' in get_bb_var('USER_CLASSES')) and (not 'buildhistory' in get_bb_var('INHERIT')):
+ bb_vars = get_bb_vars(['USER_CLASSES', 'INHERIT'])
+ if (not 'buildhistory' in bb_vars['USER_CLASSES']) and (not 'buildhistory' in bb_vars['INHERIT']):
add_buildhistory_config = 'INHERIT += "buildhistory"\nBUILDHISTORY_COMMIT = "1"'
self.append_config(add_buildhistory_config)
diff --git a/meta/lib/oeqa/selftest/buildoptions.py b/meta/lib/oeqa/selftest/cases/buildoptions.py
index 86e4836b83..e91f0bd18f 100644
--- a/meta/lib/oeqa/selftest/buildoptions.py
+++ b/meta/lib/oeqa/selftest/cases/buildoptions.py
@@ -1,100 +1,89 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
import re
import glob as g
import shutil
import tempfile
-from oeqa.selftest.base import oeSelfTest
-from oeqa.selftest.buildhistory import BuildhistoryBase
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.selftest.cases.buildhistory import BuildhistoryBase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
import oeqa.utils.ftools as ftools
-from oeqa.utils.decorators import testcase
-class ImageOptionsTests(oeSelfTest):
+class ImageOptionsTests(OESelftestTestCase):
- @testcase(761)
def test_incremental_image_generation(self):
image_pkgtype = get_bb_var("IMAGE_PKGTYPE")
if image_pkgtype != 'rpm':
self.skipTest('Not using RPM as main package format')
- bitbake("-c cleanall core-image-minimal")
+ bitbake("-c clean core-image-minimal")
self.write_config('INC_RPM_IMAGE_GEN = "1"')
self.append_config('IMAGE_FEATURES += "ssh-server-openssh"')
bitbake("core-image-minimal")
log_data_file = os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")
log_data_created = ftools.read_file(log_data_file)
- incremental_created = re.search("NOTE: load old install solution for incremental install\nNOTE: old install solution not exist\nNOTE: creating new install solution for incremental install(\n.*)*NOTE: Installing the following packages:.*packagegroup-core-ssh-openssh", log_data_created)
+ incremental_created = re.search(r"Installing\s*:\s*packagegroup-core-ssh-openssh", log_data_created)
self.remove_config('IMAGE_FEATURES += "ssh-server-openssh"')
self.assertTrue(incremental_created, msg = "Match failed in:\n%s" % log_data_created)
bitbake("core-image-minimal")
log_data_removed = ftools.read_file(log_data_file)
- incremental_removed = re.search("NOTE: load old install solution for incremental install\nNOTE: creating new install solution for incremental install(\n.*)*NOTE: incremental removed:.*openssh-sshd-.*", log_data_removed)
+ incremental_removed = re.search(r"Erasing\s*:\s*packagegroup-core-ssh-openssh", log_data_removed)
self.assertTrue(incremental_removed, msg = "Match failed in:\n%s" % log_data_removed)
- @testcase(925)
- def test_rm_old_image(self):
- bitbake("core-image-minimal")
- deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal")
- imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal")
- deploydir_files = os.listdir(deploydir)
- track_original_files = []
- for image_file in deploydir_files:
- if imagename in image_file and os.path.islink(os.path.join(deploydir, image_file)):
- track_original_files.append(os.path.realpath(os.path.join(deploydir, image_file)))
- self.write_config("RM_OLD_IMAGE = \"1\"")
- bitbake("-C rootfs core-image-minimal")
- deploydir_files = os.listdir(deploydir)
- remaining_not_expected = [path for path in track_original_files if os.path.basename(path) in deploydir_files]
- self.assertFalse(remaining_not_expected, msg="\nThe following image files were not removed: %s" % ', '.join(map(str, remaining_not_expected)))
-
- @testcase(286)
def test_ccache_tool(self):
bitbake("ccache-native")
- self.assertTrue(os.path.isfile(os.path.join(get_bb_var('STAGING_BINDIR_NATIVE', 'ccache-native'), "ccache")), msg = "No ccache found under %s" % str(get_bb_var('STAGING_BINDIR_NATIVE', 'ccache-native')))
+ bb_vars = get_bb_vars(['SYSROOT_DESTDIR', 'bindir'], 'ccache-native')
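+ # bindir is an absolute path, so plain string concatenation is used here:
+ # os.path.join() would discard SYSROOT_DESTDIR before an absolute component.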
+ p = bb_vars['SYSROOT_DESTDIR'] + bb_vars['bindir'] + "/" + "ccache"
+ self.assertTrue(os.path.isfile(p), msg = "No ccache found (%s)" % p)
self.write_config('INHERIT += "ccache"')
- bitbake("m4 -c cleansstate")
- bitbake("m4 -c compile")
- self.addCleanup(bitbake, 'ccache-native -ccleansstate')
- res = runCmd("grep ccache %s" % (os.path.join(get_bb_var("WORKDIR","m4"),"temp/log.do_compile")), ignore_status=True)
- self.assertEqual(0, res.status, msg="No match for ccache in m4 log.do_compile. For further details: %s" % os.path.join(get_bb_var("WORKDIR","m4"),"temp/log.do_compile"))
+ self.add_command_to_tearDown('bitbake -c clean m4-native')
+ bitbake("m4-native -c clean")
+ bitbake("m4-native -f -c compile")
+ log_compile = os.path.join(get_bb_var("WORKDIR","m4-native"), "temp/log.do_compile")
+ with open(log_compile, "r") as f:
+ loglines = "".join(f.readlines())
+ self.assertIn("ccache", loglines, msg="No match for ccache in m4-native log.do_compile. For further details: %s" % log_compile)
- @testcase(1435)
def test_read_only_image(self):
+ distro_features = get_bb_var('DISTRO_FEATURES')
+ if not ('x11' in distro_features and 'opengl' in distro_features):
+ self.skipTest('core-image-sato requires x11 and opengl in distro features')
self.write_config('IMAGE_FEATURES += "read-only-rootfs"')
bitbake("core-image-sato")
# do_image will fail if there are any pending postinsts
-class DiskMonTest(oeSelfTest):
+class DiskMonTest(OESelftestTestCase):
- @testcase(277)
def test_stoptask_behavior(self):
self.write_config('BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},100000G,100K"')
- res = bitbake("m4", ignore_status = True)
+ res = bitbake("delay -c delay", ignore_status = True)
 self.assertTrue('ERROR: No new tasks can be executed since the disk space monitor action is "STOPTASKS"!' in res.output, msg = "Tasks should have stopped. Disk monitor is set to STOPTASKS: %s" % res.output)
self.assertEqual(res.status, 1, msg = "bitbake reported exit code %s. It should have been 1. Bitbake output: %s" % (str(res.status), res.output))
self.write_config('BB_DISKMON_DIRS = "ABORT,${TMPDIR},100000G,100K"')
- res = bitbake("m4", ignore_status = True)
+ res = bitbake("delay -c delay", ignore_status = True)
 self.assertTrue('ERROR: Immediately abort since the disk space monitor action is "ABORT"!' in res.output, "Tasks should have been aborted immediately. Disk monitor is set to ABORT: %s" % res.output)
self.assertEqual(res.status, 1, msg = "bitbake reported exit code %s. It should have been 1. Bitbake output: %s" % (str(res.status), res.output))
self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},100000G,100K"')
- res = bitbake("m4")
+ res = bitbake("delay -c delay")
self.assertTrue('WARNING: The free space' in res.output, msg = "A warning should have been displayed for disk monitor is set to WARN: %s" %res.output)
-class SanityOptionsTest(oeSelfTest):
+class SanityOptionsTest(OESelftestTestCase):
def getline(self, res, line):
for l in res.output.split('\n'):
if line in l:
return l
- @testcase(927)
def test_options_warnqa_errorqa_switch(self):
- bitbake("xcursor-transparent-theme -ccleansstate")
self.write_config("INHERIT_remove = \"report-error\"")
if "packages-list" not in get_bb_var("ERROR_QA"):
self.append_config("ERROR_QA_append = \" packages-list\"")
self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
- res = bitbake("xcursor-transparent-theme", ignore_status=True)
+ self.add_command_to_tearDown('bitbake -c clean xcursor-transparent-theme')
+ res = bitbake("xcursor-transparent-theme -f -c package", ignore_status=True)
self.delete_recipeinc('xcursor-transparent-theme')
line = self.getline(res, "QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors.")
self.assertTrue(line and line.startswith("ERROR:"), msg=res.output)
@@ -102,50 +91,11 @@ class SanityOptionsTest(oeSelfTest):
self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
self.append_config('ERROR_QA_remove = "packages-list"')
self.append_config('WARN_QA_append = " packages-list"')
- bitbake("xcursor-transparent-theme -ccleansstate")
- res = bitbake("xcursor-transparent-theme")
+ res = bitbake("xcursor-transparent-theme -f -c package")
self.delete_recipeinc('xcursor-transparent-theme')
line = self.getline(res, "QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors.")
self.assertTrue(line and line.startswith("WARNING:"), msg=res.output)
- @testcase(278)
- def test_sanity_unsafe_script_references(self):
- self.write_config('WARN_QA_append = " unsafe-references-in-scripts"')
-
- bitbake("-ccleansstate gzip")
- res = bitbake("gzip")
- line = self.getline(res, "QA Issue: gzip")
- self.assertFalse(line, "WARNING: QA Issue: gzip message is present in bitbake's output and shouldn't be: %s" % res.output)
-
- self.append_config("""
-do_install_append_pn-gzip () {
- echo "\n${bindir}/test" >> ${D}${bindir}/zcat
-}
-""")
- res = bitbake("gzip")
- line = self.getline(res, "QA Issue: gzip")
- self.assertTrue(line and line.startswith("WARNING:"), "WARNING: QA Issue: gzip message is not present in bitbake's output: %s" % res.output)
-
- @testcase(1434)
- def test_sanity_unsafe_binary_references(self):
- self.write_config('WARN_QA_append = " unsafe-references-in-binaries"')
-
- bitbake("-ccleansstate nfs-utils")
- #res = bitbake("nfs-utils")
- # FIXME when nfs-utils passes this test
- #line = self.getline(res, "QA Issue: nfs-utils")
- #self.assertFalse(line, "WARNING: QA Issue: nfs-utils message is present in bitbake's output and shouldn't be: %s" % res.output)
-
-# self.append_config("""
-#do_install_append_pn-nfs-utils () {
-# echo "\n${bindir}/test" >> ${D}${base_sbindir}/osd_login
-#}
-#""")
- res = bitbake("nfs-utils")
- line = self.getline(res, "QA Issue: nfs-utils")
- self.assertTrue(line and line.startswith("WARNING:"), "WARNING: QA Issue: nfs-utils message is not present in bitbake's output: %s" % res.output)
-
- @testcase(1421)
def test_layer_without_git_dir(self):
"""
Summary: Test that layer git revisions are displayed and do not fail without git repository
@@ -187,30 +137,62 @@ do_install_append_pn-gzip () {
class BuildhistoryTests(BuildhistoryBase):
- @testcase(293)
def test_buildhistory_basic(self):
self.run_buildhistory_operation('xcursor-transparent-theme')
self.assertTrue(os.path.isdir(get_bb_var('BUILDHISTORY_DIR')), "buildhistory dir was not created.")
- @testcase(294)
def test_buildhistory_buildtime_pr_backwards(self):
- self.add_command_to_tearDown('cleanup-workdir')
target = 'xcursor-transparent-theme'
- error = "ERROR:.*QA Issue: Package version for package %s went backwards which would break package feeds from (.*-r1.* to .*-r0.*)" % target
+ error = "ERROR:.*QA Issue: Package version for package %s went backwards which would break package feeds \(from .*-r1.* to .*-r0.*\)" % target
self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True, error_regex=error)
-class ArchiverTest(oeSelfTest):
- @testcase(926)
+class ArchiverTest(OESelftestTestCase):
def test_arch_work_dir_and_export_source(self):
"""
Test for archiving the work directory and exporting the source files.
"""
- self.add_command_to_tearDown('cleanup-workdir')
self.write_config("INHERIT += \"archiver\"\nARCHIVER_MODE[src] = \"original\"\nARCHIVER_MODE[srpm] = \"1\"")
res = bitbake("xcursor-transparent-theme", ignore_status=True)
self.assertEqual(res.status, 0, "\nCouldn't build xcursortransparenttheme.\nbitbake output %s" % res.output)
- pkgs_path = g.glob(str(self.builddir) + "/tmp/deploy/sources/allarch*/xcurs*")
+ deploy_dir_src = get_bb_var('DEPLOY_DIR_SRC')
+ pkgs_path = g.glob(str(deploy_dir_src) + "/allarch*/xcurs*")
src_file_glob = str(pkgs_path[0]) + "/xcursor*.src.rpm"
tar_file_glob = str(pkgs_path[0]) + "/xcursor*.tar.gz"
- self.assertTrue((g.glob(src_file_glob) and g.glob(tar_file_glob)), "Couldn't find .src.rpm and .tar.gz files under tmp/deploy/sources/allarch*/xcursor*")
+ self.assertTrue((g.glob(src_file_glob) and g.glob(tar_file_glob)), "Couldn't find .src.rpm and .tar.gz files under %s/allarch*/xcursor*" % deploy_dir_src)
+
+class ToolchainOptions(OESelftestTestCase):
+ def test_toolchain_fortran(self):
+ """
+ Test that Fortran works by building a Hello, World binary.
+ """
+
+ features = 'FORTRAN_forcevariable = ",fortran"\n'
+ self.write_config(features)
+ bitbake('fortran-helloworld')
+
+class SourceMirroring(OESelftestTestCase):
+ # Can we download everything from the Yocto Sources Mirror over http only
+ def test_yocto_source_mirror(self):
+ self.write_config("""
+BB_ALLOWED_NETWORKS = "downloads.yoctoproject.org"
+MIRRORS = ""
+DL_DIR = "${TMPDIR}/test_downloads"
+STAMPS_DIR = "${TMPDIR}/test_stamps"
+SSTATE_DIR = "${TMPDIR}/test_sstate-cache"
+PREMIRRORS = "\\
+ bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ gitsm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ http://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ https://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n"
+""")
+
+ bitbake("world --runall fetch")
+
diff --git a/meta/lib/oeqa/selftest/cases/containerimage.py b/meta/lib/oeqa/selftest/cases/containerimage.py
new file mode 100644
index 0000000000..c0998e319e
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/containerimage.py
@@ -0,0 +1,88 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_vars, runCmd
+
+# This test builds an image using the "container" IMAGE_FSTYPE, and
+# ensures that the files in the image are only the ones expected.
+#
+# The only package added to the image is container_image_testpkg, which
+# contains one file. However, due to some other things not cleaning up during
+# rootfs creation, there is some cruft. Ideally bugs will be filed and the
+# cruft removed, but for now we whitelist a known set.
+#
+# Also, for performance reasons, we're only checking the cruft when using ipk.
+# When using deb and rpm it is a bit different, and we could test all
+# of them, but this test is more to catch if other packages get added by
+# default other than what is in ROOTFS_BOOTSTRAP_INSTALL.
+#
+class ContainerImageTests(OESelftestTestCase):
+
+ # Verify that when specifying an IMAGE_TYPEDEP_ of the form "foo.bar",
+ # the conversion type bar gets added as a dep as well
+ def test_expected_files(self):
+
+ def get_each_path_part(path):
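+ # Recursively collect each ancestor directory of 'path' in the
+ # './dir/' form that tar uses when listing archive members.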
+ if path:
+ part = [ '.' + path + '/' ]
+ result = get_each_path_part(path.rsplit('/', 1)[0])
+ if result:
+ return part + result
+ else:
+ return part
+ else:
+ return None
+
+ self.write_config("""PREFERRED_PROVIDER_virtual/kernel = "linux-dummy"
+IMAGE_FSTYPES = "container"
+PACKAGE_CLASSES = "package_ipk"
+IMAGE_FEATURES = ""
+IMAGE_BUILDINFO_FILE = ""
+""")
+
+ bbvars = get_bb_vars(['bindir', 'sysconfdir', 'localstatedir',
+ 'DEPLOY_DIR_IMAGE', 'IMAGE_LINK_NAME'],
+ target='container-test-image')
+ expected_files = [
+ './',
+ '.{bindir}/theapp',
+ '.{sysconfdir}/default/',
+ '.{sysconfdir}/default/postinst',
+ '.{sysconfdir}/ld.so.cache',
+ '.{sysconfdir}/timestamp',
+ '.{sysconfdir}/version',
+ './run/',
+ '.{localstatedir}/cache/',
+ '.{localstatedir}/cache/ldconfig/',
+ '.{localstatedir}/cache/ldconfig/aux-cache',
+ '.{localstatedir}/cache/opkg/',
+ '.{localstatedir}/lib/',
+ '.{localstatedir}/lib/opkg/'
+ ]
+
+ expected_files = [ x.format(bindir=bbvars['bindir'],
+ sysconfdir=bbvars['sysconfdir'],
+ localstatedir=bbvars['localstatedir'])
+ for x in expected_files ]
+
+ # Since tar lists all directories individually, make sure each element
+ # from bindir, sysconfdir, etc is added
+ expected_files += get_each_path_part(bbvars['bindir'])
+ expected_files += get_each_path_part(bbvars['sysconfdir'])
+ expected_files += get_each_path_part(bbvars['localstatedir'])
+
+ expected_files = sorted(expected_files)
+
+ # Build the image of course
+ bitbake('container-test-image')
+
+ image = os.path.join(bbvars['DEPLOY_DIR_IMAGE'],
+ bbvars['IMAGE_LINK_NAME'] + '.tar.bz2')
+
+ # Ensure the files in the image are what we expect
+ result = runCmd("tar tf {} | sort".format(image), shell=True)
+ self.assertEqual(result.output.split('\n'), expected_files)
diff --git a/meta/lib/oeqa/selftest/devtool.py b/meta/lib/oeqa/selftest/cases/devtool.py
index e992dcf771..57e6662e4a 100644
--- a/meta/lib/oeqa/selftest/devtool.py
+++ b/meta/lib/oeqa/selftest/cases/devtool.py
@@ -1,6 +1,8 @@
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
-import logging
import re
import shutil
import tempfile
@@ -8,16 +10,134 @@ import glob
import fnmatch
import oeqa.utils.ftools as ftools
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, create_temp_layer, runqemu, get_test_layer
-from oeqa.utils.decorators import testcase
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, create_temp_layer
+from oeqa.utils.commands import get_bb_vars, runqemu, get_test_layer
+
+oldmetapath = None
+
+def setUpModule():
+ import bb.utils
+
+ global templayerdir
+ templayerdir = tempfile.mkdtemp(prefix='devtoolqa')
+ corecopydir = os.path.join(templayerdir, 'core-copy')
+ bblayers_conf = os.path.join(os.environ['BUILDDIR'], 'conf', 'bblayers.conf')
+ edited_layers = []
+
+ # We need to take a copy of the meta layer so we can modify it and not
+ # have any races against other tests that might be running in parallel;
+ # however, things like COREBASE mean that you can't just copy meta, you
+ # need the whole repository.
+ def bblayers_edit_cb(layerpath, canonical_layerpath):
+ global oldmetapath
+ if not canonical_layerpath.endswith('/'):
+ # This helps us match exactly when we're using this path later
+ canonical_layerpath += '/'
+ if not edited_layers and canonical_layerpath.endswith('/meta/'):
+ canonical_layerpath = os.path.realpath(canonical_layerpath) + '/'
+ edited_layers.append(layerpath)
+ oldmetapath = os.path.realpath(layerpath)
+ result = runCmd('git rev-parse --show-toplevel', cwd=canonical_layerpath)
+ oldreporoot = result.output.rstrip()
+ newmetapath = os.path.join(corecopydir, os.path.relpath(oldmetapath, oldreporoot))
+ runCmd('git clone %s %s' % (oldreporoot, corecopydir), cwd=templayerdir)
+ # Now we need to copy any modified files
+ # You might ask "why not just copy the entire tree instead of
+ # cloning and doing this?" - well, the problem with that is
+ # TMPDIR or an equally large subdirectory might exist
+ # under COREBASE and we don't want to copy that, so we have
+ # to be selective.
+ result = runCmd('git status --porcelain', cwd=oldreporoot)
+ for line in result.output.splitlines():
+ if line.startswith(' M ') or line.startswith('?? '):
+ relpth = line.split()[1]
+ pth = os.path.join(oldreporoot, relpth)
+ if pth.startswith(canonical_layerpath):
+ if relpth.endswith('/'):
+ destdir = os.path.join(corecopydir, relpth)
+ shutil.copytree(pth, destdir)
+ else:
+ destdir = os.path.join(corecopydir, os.path.dirname(relpth))
+ bb.utils.mkdirhier(destdir)
+ shutil.copy2(pth, destdir)
+ return newmetapath
+ else:
+ return layerpath
+ bb.utils.edit_bblayers_conf(bblayers_conf, None, None, bblayers_edit_cb)
-class DevtoolBase(oeSelfTest):
+def tearDownModule():
+ if oldmetapath:
+ edited_layers = []
+ def bblayers_edit_cb(layerpath, canonical_layerpath):
+ if not edited_layers and canonical_layerpath.endswith('/meta'):
+ edited_layers.append(layerpath)
+ return oldmetapath
+ else:
+ return layerpath
+ bblayers_conf = os.path.join(os.environ['BUILDDIR'], 'conf', 'bblayers.conf')
+ bb.utils.edit_bblayers_conf(bblayers_conf, None, None, bblayers_edit_cb)
+ shutil.rmtree(templayerdir)
+
+class DevtoolBase(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(DevtoolBase, cls).setUpClass()
+ bb_vars = get_bb_vars(['TOPDIR', 'SSTATE_DIR'])
+ cls.original_sstate = bb_vars['SSTATE_DIR']
+ cls.devtool_sstate = os.path.join(bb_vars['TOPDIR'], 'sstate_devtool')
+ cls.sstate_conf = 'SSTATE_DIR = "%s"\n' % cls.devtool_sstate
+ cls.sstate_conf += ('SSTATE_MIRRORS += "file://.* file:///%s/PATH"\n'
+ % cls.original_sstate)
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.logger.debug('Deleting devtool sstate cache on %s' % cls.devtool_sstate)
+ runCmd('rm -rf %s' % cls.devtool_sstate)
+ super(DevtoolBase, cls).tearDownClass()
+
+ def setUp(self):
+ """Test case setup function"""
+ super(DevtoolBase, self).setUp()
+ self.workspacedir = os.path.join(self.builddir, 'workspace')
+ self.assertTrue(not os.path.exists(self.workspacedir),
+ 'This test cannot be run with a workspace directory '
+ 'under the build directory')
+ self.append_config(self.sstate_conf)
+
+ def _check_src_repo(self, repo_dir):
+ """Check srctree git repository"""
+ self.assertTrue(os.path.isdir(os.path.join(repo_dir, '.git')),
+ 'git repository for external source tree not found')
+ result = runCmd('git status --porcelain', cwd=repo_dir)
+ self.assertEqual(result.output.strip(), "",
+ 'Created git repo is not clean')
+ result = runCmd('git symbolic-ref HEAD', cwd=repo_dir)
+ self.assertEqual(result.output.strip(), "refs/heads/devtool",
+ 'Wrong branch in git repo')
+
+ def _check_repo_status(self, repo_dir, expected_status):
+ """Check the worktree status of a repository"""
+ result = runCmd('git status . --porcelain',
+ cwd=repo_dir)
+ for line in result.output.splitlines():
+ for ind, (f_status, fn_re) in enumerate(expected_status):
+ if re.match(fn_re, line[3:]):
+ if f_status != line[:2]:
+ self.fail('Unexpected status in line: %s' % line)
+ expected_status.pop(ind)
+ break
+ else:
+ self.fail('Unexpected modified file in line: %s' % line)
+ if expected_status:
+ self.fail('Missing file changes: %s' % expected_status)
def _test_recipe_contents(self, recipefile, checkvars, checkinherits):
with open(recipefile, 'r') as f:
invar = None
invalue = None
+ inherits = set()
for line in f:
var = None
if invar:
@@ -39,14 +159,17 @@ class DevtoolBase(oeSelfTest):
invar = var
continue
elif line.startswith('inherit '):
- inherits = line.split()[1:]
+ inherits.update(line.split()[1:])
if var and var in checkvars:
needvalue = checkvars.pop(var)
if needvalue is None:
- self.fail('Variable %s should not appear in recipe')
+ self.fail('Variable %s should not appear in recipe, but value is being set to "%s"' % (var, value))
if isinstance(needvalue, set):
- value = set(value.split())
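+ # LICENSE expressions are joined with ' & ', so split on that;
+ # other set-valued variables are space-separated lists.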
+ if var == 'LICENSE':
+ value = set(value.split(' & '))
+ else:
+ value = set(value.split())
self.assertEqual(value, needvalue, 'values for %s do not match' % var)
@@ -114,46 +237,13 @@ class DevtoolBase(oeSelfTest):
class DevtoolTests(DevtoolBase):
- def setUp(self):
- """Test case setup function"""
- super(DevtoolTests, self).setUp()
- self.workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(self.workspacedir),
- 'This test cannot be run with a workspace directory '
- 'under the build directory')
-
- def _check_src_repo(self, repo_dir):
- """Check srctree git repository"""
- self.assertTrue(os.path.isdir(os.path.join(repo_dir, '.git')),
- 'git repository for external source tree not found')
- result = runCmd('git status --porcelain', cwd=repo_dir)
- self.assertEqual(result.output.strip(), "",
- 'Created git repo is not clean')
- result = runCmd('git symbolic-ref HEAD', cwd=repo_dir)
- self.assertEqual(result.output.strip(), "refs/heads/devtool",
- 'Wrong branch in git repo')
-
- def _check_repo_status(self, repo_dir, expected_status):
- """Check the worktree status of a repository"""
- result = runCmd('git status . --porcelain',
- cwd=repo_dir)
- for line in result.output.splitlines():
- for ind, (f_status, fn_re) in enumerate(expected_status):
- if re.match(fn_re, line[3:]):
- if f_status != line[:2]:
- self.fail('Unexpected status in line: %s' % line)
- expected_status.pop(ind)
- break
- else:
- self.fail('Unexpected modified file in line: %s' % line)
- if expected_status:
- self.fail('Missing file changes: %s' % expected_status)
-
- @testcase(1158)
def test_create_workspace(self):
# Check preconditions
result = runCmd('bitbake-layers show-layers')
- self.assertTrue('/workspace' not in result.output, 'This test cannot be run with a workspace layer in bblayers.conf')
+ self.assertTrue('\nworkspace' not in result.output, 'This test cannot be run with a workspace layer in bblayers.conf')
+ # remove conf/devtool.conf to avoid it corrupting tests
+ devtoolconf = os.path.join(self.builddir, 'conf', 'devtool.conf')
+ self.track_for_cleanup(devtoolconf)
# Try creating a workspace layer with a specific path
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -170,40 +260,52 @@ class DevtoolTests(DevtoolBase):
self.assertNotIn(tempdir, result.output)
self.assertIn(self.workspacedir, result.output)
- @testcase(1159)
+class DevtoolAddTests(DevtoolBase):
+
def test_devtool_add(self):
# Fetch source
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
+ pn = 'pv'
+ pv = '1.5.3'
url = 'http://www.ivarch.com/programs/sources/pv-1.5.3.tar.bz2'
result = runCmd('wget %s' % url, cwd=tempdir)
- result = runCmd('tar xfv pv-1.5.3.tar.bz2', cwd=tempdir)
- srcdir = os.path.join(tempdir, 'pv-1.5.3')
+ result = runCmd('tar xfv %s' % os.path.basename(url), cwd=tempdir)
+ srcdir = os.path.join(tempdir, '%s-%s' % (pn, pv))
self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure')), 'Unable to find configure script in source directory')
# Test devtool add
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake -c cleansstate pv')
+ self.add_command_to_tearDown('bitbake -c cleansstate %s' % pn)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
- result = runCmd('devtool add pv %s' % srcdir)
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ result = runCmd('devtool add %s %s' % (pn, srcdir))
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
# Test devtool status
result = runCmd('devtool status')
- self.assertIn('pv', result.output)
+ recipepath = '%s/recipes/%s/%s_%s.bb' % (self.workspacedir, pn, pn, pv)
+ self.assertIn(recipepath, result.output)
self.assertIn(srcdir, result.output)
+ # Test devtool find-recipe
+ result = runCmd('devtool -q find-recipe %s' % pn)
+ self.assertEqual(recipepath, result.output.strip())
+ # Test devtool edit-recipe
+ result = runCmd('VISUAL="echo 123" devtool -q edit-recipe %s' % pn)
+ self.assertEqual('123 %s' % recipepath, result.output.strip())
# Clean up anything in the workdir/sysroot/sstate cache (have to do this *after* devtool add since the recipe only exists then)
- bitbake('pv -c cleansstate')
+ bitbake('%s -c cleansstate' % pn)
# Test devtool build
- result = runCmd('devtool build pv')
- installdir = get_bb_var('D', 'pv')
+ result = runCmd('devtool build %s' % pn)
+ bb_vars = get_bb_vars(['D', 'bindir'], pn)
+ installdir = bb_vars['D']
self.assertTrue(installdir, 'Could not query installdir variable')
- bindir = get_bb_var('bindir', 'pv')
+ bindir = bb_vars['bindir']
self.assertTrue(bindir, 'Could not query bindir variable')
if bindir[0] == '/':
bindir = bindir[1:]
self.assertTrue(os.path.isfile(os.path.join(installdir, bindir, 'pv')), 'pv binary not found in D')
- @testcase(1423)
def test_devtool_add_git_local(self):
+ # We need dbus built so that DEPENDS recognition works
+ bitbake('dbus')
# Fetch source from a remote URL, but do it outside of devtool
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -222,7 +324,7 @@ class DevtoolTests(DevtoolBase):
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
# Don't specify a name since we should be able to auto-detect it
result = runCmd('devtool add %s' % srcdir)
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
# Check the recipe name is correct
recipefile = get_bb_var('FILE', pn)
self.assertIn('%s_git.bb' % pn, recipefile, 'Recipe file incorrectly named')
@@ -242,10 +344,7 @@ class DevtoolTests(DevtoolBase):
checkvars['DEPENDS'] = set(['dbus'])
self._test_recipe_contents(recipefile, checkvars, [])
- @testcase(1162)
def test_devtool_add_library(self):
- # We don't have the ability to pick up this dependency automatically yet...
- bitbake('libusb1')
# Fetch source
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -259,7 +358,7 @@ class DevtoolTests(DevtoolBase):
self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool add libftdi %s -V %s' % (srcdir, version))
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
# Test devtool status
result = runCmd('devtool status')
self.assertIn('libftdi', result.output)
@@ -274,13 +373,17 @@ class DevtoolTests(DevtoolBase):
result = runCmd('recipetool setvar %s EXTRA_OECMAKE -- \'-DPYTHON_BINDINGS=OFF -DLIBFTDI_CMAKE_CONFIG_DIR=${datadir}/cmake/Modules\'' % recipefile)
with open(recipefile, 'a') as f:
f.write('\nFILES_${PN}-dev += "${datadir}/cmake/Modules"\n')
+ # We don't have the ability to pick up this dependency automatically yet...
+ f.write('\nDEPENDS += "libusb1"\n')
+ f.write('\nTESTLIBOUTPUT = "${COMPONENTS_DIR}/${TUNE_PKGARCH}/${PN}/${libdir}"\n')
# Test devtool build
result = runCmd('devtool build libftdi')
- staging_libdir = get_bb_var('STAGING_LIBDIR', 'libftdi')
- self.assertTrue(staging_libdir, 'Could not query STAGING_LIBDIR variable')
+ bb_vars = get_bb_vars(['TESTLIBOUTPUT', 'STAMP'], 'libftdi')
+ staging_libdir = bb_vars['TESTLIBOUTPUT']
+ self.assertTrue(staging_libdir, 'Could not query TESTLIBOUTPUT variable')
self.assertTrue(os.path.isfile(os.path.join(staging_libdir, 'libftdi1.so.2.1.0')), "libftdi binary not found in STAGING_LIBDIR. Output of devtool build libftdi %s" % result.output)
# Test devtool reset
- stampprefix = get_bb_var('STAMP', 'libftdi')
+ stampprefix = bb_vars['STAMP']
result = runCmd('devtool reset libftdi')
result = runCmd('devtool status')
self.assertNotIn('libftdi', result.output)
@@ -289,13 +392,12 @@ class DevtoolTests(DevtoolBase):
self.assertFalse(matches, 'Stamp files exist for recipe libftdi that should have been cleaned')
self.assertFalse(os.path.isfile(os.path.join(staging_libdir, 'libftdi1.so.2.1.0')), 'libftdi binary still found in STAGING_LIBDIR after cleaning')
- @testcase(1160)
def test_devtool_add_fetch(self):
# Fetch source
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
testver = '0.23'
- url = 'https://pypi.python.org/packages/source/M/MarkupSafe/MarkupSafe-%s.tar.gz' % testver
+ url = 'https://files.pythonhosted.org/packages/c0/41/bae1254e0396c0cc8cf1751cb7d9afc90a602353695af5952530482c963f/MarkupSafe-%s.tar.gz' % testver
testrecipe = 'python-markupsafe'
srcdir = os.path.join(tempdir, testrecipe)
# Test devtool add
@@ -303,7 +405,7 @@ class DevtoolTests(DevtoolBase):
self.add_command_to_tearDown('bitbake -c cleansstate %s' % testrecipe)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool add %s %s -f %s' % (testrecipe, srcdir, url))
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created. %s' % result.output)
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created. %s' % result.output)
self.assertTrue(os.path.isfile(os.path.join(srcdir, 'setup.py')), 'Unable to find setup.py in source directory')
self.assertTrue(os.path.isdir(os.path.join(srcdir, '.git')), 'git repository for external source tree was not created')
# Test devtool status
@@ -335,22 +437,20 @@ class DevtoolTests(DevtoolBase):
checkvars['SRC_URI'] = url
self._test_recipe_contents(recipefile, checkvars, [])
- @testcase(1161)
def test_devtool_add_fetch_git(self):
- # Fetch source
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
- url = 'git://git.yoctoproject.org/libmatchbox'
- checkrev = '462f0652055d89c648ddd54fd7b03f175c2c6973'
- testrecipe = 'libmatchbox2'
+ url = 'gitsm://git.yoctoproject.org/mraa'
+ checkrev = 'ae127b19a50aa54255e4330ccfdd9a5d058e581d'
+ testrecipe = 'mraa'
srcdir = os.path.join(tempdir, testrecipe)
# Test devtool add
self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake -c cleansstate %s' % testrecipe)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool add %s %s -a -f %s' % (testrecipe, srcdir, url))
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created: %s' % result.output)
- self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure.ac')), 'Unable to find configure.ac in source directory')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created: %s' % result.output)
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'imraa', 'imraa.c')), 'Unable to find imraa/imraa.c in source directory')
# Test devtool status
result = runCmd('devtool status')
self.assertIn(testrecipe, result.output)
@@ -360,7 +460,7 @@ class DevtoolTests(DevtoolBase):
self.assertIn('_git.bb', recipefile, 'Recipe file incorrectly named')
checkvars = {}
checkvars['S'] = '${WORKDIR}/git'
- checkvars['PV'] = '1.12+git${SRCPV}'
+ checkvars['PV'] = '1.0+git${SRCPV}'
checkvars['SRC_URI'] = url
checkvars['SRCREV'] = '${AUTOREV}'
self._test_recipe_contents(recipefile, checkvars, [])
@@ -369,7 +469,7 @@ class DevtoolTests(DevtoolBase):
shutil.rmtree(srcdir)
url_rev = '%s;rev=%s' % (url, checkrev)
result = runCmd('devtool add %s %s -f "%s" -V 1.5' % (testrecipe, srcdir, url_rev))
- self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure.ac')), 'Unable to find configure.ac in source directory')
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'imraa', 'imraa.c')), 'Unable to find imraa/imraa.c in source directory')
# Test devtool status
result = runCmd('devtool status')
self.assertIn(testrecipe, result.output)
@@ -384,7 +484,6 @@ class DevtoolTests(DevtoolBase):
checkvars['SRCREV'] = checkrev
self._test_recipe_contents(recipefile, checkvars, [])
- @testcase(1391)
def test_devtool_add_fetch_simple(self):
# Fetch source from a remote URL, auto-detecting name
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
@@ -397,7 +496,7 @@ class DevtoolTests(DevtoolBase):
self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool add %s' % url)
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created. %s' % result.output)
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created. %s' % result.output)
self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure')), 'Unable to find configure script in source directory')
self.assertTrue(os.path.isdir(os.path.join(srcdir, '.git')), 'git repository for external source tree was not created')
# Test devtool status
@@ -412,52 +511,112 @@ class DevtoolTests(DevtoolBase):
checkvars['SRC_URI'] = url.replace(testver, '${PV}')
self._test_recipe_contents(recipefile, checkvars, [])
- @testcase(1164)
+class DevtoolModifyTests(DevtoolBase):
+
def test_devtool_modify(self):
- # Clean up anything in the workdir/sysroot/sstate cache
- bitbake('mdadm -c cleansstate')
- # Try modifying a recipe
+ import oe.path
+
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean mdadm')
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool modify mdadm -x %s' % tempdir)
- self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile')), 'Extracted source could not be found')
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ self.assertExists(os.path.join(tempdir, 'Makefile'), 'Extracted source could not be found')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
matches = glob.glob(os.path.join(self.workspacedir, 'appends', 'mdadm_*.bbappend'))
self.assertTrue(matches, 'bbappend not created %s' % result.output)
+
# Test devtool status
result = runCmd('devtool status')
self.assertIn('mdadm', result.output)
self.assertIn(tempdir, result.output)
- # Check git repo
self._check_src_repo(tempdir)
- # Try building
- bitbake('mdadm')
- # Try making (minor) modifications to the source
- result = runCmd("sed -i 's!^\.TH.*!.TH MDADM 8 \"\" v9.999-custom!' %s" % os.path.join(tempdir, 'mdadm.8.in'))
- bitbake('mdadm -c package')
- pkgd = get_bb_var('PKGD', 'mdadm')
+
+ bitbake('mdadm -C unpack')
+
+ def check_line(checkfile, expected, message, present=True):
+ # Check for 'expected', on a line on its own, in checkfile.
+ with open(checkfile, 'r') as f:
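+ # A membership test on a file object iterates over its lines, so
+ # appending '\n' matches 'expected' only as a complete line.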
+ if present:
+ self.assertIn(expected + '\n', f, message)
+ else:
+ self.assertNotIn(expected + '\n', f, message)
+
+ modfile = os.path.join(tempdir, 'mdadm.8.in')
+ bb_vars = get_bb_vars(['PKGD', 'mandir'], 'mdadm')
+ pkgd = bb_vars['PKGD']
self.assertTrue(pkgd, 'Could not query PKGD variable')
- mandir = get_bb_var('mandir', 'mdadm')
+ mandir = bb_vars['mandir']
self.assertTrue(mandir, 'Could not query mandir variable')
- if mandir[0] == '/':
- mandir = mandir[1:]
- with open(os.path.join(pkgd, mandir, 'man8', 'mdadm.8'), 'r') as f:
- for line in f:
- if line.startswith('.TH'):
- self.assertEqual(line.rstrip(), '.TH MDADM 8 "" v9.999-custom', 'man file not modified. man searched file path: %s' % os.path.join(pkgd, mandir, 'man8', 'mdadm.8'))
- # Test devtool reset
- stampprefix = get_bb_var('STAMP', 'mdadm')
+ manfile = oe.path.join(pkgd, mandir, 'man8', 'mdadm.8')
+
+ check_line(modfile, 'Linux Software RAID', 'Could not find initial string')
+ check_line(modfile, 'antique pin sardine', 'Unexpectedly found replacement string', present=False)
+
+ result = runCmd("sed -i 's!^Linux Software RAID$!antique pin sardine!' %s" % modfile)
+ check_line(modfile, 'antique pin sardine', 'mdadm.8.in file not modified (sed failed)')
+
+ bitbake('mdadm -c package')
+ check_line(manfile, 'antique pin sardine', 'man file not modified. man searched file path: %s' % manfile)
+
+ result = runCmd('git checkout -- %s' % modfile, cwd=tempdir)
+ check_line(modfile, 'Linux Software RAID', 'man .in file not restored (git failed)')
+
+ bitbake('mdadm -c package')
+ check_line(manfile, 'Linux Software RAID', 'man file not updated. man searched file path: %s' % manfile)
+
result = runCmd('devtool reset mdadm')
result = runCmd('devtool status')
self.assertNotIn('mdadm', result.output)
- self.assertTrue(stampprefix, 'Unable to get STAMP value for recipe mdadm')
- matches = glob.glob(stampprefix + '*')
- self.assertFalse(matches, 'Stamp files exist for recipe mdadm that should have been cleaned')
- @testcase(1166)
+ def test_devtool_buildclean(self):
+ def assertFile(path, *paths):
+ f = os.path.join(path, *paths)
+ self.assertExists(f)
+ def assertNoFile(path, *paths):
+ f = os.path.join(path, *paths)
+ self.assertNotExists(f)
+
+ # Clean up anything in the workdir/sysroot/sstate cache
+ bitbake('mdadm m4 -c cleansstate')
+ # Try modifying a recipe
+ tempdir_mdadm = tempfile.mkdtemp(prefix='devtoolqa')
+ tempdir_m4 = tempfile.mkdtemp(prefix='devtoolqa')
+ builddir_m4 = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir_mdadm)
+ self.track_for_cleanup(tempdir_m4)
+ self.track_for_cleanup(builddir_m4)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake -c clean mdadm m4')
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ self.write_recipeinc('m4', 'EXTERNALSRC_BUILD = "%s"\ndo_clean() {\n\t:\n}\n' % builddir_m4)
+ try:
+ runCmd('devtool modify mdadm -x %s' % tempdir_mdadm)
+ runCmd('devtool modify m4 -x %s' % tempdir_m4)
+ assertNoFile(tempdir_mdadm, 'mdadm')
+ assertNoFile(builddir_m4, 'src/m4')
+ result = bitbake('m4 -e')
+ result = bitbake('mdadm m4 -c compile')
+ self.assertEqual(result.status, 0)
+ assertFile(tempdir_mdadm, 'mdadm')
+ assertFile(builddir_m4, 'src/m4')
+ # Check that buildclean task exists and does call make clean
+ bitbake('mdadm m4 -c buildclean')
+ assertNoFile(tempdir_mdadm, 'mdadm')
+ assertNoFile(builddir_m4, 'src/m4')
+ runCmd('echo "#Trigger rebuild" >> %s/Makefile' % tempdir_mdadm)
+ bitbake('mdadm m4 -c compile')
+ assertFile(tempdir_mdadm, 'mdadm')
+ assertFile(builddir_m4, 'src/m4')
+ bitbake('mdadm m4 -c clean')
+ # Check that buildclean task is run before clean for B == S
+ assertNoFile(tempdir_mdadm, 'mdadm')
+ # Check that buildclean task is not run before clean for B != S
+ assertFile(builddir_m4, 'src/m4')
+ finally:
+ self.delete_recipeinc('m4')
+
def test_devtool_modify_invalid(self):
# Try modifying some recipes
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
@@ -486,7 +645,6 @@ class DevtoolTests(DevtoolBase):
self.assertNotEqual(result.status, 0, 'devtool modify on %s should have failed. devtool output: %s' % (testrecipe, result.output))
self.assertIn('ERROR: ', result.output, 'devtool modify on %s should have given an ERROR' % testrecipe)
- @testcase(1365)
def test_devtool_modify_native(self):
# Check preconditions
self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
@@ -516,10 +674,9 @@ class DevtoolTests(DevtoolBase):
self.assertTrue(inheritnative, 'None of these recipes do "inherit native" - need to adjust testrecipes list: %s' % ', '.join(testrecipes))
- @testcase(1165)
def test_devtool_modify_git(self):
# Check preconditions
- testrecipe = 'mkelfimage'
+ testrecipe = 'psplash'
src_uri = get_bb_var('SRC_URI', testrecipe)
self.assertIn('git://', src_uri, 'This test expects the %s recipe to be a git recipe' % testrecipe)
# Clean up anything in the workdir/sysroot/sstate cache
@@ -528,12 +685,12 @@ class DevtoolTests(DevtoolBase):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
- self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile')), 'Extracted source could not be found')
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created. devtool output: %s' % result.output)
- matches = glob.glob(os.path.join(self.workspacedir, 'appends', 'mkelfimage_*.bbappend'))
+ self.assertExists(os.path.join(tempdir, 'Makefile.am'), 'Extracted source could not be found')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created. devtool output: %s' % result.output)
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', 'psplash_*.bbappend'))
self.assertTrue(matches, 'bbappend not created')
# Test devtool status
result = runCmd('devtool status')
@@ -544,7 +701,6 @@ class DevtoolTests(DevtoolBase):
# Try building
bitbake(testrecipe)
- @testcase(1167)
def test_devtool_modify_localfiles(self):
# Check preconditions
testrecipe = 'lighttpd'
@@ -561,11 +717,11 @@ class DevtoolTests(DevtoolBase):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
- self.assertTrue(os.path.exists(os.path.join(tempdir, 'configure.ac')), 'Extracted source could not be found')
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ self.assertExists(os.path.join(tempdir, 'configure.ac'), 'Extracted source could not be found')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
matches = glob.glob(os.path.join(self.workspacedir, 'appends', '%s_*.bbappend' % testrecipe))
self.assertTrue(matches, 'bbappend not created')
# Test devtool status
@@ -575,18 +731,17 @@ class DevtoolTests(DevtoolBase):
# Try building
bitbake(testrecipe)
- @testcase(1378)
def test_devtool_modify_virtual(self):
# Try modifying a virtual recipe
- virtrecipe = 'virtual/libx11'
- realrecipe = 'libx11'
+ virtrecipe = 'virtual/make'
+ realrecipe = 'make'
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool modify %s -x %s' % (virtrecipe, tempdir))
- self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile.am')), 'Extracted source could not be found')
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ self.assertExists(os.path.join(tempdir, 'Makefile.am'), 'Extracted source could not be found')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
matches = glob.glob(os.path.join(self.workspacedir, 'appends', '%s_*.bbappend' % realrecipe))
self.assertTrue(matches, 'bbappend not created %s' % result.output)
# Test devtool status
@@ -597,13 +752,14 @@ class DevtoolTests(DevtoolBase):
self._check_src_repo(tempdir)
# This is probably sufficient
+class DevtoolUpdateTests(DevtoolBase):
- @testcase(1169)
def test_devtool_update_recipe(self):
# Check preconditions
testrecipe = 'minicom'
- recipefile = get_bb_var('FILE', testrecipe)
- src_uri = get_bb_var('SRC_URI', testrecipe)
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
self.assertNotIn('git://', src_uri, 'This test expects the %s recipe to NOT be a git recipe' % testrecipe)
self._check_repo_status(os.path.dirname(recipefile), [])
# First, modify a recipe
@@ -630,12 +786,12 @@ class DevtoolTests(DevtoolBase):
('??', '.*/0002-Add-a-new-file.patch$')]
self._check_repo_status(os.path.dirname(recipefile), expected_status)
- @testcase(1172)
def test_devtool_update_recipe_git(self):
# Check preconditions
testrecipe = 'mtd-utils'
- recipefile = get_bb_var('FILE', testrecipe)
- src_uri = get_bb_var('SRC_URI', testrecipe)
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
self.assertIn('git://', src_uri, 'This test expects the %s recipe to be a git recipe' % testrecipe)
patches = []
for entry in src_uri.split():
@@ -654,7 +810,7 @@ class DevtoolTests(DevtoolBase):
self._check_src_repo(tempdir)
# Add a couple of commits
# FIXME: this only tests adding, need to also test update and remove
- result = runCmd('echo "# Additional line" >> Makefile', cwd=tempdir)
+ result = runCmd('echo "# Additional line" >> Makefile.am', cwd=tempdir)
result = runCmd('git commit -a -m "Change the Makefile"', cwd=tempdir)
result = runCmd('echo "A new file" > devtool-new-file', cwd=tempdir)
result = runCmd('git add devtool-new-file', cwd=tempdir)
@@ -699,12 +855,12 @@ class DevtoolTests(DevtoolBase):
('??', '%s/0002-Add-a-new-file.patch' % relpatchpath)]
self._check_repo_status(os.path.dirname(recipefile), expected_status)
- @testcase(1170)
def test_devtool_update_recipe_append(self):
# Check preconditions
testrecipe = 'mdadm'
- recipefile = get_bb_var('FILE', testrecipe)
- src_uri = get_bb_var('SRC_URI', testrecipe)
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
self.assertNotIn('git://', src_uri, 'This test expects the %s recipe to NOT be a git recipe' % testrecipe)
self._check_repo_status(os.path.dirname(recipefile), [])
# First, modify a recipe
@@ -734,7 +890,7 @@ class DevtoolTests(DevtoolBase):
appenddir = os.path.join(templayerdir, splitpath[-2], splitpath[-1])
bbappendfile = self._check_bbappend(testrecipe, recipefile, appenddir)
patchfile = os.path.join(appenddir, testrecipe, '0001-Add-our-custom-version.patch')
- self.assertTrue(os.path.exists(patchfile), 'Patch file not created')
+ self.assertExists(patchfile, 'Patch file not created')
# Check bbappend contents
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -751,7 +907,7 @@ class DevtoolTests(DevtoolBase):
# Drop new commit and check patch gets deleted
result = runCmd('git reset HEAD^', cwd=tempsrcdir)
result = runCmd('devtool update-recipe %s -a %s' % (testrecipe, templayerdir))
- self.assertFalse(os.path.exists(patchfile), 'Patch file not deleted')
+ self.assertNotExists(patchfile, 'Patch file not deleted')
expectedlines2 = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
'\n']
with open(bbappendfile, 'r') as f:
@@ -762,17 +918,17 @@ class DevtoolTests(DevtoolBase):
result = runCmd('bitbake-layers remove-layer %s' % templayerdir, cwd=self.builddir)
result = runCmd('devtool update-recipe %s -a %s' % (testrecipe, templayerdir))
self.assertIn('WARNING: Specified layer is not currently enabled in bblayers.conf', result.output)
- self.assertTrue(os.path.exists(patchfile), 'Patch file not created (with disabled layer)')
+ self.assertExists(patchfile, 'Patch file not created (with disabled layer)')
with open(bbappendfile, 'r') as f:
self.assertEqual(expectedlines, f.readlines())
# Deleting isn't expected to work under these circumstances
- @testcase(1171)
def test_devtool_update_recipe_append_git(self):
# Check preconditions
testrecipe = 'mtd-utils'
- recipefile = get_bb_var('FILE', testrecipe)
- src_uri = get_bb_var('SRC_URI', testrecipe)
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
self.assertIn('git://', src_uri, 'This test expects the %s recipe to be a git recipe' % testrecipe)
for entry in src_uri.split():
if entry.startswith('git://'):
@@ -791,7 +947,7 @@ class DevtoolTests(DevtoolBase):
# Check git repo
self._check_src_repo(tempsrcdir)
# Add a commit
- result = runCmd('echo "# Additional line" >> Makefile', cwd=tempsrcdir)
+ result = runCmd('echo "# Additional line" >> Makefile.am', cwd=tempsrcdir)
result = runCmd('git commit -a -m "Change the Makefile"', cwd=tempsrcdir)
self.add_command_to_tearDown('cd %s; rm -f %s/*.patch; git checkout .' % (os.path.dirname(recipefile), testrecipe))
# Create a temporary layer
@@ -803,6 +959,7 @@ class DevtoolTests(DevtoolBase):
f.write('BBFILE_PATTERN_oeselftesttemplayer = "^${LAYERDIR}/"\n')
f.write('BBFILE_PRIORITY_oeselftesttemplayer = "999"\n')
f.write('BBFILE_PATTERN_IGNORE_EMPTY_oeselftesttemplayer = "1"\n')
+ f.write('LAYERSERIES_COMPAT_oeselftesttemplayer = "${LAYERSERIES_COMPAT_core}"\n')
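+ # LAYERSERIES_COMPAT is required for a layer to be accepted by current
+ # OE-Core; reusing ${LAYERSERIES_COMPAT_core} keeps the temporary layer
+ # in sync with whatever release series the core layer declares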
self.add_command_to_tearDown('bitbake-layers remove-layer %s || true' % templayerdir)
result = runCmd('bitbake-layers add-layer %s' % templayerdir, cwd=self.builddir)
# Create the bbappend
@@ -814,7 +971,7 @@ class DevtoolTests(DevtoolBase):
splitpath = os.path.dirname(recipefile).split(os.sep)
appenddir = os.path.join(templayerdir, splitpath[-2], splitpath[-1])
bbappendfile = self._check_bbappend(testrecipe, recipefile, appenddir)
- self.assertFalse(os.path.exists(os.path.join(appenddir, testrecipe)), 'Patch directory should not be created')
+ self.assertNotExists(os.path.join(appenddir, testrecipe), 'Patch directory should not be created')
# Check bbappend contents
result = runCmd('git rev-parse HEAD', cwd=tempsrcdir)
@@ -832,7 +989,7 @@ class DevtoolTests(DevtoolBase):
# Drop new commit and check SRCREV changes
result = runCmd('git reset HEAD^', cwd=tempsrcdir)
result = runCmd('devtool update-recipe -m srcrev %s -a %s' % (testrecipe, templayerdir))
- self.assertFalse(os.path.exists(os.path.join(appenddir, testrecipe)), 'Patch directory should not be created')
+ self.assertNotExists(os.path.join(appenddir, testrecipe), 'Patch directory should not be created')
result = runCmd('git rev-parse HEAD', cwd=tempsrcdir)
expectedlines = set(['SRCREV = "%s"\n' % result.output,
'\n',
@@ -846,7 +1003,7 @@ class DevtoolTests(DevtoolBase):
result = runCmd('bitbake-layers remove-layer %s' % templayerdir, cwd=self.builddir)
result = runCmd('devtool update-recipe -m srcrev %s -a %s' % (testrecipe, templayerdir))
self.assertIn('WARNING: Specified layer is not currently enabled in bblayers.conf', result.output)
- self.assertFalse(os.path.exists(os.path.join(appenddir, testrecipe)), 'Patch directory should not be created')
+ self.assertNotExists(os.path.join(appenddir, testrecipe), 'Patch directory should not be created')
result = runCmd('git rev-parse HEAD', cwd=tempsrcdir)
expectedlines = set(['SRCREV = "%s"\n' % result.output,
'\n',
@@ -856,7 +1013,6 @@ class DevtoolTests(DevtoolBase):
self.assertEqual(expectedlines, set(f.readlines()))
# Deleting isn't expected to work under these circumstances
- @testcase(1370)
def test_devtool_update_recipe_local_files(self):
"""Check that local source files are copied over instead of patched"""
testrecipe = 'makedevs'
@@ -871,6 +1027,8 @@ class DevtoolTests(DevtoolBase):
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
# Check git repo
self._check_src_repo(tempdir)
+ # Try building just to ensure we haven't broken that
+ bitbake("%s" % testrecipe)
# Edit / commit local source
runCmd('echo "/* Foobar */" >> oe-local-files/makedevs.c', cwd=tempdir)
runCmd('echo "Foo" > oe-local-files/new-local', cwd=tempdir)
@@ -886,11 +1044,14 @@ class DevtoolTests(DevtoolBase):
('??', '.*/makedevs/0001-Add-new-file.patch$')]
self._check_repo_status(os.path.dirname(recipefile), expected_status)
- @testcase(1371)
def test_devtool_update_recipe_local_files_2(self):
"""Check local source files support when oe-local-files is in Git"""
- testrecipe = 'lzo'
+ testrecipe = 'devtool-test-local'
recipefile = get_bb_var('FILE', testrecipe)
+ recipedir = os.path.dirname(recipefile)
+ result = runCmd('git status --porcelain .', cwd=recipedir)
+ if result.output.strip():
+ self.fail('Recipe directory for %s contains uncommitted changes' % testrecipe)
# Setup srctree for modifying the recipe
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -904,9 +1065,9 @@ class DevtoolTests(DevtoolBase):
runCmd('git add oe-local-files', cwd=tempdir)
runCmd('git commit -m "Add local sources"', cwd=tempdir)
# Edit / commit local sources
- runCmd('echo "# Foobar" >> oe-local-files/acinclude.m4', cwd=tempdir)
+ runCmd('echo "# Foobar" >> oe-local-files/file1', cwd=tempdir)
runCmd('git commit -am "Edit existing file"', cwd=tempdir)
- runCmd('git rm oe-local-files/run-ptest', cwd=tempdir)
+ runCmd('git rm oe-local-files/file2', cwd=tempdir)
runCmd('git commit -m"Remove file"', cwd=tempdir)
runCmd('echo "Foo" > oe-local-files/new-local', cwd=tempdir)
runCmd('git add oe-local-files/new-local', cwd=tempdir)
@@ -918,39 +1079,109 @@ class DevtoolTests(DevtoolBase):
os.path.dirname(recipefile))
# Checkout unmodified file to working copy -> devtool should still pick
# the modified version from HEAD
- runCmd('git checkout HEAD^ -- oe-local-files/acinclude.m4', cwd=tempdir)
+ runCmd('git checkout HEAD^ -- oe-local-files/file1', cwd=tempdir)
runCmd('devtool update-recipe %s' % testrecipe)
expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile)),
- (' M', '.*/acinclude.m4$'),
- (' D', '.*/run-ptest$'),
+ (' M', '.*/file1$'),
+ (' D', '.*/file2$'),
('??', '.*/new-local$'),
('??', '.*/0001-Add-new-file.patch$')]
self._check_repo_status(os.path.dirname(recipefile), expected_status)
- @testcase(1163)
+ def test_devtool_update_recipe_local_files_3(self):
+ # First, modify the recipe
+ testrecipe = 'devtool-test-localonly'
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be building it)
+ result = runCmd('devtool modify %s' % testrecipe)
+ # Modify one file
+ runCmd('echo "Another line" >> file2', cwd=os.path.join(self.workspacedir, 'sources', testrecipe, 'oe-local-files'))
+ self.add_command_to_tearDown('cd %s; rm %s/*; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, testrecipe, os.path.basename(recipefile)))
+ result = runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = [(' M', '.*/%s/file2$' % testrecipe)]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
+ def test_devtool_update_recipe_local_patch_gz(self):
+ # First, modify the recipe
+ testrecipe = 'devtool-test-patch-gz'
+ if get_bb_var('DISTRO') == 'poky-tiny':
+ self.skipTest("The DISTRO 'poky-tiny' does not provide the dependencies needed by %s" % testrecipe)
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be building it)
+ result = runCmd('devtool modify %s' % testrecipe)
+ # Modify one file
+ srctree = os.path.join(self.workspacedir, 'sources', testrecipe)
+ runCmd('echo "Another line" >> README', cwd=srctree)
+ runCmd('git commit -a --amend --no-edit', cwd=srctree)
+ self.add_command_to_tearDown('cd %s; rm %s/*; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, testrecipe, os.path.basename(recipefile)))
+ result = runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = [(' M', '.*/%s/readme.patch.gz$' % testrecipe)]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+ patch_gz = os.path.join(os.path.dirname(recipefile), testrecipe, 'readme.patch.gz')
+ result = runCmd('file %s' % patch_gz)
+ if 'gzip compressed data' not in result.output:
+ self.fail('New patch file is not gzipped - file reports:\n%s' % result.output)
+
+ def test_devtool_update_recipe_local_files_subdir(self):
+ # Try devtool update-recipe on a recipe that has a file with subdir= set
+ # in SRC_URI, such that it overwrites a file extracted from an archive
+ # that is also in SRC_URI
+ # First, modify the recipe
+ testrecipe = 'devtool-test-subdir'
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be building it)
+ result = runCmd('devtool modify %s' % testrecipe)
+ testfile = os.path.join(self.workspacedir, 'sources', testrecipe, 'testfile')
+ self.assertExists(testfile, 'Extracted source could not be found')
+ with open(testfile, 'r') as f:
+ contents = f.read().rstrip()
+ self.assertEqual(contents, 'Modified version', 'File has apparently not been overwritten as it should have been')
+ # Test devtool update-recipe without modifying any files
+ self.add_command_to_tearDown('cd %s; rm %s/*; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, testrecipe, os.path.basename(recipefile)))
+ result = runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = []
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
+class DevtoolExtractTests(DevtoolBase):
+
def test_devtool_extract(self):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
# Try devtool extract
self.track_for_cleanup(tempdir)
- self.append_config('PREFERRED_PROVIDER_virtual/make = "remake"')
- result = runCmd('devtool extract remake %s' % tempdir)
- self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile.am')), 'Extracted source could not be found')
- # devtool extract shouldn't create the workspace
- self.assertFalse(os.path.exists(self.workspacedir))
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool extract matchbox-terminal %s' % tempdir)
+ self.assertExists(os.path.join(tempdir, 'Makefile.am'), 'Extracted source could not be found')
self._check_src_repo(tempdir)
- @testcase(1379)
def test_devtool_extract_virtual(self):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
# Try devtool extract
self.track_for_cleanup(tempdir)
- result = runCmd('devtool extract virtual/libx11 %s' % tempdir)
- self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile.am')), 'Extracted source could not be found')
- # devtool extract shouldn't create the workspace
- self.assertFalse(os.path.exists(self.workspacedir))
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool extract virtual/make %s' % tempdir)
+ self.assertExists(os.path.join(tempdir, 'Makefile.am'), 'Extracted source could not be found')
self._check_src_repo(tempdir)
- @testcase(1168)
def test_devtool_reset_all(self):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -977,7 +1208,6 @@ class DevtoolTests(DevtoolBase):
matches2 = glob.glob(stampprefix2 + '*')
self.assertFalse(matches2, 'Stamp files exist for recipe %s that should have been cleaned' % testrecipe2)
- @testcase(1272)
def test_devtool_deploy_target(self):
# NOTE: Whilst this test would seemingly be better placed as a runtime test,
# unfortunately the runtime tests run under bitbake and you can't run
@@ -1018,8 +1248,8 @@ class DevtoolTests(DevtoolBase):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
# Test that deploy-target at this point fails (properly)
result = runCmd('devtool deploy-target -n %s root@localhost' % testrecipe, ignore_status=True)
@@ -1038,10 +1268,11 @@ class DevtoolTests(DevtoolBase):
result = runCmd('ssh %s root@%s %s' % (sshargs, qemu.ip, testcommand))
# Check if it deployed all of the files with the right ownership/perms
# First look on the host - need to do this under pseudo to get the correct ownership/perms
- installdir = get_bb_var('D', testrecipe)
- fakerootenv = get_bb_var('FAKEROOTENV', testrecipe)
- fakerootcmd = get_bb_var('FAKEROOTCMD', testrecipe)
- result = runCmd('%s %s find . -type f -exec ls -l {} \;' % (fakerootenv, fakerootcmd), cwd=installdir)
+ bb_vars = get_bb_vars(['D', 'FAKEROOTENV', 'FAKEROOTCMD'], testrecipe)
+ installdir = bb_vars['D']
+ fakerootenv = bb_vars['FAKEROOTENV']
+ fakerootcmd = bb_vars['FAKEROOTCMD']
+ result = runCmd('%s %s find . -type f -exec ls -l {} \\;' % (fakerootenv, fakerootcmd), cwd=installdir)
filelist1 = self._process_ls_output(result.output)
# Now look on the target
@@ -1062,15 +1293,14 @@ class DevtoolTests(DevtoolBase):
result = runCmd('ssh %s root@%s %s' % (sshargs, qemu.ip, testcommand), ignore_status=True)
self.assertNotEqual(result, 0, 'undeploy-target did not remove command as it should have')
- @testcase(1366)
def test_devtool_build_image(self):
"""Test devtool build-image plugin"""
# Check preconditions
self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
image = 'core-image-minimal'
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % image)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
bitbake('%s -c clean' % image)
# Add target and native recipes to workspace
recipes = ['mdadm', 'parted-native']
@@ -1096,7 +1326,8 @@ class DevtoolTests(DevtoolBase):
if reqpkgs:
self.fail('The following packages were not present in the image as expected: %s' % ', '.join(reqpkgs))
- @testcase(1367)
+class DevtoolUpgradeTests(DevtoolBase):
+
def test_devtool_upgrade(self):
# Check preconditions
self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
@@ -1121,10 +1352,10 @@ class DevtoolTests(DevtoolBase):
# Check if srctree at least is populated
self.assertTrue(len(os.listdir(tempdir)) > 0, 'srctree (%s) should be populated with new (%s) source code' % (tempdir, version))
# Check new recipe subdirectory is present
- self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'recipes', recipe, '%s-%s' % (recipe, version))), 'Recipe folder should exist')
+ self.assertExists(os.path.join(self.workspacedir, 'recipes', recipe, '%s-%s' % (recipe, version)), 'Recipe folder should exist')
# Check new recipe file is present
newrecipefile = os.path.join(self.workspacedir, 'recipes', recipe, '%s_%s.bb' % (recipe, version))
- self.assertTrue(os.path.exists(newrecipefile), 'Recipe file should exist after upgrade')
+ self.assertExists(newrecipefile, 'Recipe file should exist after upgrade')
# Check devtool status and make sure recipe is present
result = runCmd('devtool status')
self.assertIn(recipe, result.output)
@@ -1139,9 +1370,8 @@ class DevtoolTests(DevtoolBase):
result = runCmd('devtool reset %s -n' % recipe)
result = runCmd('devtool status')
self.assertNotIn(recipe, result.output)
- self.assertFalse(os.path.exists(os.path.join(self.workspacedir, 'recipes', recipe)), 'Recipe directory should not exist after resetting')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after resetting')
- @testcase(1433)
def test_devtool_upgrade_git(self):
# Check preconditions
self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
@@ -1161,7 +1391,7 @@ class DevtoolTests(DevtoolBase):
self.assertTrue(len(os.listdir(tempdir)) > 0, 'srctree (%s) should be populated with new (%s) source code' % (tempdir, commit))
# Check new recipe file is present
newrecipefile = os.path.join(self.workspacedir, 'recipes', recipe, os.path.basename(oldrecipefile))
- self.assertTrue(os.path.exists(newrecipefile), 'Recipe file should exist after upgrade')
+ self.assertExists(newrecipefile, 'Recipe file should exist after upgrade')
# Check devtool status and make sure recipe is present
result = runCmd('devtool status')
self.assertIn(recipe, result.output)
@@ -1176,9 +1406,8 @@ class DevtoolTests(DevtoolBase):
result = runCmd('devtool reset %s -n' % recipe)
result = runCmd('devtool status')
self.assertNotIn(recipe, result.output)
- self.assertFalse(os.path.exists(os.path.join(self.workspacedir, 'recipes', recipe)), 'Recipe directory should not exist after resetting')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after resetting')
- @testcase(1352)
def test_devtool_layer_plugins(self):
"""Test that devtool can use plugins from other layers.
@@ -1191,6 +1420,49 @@ class DevtoolTests(DevtoolBase):
result = runCmd("devtool --quiet selftest-reverse \"%s\"" % s)
self.assertEqual(result.output, s[::-1])
+ def _copy_file_with_cleanup(self, srcfile, basedstdir, *paths):
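+ """Copy srcfile into basedstdir joined with *paths, creating any
+ missing directories; every directory and file created here is
+ registered for cleanup."""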
+ dstdir = basedstdir
+ self.assertExists(dstdir)
+ for p in paths:
+ dstdir = os.path.join(dstdir, p)
+ if not os.path.exists(dstdir):
+ os.makedirs(dstdir)
+ self.track_for_cleanup(dstdir)
+ dstfile = os.path.join(dstdir, os.path.basename(srcfile))
+ if srcfile != dstfile:
+ shutil.copy(srcfile, dstfile)
+ self.track_for_cleanup(dstfile)
+
+ def test_devtool_load_plugin(self):
+ """Test that devtool loads only the first found plugin in BBPATH."""
+
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+
+ devtool = runCmd("which devtool")
+ fromname = runCmd("devtool --quiet pluginfile")
+ srcfile = fromname.output
+ bbpath = get_bb_var('BBPATH')
+ searchpath = bbpath.split(':') + [os.path.dirname(devtool.output)]
+ plugincontent = []
+ with open(srcfile) as fh:
+ plugincontent = fh.readlines()
+ try:
+ self.assertIn('meta-selftest', srcfile, 'wrong bbpath plugin found')
+ for path in searchpath:
+ self._copy_file_with_cleanup(srcfile, path, 'lib', 'devtool')
+ result = runCmd("devtool --quiet count")
+ self.assertEqual(result.output, '1')
+ result = runCmd("devtool --quiet multiloaded")
+ self.assertEqual(result.output, "no")
+ for path in searchpath:
+ result = runCmd("devtool --quiet bbdir")
+ self.assertEqual(result.output, path)
+ os.unlink(os.path.join(result.output, 'lib', 'devtool', 'bbpath.py'))
+ finally:
+ with open(srcfile, 'w') as fh:
+ fh.writelines(plugincontent)
+
def _setup_test_devtool_finish_upgrade(self):
# Check preconditions
self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
@@ -1225,11 +1497,13 @@ class DevtoolTests(DevtoolBase):
recipedir = os.path.dirname(oldrecipefile)
olddir = os.path.join(recipedir, recipe + '-' + oldversion)
patchfn = '0001-Add-a-note-line-to-the-quick-reference.patch'
- self.assertTrue(os.path.exists(os.path.join(olddir, patchfn)), 'Original patch file does not exist')
- return recipe, oldrecipefile, recipedir, olddir, newversion, patchfn
+ backportedpatchfn = 'backported.patch'
+ self.assertExists(os.path.join(olddir, patchfn), 'Original patch file does not exist')
+ self.assertExists(os.path.join(olddir, backportedpatchfn), 'Backported patch file does not exist')
+ return recipe, oldrecipefile, recipedir, olddir, newversion, patchfn, backportedpatchfn
def test_devtool_finish_upgrade_origlayer(self):
- recipe, oldrecipefile, recipedir, olddir, newversion, patchfn = self._setup_test_devtool_finish_upgrade()
+ recipe, oldrecipefile, recipedir, olddir, newversion, patchfn, backportedpatchfn = self._setup_test_devtool_finish_upgrade()
# Ensure the recipe is where we think it should be (so that cleanup doesn't trash things)
self.assertIn('/meta-selftest/', recipedir)
# Try finish to the original layer
@@ -1237,17 +1511,26 @@ class DevtoolTests(DevtoolBase):
result = runCmd('devtool finish %s meta-selftest' % recipe)
result = runCmd('devtool status')
self.assertNotIn(recipe, result.output, 'Recipe should have been reset by finish but wasn\'t')
- self.assertFalse(os.path.exists(os.path.join(self.workspacedir, 'recipes', recipe)), 'Recipe directory should not exist after finish')
- self.assertFalse(os.path.exists(oldrecipefile), 'Old recipe file should have been deleted but wasn\'t')
- self.assertFalse(os.path.exists(os.path.join(olddir, patchfn)), 'Old patch file should have been deleted but wasn\'t')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after finish')
+ self.assertNotExists(oldrecipefile, 'Old recipe file should have been deleted but wasn\'t')
+ self.assertNotExists(os.path.join(olddir, patchfn), 'Old patch file should have been deleted but wasn\'t')
+ self.assertNotExists(os.path.join(olddir, backportedpatchfn), 'Old backported patch file should have been deleted but wasn\'t')
newrecipefile = os.path.join(recipedir, '%s_%s.bb' % (recipe, newversion))
newdir = os.path.join(recipedir, recipe + '-' + newversion)
- self.assertTrue(os.path.exists(newrecipefile), 'New recipe file should have been copied into existing layer but wasn\'t')
- self.assertTrue(os.path.exists(os.path.join(newdir, patchfn)), 'Patch file should have been copied into new directory but wasn\'t')
- self.assertTrue(os.path.exists(os.path.join(newdir, '0002-Add-a-comment-to-the-code.patch')), 'New patch file should have been created but wasn\'t')
+ self.assertExists(newrecipefile, 'New recipe file should have been copied into existing layer but wasn\'t')
+ self.assertExists(os.path.join(newdir, patchfn), 'Patch file should have been copied into new directory but wasn\'t')
+ self.assertNotExists(os.path.join(newdir, backportedpatchfn), 'Backported patch file should not have been copied into new directory but was')
+ self.assertExists(os.path.join(newdir, '0002-Add-a-comment-to-the-code.patch'), 'New patch file should have been created but wasn\'t')
+ with open(newrecipefile, 'r') as f:
+ newcontent = f.read()
+ self.assertNotIn(backportedpatchfn, newcontent, "Backported patch should have been removed from the recipe but wasn't")
+ self.assertIn(patchfn, newcontent, "Old patch should have not been removed from the recipe but was")
+ self.assertIn("0002-Add-a-comment-to-the-code.patch", newcontent, "New patch should have been added to the recipe but wasn't")
+ self.assertIn("http://www.ivarch.com/programs/sources/pv-${PV}.tar.gz", newcontent, "New recipe no longer has upstream source in SRC_URI")
+
def test_devtool_finish_upgrade_otherlayer(self):
- recipe, oldrecipefile, recipedir, olddir, newversion, patchfn = self._setup_test_devtool_finish_upgrade()
+ recipe, oldrecipefile, recipedir, olddir, newversion, patchfn, backportedpatchfn = self._setup_test_devtool_finish_upgrade()
# Ensure the recipe is where we think it should be (so that cleanup doesn't trash things)
self.assertIn('/meta-selftest/', recipedir)
# Try finish to a different layer - should create a bbappend
@@ -1260,13 +1543,21 @@ class DevtoolTests(DevtoolBase):
result = runCmd('devtool finish %s oe-core' % recipe)
result = runCmd('devtool status')
self.assertNotIn(recipe, result.output, 'Recipe should have been reset by finish but wasn\'t')
- self.assertFalse(os.path.exists(os.path.join(self.workspacedir, 'recipes', recipe)), 'Recipe directory should not exist after finish')
- self.assertTrue(os.path.exists(oldrecipefile), 'Old recipe file should not have been deleted')
- self.assertTrue(os.path.exists(os.path.join(olddir, patchfn)), 'Old patch file should not have been deleted')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after finish')
+ self.assertExists(oldrecipefile, 'Old recipe file should not have been deleted')
+ self.assertExists(os.path.join(olddir, patchfn), 'Old patch file should not have been deleted')
+ self.assertExists(os.path.join(olddir, backportedpatchfn), 'Old backported patch file should not have been deleted')
newdir = os.path.join(newrecipedir, recipe + '-' + newversion)
- self.assertTrue(os.path.exists(newrecipefile), 'New recipe file should have been copied into existing layer but wasn\'t')
- self.assertTrue(os.path.exists(os.path.join(newdir, patchfn)), 'Patch file should have been copied into new directory but wasn\'t')
- self.assertTrue(os.path.exists(os.path.join(newdir, '0002-Add-a-comment-to-the-code.patch')), 'New patch file should have been created but wasn\'t')
+ self.assertExists(newrecipefile, 'New recipe file should have been copied into existing layer but wasn\'t')
+ self.assertExists(os.path.join(newdir, patchfn), 'Patch file should have been copied into new directory but wasn\'t')
+ self.assertNotExists(os.path.join(newdir, backportedpatchfn), 'Backported patch file should not have been copied into new directory but was')
+ self.assertExists(os.path.join(newdir, '0002-Add-a-comment-to-the-code.patch'), 'New patch file should have been created but wasn\'t')
+ with open(newrecipefile, 'r') as f:
+ newcontent = f.read()
+ self.assertNotIn(backportedpatchfn, newcontent, "Backported patch should have been removed from the recipe but wasn't")
+ self.assertIn(patchfn, newcontent, "Old patch should have not been removed from the recipe but was")
+ self.assertIn("0002-Add-a-comment-to-the-code.patch", newcontent, "New patch should have been added to the recipe but wasn't")
+ self.assertIn("http://www.ivarch.com/programs/sources/pv-${PV}.tar.gz", newcontent, "New recipe no longer has upstream source in SRC_URI")
def _setup_test_devtool_finish_modify(self):
# Check preconditions
@@ -1283,7 +1574,7 @@ class DevtoolTests(DevtoolBase):
self.track_for_cleanup(tempdir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool modify %s %s' % (recipe, tempdir))
- self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile')), 'Extracted source could not be found')
+ self.assertExists(os.path.join(tempdir, 'Makefile'), 'Extracted source could not be found')
# Test devtool status
result = runCmd('devtool status')
self.assertIn(recipe, result.output)
@@ -1310,7 +1601,7 @@ class DevtoolTests(DevtoolBase):
result = runCmd('devtool finish %s meta' % recipe)
result = runCmd('devtool status')
self.assertNotIn(recipe, result.output, 'Recipe should have been reset by finish but wasn\'t')
- self.assertFalse(os.path.exists(os.path.join(self.workspacedir, 'recipes', recipe)), 'Recipe directory should not exist after finish')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after finish')
expected_status = [(' M', '.*/%s$' % os.path.basename(oldrecipefile)),
('??', '.*/.*-Add-a-comment-to-the-code.patch$')]
self._check_repo_status(recipedir, expected_status)
@@ -1327,14 +1618,14 @@ class DevtoolTests(DevtoolBase):
result = runCmd('devtool finish %s meta-selftest' % recipe)
result = runCmd('devtool status')
self.assertNotIn(recipe, result.output, 'Recipe should have been reset by finish but wasn\'t')
- self.assertFalse(os.path.exists(os.path.join(self.workspacedir, 'recipes', recipe)), 'Recipe directory should not exist after finish')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after finish')
result = runCmd('git status --porcelain .', cwd=recipedir)
if result.output.strip():
self.fail('Recipe directory for %s contains the following unexpected changes after finish:\n%s' % (recipe, result.output.strip()))
recipefn = os.path.splitext(os.path.basename(oldrecipefile))[0]
recipefn = recipefn.split('_')[0] + '_%'
appendfile = os.path.join(appenddir, recipefn + '.bbappend')
- self.assertTrue(os.path.exists(appendfile), 'bbappend %s should have been created but wasn\'t' % appendfile)
+ self.assertExists(appendfile, 'bbappend %s should have been created but wasn\'t' % appendfile)
newdir = os.path.join(appenddir, recipe)
files = os.listdir(newdir)
foundpatch = None
@@ -1346,3 +1637,143 @@ class DevtoolTests(DevtoolBase):
files.remove(foundpatch)
if files:
self.fail('Unexpected file(s) copied next to bbappend: %s' % ', '.join(files))
+
+ def test_devtool_rename(self):
+ # Check preconditions
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+
+ # First run devtool add
+ # We already have this recipe in OE-Core, but that doesn't matter
+ recipename = 'i2c-tools'
+ recipever = '3.1.2'
+ recipefile = os.path.join(self.workspacedir, 'recipes', recipename, '%s_%s.bb' % (recipename, recipever))
+ url = 'http://downloads.yoctoproject.org/mirror/sources/i2c-tools-%s.tar.bz2' % recipever
+ def add_recipe():
+ result = runCmd('devtool add %s' % url)
+ self.assertExists(recipefile, 'Expected recipe file not created')
+ self.assertExists(os.path.join(self.workspacedir, 'sources', recipename), 'Source directory not created')
+ checkvars = {}
+ checkvars['S'] = None
+ checkvars['SRC_URI'] = url.replace(recipever, '${PV}')
+ self._test_recipe_contents(recipefile, checkvars, [])
+ add_recipe()
+ # Now rename it - change both name and version
+ newrecipename = 'mynewrecipe'
+ newrecipever = '456'
+ newrecipefile = os.path.join(self.workspacedir, 'recipes', newrecipename, '%s_%s.bb' % (newrecipename, newrecipever))
+ result = runCmd('devtool rename %s %s -V %s' % (recipename, newrecipename, newrecipever))
+ self.assertExists(newrecipefile, 'Recipe file not renamed')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipename), 'Old recipe directory still exists')
+ newsrctree = os.path.join(self.workspacedir, 'sources', newrecipename)
+ self.assertExists(newsrctree, 'Source directory not renamed')
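+ # With both name and version changed, ${BPN}/${PV} no longer match the
+ # upstream tarball, so devtool is expected to write S and SRC_URI using
+ # the original name and version literally.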
+ checkvars = {}
+ checkvars['S'] = '${WORKDIR}/%s-%s' % (recipename, recipever)
+ checkvars['SRC_URI'] = url
+ self._test_recipe_contents(newrecipefile, checkvars, [])
+ # Try again - change just name this time
+ result = runCmd('devtool reset -n %s' % newrecipename)
+ shutil.rmtree(newsrctree)
+ add_recipe()
+ newrecipefile = os.path.join(self.workspacedir, 'recipes', newrecipename, '%s_%s.bb' % (newrecipename, recipever))
+ result = runCmd('devtool rename %s %s' % (recipename, newrecipename))
+ self.assertExists(newrecipefile, 'Recipe file not renamed')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipename), 'Old recipe directory still exists')
+ self.assertExists(os.path.join(self.workspacedir, 'sources', newrecipename), 'Source directory not renamed')
+ checkvars = {}
+ checkvars['S'] = '${WORKDIR}/%s-${PV}' % recipename
+ checkvars['SRC_URI'] = url.replace(recipever, '${PV}')
+ self._test_recipe_contents(newrecipefile, checkvars, [])
+ # Try again - change just version this time
+ result = runCmd('devtool reset -n %s' % newrecipename)
+ shutil.rmtree(newsrctree)
+ add_recipe()
+ newrecipefile = os.path.join(self.workspacedir, 'recipes', recipename, '%s_%s.bb' % (recipename, newrecipever))
+ result = runCmd('devtool rename %s -V %s' % (recipename, newrecipever))
+ self.assertExists(newrecipefile, 'Recipe file not renamed')
+ self.assertExists(os.path.join(self.workspacedir, 'sources', recipename), 'Source directory no longer exists')
+ checkvars = {}
+ checkvars['S'] = '${WORKDIR}/${BPN}-%s' % recipever
+ checkvars['SRC_URI'] = url
+ self._test_recipe_contents(newrecipefile, checkvars, [])
+
+ def test_devtool_virtual_kernel_modify(self):
+ """
+ Summary: The purpose of this test case is to verify that
+ devtool modify works correctly when building
+ the kernel.
+ Dependencies: NA
+ Steps: 1. Build kernel with bitbake.
+ 2. Save the config file generated.
+ 3. Clean the environment.
+ 4. Use `devtool modify virtual/kernel` to validate the following:
+ 4.1 The source is checked out correctly.
+ 4.2 The resulting configuration is the same as
+ the one saved in step 2.
+ 4.3 The kernel can be built correctly.
+ 4.4 Changes made to the source are reflected in
+ subsequent builds.
+ 4.5 Changes to the configuration are reflected in
+ subsequent builds.
+ Expected: devtool modify is able to check out the kernel source,
+ and modifications to the source and configuration are reflected
+ when building the kernel.
+ """
+ kernel_provider = get_bb_var('PREFERRED_PROVIDER_virtual/kernel')
+ # Clean up the environment
+ bitbake('%s -c clean' % kernel_provider)
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ tempdir_cfg = tempfile.mkdtemp(prefix='config_qa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(tempdir_cfg)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake -c clean %s' % kernel_provider)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ #Step 1
+ #Only the config file is generated here, instead of building the whole
+ #kernel, to reduce the execution time of this test case.
+ bitbake('%s -c configure' % kernel_provider)
+ bbconfig = os.path.join(get_bb_var('B', kernel_provider),'.config')
+ #Step 2
+ runCmd('cp %s %s' % (bbconfig, tempdir_cfg))
+ self.assertExists(os.path.join(tempdir_cfg, '.config'), 'Could not copy .config file from kernel')
+
+ tmpconfig = os.path.join(tempdir_cfg, '.config')
+ #Step 3
+ bitbake('%s -c clean' % kernel_provider)
+ #Step 4.1
+ runCmd('devtool modify virtual/kernel -x %s' % tempdir)
+ self.assertExists(os.path.join(tempdir, 'Makefile'), 'Extracted source could not be found')
+ #Step 4.2
+ configfile = os.path.join(tempdir,'.config')
+ diff = runCmd('diff %s %s' % (tmpconfig, configfile))
+ self.assertEqual(0,diff.status,'Kernel .config file is not the same using bitbake and devtool')
+ #Step 4.3
+ #NOTE: virtual/kernel is mapped to kernel_provider
+ result = runCmd('devtool build %s' % kernel_provider)
+ self.assertEqual(0,result.status,'Cannot build kernel using `devtool build`')
+ kernelfile = os.path.join(get_bb_var('KBUILD_OUTPUT', kernel_provider), 'vmlinux')
+ self.assertExists(kernelfile, 'Kernel was not built correctly')
+
+ #Modify the kernel source
+ modfile = os.path.join(tempdir,'arch/x86/boot/header.S')
+ modstring = "Use a boot loader. Devtool testing."
+ modapplied = runCmd("sed -i 's/Use a boot loader./%s/' %s" % (modstring, modfile))
+ self.assertEqual(0,modapplied.status,'Modification to %s on kernel source failed' % modfile)
+ #Modify the configuration
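+ #(CONFIG_SG_POOL=y is assumed to be enabled in the generated default
+ #config; if a given kernel does not set it, another enabled option would
+ #have to be used instead)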
+ codeconfigfile = os.path.join(tempdir,'.config.new')
+ modconfopt = "CONFIG_SG_POOL=n"
+ modconf = runCmd("sed -i 's/CONFIG_SG_POOL=y/%s/' %s" % (modconfopt, codeconfigfile))
+ self.assertEqual(0,modconf.status,'Modification to %s failed' % codeconfigfile)
+ #Build again kernel with devtool
+ rebuild = runCmd('devtool build %s' % kernel_provider)
+ self.assertEqual(0,rebuild.status,'Fail to build kernel after modification of source and config')
+ #Step 4.4
+ bzimagename = 'bzImage-' + get_bb_var('KERNEL_VERSION_NAME', kernel_provider)
+ bzimagefile = os.path.join(get_bb_var('D', kernel_provider),'boot', bzimagename)
+ checkmodcode = runCmd("grep '%s' %s" % (modstring, bzimagefile))
+ self.assertEqual(0,checkmodcode.status,'Modification to kernel source not reflected in built image')
+ #Step 4.5
+ checkmodconfig = runCmd("grep %s %s" % (modconfopt, codeconfigfile))
+ self.assertEqual(0,checkmodconfig.status,'Modification to configuration file failed')
diff --git a/meta/lib/oeqa/selftest/cases/distrodata.py b/meta/lib/oeqa/selftest/cases/distrodata.py
new file mode 100644
index 0000000000..68ba556485
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/distrodata.py
@@ -0,0 +1,89 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+from oeqa.utils.decorators import testcase
+from oeqa.utils.ftools import write_file
+
+import bb.tinfoil
+import oe.recipeutils
+
+class Distrodata(OESelftestTestCase):
+
+ def test_checkpkg(self):
+ """
+ Summary: Test that upstream version checks do not regress
+ Expected: Upstream version checks should succeed except for the recipes listed in the exception list.
+ Product: oe-core
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
+ """
+ feature = 'LICENSE_FLAGS_WHITELIST += " commercial"\n'
+ self.write_config(feature)
+
+ pkgs = oe.recipeutils.get_recipe_upgrade_status()
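+ # pkg[0] is the recipe name and pkg[1] its upstream-check status:
+ # 'UNKNOWN_BROKEN' means the check failed without UPSTREAM_VERSION_UNKNOWN
+ # set, 'KNOWN_BROKEN' means the check succeeded even though the recipe
+ # sets UPSTREAM_VERSION_UNKNOWN.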
+
+ regressed_failures = [pkg[0] for pkg in pkgs if pkg[1] == 'UNKNOWN_BROKEN']
+ regressed_successes = [pkg[0] for pkg in pkgs if pkg[1] == 'KNOWN_BROKEN']
+ msg = ""
+ if len(regressed_failures) > 0:
+ msg = msg + """
+The following packages failed upstream version checks. Please fix them using UPSTREAM_CHECK_URI/UPSTREAM_CHECK_REGEX
+(when using tarballs) or UPSTREAM_CHECK_GITTAGREGEX (when using git). If an upstream version check cannot be performed
+(for example, if upstream does not use git tags), you can set UPSTREAM_VERSION_UNKNOWN to '1' in the recipe to acknowledge
+that the check cannot be performed.
+""" + "\n".join(regressed_failures)
+ if len(regressed_successes) > 0:
+ msg = msg + """
+The following packages have been checked successfully for upstream versions,
+but their recipes claim otherwise by setting UPSTREAM_VERSION_UNKNOWN. Please remove that line from the recipes.
+""" + "\n".join(regressed_successes)
+ self.assertTrue(len(regressed_failures) == 0 and len(regressed_successes) == 0, msg)
+
+ def test_maintainers(self):
+ """
+ Summary: Test that oe-core recipes have a maintainer
+ Expected: All oe-core recipes (except a few special static/testing ones) should have a maintainer listed in maintainers.inc file.
+ Product: oe-core
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
+ """
+ def is_exception(pkg):
+ exceptions = ["packagegroup-", "initramfs-", "systemd-machine-units", "target-sdk-provides-dummy"]
+ for i in exceptions:
+ if i in pkg:
+ return True
+ return False
+
+ feature = 'require conf/distro/include/maintainers.inc\n'
+ self.write_config(feature)
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False)
+
+ with_maintainer_list = []
+ no_maintainer_list = []
+ # We could have used all_recipes() here, but this method will find
+ # every recipe if we ever move to setting RECIPE_MAINTAINER in recipe files
+ # instead of maintainers.inc
+ for fn in tinfoil.all_recipe_files(variants=False):
+ if '/meta/recipes-' not in fn:
+ # We are only interested in OE-Core
+ continue
+ rd = tinfoil.parse_recipe_file(fn, appends=False)
+ pn = rd.getVar('PN')
+ if is_exception(pn):
+ continue
+ if rd.getVar('RECIPE_MAINTAINER'):
+ with_maintainer_list.append((pn, fn))
+ else:
+ no_maintainer_list.append((pn, fn))
+
+ if no_maintainer_list:
+ self.fail("""
+The following recipes do not have a maintainer assigned to them. Please add an entry to meta/conf/distro/include/maintainers.inc file.
+""" + "\n".join(['%s (%s)' % i for i in no_maintainer_list]))
+
+ if not with_maintainer_list:
+ self.fail("""
+The list of oe-core recipes with maintainers is empty. This may indicate that the test has regressed and needs fixing.
+""")
diff --git a/meta/lib/oeqa/selftest/eSDK.py b/meta/lib/oeqa/selftest/cases/eSDK.py
index 9d5c680948..862849af35 100644
--- a/meta/lib/oeqa/selftest/eSDK.py
+++ b/meta/lib/oeqa/selftest/cases/eSDK.py
@@ -1,21 +1,20 @@
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
import tempfile
import shutil
import os
import glob
-import logging
-import subprocess
-import oeqa.utils.ftools as ftools
-from oeqa.utils.decorators import testcase
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var
-from oeqa.utils.httpserver import HTTPService
-
-class oeSDKExtSelfTest(oeSelfTest):
+import time
+import bb.utils
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+
+class oeSDKExtSelfTest(OESelftestTestCase):
"""
# Bugzilla Test Plan: 6033
# This code is planned to be part of the automation for eSDK containing
- # Install libraries and headers, image generation binary feeds.
+ # Install libraries and headers, image generation binary feeds, sdk-update.
"""
@staticmethod
@@ -24,7 +23,7 @@ class oeSDKExtSelfTest(oeSelfTest):
# whichever environment oe-selftest was loaded in (i586, x86_64)
pattern = os.path.join(tmpdir_eSDKQA, 'environment-setup-*')
return glob.glob(pattern)[0]
-
+
@staticmethod
def run_esdk_cmd(env_eSDK, tmpdir_eSDKQA, cmd, postconfig=None, **options):
if postconfig:
@@ -36,7 +35,7 @@ class oeSDKExtSelfTest(oeSelfTest):
if not 'shell' in options:
options['shell'] = True
- runCmd("cd %s; . %s; %s" % (tmpdir_eSDKQA, env_eSDK, cmd), **options)
+ runCmd("cd %s; unset BBPATH; unset BUILDDIR; . %s; %s" % (tmpdir_eSDKQA, env_eSDK, cmd), **options)
@staticmethod
def generate_eSDK(image):
@@ -47,57 +46,75 @@ class oeSDKExtSelfTest(oeSelfTest):
def get_eSDK_toolchain(image):
pn_task = '%s -c populate_sdk_ext' % image
- sdk_deploy = get_bb_var('SDK_DEPLOY', pn_task)
- toolchain_name = get_bb_var('TOOLCHAINEXT_OUTPUTNAME', pn_task)
+ bb_vars = get_bb_vars(['SDK_DEPLOY', 'TOOLCHAINEXT_OUTPUTNAME'], pn_task)
+ sdk_deploy = bb_vars['SDK_DEPLOY']
+ toolchain_name = bb_vars['TOOLCHAINEXT_OUTPUTNAME']
return os.path.join(sdk_deploy, toolchain_name + '.sh')
-
+
+ @staticmethod
+ def update_configuration(cls, image, tmpdir_eSDKQA, env_eSDK, ext_sdk_path):
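+ # Note: this helper is a staticmethod that receives the test class
+ # explicitly as 'cls'; the image/tmpdir/env/path arguments are unused
+ # here in favour of the attributes already stored on the class.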
+ sstate_dir = os.path.join(os.environ['BUILDDIR'], 'sstate-cache')
+
+ oeSDKExtSelfTest.generate_eSDK(cls.image)
+
+ cls.ext_sdk_path = oeSDKExtSelfTest.get_eSDK_toolchain(cls.image)
+ runCmd("%s -y -d \"%s\"" % (cls.ext_sdk_path, cls.tmpdir_eSDKQA))
+
+ cls.env_eSDK = oeSDKExtSelfTest.get_esdk_environment('', cls.tmpdir_eSDKQA)
+
+ sstate_config="""
+SDK_LOCAL_CONF_WHITELIST = "SSTATE_MIRRORS"
+SSTATE_MIRRORS = "file://.* file://%s/PATH"
+CORE_IMAGE_EXTRA_INSTALL = "perl"
+ """ % sstate_dir
+
+ with open(os.path.join(cls.tmpdir_eSDKQA, 'conf', 'local.conf'), 'a+') as f:
+ f.write(sstate_config)
@classmethod
def setUpClass(cls):
- # Start to serve sstate dir
- sstate_dir = os.path.join(os.environ['BUILDDIR'], 'sstate-cache')
- cls.http_service = HTTPService(sstate_dir)
- cls.http_service.start()
+ super(oeSDKExtSelfTest, cls).setUpClass()
+ cls.image = 'core-image-minimal'
- http_url = "127.0.0.1:%d" % cls.http_service.port
-
- image = 'core-image-minimal'
+ bb_vars = get_bb_vars(['SSTATE_DIR', 'WORKDIR'], cls.image)
+ bb.utils.mkdirhier(bb_vars["WORKDIR"])
+ cls.tmpdirobj = tempfile.TemporaryDirectory(prefix="selftest-esdk-", dir=bb_vars["WORKDIR"])
+ cls.tmpdir_eSDKQA = cls.tmpdirobj.name
- cls.tmpdir_eSDKQA = tempfile.mkdtemp(prefix='eSDKQA')
- oeSDKExtSelfTest.generate_eSDK(image)
+ oeSDKExtSelfTest.generate_eSDK(cls.image)
# Install eSDK
- ext_sdk_path = oeSDKExtSelfTest.get_eSDK_toolchain(image)
- runCmd("%s -y -d \"%s\"" % (ext_sdk_path, cls.tmpdir_eSDKQA))
+ cls.ext_sdk_path = oeSDKExtSelfTest.get_eSDK_toolchain(cls.image)
+ runCmd("%s -y -d \"%s\"" % (cls.ext_sdk_path, cls.tmpdir_eSDKQA))
cls.env_eSDK = oeSDKExtSelfTest.get_esdk_environment('', cls.tmpdir_eSDKQA)
# Configure eSDK to use sstate mirror from poky
sstate_config="""
SDK_LOCAL_CONF_WHITELIST = "SSTATE_MIRRORS"
-SSTATE_MIRRORS = "file://.* http://%s/PATH"
- """ % http_url
+SSTATE_MIRRORS = "file://.* file://%s/PATH"
+ """ % bb_vars["SSTATE_DIR"]
with open(os.path.join(cls.tmpdir_eSDKQA, 'conf', 'local.conf'), 'a+') as f:
f.write(sstate_config)
-
@classmethod
def tearDownClass(cls):
- shutil.rmtree(cls.tmpdir_eSDKQA)
- cls.http_service.stop()
+ for i in range(0, 10):
+ if os.path.exists(os.path.join(cls.tmpdir_eSDKQA, 'bitbake.lock')):
+ time.sleep(1)
+ else:
+ break
+ cls.tmpdirobj.cleanup()
+ super().tearDownClass()
- @testcase (1471)
def test_install_libraries_headers(self):
pn_sstate = 'bc'
bitbake(pn_sstate)
cmd = "devtool sdk-install %s " % pn_sstate
oeSDKExtSelfTest.run_esdk_cmd(self.env_eSDK, self.tmpdir_eSDKQA, cmd)
-
- @testcase(1472)
+
def test_image_generation_binary_feeds(self):
image = 'core-image-minimal'
cmd = "devtool build-image %s" % image
oeSDKExtSelfTest.run_esdk_cmd(self.env_eSDK, self.tmpdir_eSDKQA, cmd)
-if __name__ == '__main__':
- unittest.main()
diff --git a/meta/lib/oeqa/selftest/cases/efibootpartition.py b/meta/lib/oeqa/selftest/cases/efibootpartition.py
new file mode 100644
index 0000000000..a61cf9bcb3
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/efibootpartition.py
@@ -0,0 +1,46 @@
+# Based on runqemu.py test file
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: MIT
+#
+
+import re
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, runqemu, get_bb_var
+
+class GenericEFITest(OESelftestTestCase):
+ """EFI booting test class"""
+
+ cmd_common = "runqemu nographic serial wic ovmf"
+ efi_provider = "systemd-boot"
+ image = "core-image-minimal"
+ machine = "qemux86-64"
+ recipes_built = False
+
+ def setUpLocal(self):
+ super(GenericEFITest, self).setUpLocal()
+
+ self.write_config(
+"""
+EFI_PROVIDER = "%s"
+IMAGE_FSTYPES_pn-%s_append = " wic"
+MACHINE = "%s"
+MACHINE_FEATURES_append = " efi"
+WKS_FILE = "efi-bootdisk.wks.in"
+IMAGE_INSTALL_append = " grub-efi systemd-boot kernel-image-bzimage"
+"""
+% (self.efi_provider, self.image, self.machine))
+ if not self.recipes_built:
+ bitbake("ovmf")
+ bitbake(self.image)
+ GenericEFITest.recipes_built = True
+
+ def test_boot_efi(self):
+ """Test generic boot partition with qemu"""
+ cmd = "%s %s" % (self.cmd_common, self.machine)
+ with runqemu(self.image, ssh=False, launch_cmd=cmd) as qemu:
+ self.assertTrue(qemu.runner.logged, "Failed: %s" % cmd)
diff --git a/meta/lib/oeqa/selftest/cases/fetch.py b/meta/lib/oeqa/selftest/cases/fetch.py
new file mode 100644
index 0000000000..76cbadf2ff
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/fetch.py
@@ -0,0 +1,51 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+
+import oe.path
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake
+
+class Fetch(OESelftestTestCase):
+ def test_git_mirrors(self):
+ """
+ Verify that the git fetcher will fall back to the HTTP mirrors. The
+ recipe needs to be one that we have on the Yocto Project source mirror
+ and is hosted in git.
+ """
+
+ # TODO: mktempd instead of hardcoding
+ dldir = os.path.join(self.builddir, "download-git-mirrors")
+ self.track_for_cleanup(dldir)
+
+ # No mirrors, should use git to fetch successfully
+ features = """
+DL_DIR = "%s"
+MIRRORS_forcevariable = ""
+PREMIRRORS_forcevariable = ""
+""" % dldir
+ self.write_config(features)
+ oe.path.remove(dldir, recurse=True)
+ bitbake("dbus-wait -c fetch -f")
+
+ # No mirrors and broken git, should fail
+ features = """
+DL_DIR = "%s"
+GIT_PROXY_COMMAND = "false"
+MIRRORS_forcevariable = ""
+PREMIRRORS_forcevariable = ""
+""" % dldir
+ self.write_config(features)
+ oe.path.remove(dldir, recurse=True)
+ with self.assertRaises(AssertionError):
+ bitbake("dbus-wait -c fetch -f")
+
+ # Broken git but a specific mirror
+ features = """
+DL_DIR = "%s"
+GIT_PROXY_COMMAND = "false"
+MIRRORS_forcevariable = "git://.*/.* http://downloads.yoctoproject.org/mirror/sources/"
+""" % dldir
+ self.write_config(features)
+ oe.path.remove(dldir, recurse=True)
+ bitbake("dbus-wait -c fetch -f")
diff --git a/meta/lib/oeqa/selftest/cases/gcc.py b/meta/lib/oeqa/selftest/cases/gcc.py
new file mode 100644
index 0000000000..3efe15228f
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/gcc.py
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: MIT
+import os
+from oeqa.core.decorator import OETestTag
+from oeqa.core.case import OEPTestResultTestCase
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_var, get_bb_vars, runqemu, Command
+
+def parse_values(content):
+ for i in content:
+ for v in ["PASS", "FAIL", "XPASS", "XFAIL", "UNRESOLVED", "UNSUPPORTED", "UNTESTED", "ERROR", "WARNING"]:
+ if i.startswith(v + ": "):
+ yield i[len(v) + 2:].strip(), v
+ break
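+# For illustration: a .sum line such as "PASS: sometest-01.c" (a made-up
+# name) is yielded as ("sometest-01.c", "PASS").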
+
+class GccSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
+ def check_skip(self, suite):
+ targets = get_bb_var("RUNTIMETARGET", "gcc-runtime").split()
+ if suite not in targets:
+ self.skipTest("Target does not use {0}".format(suite))
+
+ def run_check(self, *suites, ssh = None):
+ targets = set()
+ for s in suites:
+ if s == "gcc":
+ targets.add("check-gcc-c")
+ elif s == "g++":
+ targets.add("check-gcc-c++")
+ else:
+ targets.add("check-target-{}".format(s))
+
+ # configure ssh target
+ features = []
+ features.append('MAKE_CHECK_TARGETS = "{0}"'.format(" ".join(targets)))
+ if ssh is not None:
+ features.append('TOOLCHAIN_TEST_TARGET = "ssh"')
+ features.append('TOOLCHAIN_TEST_HOST = "{0}"'.format(ssh))
+ features.append('TOOLCHAIN_TEST_HOST_USER = "root"')
+ features.append('TOOLCHAIN_TEST_HOST_PORT = "22"')
+ self.write_config("\n".join(features))
+
+ recipe = "gcc-runtime"
+ bitbake("{} -c check".format(recipe))
+
+ bb_vars = get_bb_vars(["B", "TARGET_SYS"], recipe)
+ builddir, target_sys = bb_vars["B"], bb_vars["TARGET_SYS"]
+
+ for suite in suites:
+ sumspath = os.path.join(builddir, "gcc", "testsuite", suite, "{0}.sum".format(suite))
+ if not os.path.exists(sumspath): # check in target dirs
+ sumspath = os.path.join(builddir, target_sys, suite, "testsuite", "{0}.sum".format(suite))
+ if not os.path.exists(sumspath): # handle libstdc++-v3 -> libstdc++
+ sumspath = os.path.join(builddir, target_sys, suite, "testsuite", "{0}.sum".format(suite.split("-")[0]))
+ logpath = os.path.splitext(sumspath)[0] + ".log"
+
+ ptestsuite = "gcc-{}".format(suite) if suite != "gcc" else suite
+ ptestsuite = ptestsuite + "-user" if ssh is None else ptestsuite
+ self.ptest_section(ptestsuite, logfile = logpath)
+ with open(sumspath, "r") as f:
+ for test, result in parse_values(f):
+ self.ptest_result(ptestsuite, test, result)
+
+ def run_check_emulated(self, *args, **kwargs):
+ # build core-image-minimal with required packages
+ default_installed_packages = ["libgcc", "libstdc++", "libatomic", "libgomp"]
+ features = []
+ features.append('IMAGE_FEATURES += "ssh-server-openssh"')
+ features.append('CORE_IMAGE_EXTRA_INSTALL += "{0}"'.format(" ".join(default_installed_packages)))
+ self.write_config("\n".join(features))
+ bitbake("core-image-minimal")
+
+ # wrap the execution with a qemu instance
+ with runqemu("core-image-minimal", runqemuparams = "nographic") as qemu:
+ # validate that SSH is working
+ status, _ = qemu.run("uname")
+ self.assertEqual(status, 0)
+
+ return self.run_check(*args, ssh=qemu.ip, **kwargs)
+
+@OETestTag("toolchain-user")
+class GccCrossSelfTest(GccSelfTestBase):
+ def test_cross_gcc(self):
+ self.run_check("gcc")
+
+@OETestTag("toolchain-user")
+class GxxCrossSelfTest(GccSelfTestBase):
+ def test_cross_gxx(self):
+ self.run_check("g++")
+
+@OETestTag("toolchain-user")
+class GccLibAtomicSelfTest(GccSelfTestBase):
+ def test_libatomic(self):
+ self.run_check("libatomic")
+
+@OETestTag("toolchain-user")
+class GccLibGompSelfTest(GccSelfTestBase):
+ def test_libgomp(self):
+ self.run_check("libgomp")
+
+@OETestTag("toolchain-user")
+class GccLibStdCxxSelfTest(GccSelfTestBase):
+ def test_libstdcxx(self):
+ self.run_check("libstdc++-v3")
+
+@OETestTag("toolchain-user")
+class GccLibSspSelfTest(GccSelfTestBase):
+ def test_libssp(self):
+ self.check_skip("libssp")
+ self.run_check("libssp")
+
+@OETestTag("toolchain-user")
+class GccLibItmSelfTest(GccSelfTestBase):
+ def test_libitm(self):
+ self.check_skip("libitm")
+ self.run_check("libitm")
+
+@OETestTag("toolchain-system")
+class GccCrossSelfTestSystemEmulated(GccSelfTestBase):
+ def test_cross_gcc(self):
+ self.run_check_emulated("gcc")
+
+@OETestTag("toolchain-system")
+class GxxCrossSelfTestSystemEmulated(GccSelfTestBase):
+ def test_cross_gxx(self):
+ self.run_check_emulated("g++")
+
+@OETestTag("toolchain-system")
+class GccLibAtomicSelfTestSystemEmulated(GccSelfTestBase):
+ def test_libatomic(self):
+ self.run_check_emulated("libatomic")
+
+@OETestTag("toolchain-system")
+class GccLibGompSelfTestSystemEmulated(GccSelfTestBase):
+ def test_libgomp(self):
+ self.run_check_emulated("libgomp")
+
+@OETestTag("toolchain-system")
+class GccLibStdCxxSelfTestSystemEmulated(GccSelfTestBase):
+ def test_libstdcxx(self):
+ self.run_check_emulated("libstdc++-v3")
+
+@OETestTag("toolchain-system")
+class GccLibSspSelfTestSystemEmulated(GccSelfTestBase):
+ def test_libssp(self):
+ self.check_skip("libssp")
+ self.run_check_emulated("libssp")
+
+@OETestTag("toolchain-system")
+class GccLibItmSelfTestSystemEmulated(GccSelfTestBase):
+ def test_libitm(self):
+ self.check_skip("libitm")
+ self.run_check_emulated("libitm")
+
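Both this gcc suite and the glibc suite below walk DejaGnu ".sum" files in which every result line has the shape "<STATUS>: <test name>". A standalone sketch of that parsing, with a hypothetical file name (the in-tree helpers are the parse_values() functions):

    # Parse a DejaGnu .sum file into (test, status) pairs.
    RESULTS = ("PASS", "FAIL", "XPASS", "XFAIL", "UNRESOLVED",
               "UNSUPPORTED", "UNTESTED", "ERROR", "WARNING")

    def parse_sum(path):
        with open(path) as f:
            for line in f:
                for status in RESULTS:
                    if line.startswith(status + ": "):
                        yield line[len(status) + 2:].strip(), status
                        break

    # for name, status in parse_sum("gcc.sum"):   # hypothetical file
    #     print(status, name)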
diff --git a/meta/lib/oeqa/selftest/cases/glibc.py b/meta/lib/oeqa/selftest/cases/glibc.py
new file mode 100644
index 0000000000..c687f6ef93
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/glibc.py
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: MIT
+import os
+import contextlib
+from oeqa.core.decorator import OETestTag
+from oeqa.core.case import OEPTestResultTestCase
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_var, get_bb_vars, runqemu, Command
+from oeqa.utils.nfs import unfs_server
+
+def parse_values(content):
+ for i in content:
+ for v in ["PASS", "FAIL", "XPASS", "XFAIL", "UNRESOLVED", "UNSUPPORTED", "UNTESTED", "ERROR", "WARNING"]:
+ if i.startswith(v + ": "):
+ yield i[len(v) + 2:].strip(), v
+ break
+
+class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
+ def run_check(self, ssh = None):
+ # configure ssh target
+ features = []
+ if ssh is not None:
+ features.append('TOOLCHAIN_TEST_TARGET = "ssh"')
+ features.append('TOOLCHAIN_TEST_HOST = "{0}"'.format(ssh))
+ features.append('TOOLCHAIN_TEST_HOST_USER = "root"')
+ features.append('TOOLCHAIN_TEST_HOST_PORT = "22"')
+ # force single threaded test execution
+ features.append('EGLIBCPARALLELISM_task-check_pn-glibc-testsuite = "PARALLELMFLAGS="-j1""')
+ self.write_config("\n".join(features))
+
+ bitbake("glibc-testsuite -c check")
+
+ builddir = get_bb_var("B", "glibc-testsuite")
+
+ ptestsuite = "glibc-user" if ssh is None else "glibc"
+ self.ptest_section(ptestsuite)
+ with open(os.path.join(builddir, "tests.sum"), "r") as f:
+ for test, result in parse_values(f):
+ self.ptest_result(ptestsuite, test, result)
+
+ def run_check_emulated(self):
+ with contextlib.ExitStack() as s:
+ # use the base work dir as the NFS mount, since the recipe directory may not exist
+ tmpdir = get_bb_var("BASE_WORKDIR")
+ nfsport, mountport = s.enter_context(unfs_server(tmpdir))
+
+ # build core-image-minimal with required packages
+ default_installed_packages = [
+ "glibc-charmaps",
+ "libgcc",
+ "libstdc++",
+ "libatomic",
+ "libgomp",
+ # "python3",
+ # "python3-pexpect",
+ "nfs-utils",
+ ]
+ features = []
+ features.append('IMAGE_FEATURES += "ssh-server-openssh"')
+ features.append('CORE_IMAGE_EXTRA_INSTALL += "{0}"'.format(" ".join(default_installed_packages)))
+ self.write_config("\n".join(features))
+ bitbake("core-image-minimal")
+
+ # start runqemu
+ qemu = s.enter_context(runqemu("core-image-minimal", runqemuparams = "nographic"))
+
+ # validate that SSH is working
+ status, _ = qemu.run("uname")
+ self.assertEqual(status, 0)
+
+ # setup nfs mount
+ if qemu.run("mkdir -p \"{0}\"".format(tmpdir))[0] != 0:
+ raise Exception("Failed to setup NFS mount directory on target")
+ mountcmd = "mount -o noac,nfsvers=3,port={0},udp,mountport={1} \"{2}:{3}\" \"{3}\"".format(nfsport, mountport, qemu.server_ip, tmpdir)
+ status, output = qemu.run(mountcmd)
+ if status != 0:
+ raise Exception("Failed to setup NFS mount on target ({})".format(repr(output)))
+
+ self.run_check(ssh = qemu.ip)
+
+@OETestTag("toolchain-user")
+class GlibcSelfTest(GlibcSelfTestBase):
+ def test_glibc(self):
+ self.run_check()
+
+@OETestTag("toolchain-system")
+class GlibcSelfTestSystemEmulated(GlibcSelfTestBase):
+ def test_glibc(self):
+ self.run_check_emulated()
+
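run_check_emulated() keeps the NFS server and the qemu instance alive for the whole run by entering both into a single contextlib.ExitStack, which tears them down in reverse order on exit. A minimal stdlib-only illustration of the pattern:

    import contextlib

    with contextlib.ExitStack() as stack:
        first = stack.enter_context(open("/etc/hostname"))
        second = stack.enter_context(open("/etc/hosts"))
        print(first.read(), second.read())
    # both files are closed here, in reverse order: second, then first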
diff --git a/meta/lib/oeqa/selftest/cases/gotoolchain.py b/meta/lib/oeqa/selftest/cases/gotoolchain.py
new file mode 100644
index 0000000000..3119520f0d
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/gotoolchain.py
@@ -0,0 +1,71 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import glob
+import os
+import shutil
+import tempfile
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_vars
+
+
+class oeGoToolchainSelfTest(OESelftestTestCase):
+ """
+ Test cases for OE's Go toolchain
+ """
+
+ @staticmethod
+ def get_sdk_environment(tmpdir_SDKQA):
+ pattern = os.path.join(tmpdir_SDKQA, "environment-setup-*")
+ # FIXME: this is a very naive implementation
+ return glob.glob(pattern)[0]
+
+ @staticmethod
+ def get_sdk_toolchain():
+ bb_vars = get_bb_vars(['SDK_DEPLOY', 'TOOLCHAIN_OUTPUTNAME'],
+ "meta-go-toolchain")
+ sdk_deploy = bb_vars['SDK_DEPLOY']
+ toolchain_name = bb_vars['TOOLCHAIN_OUTPUTNAME']
+ return os.path.join(sdk_deploy, toolchain_name + ".sh")
+
+ @classmethod
+ def setUpClass(cls):
+ super(oeGoToolchainSelfTest, cls).setUpClass()
+ cls.tmpdir_SDKQA = tempfile.mkdtemp(prefix='SDKQA')
+ cls.go_path = os.path.join(cls.tmpdir_SDKQA, "go")
+ # Build the SDK and locate it in DEPLOYDIR
+ bitbake("meta-go-toolchain")
+ cls.sdk_path = oeGoToolchainSelfTest.get_sdk_toolchain()
+ # Install the SDK into the tmpdir
+ runCmd("sh %s -y -d \"%s\"" % (cls.sdk_path, cls.tmpdir_SDKQA))
+ cls.env_SDK = oeGoToolchainSelfTest.get_sdk_environment(cls.tmpdir_SDKQA)
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.tmpdir_SDKQA, ignore_errors=True)
+ super(oeGoToolchainSelfTest, cls).tearDownClass()
+
+ def run_sdk_go_command(self, gocmd):
+ cmd = "cd %s; " % self.tmpdir_SDKQA
+ cmd = cmd + ". %s; " % self.env_SDK
+ cmd = cmd + "export GOPATH=%s; " % self.go_path
+ cmd = cmd + "${CROSS_COMPILE}go %s" % gocmd
+ return runCmd(cmd).status
+
+ def test_go_dep_build(self):
+ proj = "github.com/golang"
+ name = "dep"
+ ver = "v0.3.1"
+ archive = ".tar.gz"
+ url = "https://%s/%s/archive/%s%s" % (proj, name, ver, archive)
+
+ runCmd("cd %s; wget %s" % (self.tmpdir_SDKQA, url))
+ runCmd("cd %s; tar -xf %s" % (self.tmpdir_SDKQA, ver+archive))
+ runCmd("mkdir -p %s/src/%s" % (self.go_path, proj))
+ runCmd("mv %s/dep-0.3.1 %s/src/%s/%s"
+ % (self.tmpdir_SDKQA, self.go_path, proj, name))
+ retv = self.run_sdk_go_command('build %s/%s/cmd/dep'
+ % (proj, name))
+ self.assertEqual(retv, 0,
+ msg="Running go build failed for %s" % name)
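The tarball download and move in test_go_dep_build() exist only to produce the pre-modules GOPATH layout that "go build" expects, namely $GOPATH/src/<import-path>. A sketch with the test's values; the tmpdir is a hypothetical mkdtemp result:

    import os

    go_path = "/tmp/SDKQAxyz/go"           # hypothetical tmpdir_SDKQA + "/go"
    import_path = "github.com/golang/dep"
    print(os.path.join(go_path, "src", import_path))
    # -> /tmp/SDKQAxyz/go/src/github.com/golang/dep, where
    #    `go build github.com/golang/dep/cmd/dep` then finds the sources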
diff --git a/meta/lib/oeqa/selftest/cases/image_typedep.py b/meta/lib/oeqa/selftest/cases/image_typedep.py
new file mode 100644
index 0000000000..52e1080f13
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/image_typedep.py
@@ -0,0 +1,58 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake
+
+class ImageTypeDepTests(OESelftestTestCase):
+
+ # Verify that when specifying an IMAGE_TYPEDEP_ of the form "foo.bar",
+ # the conversion type bar gets added as a dependency as well
+ def test_conversion_typedep_added(self):
+
+ self.write_recipeinc('emptytest', """
+# Try to empty out the default dependency list
+PACKAGE_INSTALL = ""
+DISTRO_EXTRA_RDEPENDS=""
+
+LICENSE = "MIT"
+IMAGE_FSTYPES = "testfstype"
+
+IMAGE_TYPES_MASKED += "testfstype"
+IMAGE_TYPEDEP_testfstype = "tar.bz2"
+
+inherit image
+
+""")
+ # First get the dependency that should exist for bz2, it will look
+ # like CONVERSION_DEPENDS_bz2="somedep"
+ result = bitbake('-e emptytest')
+
+ dep = None
+ for line in result.output.split('\n'):
+ if line.startswith('CONVERSION_DEPENDS_bz2'):
+ dep = line.split('=')[1].strip('"')
+ break
+
+ self.assertIsNotNone(dep, "CONVERSION_DEPENDS_bz2 dependency not found in bitbake -e output")
+
+ # Now get the dependency task list and check for the expected task
+ # dependency
+ bitbake('-g emptytest')
+
+ taskdependsfile = os.path.join(self.builddir, 'task-depends.dot')
+ dep = dep + ".do_populate_sysroot"
+ depfound = False
+ expectedline = '"emptytest.do_rootfs" -> "{}"'.format(dep)
+
+ with open(taskdependsfile, "r") as f:
+ for line in f:
+ if line.strip() == expectedline:
+ depfound = True
+ break
+
+ if not depfound:
+ raise AssertionError("\"{}\" not found".format(expectedline))
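The task-depends.dot check above reduces to an exact string comparison per line. An illustration with a hypothetical dependency value; the real value is read from CONVERSION_DEPENDS_bz2 in the bitbake -e output:

    dep = "bzip2-replacement-native"   # hypothetical CONVERSION_DEPENDS_bz2 value
    expected = '"emptytest.do_rootfs" -> "{}.do_populate_sysroot"'.format(dep)
    line = '  "emptytest.do_rootfs" -> "bzip2-replacement-native.do_populate_sysroot"\n'
    assert line.strip() == expected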
diff --git a/meta/lib/oeqa/selftest/cases/imagefeatures.py b/meta/lib/oeqa/selftest/cases/imagefeatures.py
new file mode 100644
index 0000000000..5c519ac3d6
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/imagefeatures.py
@@ -0,0 +1,264 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
+from oeqa.utils.sshcontrol import SSHControl
+import os
+import json
+
+class ImageFeatures(OESelftestTestCase):
+
+ test_user = 'tester'
+ root_user = 'root'
+
+ def test_non_root_user_can_connect_via_ssh_without_password(self):
+ """
+ Summary: Check if non root user can connect via ssh without password
+ Expected: 1. Connection to the image via ssh using root user without providing a password should be allowed.
+ 2. Connection to the image via ssh using tester user without providing a password should be allowed.
+ Product: oe-core
+ Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh empty-root-password allow-empty-password allow-root-login"\n'
+ features += 'INHERIT += "extrausers"\n'
+ features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
+ self.write_config(features)
+
+ # Build a core-image-minimal
+ bitbake('core-image-minimal')
+
+ with runqemu("core-image-minimal") as qemu:
+ # Attempt to ssh with each user into qemu with empty password
+ for user in [self.root_user, self.test_user]:
+ ssh = SSHControl(ip=qemu.ip, logfile=qemu.sshlog, user=user)
+ status, output = ssh.run("true")
+ self.assertEqual(status, 0, 'ssh to user %s failed with %s' % (user, output))
+
+ def test_all_users_can_connect_via_ssh_without_password(self):
+ """
+ Summary: Check that non-root users can connect via ssh without password while root cannot
+ Expected: 1. Connection to the image via ssh using root user without providing a password should NOT be allowed.
+ 2. Connection to the image via ssh using tester user without providing a password should be allowed.
+ Product: oe-core
+ Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh allow-empty-password allow-root-login"\n'
+ features += 'INHERIT += "extrausers"\n'
+ features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
+ self.write_config(features)
+
+ # Build a core-image-minimal
+ bitbake('core-image-minimal')
+
+ with runqemu("core-image-minimal") as qemu:
+ # Attempt to ssh with each user into qemu with empty password
+ for user in [self.root_user, self.test_user]:
+ ssh = SSHControl(ip=qemu.ip, logfile=qemu.sshlog, user=user)
+ status, output = ssh.run("true")
+ if user == 'root':
+ self.assertNotEqual(status, 0, 'ssh to user root was allowed when it should not have been')
+ else:
+ self.assertEqual(status, 0, 'ssh to user tester failed with %s' % output)
+
+
+ def test_clutter_image_can_be_built(self):
+ """
+ Summary: Check if clutter image can be built
+ Expected: 1. core-image-clutter can be built
+ Product: oe-core
+ Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ # Build a core-image-clutter
+ bitbake('core-image-clutter')
+
+ def test_wayland_support_in_image(self):
+ """
+ Summary: Check Wayland support in image
+ Expected: 1. Wayland image can be built
+ 2. Wayland feature can be installed
+ Product: oe-core
+ Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ distro_features = get_bb_var('DISTRO_FEATURES')
+ if not ('opengl' in distro_features and 'wayland' in distro_features):
+ self.skipTest('opengl and/or wayland missing from DISTRO_FEATURES so core-image-weston cannot be built')
+
+ # Build a core-image-weston
+ bitbake('core-image-weston')
+
+ def test_bmap(self):
+ """
+ Summary: Check bmap support
+ Expected: 1. core-image-minimal can be built with bmap support
+ 2. core-image-minimal is sparse
+ Product: oe-core
+ Author: Ed Bartosh <ed.bartosh@linux.intel.com>
+ """
+
+ features = 'IMAGE_FSTYPES += " ext4 ext4.bmap ext4.bmap.gz"'
+ self.write_config(features)
+
+ image_name = 'core-image-minimal'
+ bitbake(image_name)
+
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
+ image_path = os.path.join(deploy_dir_image, "%s.ext4" % link_name)
+ bmap_path = "%s.bmap" % image_path
+ gzip_path = "%s.gz" % bmap_path
+
+ # check if result image, bmap and bmap.gz files are in deploy directory
+ self.assertTrue(os.path.exists(image_path))
+ self.assertTrue(os.path.exists(bmap_path))
+ self.assertTrue(os.path.exists(gzip_path))
+
+ # check if result image is sparse
+ image_stat = os.stat(image_path)
+ self.assertGreater(image_stat.st_size, image_stat.st_blocks * 512)
+
+ # check if the resulting gzip is valid
+ self.assertTrue(runCmd('gzip -t %s' % gzip_path))
+
+ def test_hypervisor_fmts(self):
+ """
+ Summary: Check various hypervisor formats
+ Expected: 1. core-image-minimal can be built with vmdk, vdi and
+ qcow2 support.
+ 2. qemu-img says each image has the expected format
+ Product: oe-core
+ Author: Tom Rini <trini@konsulko.com>
+ """
+
+ img_types = [ 'vmdk', 'vdi', 'qcow2' ]
+ features = ""
+ for itype in img_types:
+ features += 'IMAGE_FSTYPES += "wic.%s"\n' % itype
+ self.write_config(features)
+
+ image_name = 'core-image-minimal'
+ bitbake(image_name)
+
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
+ for itype in img_types:
+ image_path = os.path.join(deploy_dir_image, "%s.wic.%s" %
+ (link_name, itype))
+
+ # check if result image file is in deploy directory
+ self.assertTrue(os.path.exists(image_path))
+
+ # check if each result image has the expected format
+ sysroot = get_bb_var('STAGING_DIR_NATIVE', 'core-image-minimal')
+ result = runCmd('qemu-img info --output json %s' % image_path,
+ native_sysroot=sysroot)
+ try:
+ data = json.loads(result.output)
+ self.assertEqual(data.get('format'), itype,
+ msg="Unexpected format in '%s'" % (result.output))
+ except json.decoder.JSONDecodeError:
+ self.fail("Could not parse '%s'" % result.output)
+
+ def test_long_chain_conversion(self):
+ """
+ Summary: Check for chaining many CONVERSION_CMDs together
+ Expected: 1. core-image-minimal can be built with
+ ext4.bmap.gz.bz2.lzo.xz.u-boot and also create a
+ sha256sum
+ 2. The above image has a valid sha256sum
+ Product: oe-core
+ Author: Tom Rini <trini@konsulko.com>
+ """
+
+ conv = "ext4.bmap.gz.bz2.lzo.xz.u-boot"
+ features = 'IMAGE_FSTYPES += "%s %s.sha256sum"' % (conv, conv)
+ self.write_config(features)
+
+ image_name = 'core-image-minimal'
+ bitbake(image_name)
+
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
+ image_path = os.path.join(deploy_dir_image, "%s.%s" %
+ (link_name, conv))
+
+ # check if resulting image is in the deploy directory
+ self.assertTrue(os.path.exists(image_path))
+ self.assertTrue(os.path.exists(image_path + ".sha256sum"))
+
+ # check if the resulting sha256sum agrees
+ self.assertTrue(runCmd('cd %s;sha256sum -c %s.%s.sha256sum' %
+ (deploy_dir_image, link_name, conv)))
+
+ def test_image_fstypes(self):
+ """
+ Summary: Check if image of supported image fstypes can be built
+ Expected: core-image-minimal can be built for various image types
+ Product: oe-core
+ Author: Ed Bartosh <ed.bartosh@linux.intel.com>
+ """
+ image_name = 'core-image-minimal'
+
+ all_image_types = set(get_bb_var("IMAGE_TYPES", image_name).split())
+ blacklist = set(('container', 'elf', 'f2fs', 'multiubi', 'tar.zst'))
+ img_types = all_image_types - blacklist
+
+ config = 'IMAGE_FSTYPES += "%s"\n'\
+ 'MKUBIFS_ARGS ?= "-m 2048 -e 129024 -c 2047"\n'\
+ 'UBINIZE_ARGS ?= "-m 2048 -p 128KiB -s 512"' % ' '.join(img_types)
+ self.write_config(config)
+
+ bitbake(image_name)
+
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
+ for itype in img_types:
+ image_path = os.path.join(deploy_dir_image, "%s.%s" % (link_name, itype))
+ # check if result image is in deploy directory
+ self.assertTrue(os.path.exists(image_path),
+ "%s image %s doesn't exist" % (itype, image_path))
+
+ def test_useradd_static(self):
+ config = """
+USERADDEXTENSION = "useradd-staticids"
+USERADD_ERROR_DYNAMIC = "skip"
+USERADD_UID_TABLES += "files/static-passwd"
+USERADD_GID_TABLES += "files/static-group"
+"""
+ self.write_config(config)
+ bitbake("core-image-base")
+
+ def test_no_busybox_base_utils(self):
+ config = """
+# Enable x11
+DISTRO_FEATURES_append += "x11"
+
+# Switch to systemd
+DISTRO_FEATURES += "systemd"
+VIRTUAL-RUNTIME_init_manager = "systemd"
+VIRTUAL-RUNTIME_initscripts = ""
+VIRTUAL-RUNTIME_syslog = ""
+VIRTUAL-RUNTIME_login_manager = "shadow-base"
+DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit"
+
+# Replace busybox
+PREFERRED_PROVIDER_virtual/base-utils = "packagegroup-core-base-utils"
+VIRTUAL-RUNTIME_base-utils = "packagegroup-core-base-utils"
+VIRTUAL-RUNTIME_base-utils-hwclock = "util-linux-hwclock"
+VIRTUAL-RUNTIME_base-utils-syslog = ""
+
+# Blacklist busybox
+PNBLACKLIST[busybox] = "Don't build this"
+"""
+ self.write_config(config)
+
+ bitbake("--graphviz core-image-sato")
diff --git a/meta/lib/oeqa/selftest/cases/incompatible_lic.py b/meta/lib/oeqa/selftest/cases/incompatible_lic.py
new file mode 100644
index 0000000000..3eabd79097
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/incompatible_lic.py
@@ -0,0 +1,135 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake
+
+class IncompatibleLicenseTests(OESelftestTestCase):
+
+ def lic_test(self, pn, pn_lic, lic):
+ error_msg = 'ERROR: Nothing PROVIDES \'%s\'\n%s was skipped: it has incompatible license(s): %s' % (pn, pn, pn_lic)
+
+ self.write_config("INCOMPATIBLE_LICENSE += \"%s\"" % (lic))
+
+ result = bitbake('%s --dry-run' % (pn), ignore_status=True)
+ if error_msg not in result.output:
+ raise AssertionError(result.output)
+
+ # Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
+ # cannot be built when INCOMPATIBLE_LICENSE contains this SPDX license
+ def test_incompatible_spdx_license(self):
+ self.lic_test('incompatible-license', 'GPL-3.0', 'GPL-3.0')
+
+ # Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
+ # cannot be built when INCOMPATIBLE_LICENSE contains an alias (in
+ # SPDXLICENSEMAP) of this SPDX license
+ def test_incompatible_alias_spdx_license(self):
+ self.lic_test('incompatible-license', 'GPL-3.0', 'GPLv3')
+
+ # Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
+ # cannot be built when INCOMPATIBLE_LICENSE contains a wildcarded license
+ # matching this SPDX license
+ def test_incompatible_spdx_license_wildcard(self):
+ self.lic_test('incompatible-license', 'GPL-3.0', '*GPL-3.0')
+
+ # Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
+ # cannot be built when INCOMPATIBLE_LICENSE contains a wildcarded alias
+ # license matching this SPDX license
+ def test_incompatible_alias_spdx_license_wildcard(self):
+ self.lic_test('incompatible-license', 'GPL-3.0', '*GPLv3')
+
+ # Verify that a package with an alias (from SPDXLICENSEMAP) to an SPDX
+ # license cannot be built when INCOMPATIBLE_LICENSE contains this SPDX
+ # license
+ def test_incompatible_spdx_license_alias(self):
+ self.lic_test('incompatible-license-alias', 'GPL-3.0', 'GPL-3.0')
+
+ # Verify that a package with an alias (from SPDXLICENSEMAP) to an SPDX
+ # license cannot be built when INCOMPATIBLE_LICENSE contains this alias
+ def test_incompatible_alias_spdx_license_alias(self):
+ self.lic_test('incompatible-license-alias', 'GPL-3.0', 'GPLv3')
+
+ # Verify that a package with an alias (from SPDXLICENSEMAP) to an SPDX
+ # license cannot be built when INCOMPATIBLE_LICENSE contains a wildcarded
+ # license matching this SPDX license
+ def test_incompatible_spdx_license_alias_wildcard(self):
+ self.lic_test('incompatible-license-alias', 'GPL-3.0', '*GPL-3.0')
+
+ # Verify that a package with an alias (from SPDXLICENSEMAP) to an SPDX
+ # license cannot be built when INCOMPATIBLE_LICENSE contains a wildcarded
+ # alias license matching the SPDX license
+ def test_incompatible_alias_spdx_license_alias_wildcard(self):
+ self.lic_test('incompatible-license-alias', 'GPL-3.0', '*GPLv3')
+
+ # Verify that a package with multiple SPDX licenses (from
+ # AVAILABLE_LICENSES) cannot be built when INCOMPATIBLE_LICENSE contains
+ # some of them
+ def test_incompatible_spdx_licenses(self):
+ self.lic_test('incompatible-licenses', 'GPL-3.0 LGPL-3.0', 'GPL-3.0 LGPL-3.0')
+
+ # Verify that a package with multiple SPDX licenses (from
+ # AVAILABLE_LICENSES) cannot be built when INCOMPATIBLE_LICENSE contains a
+ # wildcard to some of them
+ def test_incompatible_spdx_licenses_wildcard(self):
+ self.lic_test('incompatible-licenses', 'GPL-3.0 LGPL-3.0', '*GPL-3.0')
+
+ # Verify that a package with multiple SPDX licenses (from
+ # AVAILABLE_LICENSES) cannot be built when INCOMPATIBLE_LICENSE contains a
+ # wildcard matching all licenses
+ def test_incompatible_all_licenses_wildcard(self):
+ self.lic_test('incompatible-licenses', 'GPL-2.0 GPL-3.0 LGPL-3.0', '*')
+
+ # Verify that a package with a non-SPDX license (neither in
+ # AVAILABLE_LICENSES nor in SPDXLICENSEMAP) cannot be built when
+ # INCOMPATIBLE_LICENSE contains this license
+ def test_incompatible_nonspdx_license(self):
+ self.lic_test('incompatible-nonspdx-license', 'FooLicense', 'FooLicense')
+
+class IncompatibleLicensePerImageTests(OESelftestTestCase):
+ def default_config(self):
+ return """
+IMAGE_INSTALL_append = "bash"
+INCOMPATIBLE_LICENSE_pn-core-image-minimal = "GPL-3.0 LGPL-3.0"
+"""
+
+ def test_bash_default(self):
+ self.write_config(self.default_config())
+ error_msg = "ERROR: core-image-minimal-1.0-r0 do_rootfs: Package bash cannot be installed into the image because it has incompatible license(s): GPL-3.0+"
+
+ result = bitbake('core-image-minimal', ignore_status=True)
+ if error_msg not in result.output:
+ raise AssertionError(result.output)
+
+ def test_bash_and_license(self):
+ self.write_config(self.default_config() + '\nLICENSE_append_pn-bash = " & SomeLicense"')
+ error_msg = "ERROR: core-image-minimal-1.0-r0 do_rootfs: Package bash cannot be installed into the image because it has incompatible license(s): GPL-3.0+"
+
+ result = bitbake('core-image-minimal', ignore_status=True)
+ if error_msg not in result.output:
+ raise AssertionError(result.output)
+
+ def test_bash_or_license(self):
+ self.write_config(self.default_config() + '\nLICENSE_append_pn-bash = " | SomeLicense"')
+
+ bitbake('core-image-minimal')
+
+ def test_bash_whitelist(self):
+ self.write_config(self.default_config() + '\nWHITELIST_GPL-3.0_pn-core-image-minimal = "bash"')
+
+ bitbake('core-image-minimal')
+
+class NoGPL3InImagesTests(OESelftestTestCase):
+ def test_core_image_minimal(self):
+ self.write_config("""
+INCOMPATIBLE_LICENSE_pn-core-image-minimal = "GPL-3.0 LGPL-3.0"
+""")
+ bitbake('core-image-minimal')
+
+ def test_core_image_full_cmdline(self):
+ self.write_config("""
+INHERIT += "testimage"\n
+INCOMPATIBLE_LICENSE_pn-core-image-full-cmdline = "GPL-3.0 LGPL-3.0"\n
+RDEPENDS_packagegroup-core-full-cmdline-utils_remove = "bash bc coreutils cpio ed findutils gawk grep mc mc-fish mc-helpers mc-helpers-perl sed tar time"\n
+RDEPENDS_packagegroup-core-full-cmdline-dev-utils_remove = "diffutils m4 make patch"\n
+RDEPENDS_packagegroup-core-full-cmdline-multiuser_remove = "gzip"\n
+""")
+ bitbake('core-image-full-cmdline')
+ bitbake('-c testimage core-image-full-cmdline')
+
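The wildcard cases ("*GPL-3.0" and "*") exercise glob-style expansion of INCOMPATIBLE_LICENSE entries against the known license names. The matching semantics are fnmatch-like; this sketch uses a made-up license list and is not the actual oe-core expansion code:

    import fnmatch

    available = ["GPL-2.0", "GPL-3.0", "LGPL-3.0", "MIT"]

    def expand(patterns):
        return sorted({lic for p in patterns for lic in fnmatch.filter(available, p)})

    print(expand(["*GPL-3.0"]))   # ['GPL-3.0', 'LGPL-3.0']
    print(expand(["*"]))          # all four licenses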
diff --git a/meta/lib/oeqa/selftest/cases/kerneldevelopment.py b/meta/lib/oeqa/selftest/cases/kerneldevelopment.py
new file mode 100644
index 0000000000..a61876ee61
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/kerneldevelopment.py
@@ -0,0 +1,67 @@
+import os
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, get_bb_var
+from oeqa.utils.git import GitRepo
+
+class KernelDev(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(KernelDev, cls).setUpClass()
+ # Create the recipe directory structure inside the created layer
+ cls.layername = 'meta-kerneltest'
+ runCmd('bitbake-layers create-layer %s' % cls.layername)
+ runCmd('mkdir -p %s/recipes-kernel/linux/linux-yocto' % cls.layername)
+ cls.recipes_linuxyocto_dir = os.path.join(
+ cls.builddir, cls.layername, 'recipes-kernel', 'linux', 'linux-yocto')
+ cls.recipeskernel_dir = os.path.dirname(cls.recipes_linuxyocto_dir)
+ runCmd('bitbake-layers add-layer %s' % cls.layername)
+
+ @classmethod
+ def tearDownClass(cls):
+ runCmd('bitbake-layers remove-layer %s' % cls.layername, ignore_status=True)
+ runCmd('rm -rf %s' % cls.layername)
+ super(KernelDev, cls).tearDownClass()
+
+ def setUp(self):
+ super(KernelDev, self).setUp()
+ self.set_machine_config('MACHINE = "qemux86-64"\n')
+
+ def test_apply_patches(self):
+ """
+ Summary: Able to apply a single patch to the Linux kernel source
+ Expected: The README file should exist and the patch changes should be
+ displayed at the end of the file.
+ Product: Kernel Development
+ Author: Yeoh Ee Peng <ee.peng.yeoh@intel.com>
+ AutomatedBy: Mazliana Mohamad <mazliana.mohamad@intel.com>
+ """
+ runCmd('bitbake virtual/kernel -c patch')
+ kernel_source = get_bb_var('STAGING_KERNEL_DIR')
+ readme = os.path.join(kernel_source, 'README')
+
+ # This test step adds the modified 'README' file to git and creates a
+ # patch file '0001-KERNEL_DEV_TEST_CASE.patch' in the same location
+ patch_content = 'This is a test to apply a patch to the kernel'
+ with open(readme, 'a+') as f:
+ f.write(patch_content)
+ repo = GitRepo('%s' % kernel_source, is_topdir=True)
+ repo.run_cmd('add %s' % readme)
+ repo.run_cmd(['commit', '-m', 'KERNEL_DEV_TEST_CASE'])
+ repo.run_cmd(['format-patch', '-1'])
+ patch_name = '0001-KERNEL_DEV_TEST_CASE.patch'
+ patchpath = os.path.join(kernel_source, patch_name)
+ runCmd('mv %s %s' % (patchpath, self.recipes_linuxyocto_dir))
+ runCmd('rm %s ' % readme)
+ self.assertFalse(os.path.exists(readme))
+
+ recipe_append = os.path.join(self.recipeskernel_dir, 'linux-yocto_%.bbappend')
+ with open(recipe_append, 'w+') as fh:
+ fh.write('SRC_URI += "file://%s"\n' % patch_name)
+ fh.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"')
+
+ runCmd('bitbake virtual/kernel -c clean')
+ runCmd('bitbake virtual/kernel -c patch')
+ self.assertTrue(os.path.exists(readme))
+ result = runCmd('tail -n 1 %s' % readme)
+ self.assertEqual(result.output, patch_content)
diff --git a/meta/lib/oeqa/selftest/layerappend.py b/meta/lib/oeqa/selftest/cases/layerappend.py
index 4de5034a94..05e9426fc6 100644
--- a/meta/lib/oeqa/selftest/layerappend.py
+++ b/meta/lib/oeqa/selftest/cases/layerappend.py
@@ -1,15 +1,14 @@
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
-import logging
-import re
-from oeqa.selftest.base import oeSelfTest
-from oeqa.selftest.buildhistory import BuildhistoryBase
+from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var
import oeqa.utils.ftools as ftools
-from oeqa.utils.decorators import testcase
-class LayerAppendTests(oeSelfTest):
+class LayerAppendTests(OESelftestTestCase):
layerconf = """
# We have a conf and classes directory, append to BBPATH
BBPATH .= ":${LAYERDIR}"
@@ -44,18 +43,18 @@ sysroot_stage_all_append() {
append2 = """
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
-SRC_URI_append += "file://appendtest.txt"
+SRC_URI_append = " file://appendtest.txt"
"""
layerappend = ''
def tearDownLocal(self):
if self.layerappend:
ftools.remove_from_file(self.builddir + "/conf/bblayers.conf", self.layerappend)
+ super(LayerAppendTests, self).tearDownLocal()
- @testcase(1196)
def test_layer_appends(self):
corebase = get_bb_var("COREBASE")
- stagingdir = get_bb_var("STAGING_DIR_TARGET")
+
for l in ["0", "1", "2"]:
layer = os.path.join(corebase, "meta-layertest" + l)
self.assertFalse(os.path.exists(layer))
@@ -83,6 +82,7 @@ SRC_URI_append += "file://appendtest.txt"
self.layerappend = "BBLAYERS += \"{0}/meta-layertest0 {0}/meta-layertest1 {0}/meta-layertest2\"".format(corebase)
ftools.append_file(self.builddir + "/conf/bblayers.conf", self.layerappend)
+ stagingdir = get_bb_var("SYSROOT_DESTDIR", "layerappendtest")
bitbake("layerappendtest")
data = ftools.read_file(stagingdir + "/appendtest.txt")
self.assertEqual(data, "Layer 2 test")
@@ -95,5 +95,3 @@ SRC_URI_append += "file://appendtest.txt"
bitbake("layerappendtest")
data = ftools.read_file(stagingdir + "/appendtest.txt")
self.assertEqual(data, "Layer 2 test")
-
-
diff --git a/meta/lib/oeqa/selftest/liboe.py b/meta/lib/oeqa/selftest/cases/liboe.py
index 35131eb240..afe8f8809f 100644
--- a/meta/lib/oeqa/selftest/liboe.py
+++ b/meta/lib/oeqa/selftest/cases/liboe.py
@@ -1,11 +1,19 @@
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import get_bb_var, bitbake, runCmd
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import get_bb_var, get_bb_vars, bitbake, runCmd
import oe.path
-import glob
import os
-import os.path
-class LibOE(oeSelfTest):
+class LibOE(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(LibOE, cls).setUpClass()
+ cls.tmp_dir = get_bb_var('TMPDIR')
+
def test_copy_tree_special(self):
"""
Summary: oe.path.copytree() should copy files with special character
@@ -14,8 +22,7 @@ class LibOE(oeSelfTest):
Product: OE-Core
Author: Joshua Lock <joshua.g.lock@intel.com>
"""
- tmp_dir = get_bb_var('TMPDIR')
- testloc = oe.path.join(tmp_dir, 'liboetests')
+ testloc = oe.path.join(self.tmp_dir, 'liboetests')
src = oe.path.join(testloc, 'src')
dst = oe.path.join(testloc, 'dst')
bb.utils.mkdirhier(testloc)
@@ -40,8 +47,7 @@ class LibOE(oeSelfTest):
Product: OE-Core
Author: Joshua Lock <joshua.g.lock@intel.com>
"""
- tmp_dir = get_bb_var('TMPDIR')
- testloc = oe.path.join(tmp_dir, 'liboetests')
+ testloc = oe.path.join(self.tmp_dir, 'liboetests')
src = oe.path.join(testloc, 'src')
dst = oe.path.join(testloc, 'dst')
bb.utils.mkdirhier(testloc)
@@ -50,7 +56,11 @@ class LibOE(oeSelfTest):
# ensure we have setfattr available
bitbake("attr-native")
- bindir = get_bb_var('STAGING_BINDIR_NATIVE')
+
+ bb_vars = get_bb_vars(['SYSROOT_DESTDIR', 'bindir'], 'attr-native')
+ destdir = bb_vars['SYSROOT_DESTDIR']
+ bindir = bb_vars['bindir']
+ bindir = destdir + bindir
# create a file with xattr and copy it
open(oe.path.join(src, testfilename), 'w+b').close()
@@ -70,8 +80,7 @@ class LibOE(oeSelfTest):
Product: OE-Core
Author: Joshua Lock <joshua.g.lock@intel.com>
"""
- tmp_dir = get_bb_var('TMPDIR')
- testloc = oe.path.join(tmp_dir, 'liboetests')
+ testloc = oe.path.join(self.tmp_dir, 'liboetests')
src = oe.path.join(testloc, 'src')
dst = oe.path.join(testloc, 'dst')
bb.utils.mkdirhier(testloc)
diff --git a/meta/lib/oeqa/selftest/lic-checksum.py b/meta/lib/oeqa/selftest/cases/lic_checksum.py
index 2e81373ae4..bae935d697 100644
--- a/meta/lib/oeqa/selftest/lic-checksum.py
+++ b/meta/lib/oeqa/selftest/cases/lic_checksum.py
@@ -1,16 +1,18 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
import tempfile
-from oeqa.selftest.base import oeSelfTest
+from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake
from oeqa.utils import CommandError
-from oeqa.utils.decorators import testcase
-class LicenseTests(oeSelfTest):
+class LicenseTests(OESelftestTestCase):
# Verify that changing a license file that has an absolute path causes
# the license qa to fail due to a mismatched md5sum.
- @testcase(1197)
def test_nonmatching_checksum(self):
bitbake_cmd = '-c populate_lic emptytest'
error_msg = 'emptytest: The new md5 checksum is 8d777f385d3dfec8815d20f7496026dc'
@@ -19,6 +21,8 @@ class LicenseTests(oeSelfTest):
os.close(lic_file)
self.track_for_cleanup(lic_path)
+ self.write_config("INHERIT_remove = \"report-error\"")
+
self.write_recipeinc('emptytest', """
INHIBIT_DEFAULT_DEPS = "1"
LIC_FILES_CHKSUM = "file://%s;md5=d41d8cd98f00b204e9800998ecf8427e"
@@ -29,7 +33,6 @@ SRC_URI = "file://%s;md5=d41d8cd98f00b204e9800998ecf8427e"
with open(lic_path, "w") as f:
f.write("data")
- self.write_config("INHERIT_remove = \"report-error\"")
result = bitbake(bitbake_cmd, ignore_status=True)
if error_msg not in result.output:
raise AssertionError(result.output)
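The two checksums in this test are not arbitrary: d41d8cd98f00b204e9800998ecf8427e is the md5 of an empty file, and 8d777f385d3dfec8815d20f7496026dc is the md5 of the literal bytes "data" later written into the license file:

    import hashlib

    print(hashlib.md5(b"").hexdigest())      # d41d8cd98f00b204e9800998ecf8427e
    print(hashlib.md5(b"data").hexdigest())  # 8d777f385d3dfec8815d20f7496026dc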
diff --git a/meta/lib/oeqa/selftest/manifest.py b/meta/lib/oeqa/selftest/cases/manifest.py
index 44d0404c5d..5d13f35468 100644
--- a/meta/lib/oeqa/selftest/manifest.py
+++ b/meta/lib/oeqa/selftest/cases/manifest.py
@@ -1,9 +1,11 @@
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import get_bb_var, bitbake
-from oeqa.utils.decorators import testcase
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import get_bb_var, get_bb_vars, bitbake
class ManifestEntry:
'''A manifest item of a collection able to list missing packages'''
@@ -11,7 +13,7 @@ class ManifestEntry:
self.file = entry
self.missing = []
-class VerifyManifest(oeSelfTest):
+class VerifyManifest(OESelftestTestCase):
'''Tests for the manifest files and contents of an image'''
@classmethod
@@ -21,14 +23,14 @@ class VerifyManifest(oeSelfTest):
with open(manifest, "r") as mfile:
for line in mfile:
manifest_entry = os.path.join(path, line.split()[0])
- self.log.debug("{}: looking for {}"\
+ self.logger.debug("{}: looking for {}"\
.format(self.classname, manifest_entry))
if not os.path.isfile(manifest_entry):
manifest_errors.append(manifest_entry)
- self.log.debug("{}: {} not found"\
+ self.logger.debug("{}: {} not found"\
.format(self.classname, manifest_entry))
except OSError as e:
- self.log.debug("{}: checking of {} failed"\
+ self.logger.debug("{}: checking of {} failed"\
.format(self.classname, manifest))
raise e
@@ -40,7 +42,7 @@ class VerifyManifest(oeSelfTest):
target = self.buildtarget if target is None else target
directory = get_bb_var(bb_var, target);
if not directory or not os.path.isdir(directory):
- self.log.debug("{}: {} points to {} when target = {}"\
+ self.logger.debug("{}: {} points to {} when target = {}"\
.format(self.classname, bb_var, directory, target))
raise OSError
return directory
@@ -48,18 +50,18 @@ class VerifyManifest(oeSelfTest):
@classmethod
def setUpClass(self):
+ super(VerifyManifest, self).setUpClass()
self.buildtarget = 'core-image-minimal'
self.classname = 'VerifyManifest'
- self.log.info("{}: doing bitbake {} as a prerequisite of the test"\
+ self.logger.info("{}: doing bitbake {} as a prerequisite of the test"\
.format(self.classname, self.buildtarget))
if bitbake(self.buildtarget).status:
- self.log.debug("{} Failed to setup {}"\
+ self.logger.debug("{} Failed to setup {}"\
.format(self.classname, self.buildtarget))
- unittest.SkipTest("{}: Cannot setup testing scenario"\
+ self.skipTest("{}: Cannot setup testing scenario"\
.format(self.classname))
- @testcase(1380)
def test_SDK_manifest_entries(self):
'''Verifying the SDK manifest entries exist, this may take a build'''
@@ -67,12 +69,12 @@ class VerifyManifest(oeSelfTest):
# to do an additional setup for the sdk
sdktask = '-c populate_sdk'
bbargs = sdktask + ' ' + self.buildtarget
- self.log.debug("{}: doing bitbake {} as a prerequisite of the test"\
+ self.logger.debug("{}: doing bitbake {} as a prerequisite of the test"\
.format(self.classname, bbargs))
if bitbake(bbargs).status:
- self.log.debug("{} Failed to bitbake {}"\
+ self.logger.debug("{} Failed to bitbake {}"\
.format(self.classname, bbargs))
- unittest.SkipTest("{}: Cannot setup testing scenario"\
+ self.skipTest("{}: Cannot setup testing scenario"\
.format(self.classname))
@@ -84,13 +86,11 @@ class VerifyManifest(oeSelfTest):
try:
mdir = self.get_dir_from_bb_var('SDK_DEPLOY', self.buildtarget)
for k in d_target.keys():
- mfilename[k] = "{}-toolchain-{}.{}.manifest".format(
- get_bb_var("SDK_NAME", self.buildtarget),
- get_bb_var("SDK_VERSION", self.buildtarget),
- k)
+ toolchain_outputname = get_bb_var('TOOLCHAIN_OUTPUTNAME', self.buildtarget)
+ mfilename[k] = "{}.{}.manifest".format(toolchain_outputname, k)
mpath[k] = os.path.join(mdir, mfilename[k])
if not os.path.isfile(mpath[k]):
- self.log.debug("{}: {} does not exist".format(
+ self.logger.debug("{}: {} does not exist".format(
self.classname, mpath[k]))
raise IOError
m_entry[k] = ManifestEntry(mpath[k])
@@ -100,11 +100,11 @@ class VerifyManifest(oeSelfTest):
reverse_dir[k] = os.path.join(pkgdata_dir[k],
'runtime-reverse')
if not os.path.exists(reverse_dir[k]):
- self.log.debug("{}: {} does not exist".format(
+ self.logger.debug("{}: {} does not exist".format(
self.classname, reverse_dir[k]))
raise IOError
except OSError:
- raise unittest.SkipTest("{}: Error in obtaining manifest dirs"\
+ raise self.skipTest("{}: Error in obtaining manifest dirs"\
.format(self.classname))
except IOError:
msg = "{}: Error cannot find manifests in the specified dir:\n{}"\
@@ -112,7 +112,7 @@ class VerifyManifest(oeSelfTest):
self.fail(msg)
for k in d_target.keys():
- self.log.debug("{}: Check manifest {}".format(
+ self.logger.debug("{}: Check manifest {}".format(
self.classname, m_entry[k].file))
m_entry[k].missing = self.check_manifest_entries(\
@@ -121,11 +121,10 @@ class VerifyManifest(oeSelfTest):
msg = '{}: {} Error has the following missing entries'\
.format(self.classname, m_entry[k].file)
logmsg = msg+':\n'+'\n'.join(m_entry[k].missing)
- self.log.debug(logmsg)
- self.log.info(msg)
+ self.logger.debug(logmsg)
+ self.logger.info(msg)
self.fail(logmsg)
- @testcase(1381)
def test_image_manifest_entries(self):
'''Verifying the image manifest entries exist'''
@@ -145,14 +144,14 @@ class VerifyManifest(oeSelfTest):
revdir = os.path.join(pkgdata_dir, 'runtime-reverse')
if not os.path.exists(revdir): raise IOError
except OSError:
- raise unittest.SkipTest("{}: Error in obtaining manifest dirs"\
+ raise self.skipTest("{}: Error in obtaining manifest dirs"\
.format(self.classname))
except IOError:
msg = "{}: Error cannot find manifests in dir:\n{}"\
.format(self.classname, mdir)
self.fail(msg)
- self.log.debug("{}: Check manifest {}"\
+ self.logger.debug("{}: Check manifest {}"\
.format(self.classname, m_entry.file))
m_entry.missing = self.check_manifest_entries(\
m_entry.file, revdir)
@@ -160,6 +159,6 @@ class VerifyManifest(oeSelfTest):
msg = '{}: {} Error has the following missing entries'\
.format(self.classname, m_entry.file)
logmsg = msg+':\n'+'\n'.join(m_entry.missing)
- self.log.debug(logmsg)
- self.log.info(msg)
+ self.logger.debug(logmsg)
+ self.logger.info(msg)
self.fail(logmsg)
diff --git a/meta/lib/oeqa/selftest/cases/meta_ide.py b/meta/lib/oeqa/selftest/cases/meta_ide.py
new file mode 100644
index 0000000000..03901a2f32
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/meta_ide.py
@@ -0,0 +1,51 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.sdk.utils.sdkbuildproject import SDKBuildProject
+from oeqa.utils.commands import bitbake, get_bb_vars, runCmd
+from oeqa.core.decorator import OETestTag
+import tempfile
+import shutil
+
+@OETestTag("machine")
+class MetaIDE(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(MetaIDE, cls).setUpClass()
+ bitbake('meta-ide-support')
+ bb_vars = get_bb_vars(['MULTIMACH_TARGET_SYS', 'TMPDIR', 'COREBASE'])
+ cls.environment_script = 'environment-setup-%s' % bb_vars['MULTIMACH_TARGET_SYS']
+ cls.tmpdir = bb_vars['TMPDIR']
+ cls.environment_script_path = '%s/%s' % (cls.tmpdir, cls.environment_script)
+ cls.corebasedir = bb_vars['COREBASE']
+ cls.tmpdir_metaideQA = tempfile.mkdtemp(prefix='metaide')
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.tmpdir_metaideQA, ignore_errors=True)
+ super(MetaIDE, cls).tearDownClass()
+
+ def test_meta_ide_had_installed_meta_ide_support(self):
+ self.assertExists(self.environment_script_path)
+
+ def test_meta_ide_can_compile_c_program(self):
+ runCmd('cp %s/test.c %s' % (self.tc.files_dir, self.tmpdir_metaideQA))
+ runCmd("cd %s; . %s; $CC test.c -lm" % (self.tmpdir_metaideQA, self.environment_script_path))
+ compiled_file = '%s/a.out' % self.tmpdir_metaideQA
+ self.assertExists(compiled_file)
+
+ def test_meta_ide_can_build_cpio_project(self):
+ dl_dir = self.td.get('DL_DIR', None)
+ self.project = SDKBuildProject(self.tmpdir_metaideQA + "/cpio/", self.environment_script_path,
+ "https://ftp.gnu.org/gnu/cpio/cpio-2.12.tar.gz",
+ self.tmpdir_metaideQA, self.td['DATETIME'], dl_dir=dl_dir)
+ self.project.download_archive()
+ self.assertEqual(self.project.run_configure(), 0,
+ msg="Running configure failed")
+ self.assertEqual(self.project.run_make(), 0,
+ msg="Running make failed")
+ self.assertEqual(self.project.run_install(), 0,
+ msg="Running make install failed")
diff --git a/meta/lib/oeqa/selftest/cases/multiconfig.py b/meta/lib/oeqa/selftest/cases/multiconfig.py
new file mode 100644
index 0000000000..39b92f2439
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/multiconfig.py
@@ -0,0 +1,72 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import textwrap
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake
+
+class MultiConfig(OESelftestTestCase):
+
+ def test_multiconfig(self):
+ """
+ Test that a simple multiconfig build works. This uses the mcextend class and the
+ multiconfig-image-packager test recipe to build a core-image-full-cmdline image which
+ contains a tiny core-image-minimal and a musl core-image-minimal, installed as packages.
+ """
+
+ config = """
+IMAGE_INSTALL_append_pn-core-image-full-cmdline = " multiconfig-image-packager-tiny multiconfig-image-packager-musl"
+BBMULTICONFIG = "tiny musl"
+"""
+ self.write_config(config)
+
+ muslconfig = """
+MACHINE = "qemux86-64"
+DISTRO = "poky"
+TCLIBC = "musl"
+TMPDIR = "${TOPDIR}/tmp-mc-musl"
+"""
+ self.write_config(muslconfig, 'musl')
+
+ tinyconfig = """
+MACHINE = "qemux86"
+DISTRO = "poky-tiny"
+TMPDIR = "${TOPDIR}/tmp-mc-tiny"
+"""
+ self.write_config(tinyconfig, 'tiny')
+
+ # Build a core-image-minimal
+ bitbake('core-image-full-cmdline')
+
+ def test_multiconfig_reparse(self):
+ """
+ Test that changes to a multiconfig conf file are correctly detected and
+ cause a reparse/rebuild of a recipe.
+ """
+ config = textwrap.dedent('''\
+ MCTESTVAR = "test"
+ BBMULTICONFIG = "test"
+ ''')
+ self.write_config(config)
+
+ testconfig = textwrap.dedent('''\
+ MCTESTVAR_append = "1"
+ ''')
+ self.write_config(testconfig, 'test')
+
+ # Check that the 1) the task executed and 2) that it output the correct
+ # value. Note "bitbake -e" is not used because it always reparses the
+ # recipe and we want to ensure that the automatic reparsing and parse
+ # caching is detected.
+ result = bitbake('mc:test:multiconfig-test-parse -c showvar')
+ self.assertIn('MCTESTVAR=test1', result.output.splitlines())
+
+ testconfig = textwrap.dedent('''\
+ MCTESTVAR_append = "2"
+ ''')
+ self.write_config(testconfig, 'test')
+
+ result = bitbake('mc:test:multiconfig-test-parse -c showvar')
+ self.assertIn('MCTESTVAR=test2', result.output.splitlines())
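The MCTESTVAR=test1/test2 assertions depend on BitBake's _append operator concatenating with no implicit space, unlike +=, which inserts one. A tiny simulation of the difference:

    def bb_append(value, suffix):      # VAR_append = "..."
        return value + suffix

    def bb_plus_equals(value, extra):  # VAR += "..."
        return value + " " + extra

    assert bb_append("test", "1") == "test1"
    assert bb_plus_equals("test", "1") == "test 1"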
diff --git a/meta/lib/oeqa/selftest/cases/oelib/__init__.py b/meta/lib/oeqa/selftest/cases/oelib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/oelib/__init__.py
diff --git a/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py b/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py
new file mode 100644
index 0000000000..6d80827652
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py
@@ -0,0 +1,99 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+from oeqa.selftest.case import OESelftestTestCase
+import tempfile
+from oeqa.utils.commands import get_bb_var
+
+class TestBlobParsing(OESelftestTestCase):
+
+ def setUp(self):
+ import time
+ self.repo_path = tempfile.mkdtemp(prefix='selftest-buildhistory',
+ dir=get_bb_var('TOPDIR'))
+
+ try:
+ from git import Repo
+ self.repo = Repo.init(self.repo_path)
+ except ImportError:
+ self.skipTest('Python module GitPython is not present')
+
+ self.test_file = "test"
+ self.var_map = {}
+
+ def tearDown(self):
+ import shutil
+ shutil.rmtree(self.repo_path)
+
+ def commit_vars(self, to_add={}, to_remove = [], msg="A commit message"):
+ if len(to_add) == 0 and len(to_remove) == 0:
+ return
+
+ for k in to_remove:
+ self.var_map.pop(k, None)
+ for k in to_add:
+ self.var_map[k] = to_add[k]
+
+ with open(os.path.join(self.repo_path, self.test_file), 'w') as repo_file:
+ for k in self.var_map:
+ repo_file.write("%s = %s\n" % (k, self.var_map[k]))
+
+ self.repo.git.add("--all")
+ self.repo.git.commit(message=msg)
+
+ def test_blob_to_dict(self):
+ """
+ Test conversion of git blobs to dictionaries
+ """
+ from oe.buildhistory_analysis import blob_to_dict
+ valuesmap = { "foo" : "1", "bar" : "2" }
+ self.commit_vars(to_add = valuesmap)
+
+ blob = self.repo.head.commit.tree.blobs[0]
+ self.assertEqual(valuesmap, blob_to_dict(blob),
+ "commit was not translated correctly to dictionary")
+
+ def test_compare_dict_blobs(self):
+ """
+ Test comparison of dictionaries extracted from git blobs
+ """
+ from oe.buildhistory_analysis import compare_dict_blobs
+
+ changesmap = { "foo-2" : ("2", "8"), "bar" : ("","4"), "bar-2" : ("","5")}
+
+ self.commit_vars(to_add = { "foo" : "1", "foo-2" : "2", "foo-3" : "3" })
+ blob1 = self.repo.heads.master.commit.tree.blobs[0]
+
+ self.commit_vars(to_add = { "foo-2" : "8", "bar" : "4", "bar-2" : "5" })
+ blob2 = self.repo.heads.master.commit.tree.blobs[0]
+
+ change_records = compare_dict_blobs(os.path.join(self.repo_path, self.test_file),
+ blob1, blob2, False, False)
+
+ var_changes = { x.fieldname : (x.oldvalue, x.newvalue) for x in change_records}
+ self.assertEqual(changesmap, var_changes, "Changes not reported correctly")
+
+ def test_compare_dict_blobs_default(self):
+ """
+ Test default values for comparison of git blob dictionaries
+ """
+ from oe.buildhistory_analysis import compare_dict_blobs
+ defaultmap = { x : ("default", "1") for x in ["PKG", "PKGE", "PKGV", "PKGR"]}
+
+ self.commit_vars(to_add = { "foo" : "1" })
+ blob1 = self.repo.heads.master.commit.tree.blobs[0]
+
+ self.commit_vars(to_add = { "PKG" : "1", "PKGE" : "1", "PKGV" : "1", "PKGR" : "1" })
+ blob2 = self.repo.heads.master.commit.tree.blobs[0]
+
+ change_records = compare_dict_blobs(os.path.join(self.repo_path, self.test_file),
+ blob1, blob2, False, False)
+
+ var_changes = {}
+ for x in change_records:
+ oldvalue = "default" if ("default" in x.oldvalue) else x.oldvalue
+ var_changes[x.fieldname] = (oldvalue, x.newvalue)
+
+ self.assertEqual(defaultmap, var_changes, "Defaults not set properly")
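Judging from the fixture format commit_vars() writes ("NAME = value" per line), blob_to_dict() behaves roughly as below. This is an inference from the test, not the actual oe.buildhistory_analysis implementation, which takes a git blob object rather than bytes:

    def blob_to_dict_sketch(data: bytes):
        result = {}
        for line in data.decode("utf-8").splitlines():
            name, sep, value = line.partition(" = ")
            if sep:
                result[name] = value
        return result

    assert blob_to_dict_sketch(b"foo = 1\nbar = 2\n") == {"foo": "1", "bar": "2"}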
diff --git a/meta/lib/oeqa/selftest/cases/oelib/elf.py b/meta/lib/oeqa/selftest/cases/oelib/elf.py
new file mode 100644
index 0000000000..d0a28090f2
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/oelib/elf.py
@@ -0,0 +1,26 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from unittest.case import TestCase
+import oe.qa
+
+class TestElf(TestCase):
+ def test_machine_name(self):
+ """
+ Test elf_machine_to_string()
+ """
+ self.assertEqual(oe.qa.elf_machine_to_string(0x02), "SPARC")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x03), "x86")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x08), "MIPS")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x14), "PowerPC")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x28), "ARM")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x2A), "SuperH")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x32), "IA-64")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x3E), "x86-64")
+ self.assertEqual(oe.qa.elf_machine_to_string(0xB7), "AArch64")
+ self.assertEqual(oe.qa.elf_machine_to_string(0xF7), "BPF")
+
+ self.assertEqual(oe.qa.elf_machine_to_string(0x00), "Unknown (0)")
+ self.assertEqual(oe.qa.elf_machine_to_string(0xDEADBEEF), "Unknown (3735928559)")
+ self.assertEqual(oe.qa.elf_machine_to_string("foobar"), "Unknown ('foobar')")
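The last three assertions pin down the fallback formatting: unrecognised machine values are rendered through repr(), so integers print bare and strings keep their quotes. A sketch of just the fallback path, not the real oe.qa machine table:

    def unknown_machine(machine):
        return "Unknown (%r)" % (machine,)

    assert unknown_machine(0x00) == "Unknown (0)"
    assert unknown_machine(0xDEADBEEF) == "Unknown (3735928559)"
    assert unknown_machine("foobar") == "Unknown ('foobar')"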
diff --git a/meta/lib/oe/tests/test_license.py b/meta/lib/oeqa/selftest/cases/oelib/license.py
index c388886184..6ebbee589f 100644
--- a/meta/lib/oe/tests/test_license.py
+++ b/meta/lib/oeqa/selftest/cases/oelib/license.py
@@ -1,4 +1,8 @@
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
+from unittest.case import TestCase
import oe.license
class SeenVisitor(oe.license.LicenseVisitor):
@@ -9,7 +13,7 @@ class SeenVisitor(oe.license.LicenseVisitor):
def visit_Str(self, node):
self.seen.append(node.s)
-class TestSingleLicense(unittest.TestCase):
+class TestSingleLicense(TestCase):
licenses = [
"GPLv2",
"LGPL-2.0",
@@ -37,7 +41,7 @@ class TestSingleLicense(unittest.TestCase):
self.parse(license)
self.assertEqual(cm.exception.license, license)
-class TestSimpleCombinations(unittest.TestCase):
+class TestSimpleCombinations(TestCase):
tests = {
"FOO&BAR": ["FOO", "BAR"],
"BAZ & MOO": ["BAZ", "MOO"],
@@ -66,3 +70,34 @@ class TestComplexCombinations(TestSimpleCombinations):
"(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
}
preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
+
+class TestIsIncluded(TestCase):
+ tests = {
+ ("FOO | BAR", None, None):
+ [True, ["FOO"]],
+ ("FOO | BAR", None, "FOO"):
+ [True, ["BAR"]],
+ ("FOO | BAR", "BAR", None):
+ [True, ["BAR"]],
+ ("FOO | BAR & FOOBAR", "*BAR", None):
+ [True, ["BAR", "FOOBAR"]],
+ ("FOO | BAR & FOOBAR", None, "FOO*"):
+ [False, ["FOOBAR"]],
+ ("(FOO | BAR) & FOOBAR | BARFOO", None, "FOO"):
+ [True, ["BAR", "FOOBAR"]],
+ ("(FOO | BAR) & FOOBAR | BAZ & MOO & BARFOO", None, "FOO"):
+ [True, ["BAZ", "MOO", "BARFOO"]],
+ ("GPL-3.0 & GPL-2.0 & LGPL-2.1 | Proprietary", None, None):
+ [True, ["GPL-3.0", "GPL-2.0", "LGPL-2.1"]],
+ ("GPL-3.0 & GPL-2.0 & LGPL-2.1 | Proprietary", None, "GPL-3.0"):
+ [True, ["Proprietary"]],
+ ("GPL-3.0 & GPL-2.0 & LGPL-2.1 | Proprietary", None, "GPL-3.0 Proprietary"):
+ [False, ["GPL-3.0"]]
+ }
+
+ def test_tests(self):
+ for args, expected in self.tests.items():
+ is_included, licenses = oe.license.is_included(
+ args[0], (args[1] or '').split(), (args[2] or '').split())
+ self.assertEqual(is_included, expected[0])
+ self.assertListEqual(licenses, expected[1])
diff --git a/meta/lib/oe/tests/test_path.py b/meta/lib/oeqa/selftest/cases/oelib/path.py
index 44d068143e..a1cfa08c09 100644
--- a/meta/lib/oe/tests/test_path.py
+++ b/meta/lib/oeqa/selftest/cases/oelib/path.py
@@ -1,11 +1,15 @@
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
+from unittest.case import TestCase
import oe, oe.path
import tempfile
import os
import errno
import shutil
-class TestRealPath(unittest.TestCase):
+class TestRealPath(TestCase):
DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
FILES = [ "etc/passwd", "b/file" ]
LINKS = [
@@ -38,13 +42,6 @@ class TestRealPath(unittest.TestCase):
( "b/test", errno.ENOENT ),
]
- def __del__(self):
- try:
- #os.system("tree -F %s" % self.tmpdir)
- shutil.rmtree(self.tmpdir)
- except:
- pass
-
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
self.root = os.path.join(self.tmpdir, "R")
@@ -59,6 +56,9 @@ class TestRealPath(unittest.TestCase):
for l in self.LINKS:
os.symlink(l[1], os.path.join(self.root, l[0]))
+ def tearDown(self):
+ shutil.rmtree(self.tmpdir)
+
def __realpath(self, file, use_physdir, assume_dir = True):
return oe.path.realpath(os.path.join(self.root, file), self.root,
use_physdir, assume_dir = assume_dir)
diff --git a/meta/lib/oe/tests/test_types.py b/meta/lib/oeqa/selftest/cases/oelib/types.py
index 367cc30e45..7eb49e6f95 100644
--- a/meta/lib/oe/tests/test_types.py
+++ b/meta/lib/oeqa/selftest/cases/oelib/types.py
@@ -1,19 +1,11 @@
-import unittest
-from oe.maketype import create, factory
+#
+# SPDX-License-Identifier: MIT
+#
-class TestTypes(unittest.TestCase):
- def assertIsInstance(self, obj, cls):
- return self.assertTrue(isinstance(obj, cls))
+from unittest.case import TestCase
+from oe.maketype import create
- def assertIsNot(self, obj, other):
- return self.assertFalse(obj is other)
-
- def assertFactoryCreated(self, value, type, **flags):
- cls = factory(type)
- self.assertIsNot(cls, None)
- self.assertIsInstance(create(value, type, **flags), cls)
-
-class TestBooleanType(TestTypes):
+class TestBooleanType(TestCase):
def test_invalid(self):
self.assertRaises(ValueError, create, '', 'boolean')
self.assertRaises(ValueError, create, 'foo', 'boolean')
@@ -43,7 +35,7 @@ class TestBooleanType(TestTypes):
self.assertEqual(create('y', 'boolean'), True)
self.assertNotEqual(create('y', 'boolean'), False)
-class TestList(TestTypes):
+class TestList(TestCase):
def assertListEqual(self, value, valid, sep=None):
obj = create(value, 'list', separator=sep)
self.assertEqual(obj, valid)
diff --git a/meta/lib/oeqa/selftest/cases/oelib/utils.py b/meta/lib/oeqa/selftest/cases/oelib/utils.py
new file mode 100644
index 0000000000..a7214beb4c
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/oelib/utils.py
@@ -0,0 +1,103 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import sys
+from unittest.case import TestCase
+from contextlib import contextmanager
+from io import StringIO
+from oe.utils import packages_filter_out_system, trim_version, multiprocess_launch
+
+class TestPackagesFilterOutSystem(TestCase):
+ def test_filter(self):
+ """
+ Test that oe.utils.packages_filter_out_system works.
+ """
+ try:
+ import bb
+ except ImportError:
+ self.skipTest("Cannot import bb")
+
+ d = bb.data_smart.DataSmart()
+ d.setVar("PN", "foo")
+
+ d.setVar("PACKAGES", "foo foo-doc foo-dev")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, [])
+
+ d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, ["foo-data"])
+
+ d.setVar("PACKAGES", "foo foo-locale-en-gb")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, [])
+
+ d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, ["foo-data"])
+
+
+class TestTrimVersion(TestCase):
+ def test_version_exception(self):
+ with self.assertRaises(TypeError):
+ trim_version(None, 2)
+ with self.assertRaises(TypeError):
+ trim_version((1, 2, 3), 2)
+
+ def test_num_exception(self):
+ with self.assertRaises(ValueError):
+ trim_version("1.2.3", 0)
+ with self.assertRaises(ValueError):
+ trim_version("1.2.3", -1)
+
+ def test_valid(self):
+ self.assertEqual(trim_version("1.2.3", 1), "1")
+ self.assertEqual(trim_version("1.2.3", 2), "1.2")
+ self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
+ self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
+
+
+class TestMultiprocessLaunch(TestCase):
+
+ def test_multiprocesslaunch(self):
+ import bb
+
+ def testfunction(item, d):
+ if item == "2" or item == "1":
+ raise KeyError("Invalid number %s" % item)
+ return "Found %s" % item
+
+ def dummyerror(msg):
+ print("ERROR: %s" % msg)
+ def dummyfatal(msg):
+ print("ERROR: %s" % msg)
+ raise bb.BBHandledException()
+
+ @contextmanager
+ def captured_output():
+ new_out, new_err = StringIO(), StringIO()
+ old_out, old_err = sys.stdout, sys.stderr
+ try:
+ sys.stdout, sys.stderr = new_out, new_err
+ yield sys.stdout, sys.stderr
+ finally:
+ sys.stdout, sys.stderr = old_out, old_err
+
+ d = bb.data_smart.DataSmart()
+ bb.error = dummyerror
+ bb.fatal = dummyfatal
+
+ # Assert the function returns the right results
+ result = multiprocess_launch(testfunction, ["3", "4", "5", "6"], d, extraargs=(d,))
+ self.assertIn("Found 3", result)
+ self.assertIn("Found 4", result)
+ self.assertIn("Found 5", result)
+ self.assertIn("Found 6", result)
+ self.assertEqual(len(result), 4)
+
+ # Assert the function prints exceptions
+ with captured_output() as (out, err):
+ self.assertRaises(bb.BBHandledException, multiprocess_launch, testfunction, ["1", "2", "3", "4", "5", "6"], d, extraargs=(d,))
+ self.assertIn("KeyError: 'Invalid number 1'", out.getvalue())
+ self.assertIn("KeyError: 'Invalid number 2'", out.getvalue())
diff --git a/meta/lib/oeqa/selftest/cases/oescripts.py b/meta/lib/oeqa/selftest/cases/oescripts.py
new file mode 100644
index 0000000000..41cbe04808
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/oescripts.py
@@ -0,0 +1,188 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import shutil
+import unittest
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.selftest.cases.buildhistory import BuildhistoryBase
+from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer
+from oeqa.utils import CommandError
+
+class BuildhistoryDiffTests(BuildhistoryBase):
+
+ def test_buildhistory_diff(self):
+ target = 'xcursor-transparent-theme'
+ self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
+ self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True)
+ result = runCmd("oe-pkgdata-util read-value PKGV %s" % target)
+ pkgv = result.output.rstrip()
+ result = runCmd("buildhistory-diff -p %s" % get_bb_var('BUILDHISTORY_DIR'))
+ expected_endlines = [
+ "xcursor-transparent-theme-dev: RDEPENDS: removed \"xcursor-transparent-theme (['= %s-r1'])\", added \"xcursor-transparent-theme (['= %s-r0'])\"" % (pkgv, pkgv),
+ "xcursor-transparent-theme-staticdev: RDEPENDS: removed \"xcursor-transparent-theme-dev (['= %s-r1'])\", added \"xcursor-transparent-theme-dev (['= %s-r0'])\"" % (pkgv, pkgv)
+ ]
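+ # Each line of buildhistory-diff output must end with one of the
+ # expected endings; the inner for/else fails on any line that does not.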
+ for line in result.output.splitlines():
+ for el in expected_endlines:
+ if line.endswith(el):
+ expected_endlines.remove(el)
+ break
+ else:
+ self.fail('Unexpected line:\n%s\nExpected line endings:\n %s' % (line, '\n '.join(expected_endlines)))
+ if expected_endlines:
+ self.fail('Missing expected line endings:\n %s' % '\n '.join(expected_endlines))
+
+class OEScriptTests(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(OEScriptTests, cls).setUpClass()
+ try:
+ import cairo
+ except ImportError:
+ raise unittest.SkipTest('Python module cairo is not present')
+ bitbake("core-image-minimal -c rootfs -f")
+ cls.tmpdir = get_bb_var('TMPDIR')
+ cls.buildstats = cls.tmpdir + "/buildstats/" + sorted(os.listdir(cls.tmpdir + "/buildstats"))[-1]
+
+ cls.scripts_dir = os.path.join(get_bb_var('COREBASE'), 'scripts')
+
+class OEPybootchartguyTests(OEScriptTests):
+
+ def test_pybootchartguy_help(self):
+ runCmd('%s/pybootchartgui/pybootchartgui.py --help' % self.scripts_dir)
+
+ def test_pybootchartguy_to_generate_build_png_output(self):
+ runCmd('%s/pybootchartgui/pybootchartgui.py %s -o %s/charts -f png' % (self.scripts_dir, self.buildstats, self.tmpdir))
+ self.assertTrue(os.path.exists(self.tmpdir + "/charts.png"))
+
+ def test_pybootchartguy_to_generate_build_svg_output(self):
+ runCmd('%s/pybootchartgui/pybootchartgui.py %s -o %s/charts -f svg' % (self.scripts_dir, self.buildstats, self.tmpdir))
+ self.assertTrue(os.path.exists(self.tmpdir + "/charts.svg"))
+
+ def test_pybootchartguy_to_generate_build_pdf_output(self):
+ runCmd('%s/pybootchartgui/pybootchartgui.py %s -o %s/charts -f pdf' % (self.scripts_dir, self.buildstats, self.tmpdir))
+ self.assertTrue(os.path.exists(self.tmpdir + "/charts.pdf"))
+
+
+class OEGitproxyTests(OESelftestTestCase):
+
+ scripts_dir = os.path.join(get_bb_var('COREBASE'), 'scripts')
+
+ def test_oegitproxy_help(self):
+ try:
+ res = runCmd('%s/oe-git-proxy --help' % self.scripts_dir, assert_error=False)
+ self.fail('oe-git-proxy --help should have exited with a usage error')
+ except CommandError as e:
+ self.assertEqual(2, e.retcode)
+
+ def run_oegitproxy(self, custom_shell=None):
+ os.environ['SOCAT'] = shutil.which("echo")
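+ # Substituting echo for socat makes oe-git-proxy print the socat
+ # arguments it would use, so the PROXY/TCP decision shows up on stdout.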
+ os.environ['ALL_PROXY'] = "https://proxy.example.com:3128"
+ os.environ['NO_PROXY'] = "*.example.com,.no-proxy.org,192.168.42.0/24,127.*.*.*"
+
+ if custom_shell is None:
+ prefix = ''
+ else:
+ prefix = custom_shell + ' '
+
+ # outside, use the proxy
+ res = runCmd('%s%s/oe-git-proxy host.outside-example.com 9418' %
(prefix, self.scripts_dir))
+ self.assertIn('PROXY:', res.output)
+ # match with wildcard suffix
+ res = runCmd('%s%s/oe-git-proxy host.example.com 9418' %
+ (prefix, self.scripts_dir))
+ self.assertIn('TCP:', res.output)
+ # match just suffix
+ res = runCmd('%s%s/oe-git-proxy host.no-proxy.org 9418' %
+ (prefix, self.scripts_dir))
+ self.assertIn('TCP:', res.output)
+ # match IP subnet
+ res = runCmd('%s%s/oe-git-proxy 192.168.42.42 9418' %
+ (prefix, self.scripts_dir))
+ self.assertIn('TCP:', res.output)
+ # match IP wildcard
+ res = runCmd('%s%s/oe-git-proxy 127.1.2.3 9418' %
+ (prefix, self.scripts_dir))
+ self.assertIn('TCP:', res.output)
+
+ # test that * globbing is off
+ os.environ['NO_PROXY'] = "*"
+ res = runCmd('%s%s/oe-git-proxy host.example.com 9418' %
+ (prefix, self.scripts_dir))
+ self.assertIn('TCP:', res.output)
+
+ def test_oegitproxy_proxy(self):
+ self.run_oegitproxy()
+
+ def test_oegitproxy_proxy_dash(self):
+ dash = shutil.which("dash")
+ if dash is None:
+ self.skipTest("No \"dash\" found on test system.")
+ self.run_oegitproxy(custom_shell=dash)
+
+class OeRunNativeTest(OESelftestTestCase):
+ def test_oe_run_native(self):
+ bitbake("qemu-helper-native -c addto_recipe_sysroot")
+ result = runCmd("oe-run-native qemu-helper-native tunctl -h")
+ self.assertIn("Delete: tunctl -d device-name [-f tun-clone-device]", result.output)
+
+class OEListPackageconfigTests(OEScriptTests):
+ # Tests for scripts/contrib/list-packageconfig-flags.py
+ def check_endlines(self, results, expected_endlines):
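+ # Only verify that the expected lines appear; unlike the
+ # buildhistory-diff check above, extra output lines are not an error.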
+ for line in results.output.splitlines():
+ for el in expected_endlines:
+ if line == el:
+ expected_endlines.remove(el)
+ break
+
+ if expected_endlines:
+ self.fail('Missing expected listings:\n %s' % '\n '.join(expected_endlines))
+
+
+ def test_packageconfig_flags_help(self):
+ runCmd('%s/contrib/list-packageconfig-flags.py -h' % self.scripts_dir)
+
+ def test_packageconfig_flags_default(self):
+ results = runCmd('%s/contrib/list-packageconfig-flags.py' % self.scripts_dir)
+ expected_endlines = []
+ expected_endlines.append("RECIPE NAME PACKAGECONFIG FLAGS")
+ expected_endlines.append("pinentry gtk2 libcap ncurses qt secret")
+ expected_endlines.append("tar acl")
+
+ self.check_endlines(results, expected_endlines)
+
+
+ def test_packageconfig_flags_option_flags(self):
+ results = runCmd('%s/contrib/list-packageconfig-flags.py -f' % self.scripts_dir)
+ expected_endlines = []
+ expected_endlines.append("PACKAGECONFIG FLAG RECIPE NAMES")
+ expected_endlines.append("qt nativesdk-pinentry pinentry pinentry-native")
+ expected_endlines.append("secret nativesdk-pinentry pinentry pinentry-native")
+
+ self.check_endlines(results, expected_endlines)
+
+ def test_packageconfig_flags_option_all(self):
+ results = runCmd('%s/contrib/list-packageconfig-flags.py -a' % self.scripts_dir)
+ expected_endlines = []
+ expected_endlines.append("pinentry-1.1.0")
+ expected_endlines.append("PACKAGECONFIG ncurses libcap")
+ expected_endlines.append("PACKAGECONFIG[qt] --enable-pinentry-qt, --disable-pinentry-qt, qtbase-native qtbase")
+ expected_endlines.append("PACKAGECONFIG[gtk2] --enable-pinentry-gtk2, --disable-pinentry-gtk2, gtk+ glib-2.0")
+ expected_endlines.append("PACKAGECONFIG[libcap] --with-libcap, --without-libcap, libcap")
+ expected_endlines.append("PACKAGECONFIG[ncurses] --enable-ncurses --with-ncurses-include-dir=${STAGING_INCDIR}, --disable-ncurses, ncurses")
+ expected_endlines.append("PACKAGECONFIG[secret] --enable-libsecret, --disable-libsecret, libsecret")
+
+ self.check_endlines(results, expected_endlines)
+
+ def test_packageconfig_flags_options_preferred_only(self):
+ results = runCmd('%s/contrib/list-packageconfig-flags.py -p' % self.scripts_dir)
+ expected_endlines = []
+ expected_endlines.append("RECIPE NAME PACKAGECONFIG FLAGS")
+ expected_endlines.append("pinentry gtk2 libcap ncurses qt secret")
+
+ self.check_endlines(results, expected_endlines)
+
diff --git a/meta/lib/oeqa/selftest/cases/package.py b/meta/lib/oeqa/selftest/cases/package.py
new file mode 100644
index 0000000000..291627877e
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/package.py
@@ -0,0 +1,150 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_vars, get_bb_var, runqemu
+import stat
+import subprocess, os
+import oe.path
+import re
+
+class VersionOrdering(OESelftestTestCase):
+ # version1, version2, sort order
+ tests = (
+ ("1.0", "1.0", 0),
+ ("1.0", "2.0", -1),
+ ("2.0", "1.0", 1),
+ ("2.0-rc", "2.0", 1),
+ ("2.0~rc", "2.0", -1),
+ ("1.2rc2", "1.2.0", -1)
+ )
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+
+ # Build the tools we need and populate a sysroot
+ bitbake("dpkg-native opkg-native rpm-native python3-native")
+ bitbake("build-sysroots -c build_native_sysroot")
+
+ # Get the paths so we can point into the sysroot correctly
+ vars = get_bb_vars(["STAGING_DIR", "BUILD_ARCH", "bindir_native", "libdir_native"])
+ cls.staging = oe.path.join(vars["STAGING_DIR"], vars["BUILD_ARCH"])
+ cls.bindir = oe.path.join(cls.staging, vars["bindir_native"])
+ cls.libdir = oe.path.join(cls.staging, vars["libdir_native"])
+
+ def setUpLocal(self):
+ # Just for convenience
+ self.staging = type(self).staging
+ self.bindir = type(self).bindir
+ self.libdir = type(self).libdir
+
+ def test_dpkg(self):
+ for ver1, ver2, sort in self.tests:
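+ # Map the expected sort order (-1/0/1) to the dpkg comparison
+ # operator that should succeed for this version pair.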
+ op = { -1: "<<", 0: "=", 1: ">>" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "dpkg"), "--compare-versions", ver1, op, ver2))
+ self.assertEqual(status, 0, "%s %s %s failed" % (ver1, op, ver2))
+
+ # Now do it again but with incorrect operations
+ op = { -1: ">>", 0: ">>", 1: "<<" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "dpkg"), "--compare-versions", ver1, op, ver2))
+ self.assertNotEqual(status, 0, "%s %s %s should have failed" % (ver1, op, ver2))
+
+ # Now do it again but with incorrect operations
+ op = { -1: "=", 0: "<<", 1: "=" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "dpkg"), "--compare-versions", ver1, op, ver2))
+ self.assertNotEqual(status, 0, "%s %s %s should have failed" % (ver1, op, ver2))
+
+ def test_opkg(self):
+ for ver1, ver2, sort in self.tests:
+ op = { -1: "<<", 0: "=", 1: ">>" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "opkg"), "compare-versions", ver1, op, ver2))
+ self.assertEqual(status, 0, "%s %s %s failed" % (ver1, op, ver2))
+
+ # Now do it again but with incorrect operations
+ op = { -1: ">>", 0: ">>", 1: "<<" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "opkg"), "compare-versions", ver1, op, ver2))
+ self.assertNotEqual(status, 0, "%s %s %s should have failed" % (ver1, op, ver2))
+
+ # Now do it again but with incorrect operations
+ op = { -1: "=", 0: "<<", 1: "=" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "opkg"), "compare-versions", ver1, op, ver2))
+ self.assertNotEqual(status, 0, "%s %s %s should have failed" % (ver1, op, ver2))
+
+ def test_rpm(self):
+ # Need to tell the Python bindings where to find their configuration
+ env = os.environ.copy()
+ env["RPM_CONFIGDIR"] = oe.path.join(self.libdir, "rpm")
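+# functools.total_ordering derives the remaining rich comparisons from
+# __eq__ and __lt__ below, so results sort by (status, test path).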
+
+ for ver1, ver2, sort in self.tests:
+ # The only way to test rpm is via the Python module, so we need to
+ # execute python3-native. labelCompare returns -1/0/1 (like strcmp)
+ # so add 100 and use that as the exit code.
+ command = (oe.path.join(self.bindir, "python3-native", "python3"), "-c",
+ "import sys, rpm; v1=(None, \"%s\", None); v2=(None, \"%s\", None); sys.exit(rpm.labelCompare(v1, v2) + 100)" % (ver1, ver2))
+ status = subprocess.call(command, env=env)
+ self.assertIn(status, (99, 100, 101))
+ self.assertEqual(status - 100, sort, "%s %s (%d) failed" % (ver1, ver2, sort))
+
+class PackageTests(OESelftestTestCase):
+ # Verify that a recipe which sets up hardlinked files has them preserved in the split packages
+ # Also test that file sparseness is preserved
+ def test_preserve_sparse_hardlinks(self):
+ bitbake("selftest-hardlink -c package")
+
+ dest = get_bb_var('PKGDEST', 'selftest-hardlink')
+ bindir = get_bb_var('bindir', 'selftest-hardlink')
+
+ def checkfiles():
+ # Recipe creates 4 hardlinked files, there is a copy in package/ and a copy in packages-split/
+ # so expect 8 in total.
+ self.assertEqual(os.stat(dest + "/selftest-hardlink" + bindir + "/hello1").st_nlink, 8)
+
+ # Test a sparse file remains sparse
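+ # st_blocks == 0 despite a 1MB st_size means no data blocks are
+ # allocated, i.e. the file is still fully sparse.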
+ sparsestat = os.stat(dest + "/selftest-hardlink" + bindir + "/sparsetest")
+ self.assertEqual(sparsestat.st_blocks, 0)
+ self.assertEqual(sparsestat.st_size, 1048576)
+
+ checkfiles()
+
+ # Clean and re-run packaging so the result now definitely comes from sstate, then retest.
+ bitbake("selftest-hardlink -c clean")
+ bitbake("selftest-hardlink -c package")
+
+ checkfiles()
+
+ # Verify that gdb can correctly read symbols from the separated debug files for hardlinked binaries
+ def test_gdb_hardlink_debug(self):
+ features = 'IMAGE_INSTALL_append = " selftest-hardlink"\n'
+ features += 'IMAGE_INSTALL_append = " selftest-hardlink-dbg"\n'
+ features += 'IMAGE_INSTALL_append = " selftest-hardlink-gdb"\n'
+ self.write_config(features)
+ bitbake("core-image-minimal")
+
+ def gdbtest(qemu, binary):
+ """
+ Check that gdb can read the symbols for ``binary`` from its separated debug file
+ """
+ self.logger.info("gdbtest %s" % binary)
+ status, output = qemu.run_serial('/usr/bin/gdb.sh %s' % binary, timeout=60)
+ for l in output.split('\n'):
+ # Check that debugging symbols exist
+ if '(no debugging symbols found)' in l:
+ self.logger.error("No debugging symbols found. GDB result:\n%s" % output)
+ return False
+
+ # Check that the debugging symbols work correctly
+ elif re.match(r"Breakpoint 1.*hello\.c.*4", l):
+ return True
+
+ self.logger.error("GDB result:\n%d: %s", status, output)
+ return False
+
+ with runqemu('core-image-minimal') as qemu:
+ for binary in ['/usr/bin/hello1',
+ '/usr/bin/hello2',
+ '/usr/libexec/hello3',
+ '/usr/libexec/hello4']:
+ if not gdbtest(qemu, binary):
+ self.fail('GDB %s failed' % binary)
diff --git a/meta/lib/oeqa/selftest/pkgdata.py b/meta/lib/oeqa/selftest/cases/pkgdata.py
index 5a63f89ff2..833a1803ba 100644
--- a/meta/lib/oeqa/selftest/pkgdata.py
+++ b/meta/lib/oeqa/selftest/cases/pkgdata.py
@@ -1,81 +1,78 @@
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
import tempfile
-import logging
import fnmatch
-import oeqa.utils.ftools as ftools
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var
-from oeqa.utils.decorators import testcase
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
-class OePkgdataUtilTests(oeSelfTest):
+class OePkgdataUtilTests(OESelftestTestCase):
@classmethod
def setUpClass(cls):
+ super(OePkgdataUtilTests, cls).setUpClass()
# Ensure we have the right data in pkgdata
- logger = logging.getLogger("selftest")
- logger.info('Running bitbake to generate pkgdata')
- bitbake('glibc busybox zlib bash')
+ cls.logger.info('Running bitbake to generate pkgdata')
+ bitbake('target-sdk-provides-dummy -c clean')
+ bitbake('busybox zlib m4')
- @testcase(1203)
def test_lookup_pkg(self):
# Forward tests
- result = runCmd('oe-pkgdata-util lookup-pkg "glibc busybox"')
- self.assertEqual(result.output, 'libc6\nbusybox')
+ result = runCmd('oe-pkgdata-util lookup-pkg "zlib busybox"')
+ self.assertEqual(result.output, 'libz1\nbusybox')
result = runCmd('oe-pkgdata-util lookup-pkg zlib-dev')
self.assertEqual(result.output, 'libz-dev')
result = runCmd('oe-pkgdata-util lookup-pkg nonexistentpkg', ignore_status=True)
self.assertEqual(result.status, 1, "Status different than 1. output: %s" % result.output)
self.assertEqual(result.output, 'ERROR: The following packages could not be found: nonexistentpkg')
# Reverse tests
- result = runCmd('oe-pkgdata-util lookup-pkg -r "libc6 busybox"')
- self.assertEqual(result.output, 'glibc\nbusybox')
+ result = runCmd('oe-pkgdata-util lookup-pkg -r "libz1 busybox"')
+ self.assertEqual(result.output, 'zlib\nbusybox')
result = runCmd('oe-pkgdata-util lookup-pkg -r libz-dev')
self.assertEqual(result.output, 'zlib-dev')
result = runCmd('oe-pkgdata-util lookup-pkg -r nonexistentpkg', ignore_status=True)
self.assertEqual(result.status, 1, "Status different than 1. output: %s" % result.output)
self.assertEqual(result.output, 'ERROR: The following packages could not be found: nonexistentpkg')
- @testcase(1205)
def test_read_value(self):
result = runCmd('oe-pkgdata-util read-value PN libz1')
self.assertEqual(result.output, 'zlib')
- result = runCmd('oe-pkgdata-util read-value PKGSIZE bash')
+ result = runCmd('oe-pkgdata-util read-value PKG libz1')
+ self.assertEqual(result.output, 'libz1')
+ result = runCmd('oe-pkgdata-util read-value PKGSIZE m4')
pkgsize = int(result.output.strip())
self.assertGreater(pkgsize, 1, "Size should be greater than 1. %s" % result.output)
- @testcase(1198)
def test_find_path(self):
- result = runCmd('oe-pkgdata-util find-path /lib/libc.so.6')
- self.assertEqual(result.output, 'glibc: /lib/libc.so.6')
- result = runCmd('oe-pkgdata-util find-path /bin/bash')
- self.assertEqual(result.output, 'bash: /bin/bash')
+ result = runCmd('oe-pkgdata-util find-path /lib/libz.so.1')
+ self.assertEqual(result.output, 'zlib: /lib/libz.so.1')
+ result = runCmd('oe-pkgdata-util find-path /usr/bin/m4')
+ self.assertEqual(result.output, 'm4: /usr/bin/m4')
result = runCmd('oe-pkgdata-util find-path /not/exist', ignore_status=True)
self.assertEqual(result.status, 1, "Status different than 1. output: %s" % result.output)
self.assertEqual(result.output, 'ERROR: Unable to find any package producing path /not/exist')
- @testcase(1204)
def test_lookup_recipe(self):
- result = runCmd('oe-pkgdata-util lookup-recipe "libc6-staticdev busybox"')
- self.assertEqual(result.output, 'glibc\nbusybox')
+ result = runCmd('oe-pkgdata-util lookup-recipe "libz-staticdev busybox"')
+ self.assertEqual(result.output, 'zlib\nbusybox')
result = runCmd('oe-pkgdata-util lookup-recipe libz-dbg')
self.assertEqual(result.output, 'zlib')
result = runCmd('oe-pkgdata-util lookup-recipe nonexistentpkg', ignore_status=True)
self.assertEqual(result.status, 1, "Status different than 1. output: %s" % result.output)
self.assertEqual(result.output, 'ERROR: The following packages could not be found: nonexistentpkg')
- @testcase(1202)
def test_list_pkgs(self):
# No arguments
result = runCmd('oe-pkgdata-util list-pkgs')
pkglist = result.output.split()
- self.assertIn('glibc-utils', pkglist, "Listed packages: %s" % result.output)
+ self.assertIn('zlib', pkglist, "Listed packages: %s" % result.output)
self.assertIn('zlib-dev', pkglist, "Listed packages: %s" % result.output)
# No pkgspec, runtime
result = runCmd('oe-pkgdata-util list-pkgs -r')
pkglist = result.output.split()
- self.assertIn('libc6-utils', pkglist, "Listed packages: %s" % result.output)
self.assertIn('libz-dev', pkglist, "Listed packages: %s" % result.output)
# With recipe specified
result = runCmd('oe-pkgdata-util list-pkgs -p zlib')
@@ -84,7 +81,7 @@ class OePkgdataUtilTests(oeSelfTest):
pkglist.remove('zlib-ptest') # in case ptest is disabled
except ValueError:
pass
- self.assertEqual(pkglist, ['zlib', 'zlib-dbg', 'zlib-dev', 'zlib-doc', 'zlib-staticdev'], "Packages listed after remove: %s" % result.output)
+ self.assertEqual(pkglist, ['zlib', 'zlib-dbg', 'zlib-dev', 'zlib-doc', 'zlib-src', 'zlib-staticdev'], "Packages listed after remove: %s" % result.output)
# With recipe specified, runtime
result = runCmd('oe-pkgdata-util list-pkgs -p zlib -r')
pkglist = sorted(result.output.split())
@@ -92,7 +89,7 @@ class OePkgdataUtilTests(oeSelfTest):
pkglist.remove('libz-ptest') # in case ptest is disabled
except ValueError:
pass
- self.assertEqual(pkglist, ['libz-dbg', 'libz-dev', 'libz-doc', 'libz-staticdev', 'libz1'], "Packages listed after remove: %s" % result.output)
+ self.assertEqual(pkglist, ['libz-dbg', 'libz-dev', 'libz-doc', 'libz-src', 'libz-staticdev', 'libz1'], "Packages listed after remove: %s" % result.output)
# With recipe specified and unpackaged
result = runCmd('oe-pkgdata-util list-pkgs -p zlib -u')
pkglist = sorted(result.output.split())
@@ -110,7 +107,6 @@ class OePkgdataUtilTests(oeSelfTest):
pkglist = sorted(result.output.split())
self.assertEqual(pkglist, ['libz-dbg', 'libz-dev', 'libz-doc'], "Packages listed: %s" % result.output)
- @testcase(1201)
def test_list_pkg_files(self):
def splitoutput(output):
files = {}
@@ -124,10 +120,11 @@ class OePkgdataUtilTests(oeSelfTest):
curpkg = line.split(':')[0]
files[curpkg] = []
return files
- base_libdir = get_bb_var('base_libdir')
- libdir = get_bb_var('libdir')
- includedir = get_bb_var('includedir')
- mandir = get_bb_var('mandir')
+ bb_vars = get_bb_vars(['base_libdir', 'libdir', 'includedir', 'mandir'])
+ base_libdir = bb_vars['base_libdir']
+ libdir = bb_vars['libdir']
+ includedir = bb_vars['includedir']
+ mandir = bb_vars['mandir']
# Test recipe-space package name
result = runCmd('oe-pkgdata-util list-pkg-files zlib-dev zlib-doc')
files = splitoutput(result.output)
@@ -199,17 +196,15 @@ class OePkgdataUtilTests(oeSelfTest):
self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['libz-doc'])
self.assertIn(os.path.join(libdir, 'libz.a'), files['libz-staticdev'])
- @testcase(1200)
def test_glob(self):
tempdir = tempfile.mkdtemp(prefix='pkgdataqa')
self.track_for_cleanup(tempdir)
pkglistfile = os.path.join(tempdir, 'pkglist')
with open(pkglistfile, 'w') as f:
- f.write('libc6\n')
f.write('libz1\n')
f.write('busybox\n')
result = runCmd('oe-pkgdata-util glob %s "*-dev"' % pkglistfile)
- desiredresult = ['libc6-dev', 'libz-dev', 'busybox-dev']
+ desiredresult = ['libz-dev', 'busybox-dev']
self.assertEqual(sorted(result.output.split()), sorted(desiredresult))
# The following should not error (because when we use this during rootfs construction, sometimes the complementary package won't exist)
result = runCmd('oe-pkgdata-util glob %s "*-nonexistent"' % pkglistfile)
@@ -220,7 +215,6 @@ class OePkgdataUtilTests(oeSelfTest):
self.assertNotIn('libz-dev', resultlist)
self.assertNotIn('libz-dbg', resultlist)
- @testcase(1206)
def test_specify_pkgdatadir(self):
- result = runCmd('oe-pkgdata-util -p %s lookup-pkg glibc' % get_bb_var('PKGDATA_DIR'))
- self.assertEqual(result.output, 'libc6')
+ result = runCmd('oe-pkgdata-util -p %s lookup-pkg zlib' % get_bb_var('PKGDATA_DIR'))
+ self.assertEqual(result.output, 'libz1')
diff --git a/meta/lib/oeqa/selftest/prservice.py b/meta/lib/oeqa/selftest/cases/prservice.py
index 1b9a510fd4..fe1f24ea6d 100644
--- a/meta/lib/oeqa/selftest/prservice.py
+++ b/meta/lib/oeqa/selftest/cases/prservice.py
@@ -1,23 +1,28 @@
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
-import logging
import re
import shutil
import datetime
import oeqa.utils.ftools as ftools
-from oeqa.selftest.base import oeSelfTest
+from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var
-from oeqa.utils.decorators import testcase
from oeqa.utils.network import get_free_port
-class BitbakePrTests(oeSelfTest):
-
+class BitbakePrTests(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(BitbakePrTests, cls).setUpClass()
+ cls.pkgdata_dir = get_bb_var('PKGDATA_DIR')
+
def get_pr_version(self, package_name):
- pkgdata_dir = get_bb_var('PKGDATA_DIR')
- package_data_file = os.path.join(pkgdata_dir, 'runtime', package_name)
+ package_data_file = os.path.join(self.pkgdata_dir, 'runtime', package_name)
package_data = ftools.read_file(package_data_file)
- find_pr = re.search("PKGR: r[0-9]+\.([0-9]+)", package_data)
+ find_pr = re.search(r"PKGR: r[0-9]+\.([0-9]+)", package_data)
self.assertTrue(find_pr, "No PKG revision found in %s" % package_data_file)
return int(find_pr.group(1))
@@ -27,7 +32,7 @@ class BitbakePrTests(oeSelfTest):
package_stamps_path = "/".join(stampdata[:-1])
stamps = []
for stamp in os.listdir(package_stamps_path):
- find_stamp = re.match("%s\.%s\.([a-z0-9]{32})" % (re.escape(prefix), recipe_task), stamp)
+ find_stamp = re.match(r"%s\.%s\.([a-z0-9]{32})" % (re.escape(prefix), recipe_task), stamp)
if find_stamp:
stamps.append(find_stamp.group(1))
self.assertFalse(len(stamps) == 0, msg="Could not find stamp for task %s for recipe %s" % (recipe_task, package_name))
@@ -37,11 +42,9 @@ class BitbakePrTests(oeSelfTest):
def increment_package_pr(self, package_name):
inc_data = "do_package_append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now()
self.write_recipeinc(package_name, inc_data)
- bitbake("-ccleansstate %s" % package_name)
res = bitbake(package_name, ignore_status=True)
self.delete_recipeinc(package_name)
self.assertEqual(res.status, 0, msg=res.output)
- self.assertTrue("NOTE: Started PRServer with DBfile" in res.output, msg=res.output)
def config_pr_tests(self, package_name, package_type='rpm', pr_socket='localhost:0'):
config_package_data = 'PACKAGE_CLASSES = "package_%s"' % package_type
@@ -60,7 +63,6 @@ class BitbakePrTests(oeSelfTest):
pr_2 = self.get_pr_version(package_name)
stamp_2 = self.get_task_stamp(package_name, track_task)
- bitbake("-ccleansstate %s" % package_name)
self.assertTrue(pr_2 - pr_1 == 1, "Step between same pkg. revision is greater than 1")
self.assertTrue(stamp_1 != stamp_2, "Different pkg rev. but same stamp: %s" % stamp_1)
@@ -73,6 +75,7 @@ class BitbakePrTests(oeSelfTest):
exported_db_path = os.path.join(self.builddir, 'export.inc')
export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True)
self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output)
+ self.assertTrue(os.path.exists(exported_db_path))
if replace_current_db:
current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3')
@@ -86,42 +89,32 @@ class BitbakePrTests(oeSelfTest):
self.increment_package_pr(package_name)
pr_2 = self.get_pr_version(package_name)
- bitbake("-ccleansstate %s" % package_name)
self.assertTrue(pr_2 - pr_1 == 1, "Step between same pkg. revision is greater than 1")
- @testcase(930)
def test_import_export_replace_db(self):
self.run_test_pr_export_import('m4')
- @testcase(931)
def test_import_export_override_db(self):
self.run_test_pr_export_import('m4', replace_current_db=False)
- @testcase(932)
def test_pr_service_rpm_arch_dep(self):
self.run_test_pr_service('m4', 'rpm', 'do_package')
- @testcase(934)
def test_pr_service_deb_arch_dep(self):
self.run_test_pr_service('m4', 'deb', 'do_package')
- @testcase(933)
def test_pr_service_ipk_arch_dep(self):
self.run_test_pr_service('m4', 'ipk', 'do_package')
- @testcase(935)
def test_pr_service_rpm_arch_indep(self):
self.run_test_pr_service('xcursor-transparent-theme', 'rpm', 'do_package')
- @testcase(937)
def test_pr_service_deb_arch_indep(self):
self.run_test_pr_service('xcursor-transparent-theme', 'deb', 'do_package')
- @testcase(936)
def test_pr_service_ipk_arch_indep(self):
self.run_test_pr_service('xcursor-transparent-theme', 'ipk', 'do_package')
- @testcase(1419)
def test_stopping_prservice_message(self):
port = get_free_port()
diff --git a/meta/lib/oeqa/selftest/recipetool.py b/meta/lib/oeqa/selftest/cases/recipetool.py
index db1f8deeb0..c1562c63b2 100644
--- a/meta/lib/oeqa/selftest/recipetool.py
+++ b/meta/lib/oeqa/selftest/cases/recipetool.py
@@ -1,16 +1,18 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
-import logging
+import shutil
import tempfile
import urllib.parse
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, create_temp_layer
-from oeqa.utils.decorators import testcase
-from oeqa.selftest import devtool
-
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var
+from oeqa.utils.commands import get_bb_vars, create_temp_layer
+from oeqa.selftest.cases import devtool
templayerdir = None
-
def setUpModule():
global templayerdir
templayerdir = tempfile.mkdtemp(prefix='recipetoolqa')
@@ -24,7 +26,9 @@ def tearDownModule():
class RecipetoolBase(devtool.DevtoolBase):
+
def setUpLocal(self):
+ super(RecipetoolBase, self).setUpLocal()
self.templayerdir = templayerdir
self.tempdir = tempfile.mkdtemp(prefix='recipetoolqa')
self.track_for_cleanup(self.tempdir)
@@ -34,6 +38,7 @@ class RecipetoolBase(devtool.DevtoolBase):
def tearDownLocal(self):
runCmd('rm -rf %s/recipes-*' % self.templayerdir)
+ super(RecipetoolBase, self).tearDownLocal()
def _try_recipetool_appendcmd(self, cmd, testrecipe, expectedfiles, expectedlines=None):
result = runCmd(cmd)
@@ -64,17 +69,16 @@ class RecipetoolBase(devtool.DevtoolBase):
class RecipetoolTests(RecipetoolBase):
+
@classmethod
def setUpClass(cls):
+ super(RecipetoolTests, cls).setUpClass()
# Ensure we have the right data in shlibs/pkgdata
- logger = logging.getLogger("selftest")
- logger.info('Running bitbake to generate pkgdata')
+ cls.logger.info('Running bitbake to generate pkgdata')
bitbake('-c packagedata base-files coreutils busybox selftest-recipetool-appendfile')
-
- @classmethod
- def tearDownClass(cls):
- # Shouldn't leave any traces of this artificial recipe behind
- bitbake('-c cleansstate selftest-recipetool-appendfile')
+ bb_vars = get_bb_vars(['COREBASE', 'BBPATH'])
+ cls.corebase = bb_vars['COREBASE']
+ cls.bbpath = bb_vars['BBPATH']
def _try_recipetool_appendfile(self, testrecipe, destfile, newfile, options, expectedlines, expectedfiles):
cmd = 'recipetool appendfile %s %s %s %s' % (self.templayerdir, destfile, newfile, options)
@@ -88,7 +92,6 @@ class RecipetoolTests(RecipetoolBase):
for errorstr in checkerror:
self.assertIn(errorstr, result.output)
- @testcase(1177)
def test_recipetool_appendfile_basic(self):
# Basic test
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -96,21 +99,18 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('base-files', '/etc/motd', self.testfile, '', expectedlines, ['motd'])
self.assertNotIn('WARNING: ', output)
- @testcase(1183)
def test_recipetool_appendfile_invalid(self):
# Test some commands that should error
self._try_recipetool_appendfile_fail('/etc/passwd', self.testfile, ['ERROR: /etc/passwd cannot be handled by this tool', 'useradd', 'extrausers'])
self._try_recipetool_appendfile_fail('/etc/timestamp', self.testfile, ['ERROR: /etc/timestamp cannot be handled by this tool'])
self._try_recipetool_appendfile_fail('/dev/console', self.testfile, ['ERROR: /dev/console cannot be handled by this tool'])
- @testcase(1176)
def test_recipetool_appendfile_alternatives(self):
# Now try with a file we know should be an alternative
# (this is very much a fake example, but one we know is reliably an alternative)
self._try_recipetool_appendfile_fail('/bin/ls', self.testfile, ['ERROR: File /bin/ls is an alternative possibly provided by the following recipes:', 'coreutils', 'busybox'])
- corebase = get_bb_var('COREBASE')
# Need a test file - should be executable
- testfile2 = os.path.join(corebase, 'oe-init-build-env')
+ testfile2 = os.path.join(self.corebase, 'oe-init-build-env')
testfile2name = os.path.basename(testfile2)
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
'\n',
@@ -128,7 +128,6 @@ class RecipetoolTests(RecipetoolBase):
result = runCmd('diff -q %s %s' % (testfile2, copiedfile), ignore_status=True)
self.assertNotEqual(result.status, 0, 'New file should have been copied but was not %s' % result.output)
- @testcase(1178)
def test_recipetool_appendfile_binary(self):
# Try appending a binary file
# /bin/ls can be a symlink to /usr/bin/ls
@@ -137,9 +136,7 @@ class RecipetoolTests(RecipetoolBase):
self.assertIn('WARNING: ', result.output)
self.assertIn('is a binary', result.output)
- @testcase(1173)
def test_recipetool_appendfile_add(self):
- corebase = get_bb_var('COREBASE')
# Try arbitrary file add to a recipe
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
'\n',
@@ -152,7 +149,7 @@ class RecipetoolTests(RecipetoolBase):
self._try_recipetool_appendfile('netbase', '/usr/share/something', self.testfile, '-r netbase', expectedlines, ['testfile'])
# Try adding another file, this time where the source file is executable
# (so we're testing that, plus modifying an existing bbappend)
- testfile2 = os.path.join(corebase, 'oe-init-build-env')
+ testfile2 = os.path.join(self.corebase, 'oe-init-build-env')
testfile2name = os.path.basename(testfile2)
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
'\n',
@@ -167,7 +164,6 @@ class RecipetoolTests(RecipetoolBase):
'}\n']
self._try_recipetool_appendfile('netbase', '/usr/share/scriptname', testfile2, '-r netbase', expectedlines, ['testfile', testfile2name])
- @testcase(1174)
def test_recipetool_appendfile_add_bindir(self):
# Try arbitrary file add to a recipe, this time to a location such that should be installed as executable
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -181,7 +177,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('netbase', '/usr/bin/selftest-recipetool-testbin', self.testfile, '-r netbase', expectedlines, ['testfile'])
self.assertNotIn('WARNING: ', output)
- @testcase(1175)
def test_recipetool_appendfile_add_machine(self):
# Try arbitrary file add to a recipe, this time to a location such that should be installed as executable
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -197,7 +192,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('netbase', '/usr/share/something', self.testfile, '-r netbase -m mymachine', expectedlines, ['mymachine/testfile'])
self.assertNotIn('WARNING: ', output)
- @testcase(1184)
def test_recipetool_appendfile_orig(self):
# A file that's in SRC_URI and in do_install with the same name
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -205,7 +199,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-orig', self.testfile, '', expectedlines, ['selftest-replaceme-orig'])
self.assertNotIn('WARNING: ', output)
- @testcase(1191)
def test_recipetool_appendfile_todir(self):
# A file that's in SRC_URI and in do_install with destination directory rather than file
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -213,7 +206,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-todir', self.testfile, '', expectedlines, ['selftest-replaceme-todir'])
self.assertNotIn('WARNING: ', output)
- @testcase(1187)
def test_recipetool_appendfile_renamed(self):
# A file that's in SRC_URI with a different name to the destination file
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -221,7 +213,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-renamed', self.testfile, '', expectedlines, ['file1'])
self.assertNotIn('WARNING: ', output)
- @testcase(1190)
def test_recipetool_appendfile_subdir(self):
# A file that's in SRC_URI in a subdir
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -235,7 +226,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-subdir', self.testfile, '', expectedlines, ['testfile'])
self.assertNotIn('WARNING: ', output)
- @testcase(1189)
def test_recipetool_appendfile_src_glob(self):
# A file that's in SRC_URI as a glob
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -249,7 +239,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-src-globfile', self.testfile, '', expectedlines, ['testfile'])
self.assertNotIn('WARNING: ', output)
- @testcase(1181)
def test_recipetool_appendfile_inst_glob(self):
# A file that's in do_install as a glob
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -257,7 +246,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-inst-globfile', self.testfile, '', expectedlines, ['selftest-replaceme-inst-globfile'])
self.assertNotIn('WARNING: ', output)
- @testcase(1182)
def test_recipetool_appendfile_inst_todir_glob(self):
# A file that's in do_install as a glob with destination as a directory
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -265,7 +253,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-inst-todir-globfile', self.testfile, '', expectedlines, ['selftest-replaceme-inst-todir-globfile'])
self.assertNotIn('WARNING: ', output)
- @testcase(1185)
def test_recipetool_appendfile_patch(self):
# A file that's added by a patch in SRC_URI
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -284,7 +271,6 @@ class RecipetoolTests(RecipetoolBase):
else:
self.fail('Patch warning not found in output:\n%s' % output)
- @testcase(1188)
def test_recipetool_appendfile_script(self):
# Now, a file that's in SRC_URI but installed by a script (so no mention in do_install)
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -298,7 +284,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-scripted', self.testfile, '', expectedlines, ['testfile'])
self.assertNotIn('WARNING: ', output)
- @testcase(1180)
def test_recipetool_appendfile_inst_func(self):
# A file that's installed from a function called by do_install
expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
@@ -306,7 +291,6 @@ class RecipetoolTests(RecipetoolBase):
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-inst-func', self.testfile, '', expectedlines, ['selftest-replaceme-inst-func'])
self.assertNotIn('WARNING: ', output)
- @testcase(1186)
def test_recipetool_appendfile_postinstall(self):
# A file that's created by a postinstall script (and explicitly mentioned in it)
# First try without specifying recipe
@@ -322,7 +306,6 @@ class RecipetoolTests(RecipetoolBase):
'}\n']
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-postinst', self.testfile, '-r selftest-recipetool-appendfile', expectedlines, ['testfile'])
- @testcase(1179)
def test_recipetool_appendfile_extlayer(self):
# Try creating a bbappend in a layer that's not in bblayers.conf and has a different structure
exttemplayerdir = os.path.join(self.tempdir, 'extlayer')
@@ -338,7 +321,6 @@ class RecipetoolTests(RecipetoolBase):
'metadata/recipes/recipes-test/selftest-recipetool-appendfile/selftest-recipetool-appendfile/selftest-replaceme-orig']
self.assertEqual(sorted(createdfiles), sorted(expectedfiles))
- @testcase(1192)
def test_recipetool_appendfile_wildcard(self):
def try_appendfile_wc(options):
@@ -363,25 +345,25 @@ class RecipetoolTests(RecipetoolBase):
filename = try_appendfile_wc('-w')
self.assertEqual(filename, recipefn.split('_')[0] + '_%.bbappend')
- @testcase(1193)
def test_recipetool_create(self):
# Try adding a recipe
tempsrc = os.path.join(self.tempdir, 'srctree')
os.makedirs(tempsrc)
- recipefile = os.path.join(self.tempdir, 'logrotate_3.8.7.bb')
- srcuri = 'https://fedorahosted.org/releases/l/o/logrotate/logrotate-3.8.7.tar.gz'
+ recipefile = os.path.join(self.tempdir, 'logrotate_3.12.3.bb')
+ srcuri = 'https://github.com/logrotate/logrotate/releases/download/3.12.3/logrotate-3.12.3.tar.xz'
result = runCmd('recipetool create -o %s %s -x %s' % (recipefile, srcuri, tempsrc))
self.assertTrue(os.path.isfile(recipefile))
checkvars = {}
checkvars['LICENSE'] = 'GPLv2'
- checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING;md5=18810669f13b87348459e611d31ab760'
- checkvars['SRC_URI'] = 'https://fedorahosted.org/releases/l/o/logrotate/logrotate-${PV}.tar.gz'
- checkvars['SRC_URI[md5sum]'] = '99e08503ef24c3e2e3ff74cc5f3be213'
- checkvars['SRC_URI[sha256sum]'] = 'f6ba691f40e30e640efa2752c1f9499a3f9738257660994de70a45fe00d12b64'
+ checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263'
+ checkvars['SRC_URI'] = 'https://github.com/logrotate/logrotate/releases/download/${PV}/logrotate-${PV}.tar.xz'
+ checkvars['SRC_URI[md5sum]'] = 'a560c57fac87c45b2fc17406cdf79288'
+ checkvars['SRC_URI[sha256sum]'] = '2e6a401cac9024db2288297e3be1a8ab60e7401ba8e91225218aaf4a27e82a07'
self._test_recipe_contents(recipefile, checkvars, [])
- @testcase(1194)
def test_recipetool_create_git(self):
+ if 'x11' not in get_bb_var('DISTRO_FEATURES'):
+ self.skipTest('Test requires x11 as distro feature')
# Ensure we have the right data in shlibs/pkgdata
bitbake('libpng pango libx11 libxext jpeg libcheck')
# Try adding a recipe
@@ -401,7 +383,6 @@ class RecipetoolTests(RecipetoolBase):
inherits = ['autotools', 'pkgconfig']
self._test_recipe_contents(recipefile, checkvars, inherits)
- @testcase(1392)
def test_recipetool_create_simple(self):
# Try adding a recipe
temprecipe = os.path.join(self.tempdir, 'recipe')
@@ -424,22 +405,20 @@ class RecipetoolTests(RecipetoolBase):
inherits = ['autotools']
self._test_recipe_contents(os.path.join(temprecipe, dirlist[0]), checkvars, inherits)
- @testcase(1418)
def test_recipetool_create_cmake(self):
- # Try adding a recipe
temprecipe = os.path.join(self.tempdir, 'recipe')
os.makedirs(temprecipe)
- recipefile = os.path.join(temprecipe, 'navit_0.5.0.bb')
- srcuri = 'http://downloads.sourceforge.net/project/navit/v0.5.0/navit-0.5.0.tar.gz'
+ recipefile = os.path.join(temprecipe, 'taglib_1.11.1.bb')
+ srcuri = 'http://taglib.github.io/releases/taglib-1.11.1.tar.gz'
result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
self.assertTrue(os.path.isfile(recipefile))
checkvars = {}
- checkvars['LICENSE'] = set(['Unknown', 'GPLv2', 'LGPLv2'])
- checkvars['SRC_URI'] = 'http://downloads.sourceforge.net/project/navit/v${PV}/navit-${PV}.tar.gz'
- checkvars['SRC_URI[md5sum]'] = '242f398e979a6b8c0f3c802b63435b68'
- checkvars['SRC_URI[sha256sum]'] = '13353481d7fc01a4f64e385dda460b51496366bba0fd2cc85a89a0747910e94d'
- checkvars['DEPENDS'] = set(['freetype', 'zlib', 'openssl', 'glib-2.0', 'virtual/libgl', 'virtual/egl', 'gtk+', 'libpng', 'libsdl', 'freeglut', 'dbus-glib'])
- inherits = ['cmake', 'python-dir', 'gettext', 'pkgconfig']
+ checkvars['LICENSE'] = set(['LGPLv2.1', 'MPL-1.1'])
+ checkvars['SRC_URI'] = 'http://taglib.github.io/releases/taglib-${PV}.tar.gz'
+ checkvars['SRC_URI[md5sum]'] = 'cee7be0ccfc892fa433d6c837df9522a'
+ checkvars['SRC_URI[sha256sum]'] = 'b6d1a5a610aae6ff39d93de5efd0fdc787aa9e9dc1e7026fa4c961b26563526b'
+ checkvars['DEPENDS'] = set(['boost', 'zlib'])
+ inherits = ['cmake']
self._test_recipe_contents(recipefile, checkvars, inherits)
def test_recipetool_create_github(self):
@@ -447,13 +426,51 @@ class RecipetoolTests(RecipetoolBase):
temprecipe = os.path.join(self.tempdir, 'recipe')
os.makedirs(temprecipe)
recipefile = os.path.join(temprecipe, 'meson_git.bb')
- srcuri = 'https://github.com/mesonbuild/meson'
- result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
+ srcuri = 'https://github.com/mesonbuild/meson;rev=0.32.0'
+ result = runCmd(['recipetool', 'create', '-o', temprecipe, srcuri])
self.assertTrue(os.path.isfile(recipefile))
checkvars = {}
checkvars['LICENSE'] = set(['Apache-2.0'])
checkvars['SRC_URI'] = 'git://github.com/mesonbuild/meson;protocol=https'
- inherits = ['setuptools']
+ inherits = ['setuptools3']
+ self._test_recipe_contents(recipefile, checkvars, inherits)
+
+ def test_recipetool_create_python3_setuptools(self):
+ # Test creating python3 package from tarball (using setuptools3 class)
+ temprecipe = os.path.join(self.tempdir, 'recipe')
+ os.makedirs(temprecipe)
+ pn = 'python-magic'
+ pv = '0.4.15'
+ recipefile = os.path.join(temprecipe, '%s_%s.bb' % (pn, pv))
+ srcuri = 'https://files.pythonhosted.org/packages/84/30/80932401906eaf787f2e9bd86dc458f1d2e75b064b4c187341f29516945c/python-magic-%s.tar.gz' % pv
+ result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
+ self.assertTrue(os.path.isfile(recipefile))
+ checkvars = {}
+ checkvars['LICENSE'] = set(['MIT'])
+ checkvars['LIC_FILES_CHKSUM'] = 'file://LICENSE;md5=16a934f165e8c3245f241e77d401bb88'
+ checkvars['SRC_URI'] = 'https://files.pythonhosted.org/packages/84/30/80932401906eaf787f2e9bd86dc458f1d2e75b064b4c187341f29516945c/python-magic-${PV}.tar.gz'
+ checkvars['SRC_URI[md5sum]'] = 'e384c95a47218f66c6501cd6dd45ff59'
+ checkvars['SRC_URI[sha256sum]'] = 'f3765c0f582d2dfc72c15f3b5a82aecfae9498bd29ca840d72f37d7bd38bfcd5'
+ inherits = ['setuptools3']
+ self._test_recipe_contents(recipefile, checkvars, inherits)
+
+ def test_recipetool_create_python3_distutils(self):
+ # Test creating python3 package from tarball (using distutils3 class)
+ temprecipe = os.path.join(self.tempdir, 'recipe')
+ os.makedirs(temprecipe)
+ pn = 'docutils'
+ pv = '0.14'
+ recipefile = os.path.join(temprecipe, '%s_%s.bb' % (pn, pv))
+ srcuri = 'https://files.pythonhosted.org/packages/84/f4/5771e41fdf52aabebbadecc9381d11dea0fa34e4759b4071244fa094804c/docutils-%s.tar.gz' % pv
+ result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
+ self.assertTrue(os.path.isfile(recipefile))
+ checkvars = {}
+ checkvars['LICENSE'] = set(['PSF', '&', 'BSD', 'GPL'])
+ checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING.txt;md5=35a23d42b615470583563132872c97d6'
+ checkvars['SRC_URI'] = 'https://files.pythonhosted.org/packages/84/f4/5771e41fdf52aabebbadecc9381d11dea0fa34e4759b4071244fa094804c/docutils-${PV}.tar.gz'
+ checkvars['SRC_URI[md5sum]'] = 'c53768d63db3873b7d452833553469de'
+ checkvars['SRC_URI[sha256sum]'] = '51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274'
+ inherits = ['distutils3']
self._test_recipe_contents(recipefile, checkvars, inherits)
def test_recipetool_create_github_tarball(self):
@@ -468,7 +485,7 @@ class RecipetoolTests(RecipetoolBase):
checkvars = {}
checkvars['LICENSE'] = set(['Apache-2.0'])
checkvars['SRC_URI'] = 'https://github.com/mesonbuild/meson/releases/download/${PV}/meson-${PV}.tar.gz'
- inherits = ['setuptools']
+ inherits = ['setuptools3']
self._test_recipe_contents(recipefile, checkvars, inherits)
def test_recipetool_create_git_http(self):
@@ -485,6 +502,46 @@ class RecipetoolTests(RecipetoolBase):
inherits = ['pkgconfig', 'autotools']
self._test_recipe_contents(recipefile, checkvars, inherits)
+ def _copy_file_with_cleanup(self, srcfile, basedstdir, *paths):
+ dstdir = basedstdir
+ self.assertTrue(os.path.exists(dstdir))
+ for p in paths:
+ dstdir = os.path.join(dstdir, p)
+ if not os.path.exists(dstdir):
+ os.makedirs(dstdir)
+ self.track_for_cleanup(dstdir)
+ dstfile = os.path.join(dstdir, os.path.basename(srcfile))
+ if srcfile != dstfile:
+ shutil.copy(srcfile, dstfile)
+ self.track_for_cleanup(dstfile)
+
+ def test_recipetool_load_plugin(self):
+ """Test that recipetool loads only the first found plugin in BBPATH."""
+
+ recipetool = runCmd("which recipetool")
+ fromname = runCmd("recipetool --quiet pluginfile")
+ srcfile = fromname.output
+ searchpath = self.bbpath.split(':') + [os.path.dirname(recipetool.output)]
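+ # Copy the plugin into every search location; recipetool should still
+ # load only the first instance found along BBPATH.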
+ plugincontent = []
+ with open(srcfile) as fh:
+ plugincontent = fh.readlines()
+ try:
+ self.assertIn('meta-selftest', srcfile, 'wrong bbpath plugin found')
+ for path in searchpath:
+ self._copy_file_with_cleanup(srcfile, path, 'lib', 'recipetool')
+ result = runCmd("recipetool --quiet count")
+ self.assertEqual(result.output, '1')
+ result = runCmd("recipetool --quiet multiloaded")
+ self.assertEqual(result.output, "no")
+ for path in searchpath:
+ result = runCmd("recipetool --quiet bbdir")
+ self.assertEqual(result.output, path)
+ os.unlink(os.path.join(result.output, 'lib', 'recipetool', 'bbpath.py'))
+ finally:
+ with open(srcfile, 'w') as fh:
+ fh.writelines(plugincontent)
+
+
class RecipetoolAppendsrcBase(RecipetoolBase):
def _try_recipetool_appendsrcfile(self, testrecipe, newfile, destfile, options, expectedlines, expectedfiles):
cmd = 'recipetool appendsrcfile %s %s %s %s %s' % (options, self.templayerdir, testrecipe, newfile, destfile)
@@ -560,27 +617,28 @@ class RecipetoolAppendsrcBase(RecipetoolBase):
self._try_recipetool_appendsrcfiles(testrecipe, newfiles, expectedfiles=expectedfiles, destdir=destdir, options=options)
- src_uri = get_bb_var('SRC_URI', testrecipe).split()
+ bb_vars = get_bb_vars(['SRC_URI', 'FILE', 'FILESEXTRAPATHS'], testrecipe)
+ src_uri = bb_vars['SRC_URI'].split()
for f in expectedfiles:
if destdir:
self.assertIn('file://%s;subdir=%s' % (f, destdir), src_uri)
else:
self.assertIn('file://%s' % f, src_uri)
- recipefile = get_bb_var('FILE', testrecipe)
+ recipefile = bb_vars['FILE']
bbappendfile = self._check_bbappend(testrecipe, recipefile, self.templayerdir)
filesdir = os.path.join(os.path.dirname(bbappendfile), testrecipe)
- filesextrapaths = get_bb_var('FILESEXTRAPATHS', testrecipe).split(':')
+ filesextrapaths = bb_vars['FILESEXTRAPATHS'].split(':')
self.assertIn(filesdir, filesextrapaths)
+
+
class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
- @testcase(1273)
def test_recipetool_appendsrcfile_basic(self):
self._test_appendsrcfile('base-files', 'a-file')
- @testcase(1274)
def test_recipetool_appendsrcfile_basic_wildcard(self):
testrecipe = 'base-files'
self._test_appendsrcfile(testrecipe, 'a-file', options='-w')
@@ -588,30 +646,26 @@ class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
bbappendfile = self._check_bbappend(testrecipe, recipefile, self.templayerdir)
self.assertEqual(os.path.basename(bbappendfile), '%s_%%.bbappend' % testrecipe)
- @testcase(1281)
def test_recipetool_appendsrcfile_subdir_basic(self):
self._test_appendsrcfile('base-files', 'a-file', 'tmp')
- @testcase(1282)
def test_recipetool_appendsrcfile_subdir_basic_dirdest(self):
self._test_appendsrcfile('base-files', destdir='tmp')
- @testcase(1280)
def test_recipetool_appendsrcfile_srcdir_basic(self):
testrecipe = 'bash'
- srcdir = get_bb_var('S', testrecipe)
- workdir = get_bb_var('WORKDIR', testrecipe)
+ bb_vars = get_bb_vars(['S', 'WORKDIR'], testrecipe)
+ srcdir = bb_vars['S']
+ workdir = bb_vars['WORKDIR']
subdir = os.path.relpath(srcdir, workdir)
self._test_appendsrcfile(testrecipe, 'a-file', srcdir=subdir)
- @testcase(1275)
def test_recipetool_appendsrcfile_existing_in_src_uri(self):
testrecipe = 'base-files'
filepath = self._get_first_file_uri(testrecipe)
self.assertTrue(filepath, 'Unable to test, no file:// uri found in SRC_URI for %s' % testrecipe)
self._test_appendsrcfile(testrecipe, filepath, has_src_uri=False)
- @testcase(1276)
def test_recipetool_appendsrcfile_existing_in_src_uri_diff_params(self):
testrecipe = 'base-files'
subdir = 'tmp'
@@ -621,19 +675,20 @@ class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
output = self._test_appendsrcfile(testrecipe, filepath, subdir, has_src_uri=False)
self.assertTrue(any('with different parameters' in l for l in output))
- @testcase(1277)
def test_recipetool_appendsrcfile_replace_file_srcdir(self):
testrecipe = 'bash'
filepath = 'Makefile.in'
- srcdir = get_bb_var('S', testrecipe)
- workdir = get_bb_var('WORKDIR', testrecipe)
+ bb_vars = get_bb_vars(['S', 'WORKDIR'], testrecipe)
+ srcdir = bb_vars['S']
+ workdir = bb_vars['WORKDIR']
subdir = os.path.relpath(srcdir, workdir)
self._test_appendsrcfile(testrecipe, filepath, srcdir=subdir)
bitbake('%s:do_unpack' % testrecipe)
- self.assertEqual(open(self.testfile, 'r').read(), open(os.path.join(srcdir, filepath), 'r').read())
+ with open(self.testfile, 'r') as testfile:
+ with open(os.path.join(srcdir, filepath), 'r') as makefilein:
+ self.assertEqual(testfile.read(), makefilein.read())
- @testcase(1278)
def test_recipetool_appendsrcfiles_basic(self, destdir=None):
newfiles = [self.testfile]
for i in range(1, 5):
@@ -643,6 +698,5 @@ class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
newfiles.append(testfile)
self._test_appendsrcfiles('gcc', newfiles, destdir=destdir, options='-W')
- @testcase(1279)
def test_recipetool_appendsrcfiles_basic_subdir(self):
self.test_recipetool_appendsrcfiles_basic(destdir='testdir')
diff --git a/meta/lib/oeqa/selftest/cases/recipeutils.py b/meta/lib/oeqa/selftest/cases/recipeutils.py
new file mode 100644
index 0000000000..747870383b
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/recipeutils.py
@@ -0,0 +1,140 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import re
+import time
+import logging
+import bb.tinfoil
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, get_test_layer
+
+
+def setUpModule():
+ global tinfoil
+ global metaselftestpath
+ metaselftestpath = get_test_layer()
+ tinfoil = bb.tinfoil.Tinfoil(tracking=True)
+ tinfoil.prepare(config_only=False, quiet=2)
+
+
+def tearDownModule():
+ tinfoil.shutdown()
+
+
+class RecipeUtilsTests(OESelftestTestCase):
+ """ Tests for the recipeutils module functions """
+
+ def test_patch_recipe_varflag(self):
+ import oe.recipeutils
+ rd = tinfoil.parse_recipe('python3-async-test')
+ vals = {'SRC_URI[md5sum]': 'aaaaaa', 'LICENSE': 'something'}
+ patches = oe.recipeutils.patch_recipe(rd, rd.getVar('FILE'), vals, patch=True, relpath=metaselftestpath)
+
+ expected_patch = """
+--- a/recipes-devtools/python/python-async-test.inc
++++ b/recipes-devtools/python/python-async-test.inc
+@@ -1,14 +1,14 @@
+ SUMMARY = "Python framework to process interdependent tasks in a pool of workers"
+ HOMEPAGE = "http://github.com/gitpython-developers/async"
+ SECTION = "devel/python"
+-LICENSE = "BSD"
++LICENSE = "something"
+ LIC_FILES_CHKSUM = "file://PKG-INFO;beginline=8;endline=8;md5=88df8e78b9edfd744953862179f2d14e"
+
+ inherit pypi
+
+ PYPI_PACKAGE = "async"
+
+-SRC_URI[md5sum] = "9b06b5997de2154f3bc0273f80bcef6b"
++SRC_URI[md5sum] = "aaaaaa"
+ SRC_URI[sha256sum] = "ac6894d876e45878faae493b0cf61d0e28ec417334448ac0a6ea2229d8343051"
+
+ RDEPENDS_${PN} += "${PYTHON_PN}-threading"
+"""
+ patchlines = []
+ for f in patches:
+ for line in f:
+ patchlines.append(line)
+ self.maxDiff = None
+ self.assertEqual(''.join(patchlines).strip(), expected_patch.strip())
+
+
+ def test_patch_recipe_singleappend(self):
+ import oe.recipeutils
+ rd = tinfoil.parse_recipe('recipeutils-test')
+ val = rd.getVar('SRC_URI', False).split()
+ del val[1]
+ val = ' '.join(val)
+ vals = {'SRC_URI': val}
+ patches = oe.recipeutils.patch_recipe(rd, rd.getVar('FILE'), vals, patch=True, relpath=metaselftestpath)
+
+ expected_patch = """
+--- a/recipes-test/recipeutils/recipeutils-test_1.2.bb
++++ b/recipes-test/recipeutils/recipeutils-test_1.2.bb
+@@ -8,6 +8,4 @@
+
+ BBCLASSEXTEND = "native nativesdk"
+
+-SRC_URI += "file://somefile"
+-
+ SRC_URI_append = " file://anotherfile"
+"""
+ patchlines = []
+ for f in patches:
+ for line in f:
+ patchlines.append(line)
+ self.assertEqual(''.join(patchlines).strip(), expected_patch.strip())
+
+
+ def test_patch_recipe_appends(self):
+ import oe.recipeutils
+ rd = tinfoil.parse_recipe('recipeutils-test')
+ val = rd.getVar('SRC_URI', False).split()
+ vals = {'SRC_URI': val[0]}
+ patches = oe.recipeutils.patch_recipe(rd, rd.getVar('FILE'), vals, patch=True, relpath=metaselftestpath)
+
+ expected_patch = """
+--- a/recipes-test/recipeutils/recipeutils-test_1.2.bb
++++ b/recipes-test/recipeutils/recipeutils-test_1.2.bb
+@@ -8,6 +8,3 @@
+
+ BBCLASSEXTEND = "native nativesdk"
+
+-SRC_URI += "file://somefile"
+-
+-SRC_URI_append = " file://anotherfile"
+"""
+ patchlines = []
+ for f in patches:
+ for line in f:
+ patchlines.append(line)
+ self.assertEqual(''.join(patchlines).strip(), expected_patch.strip())
+
+
+ def test_validate_pn(self):
+ import oe.recipeutils
+ expected_results = {
+ 'test': '',
+ 'glib-2.0': '',
+ 'gtk+': '',
+ 'forcevariable': 'reserved',
+ 'pn-something': 'reserved',
+ 'test.bb': 'file',
+ 'test_one': 'character',
+ 'test!': 'character',
+ }
+
+ for pn, expected in expected_results.items():
+ result = oe.recipeutils.validate_pn(pn)
+ if expected:
+ self.assertIn(expected, result)
+ else:
+ self.assertEqual(result, '')
+
+ def test_split_var_value(self):
+ import oe.recipeutils
+ res = oe.recipeutils.split_var_value('test.1 test.2 ${@call_function("hi there world", false)} test.4')
+ self.assertEqual(res, ['test.1', 'test.2', '${@call_function("hi there world", false)}', 'test.4'])
diff --git a/meta/lib/oeqa/selftest/cases/reproducible.py b/meta/lib/oeqa/selftest/cases/reproducible.py
new file mode 100644
index 0000000000..db538a4f89
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/reproducible.py
@@ -0,0 +1,202 @@
+#
+# SPDX-License-Identifier: MIT
+#
+# Copyright 2019 by Garmin Ltd. or its subsidiaries
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+import bb.utils
+import functools
+import multiprocessing
+import textwrap
+import json
+import unittest
+import tempfile
+import shutil
+import stat
+import os
+
+MISSING = 'MISSING'
+DIFFERENT = 'DIFFERENT'
+SAME = 'SAME'
+
+@functools.total_ordering
+class CompareResult(object):
+ def __init__(self):
+ self.reference = None
+ self.test = None
+ self.status = 'UNKNOWN'
+
+ def __eq__(self, other):
+ return (self.status, self.test) == (other.status, other.test)
+
+ def __lt__(self, other):
+ return (self.status, self.test) < (other.status, other.test)
+
+class PackageCompareResults(object):
+ def __init__(self):
+ self.total = []
+ self.missing = []
+ self.different = []
+ self.same = []
+
+ def add_result(self, r):
+ self.total.append(r)
+ if r.status == MISSING:
+ self.missing.append(r)
+ elif r.status == DIFFERENT:
+ self.different.append(r)
+ else:
+ self.same.append(r)
+
+ def sort(self):
+ self.total.sort()
+ self.missing.sort()
+ self.different.sort()
+ self.same.sort()
+
+ def __str__(self):
+ return 'same=%i different=%i missing=%i total=%i' % (len(self.same), len(self.different), len(self.missing), len(self.total))
+
+def compare_file(reference, test, diffutils_sysroot):
+ result = CompareResult()
+ result.reference = reference
+ result.test = test
+
+ if not os.path.exists(reference):
+ result.status = MISSING
+ return result
+
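+ # cmp --quiet prints nothing and exits 0 when the files are identical,
+ # non-zero otherwise, so the exit status alone classifies the pair.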
+ r = runCmd(['cmp', '--quiet', reference, test], native_sysroot=diffutils_sysroot, ignore_status=True)
+
+ if r.status:
+ result.status = DIFFERENT
+ return result
+
+ result.status = SAME
+ return result
+
+class ReproducibleTests(OESelftestTestCase):
+ package_classes = ['deb', 'ipk']
+ images = ['core-image-minimal', 'core-image-sato', 'core-image-full-cmdline']
+ save_results = False
+
+ def setUpLocal(self):
+ super().setUpLocal()
+ needed_vars = ['TOPDIR', 'TARGET_PREFIX', 'BB_NUMBER_THREADS']
+ bb_vars = get_bb_vars(needed_vars)
+ for v in needed_vars:
+ setattr(self, v.lower(), bb_vars[v])
+
+ self.extrasresults = {}
+ self.extrasresults.setdefault('reproducible.rawlogs', {})['log'] = ''
+ self.extrasresults.setdefault('reproducible', {}).setdefault('files', {})
+
+ def append_to_log(self, msg):
+ self.extrasresults['reproducible.rawlogs']['log'] += msg
+
+ def compare_packages(self, reference_dir, test_dir, diffutils_sysroot):
+ result = PackageCompareResults()
+
+ old_cwd = os.getcwd()
+ try:
+ file_result = {}
+ os.chdir(test_dir)
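+ # Fan the per-file comparisons out over a process pool: apply_async()
+ # returns immediately, and the get() calls below block until each
+ # worker has produced its CompareResult.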
+ with multiprocessing.Pool(processes=int(self.bb_number_threads or 0)) as p:
+ for root, dirs, files in os.walk('.'):
+ async_result = []
+ for f in files:
+ reference_path = os.path.join(reference_dir, root, f)
+ test_path = os.path.join(test_dir, root, f)
+ async_result.append(p.apply_async(compare_file, (reference_path, test_path, diffutils_sysroot)))
+
+ for a in async_result:
+ result.add_result(a.get())
+
+ finally:
+ os.chdir(old_cwd)
+
+ result.sort()
+ return result
+
+ def write_package_list(self, package_class, name, packages):
+ self.extrasresults['reproducible']['files'].setdefault(package_class, {})[name] = [
+ {'reference': p.reference, 'test': p.test} for p in packages]
+
+ def copy_file(self, source, dest):
+ bb.utils.mkdirhier(os.path.dirname(dest))
+ shutil.copyfile(source, dest)
+
+ def test_reproducible_builds(self):
+ capture_vars = ['DEPLOY_DIR_' + c.upper() for c in self.package_classes]
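+ # With the default package_classes this captures DEPLOY_DIR_DEB and
+ # DEPLOY_DIR_IPK, i.e. the package feeds compared between the two builds.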
+
+ if self.save_results:
+ save_dir = tempfile.mkdtemp(prefix='oe-reproducible-')
+ os.chmod(save_dir, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+ self.logger.info('Non-reproducible packages will be copied to %s', save_dir)
+
+ # Build native utilities
+ self.write_config('')
+ bitbake("diffutils-native -c addto_recipe_sysroot")
+ diffutils_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "diffutils-native")
+
+ # Reproducible builds should not pull from sstate or mirrors, but
+ # sharing DL_DIR is fine
+ common_config = textwrap.dedent('''\
+ INHERIT += "reproducible_build"
+ PACKAGE_CLASSES = "%s"
+ SSTATE_DIR = "${TMPDIR}/sstate"
+ ''') % (' '.join('package_%s' % c for c in self.package_classes))
+
+ # Perform a build.
+ reproducibleA_tmp = os.path.join(self.topdir, 'reproducibleA', 'tmp')
+ if os.path.exists(reproducibleA_tmp):
+ bb.utils.remove(reproducibleA_tmp, recurse=True)
+
+ self.write_config((textwrap.dedent('''\
+ TMPDIR = "%s"
+ ''') % reproducibleA_tmp) + common_config)
+ vars_A = get_bb_vars(capture_vars)
+ bitbake(' '.join(self.images))
+
+ # Perform another build.
+ reproducibleB_tmp = os.path.join(self.topdir, 'reproducibleB', 'tmp')
+ if os.path.exists(reproducibleB_tmp):
+ bb.utils.remove(reproducibleB_tmp, recurse=True)
+
+ self.write_config((textwrap.dedent('''\
+ SSTATE_MIRRORS = ""
+ TMPDIR = "%s"
+ ''') % reproducibleB_tmp) + common_config)
+ vars_B = get_bb_vars(capture_vars)
+ bitbake(' '.join(self.images))
+
+ # NOTE: The temp directories from the reproducible build are purposely
+ # kept after the build so they can be diffed for debugging.
+
+ for c in self.package_classes:
+ with self.subTest(package_class=c):
+ package_class = 'package_' + c
+
+ deploy_A = vars_A['DEPLOY_DIR_' + c.upper()]
+ deploy_B = vars_B['DEPLOY_DIR_' + c.upper()]
+
+ result = self.compare_packages(deploy_A, deploy_B, diffutils_sysroot)
+
+ self.logger.info('Reproducibility summary for %s: %s' % (c, result))
+
+ self.append_to_log('\n'.join("%s: %s" % (r.status, r.test) for r in result.total))
+
+ self.write_package_list(package_class, 'missing', result.missing)
+ self.write_package_list(package_class, 'different', result.different)
+ self.write_package_list(package_class, 'same', result.same)
+
+ if self.save_results:
+ for d in result.different:
+ self.copy_file(d.reference, '/'.join([save_dir, d.reference]))
+ self.copy_file(d.test, '/'.join([save_dir, d.test]))
+
+ if result.missing or result.different:
+ self.fail("The following %s packages are missing or different: %s" %
+ (c, ' '.join(r.test for r in (result.missing + result.different))))
+
diff --git a/meta/lib/oeqa/selftest/cases/resulttooltests.py b/meta/lib/oeqa/selftest/cases/resulttooltests.py
new file mode 100644
index 0000000000..dac5c46801
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/resulttooltests.py
@@ -0,0 +1,98 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
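+# resulttool lives under scripts/lib, which is not on sys.path by default,
+# so locate the repository root relative to this file and add it manually.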
+basepath = os.path.abspath(os.path.dirname(__file__) + '/../../../../../')
+lib_path = basepath + '/scripts/lib'
+sys.path = sys.path + [lib_path]
+from resulttool.report import ResultsTextReport
+from resulttool import regression as regression
+from resulttool import resultutils as resultutils
+from oeqa.selftest.case import OESelftestTestCase
+
+class ResultToolTests(OESelftestTestCase):
+ base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86"},
+ 'result': {}},
+ 'base_result2': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86-64"},
+ 'result': {}}}
+ target_results_data = {'target_result1': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86"},
+ 'result': {}},
+ 'target_result2': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86"},
+ 'result': {}},
+ 'target_result3': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86-64"},
+ 'result': {}}}
+
+ def test_report_can_aggregate_test_result(self):
+ result_data = {'result': {'test1': {'status': 'PASSED'},
+ 'test2': {'status': 'PASSED'},
+ 'test3': {'status': 'FAILED'},
+ 'test4': {'status': 'ERROR'},
+ 'test5': {'status': 'SKIPPED'}}}
+ report = ResultsTextReport()
+ result_report = report.get_aggregated_test_result(None, result_data, 'DummyMachine')
+ self.assertTrue(result_report['passed'] == 2, msg="Passed count not correct:%s" % result_report['passed'])
+ self.assertTrue(result_report['failed'] == 2, msg="Failed count not correct:%s" % result_report['failed'])
+ self.assertTrue(result_report['skipped'] == 1, msg="Skipped count not correct:%s" % result_report['skipped'])
+
+ def test_regression_can_get_regression_base_target_pair(self):
+
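+ # append_resultsdata() buckets entries under a key built from the result
+ # configuration (TEST_TYPE/DISTRO/MACHINE/IMAGE_BASENAME here), so base
+ # and target results with matching configurations end up in the same
+ # bucket, as the assertions below rely on.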
+ results = {}
+ resultutils.append_resultsdata(results, ResultToolTests.base_results_data)
+ resultutils.append_resultsdata(results, ResultToolTests.target_results_data)
+ self.assertTrue('target_result1' in results['runtime/mydistro/qemux86/image'], msg="Pair not correct:%s" % results)
+ self.assertTrue('target_result3' in results['runtime/mydistro/qemux86-64/image'], msg="Pair not correct:%s" % results)
+
+ def test_regression_can_get_regression_result(self):
+ base_result_data = {'result': {'test1': {'status': 'PASSED'},
+ 'test2': {'status': 'PASSED'},
+ 'test3': {'status': 'FAILED'},
+ 'test4': {'status': 'ERROR'},
+ 'test5': {'status': 'SKIPPED'}}}
+ target_result_data = {'result': {'test1': {'status': 'PASSED'},
+ 'test2': {'status': 'FAILED'},
+ 'test3': {'status': 'PASSED'},
+ 'test4': {'status': 'ERROR'},
+ 'test5': {'status': 'SKIPPED'}}}
+ result, text = regression.compare_result(self.logger, "BaseTestRunName", "TargetTestRunName", base_result_data, target_result_data)
+ self.assertTrue(result['test2']['base'] == 'PASSED',
+ msg="regression not correct:%s" % result['test2']['base'])
+ self.assertTrue(result['test2']['target'] == 'FAILED',
+ msg="regression not correct:%s" % result['test2']['target'])
+ self.assertTrue(result['test3']['base'] == 'FAILED',
+ msg="regression not correct:%s" % result['test3']['base'])
+ self.assertTrue(result['test3']['target'] == 'PASSED',
+ msg="regression not correct:%s" % result['test3']['target'])
+
+ def test_merge_can_merge_results(self):
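+ # flatten_map collapses every configuration onto the '' key, so merging
+ # the two base entries and three target entries should leave five
+ # results under results[''], which the assertion below checks.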
+ results = {}
+ resultutils.append_resultsdata(results, ResultToolTests.base_results_data, configmap=resultutils.flatten_map)
+ resultutils.append_resultsdata(results, ResultToolTests.target_results_data, configmap=resultutils.flatten_map)
+ self.assertEqual(len(results[''].keys()), 5, msg="Flattened results not correct %s" % str(results))
+
diff --git a/meta/lib/oeqa/selftest/cases/runcmd.py b/meta/lib/oeqa/selftest/cases/runcmd.py
new file mode 100644
index 0000000000..3755764ee7
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/runcmd.py
@@ -0,0 +1,121 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd
+from oeqa.utils import CommandError
+
+import subprocess
+import threading
+import time
+import signal
+
+class MemLogger(object):
+ def __init__(self):
+ self.info_msgs = []
+ self.error_msgs = []
+
+ def info(self, msg):
+ self.info_msgs.append(msg)
+
+ def error(self, msg):
+ self.error_msgs.append(msg)
+
+class RunCmdTests(OESelftestTestCase):
+ """ Basic tests for runCmd() utility function """
+
+ # The delta is intentionally smaller than the timeout, to detect cases where
+ # we incorrectly apply the timeout more than once.
+ TIMEOUT = 5
+ DELTA = 3
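+ # Example: if the timeout were accidentally applied twice, a hanging
+ # command would take ~2 * TIMEOUT = 10s, which violates the
+ # assertLess(end - start, TIMEOUT + DELTA) = 8s bound checked below.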
+
+ def test_result_okay(self):
+ result = runCmd("true")
+ self.assertEqual(result.status, 0)
+
+ def test_result_false(self):
+ result = runCmd("false", ignore_status=True)
+ self.assertEqual(result.status, 1)
+
+ def test_shell(self):
+ # A shell is used for all string commands.
+ result = runCmd("false; true", ignore_status=True)
+ self.assertEqual(result.status, 0)
+
+ def test_no_shell(self):
+ self.assertRaises(FileNotFoundError,
+ runCmd, "false; true", shell=False)
+
+ def test_list_not_found(self):
+ self.assertRaises(FileNotFoundError,
+ runCmd, ["false; true"])
+
+ def test_list_okay(self):
+ result = runCmd(["true"])
+ self.assertEqual(result.status, 0)
+
+ def test_result_assertion(self):
+ self.assertRaisesRegex(AssertionError, "Command 'echo .* false' returned non-zero exit status 1:\nfoobar",
+ runCmd, "echo foobar >&2; false", shell=True)
+
+ def test_result_exception(self):
+ self.assertRaisesRegex(CommandError, "Command 'echo .* false' returned non-zero exit status 1 with output: foobar",
+ runCmd, "echo foobar >&2; false", shell=True, assert_error=False)
+
+ def test_output(self):
+ result = runCmd("echo stdout; echo stderr >&2", shell=True)
+ self.assertEqual("stdout\nstderr", result.output)
+ self.assertEqual("", result.error)
+
+ def test_output_split(self):
+ result = runCmd("echo stdout; echo stderr >&2", shell=True, stderr=subprocess.PIPE)
+ self.assertEqual("stdout", result.output)
+ self.assertEqual("stderr", result.error)
+
+ def test_timeout(self):
+ numthreads = threading.active_count()
+ start = time.time()
+ # Killing a hanging process only works when not using a shell?!
+ result = runCmd(['sleep', '60'], timeout=self.TIMEOUT, ignore_status=True)
+ self.assertEqual(result.status, -signal.SIGTERM)
+ end = time.time()
+ self.assertLess(end - start, self.TIMEOUT + self.DELTA)
+ self.assertEqual(numthreads, threading.active_count())
+
+ def test_timeout_split(self):
+ numthreads = threading.active_count()
+ start = time.time()
+ # Killing a hanging process only works when not using a shell?!
+ result = runCmd(['sleep', '60'], timeout=self.TIMEOUT, ignore_status=True, stderr=subprocess.PIPE)
+ self.assertEqual(result.status, -signal.SIGTERM)
+ end = time.time()
+ self.assertLess(end - start, self.TIMEOUT + self.DELTA)
+ self.assertEqual(numthreads, threading.active_count())
+
+ def test_stdin(self):
+ numthreads = threading.active_count()
+ result = runCmd("cat", data=b"hello world", timeout=self.TIMEOUT)
+ self.assertEqual("hello world", result.output)
+ self.assertEqual(numthreads, threading.active_count())
+
+ def test_stdin_timeout(self):
+ numthreads = threading.active_count()
+ start = time.time()
+ result = runCmd(['sleep', '60'], data=b"hello world", timeout=self.TIMEOUT, ignore_status=True)
+ self.assertEqual(result.status, -signal.SIGTERM)
+ end = time.time()
+ self.assertLess(end - start, self.TIMEOUT + self.DELTA)
+ self.assertEqual(numthreads, threading.active_count())
+
+ def test_log(self):
+ log = MemLogger()
+ result = runCmd("echo stdout; echo stderr >&2", shell=True, output_log=log)
+ self.assertEqual(["Running: echo stdout; echo stderr >&2", "stdout", "stderr"], log.info_msgs)
+ self.assertEqual([], log.error_msgs)
+
+ def test_log_split(self):
+ log = MemLogger()
+ result = runCmd("echo stdout; echo stderr >&2", shell=True, output_log=log, stderr=subprocess.PIPE)
+ self.assertEqual(["Running: echo stdout; echo stderr >&2", "stdout"], log.info_msgs)
+ self.assertEqual(["stderr"], log.error_msgs)
diff --git a/meta/lib/oeqa/selftest/cases/runqemu.py b/meta/lib/oeqa/selftest/cases/runqemu.py
new file mode 100644
index 0000000000..7e676bcb41
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/runqemu.py
@@ -0,0 +1,211 @@
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import re
+import tempfile
+import time
+import oe.types
+from oeqa.core.decorator import OETestTag
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, runqemu, get_bb_var, runCmd
+
+class RunqemuTests(OESelftestTestCase):
+ """Runqemu test class"""
+
+ image_is_ready = False
+ deploy_dir_image = ''
+
+ def setUpLocal(self):
+ super(RunqemuTests, self).setUpLocal()
+ self.recipe = 'core-image-minimal'
+ self.machine = 'qemux86-64'
+ self.fstypes = "ext4 iso hddimg wic.vmdk wic.qcow2 wic.vdi"
+ self.cmd_common = "runqemu nographic"
+
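+ # oe.types.qemu_use_kvm() evaluates QEMU_USE_KVM against the given
+ # target architecture; when it permits KVM for x86_64, the tests pass
+ # the "kvm" option through to runqemu.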
+ kvm = oe.types.qemu_use_kvm(get_bb_var('QEMU_USE_KVM'), 'x86_64')
+ if kvm:
+ self.cmd_common += " kvm"
+
+ self.write_config(
+"""
+MACHINE = "%s"
+IMAGE_FSTYPES = "%s"
+# SYSLINUX_TIMEOUT is in units of 1/10 s, so 10 means 1 second
+SYSLINUX_TIMEOUT = "10"
+"""
+% (self.machine, self.fstypes)
+ )
+
+ if not RunqemuTests.image_is_ready:
+ RunqemuTests.deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ bitbake(self.recipe)
+ RunqemuTests.image_is_ready = True
+
+ def test_boot_machine(self):
+ """Test runqemu machine"""
+ cmd = "%s %s" % (self.cmd_common, self.machine)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue(qemu.runner.logged, "Failed: %s, %s" % (cmd, f.read()))
+
+ def test_boot_machine_ext4(self):
+ """Test runqemu machine ext4"""
+ cmd = "%s %s ext4" % (self.cmd_common, self.machine)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertIn('rootfs.ext4', f.read(), "Failed: %s" % cmd)
+
+ def test_boot_machine_iso(self):
+ """Test runqemu machine iso"""
+ cmd = "%s %s iso" % (self.cmd_common, self.machine)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertIn('media=cdrom', f.read(), "Failed: %s" % cmd)
+
+ def test_boot_recipe_image(self):
+ """Test runqemu recipe-image"""
+ cmd = "%s %s" % (self.cmd_common, self.recipe)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue(qemu.runner.logged, "Failed: %s, %s" % (cmd, f.read()))
+
+
+ def test_boot_recipe_image_vmdk(self):
+ """Test runqemu recipe-image vmdk"""
+ cmd = "%s %s wic.vmdk" % (self.cmd_common, self.recipe)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertIn('format=vmdk', f.read(), "Failed: %s" % cmd)
+
+ def test_boot_recipe_image_vdi(self):
+ """Test runqemu recipe-image vdi"""
+ cmd = "%s %s wic.vdi" % (self.cmd_common, self.recipe)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertIn('format=vdi', f.read(), "Failed: %s" % cmd)
+
+ def test_boot_deploy(self):
+ """Test runqemu deploy_dir_image"""
+ cmd = "%s %s" % (self.cmd_common, self.deploy_dir_image)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue(qemu.runner.logged, "Failed: %s, %s" % (cmd, f.read()))
+
+
+ def test_boot_deploy_hddimg(self):
+ """Test runqemu deploy_dir_image hddimg"""
+ cmd = "%s %s hddimg" % (self.cmd_common, self.deploy_dir_image)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue(re.search('file=.*.hddimg', f.read()), "Failed: %s, %s" % (cmd, f.read()))
+
+ def test_boot_machine_slirp(self):
+ """Test runqemu machine slirp"""
+ cmd = "%s slirp %s" % (self.cmd_common, self.machine)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertIn(' -netdev user', f.read(), "Failed: %s" % cmd)
+
+ def test_boot_machine_slirp_qcow2(self):
+ """Test runqemu machine slirp qcow2"""
+ cmd = "%s slirp wic.qcow2 %s" % (self.cmd_common, self.machine)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertIn('format=qcow2', f.read(), "Failed: %s" % cmd)
+
+ def test_boot_qemu_boot(self):
+ """Test runqemu /path/to/image.qemuboot.conf"""
+ qemuboot_conf = "%s-%s.qemuboot.conf" % (self.recipe, self.machine)
+ qemuboot_conf = os.path.join(self.deploy_dir_image, qemuboot_conf)
+ if not os.path.exists(qemuboot_conf):
+ self.skipTest("%s not found" % qemuboot_conf)
+ cmd = "%s %s" % (self.cmd_common, qemuboot_conf)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue(qemu.runner.logged, "Failed: %s, %s" % (cmd, f.read()))
+
+ def test_boot_rootfs(self):
+ """Test runqemu /path/to/rootfs.ext4"""
+ rootfs = "%s-%s.ext4" % (self.recipe, self.machine)
+ rootfs = os.path.join(self.deploy_dir_image, rootfs)
+ if not os.path.exists(rootfs):
+ self.skipTest("%s not found" % rootfs)
+ cmd = "%s %s" % (self.cmd_common, rootfs)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue(qemu.runner.logged, "Failed: %s, %s" % (cmd, f.read()))
+
+
+# This test was designed as a separate class so that it can verify the
+# shutdown command shuts down qemu as expected on each qemu architecture,
+# based on the MACHINE configuration inside the config file
+# (e.g. local.conf).
+#
+# This differs from RunqemuTests, which is dedicated to MACHINE=qemux86-64
+# and tests that qemux86-64 boots up various filesystem types, including
+# live images (iso and hddimg) that are not supported on every qemu
+# architecture.
+@OETestTag("machine")
+class QemuTest(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(QemuTest, cls).setUpClass()
+ cls.recipe = 'core-image-minimal'
+ cls.machine = get_bb_var('MACHINE')
+ cls.deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ cls.cmd_common = "runqemu nographic"
+ cls.qemuboot_conf = "%s-%s.qemuboot.conf" % (cls.recipe, cls.machine)
+ cls.qemuboot_conf = os.path.join(cls.deploy_dir_image, cls.qemuboot_conf)
+ bitbake(cls.recipe)
+
+ def _start_qemu_shutdown_check_if_shutdown_succeeded(self, qemu, timeout):
+ qemu.run_serial("shutdown -h now")
+ # stop_thread() stops the LoggingThread instance used to log qemu
+ # output over the serial console; this prevents the code below from
+ # hitting a "Console connection closed unexpectedly" exception once
+ # qemu has been shut down by the command above.
+ qemu.runner.stop_thread()
+ time_track = 0
+ try:
+ while True:
+ is_alive = qemu.check()
+ if not is_alive:
+ return True
+ if time_track > timeout:
+ return False
+ time.sleep(1)
+ time_track += 1
+ except SystemExit:
+ return True
+
+ def test_qemu_can_shutdown(self):
+ self.assertExists(self.qemuboot_conf)
+ cmd = "%s %s" % (self.cmd_common, self.qemuboot_conf)
+ shutdown_timeout = 120
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ qemu_shutdown_succeeded = self._start_qemu_shutdown_check_if_shutdown_succeeded(qemu, shutdown_timeout)
+ self.assertTrue(qemu_shutdown_succeeded, 'Failed: %s did not shut down within the timeout (%ss)' % (self.machine, shutdown_timeout))
+
+ # Need to have portmap/rpcbind running to allow this test to work and
+ # current autobuilder setup does not have this.
+ def disabled_test_qemu_can_boot_nfs_and_shutdown(self):
+ self.assertExists(self.qemuboot_conf)
+ bitbake('meta-ide-support')
+ rootfs_tar = "%s-%s.tar.bz2" % (self.recipe, self.machine)
+ rootfs_tar = os.path.join(self.deploy_dir_image, rootfs_tar)
+ self.assertExists(rootfs_tar)
+ tmpdir = tempfile.mkdtemp(prefix='qemu_nfs')
+ tmpdir_nfs = os.path.join(tmpdir, 'nfs')
+ cmd_extract_nfs = 'runqemu-extract-sdk %s %s' % (rootfs_tar, tmpdir_nfs)
+ result = runCmd(cmd_extract_nfs)
+ self.assertEqual(0, result.status, "runqemu-extract-sdk didn't run as expected. %s" % result.output)
+ cmd = "%s nfs %s %s" % (self.cmd_common, self.qemuboot_conf, tmpdir_nfs)
+ shutdown_timeout = 120
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ qemu_shutdown_succeeded = self._start_qemu_shutdown_check_if_shutdown_succeeded(qemu, shutdown_timeout)
+ self.assertTrue(qemu_shutdown_succeeded, 'Failed: %s did not shut down within the timeout (%ss)' % (self.machine, shutdown_timeout))
+ runCmd('rm -rf %s' % tmpdir)
diff --git a/meta/lib/oeqa/selftest/cases/runtime_test.py b/meta/lib/oeqa/selftest/cases/runtime_test.py
new file mode 100644
index 0000000000..60cb2e01a6
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/runtime_test.py
@@ -0,0 +1,439 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars, runqemu
+from oeqa.utils.sshcontrol import SSHControl
+import os
+import re
+import tempfile
+import shutil
+import oe.lsb
+from oeqa.core.decorator.data import skipIfNotQemu
+
+class TestExport(OESelftestTestCase):
+
+ @classmethod
+ def tearDownClass(cls):
+ runCmd("rm -rf /tmp/sdk")
+ super(TestExport, cls).tearDownClass()
+
+ def test_testexport_basic(self):
+ """
+ Summary: Check basic testexport functionality with only ping test enabled.
+ Expected: 1. testexport directory must be created.
+ 2. oe-test runtime must run without any error/exception.
+ 3. ping test must succeed.
+ Product: oe-core
+ Author: Mariano Lopez <mariano.lopez@intel.com>
+ """
+
+ features = 'INHERIT += "testexport"\n'
+ # These aren't the actual IP addresses but testexport class needs something defined
+ features += 'TEST_SERVER_IP = "192.168.7.1"\n'
+ features += 'TEST_TARGET_IP = "192.168.7.1"\n'
+ features += 'TEST_SUITES = "ping"\n'
+ self.write_config(features)
+
+ # Build testexport for core-image-minimal
+ bitbake('core-image-minimal')
+ bitbake('-c testexport core-image-minimal')
+
+ testexport_dir = get_bb_var('TEST_EXPORT_DIR', 'core-image-minimal')
+
+ # Verify that TEST_EXPORT_DIR was created
+ isdir = os.path.isdir(testexport_dir)
+ self.assertEqual(True, isdir, 'Failed to create testexport dir: %s' % testexport_dir)
+
+ with runqemu('core-image-minimal') as qemu:
+ # Attempt to run oe-test runtime to perform the ping test
+ test_path = os.path.join(testexport_dir, "oe-test")
+ data_file = os.path.join(testexport_dir, 'data', 'testdata.json')
+ manifest = os.path.join(testexport_dir, 'data', 'manifest')
+ cmd = ("%s runtime --test-data-file %s --packages-manifest %s "
+ "--target-ip %s --server-ip %s --quiet"
+ % (test_path, data_file, manifest, qemu.ip, qemu.server_ip))
+ result = runCmd(cmd)
+ # Verify ping test was successful
+ self.assertEqual(0, result.status, 'oe-test runtime returned a non 0 status')
+
+ def test_testexport_sdk(self):
+ """
+ Summary: Check sdk functionality for testexport.
+ Expected: 1. testexport directory must be created.
+ 2. SDK tarball must exist.
+ 3. Uncompressing the tarball must succeed.
+ 4. Check if the SDK directory is added to PATH.
+ 5. Run tar from the SDK directory.
+ Product: oe-core
+ Author: Mariano Lopez <mariano.lopez@intel.com>
+ """
+
+ features = 'INHERIT += "testexport"\n'
+ # These aren't the actual IP addresses but testexport class needs something defined
+ features += 'TEST_SERVER_IP = "192.168.7.1"\n'
+ features += 'TEST_TARGET_IP = "192.168.7.1"\n'
+ features += 'TEST_SUITES = "ping"\n'
+ features += 'TEST_EXPORT_SDK_ENABLED = "1"\n'
+ features += 'TEST_EXPORT_SDK_PACKAGES = "nativesdk-tar"\n'
+ self.write_config(features)
+
+ # Build testexport for core-image-minimal
+ bitbake('core-image-minimal')
+ bitbake('-c testexport core-image-minimal')
+
+ needed_vars = ['TEST_EXPORT_DIR', 'TEST_EXPORT_SDK_DIR', 'TEST_EXPORT_SDK_NAME']
+ bb_vars = get_bb_vars(needed_vars, 'core-image-minimal')
+ testexport_dir = bb_vars['TEST_EXPORT_DIR']
+ sdk_dir = bb_vars['TEST_EXPORT_SDK_DIR']
+ sdk_name = bb_vars['TEST_EXPORT_SDK_NAME']
+
+ # Check for SDK
+ tarball_name = "%s.sh" % sdk_name
+ tarball_path = os.path.join(testexport_dir, sdk_dir, tarball_name)
+ msg = "Couldn't find SDK tarball: %s" % tarball_path
+ self.assertEqual(os.path.isfile(tarball_path), True, msg)
+
+ # Extract SDK and run tar from SDK
+ result = runCmd("%s -y -d /tmp/sdk" % tarball_path)
+ self.assertEqual(0, result.status, "Couldn't extract SDK")
+
+ env_script = result.output.split()[-1]
+ result = runCmd(". %s; which tar" % env_script, shell=True)
+ self.assertEqual(0, result.status, "Couldn't set up SDK environment")
+ is_sdk_tar = "/tmp/sdk" in result.output
+ self.assertTrue(is_sdk_tar, "Couldn't set up SDK environment")
+
+ tar_sdk = result.output
+ result = runCmd("%s --version" % tar_sdk)
+ self.assertEqual(0, result.status, "Couldn't run tar from SDK")
+
+
+class TestImage(OESelftestTestCase):
+
+ def test_testimage_install(self):
+ """
+ Summary: Check install packages functionality for testimage/testexport.
+ Expected: 1. Import tests from a directory other than meta.
+ 2. Check install/uninstall of socat.
+ Product: oe-core
+ Author: Mariano Lopez <mariano.lopez@intel.com>
+ """
+ if get_bb_var('DISTRO') == 'poky-tiny':
+ self.skipTest('core-image-full-cmdline not buildable for poky-tiny')
+
+ features = 'INHERIT += "testimage"\n'
+ features += 'IMAGE_INSTALL_append = " libssl"\n'
+ features += 'TEST_SUITES = "ping ssh selftest"\n'
+ self.write_config(features)
+
+ # Build core-image-full-cmdline and run testimage
+ bitbake('core-image-full-cmdline socat')
+ bitbake('-c testimage core-image-full-cmdline')
+
+ def test_testimage_dnf(self):
+ """
+ Summary: Check package feeds functionality for dnf
+ Expected: 1. Check that remote package feeds can be accessed
+ Product: oe-core
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
+ """
+ if get_bb_var('DISTRO') == 'poky-tiny':
+ self.skipTest('core-image-full-cmdline not buildable for poky-tiny')
+
+ features = 'INHERIT += "testimage"\n'
+ features += 'TEST_SUITES = "ping ssh dnf_runtime dnf.DnfBasicTest.test_dnf_help"\n'
+ # We don't yet know what the server ip and port will be - they will be patched
+ # in at the start of the on-image test
+ features += 'PACKAGE_FEED_URIS = "http://bogus_ip:bogus_port"\n'
+ features += 'EXTRA_IMAGE_FEATURES += "package-management"\n'
+ features += 'PACKAGE_CLASSES = "package_rpm"\n'
+
+ bitbake('gnupg-native -c addto_recipe_sysroot')
+
+ # Enable package feed signing
+ self.gpg_home = tempfile.mkdtemp(prefix="oeqa-feed-sign-")
+ self.track_for_cleanup(self.gpg_home)
+ signing_key_dir = os.path.join(self.testlayer_path, 'files', 'signing')
+ runCmd('gpg --batch --homedir %s --import %s' % (self.gpg_home, os.path.join(signing_key_dir, 'key.secret')), native_sysroot=get_bb_var("RECIPE_SYSROOT_NATIVE", "gnupg-native"))
+ features += 'INHERIT += "sign_package_feed"\n'
+ features += 'PACKAGE_FEED_GPG_NAME = "testuser"\n'
+ features += 'PACKAGE_FEED_GPG_PASSPHRASE_FILE = "%s"\n' % os.path.join(signing_key_dir, 'key.passphrase')
+ features += 'GPG_PATH = "%s"\n' % self.gpg_home
+ self.write_config(features)
+
+ # Build core-image-full-cmdline and run testimage
+ bitbake('core-image-full-cmdline socat')
+ bitbake('-c testimage core-image-full-cmdline')
+
+ def test_testimage_virgl_gtk_sdl(self):
+ """
+ Summary: Check host-assisted accelerate OpenGL functionality in qemu with gtk and SDL frontends
+ Expected: 1. Check that virgl kernel driver is loaded and 3d acceleration is enabled
+ 2. Check that kmscube demo runs without crashing.
+ Product: oe-core
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
+ """
+ if "DISPLAY" not in os.environ:
+ self.skipTest("virgl gtk test must be run inside a X session")
+ distro = oe.lsb.distro_identifier()
+ if distro and distro == 'debian-8':
+ self.skipTest('virgl isn\'t working with Debian 8')
+ if distro and distro == 'centos-7':
+ self.skipTest('virgl isn\'t working with Centos 7')
+ if distro and distro == 'opensuseleap-15.0':
+ self.skipTest('virgl isn\'t working with Opensuse 15.0')
+
+ qemu_packageconfig = get_bb_var('PACKAGECONFIG', 'qemu-system-native')
+ sdl_packageconfig = get_bb_var('PACKAGECONFIG', 'libsdl2-native')
+ features = 'INHERIT += "testimage"\n'
+ if 'gtk+' not in qemu_packageconfig:
+ features += 'PACKAGECONFIG_append_pn-qemu-system-native = " gtk+"\n'
+ if 'sdl' not in qemu_packageconfig:
+ features += 'PACKAGECONFIG_append_pn-qemu-system-native = " sdl"\n'
+ if 'virglrenderer' not in qemu_packageconfig:
+ features += 'PACKAGECONFIG_append_pn-qemu-system-native = " virglrenderer"\n'
+ if 'glx' not in qemu_packageconfig:
+ features += 'PACKAGECONFIG_append_pn-qemu-system-native = " glx"\n'
+ if 'opengl' not in sdl_packageconfig:
+ features += 'PACKAGECONFIG_append_pn-libsdl2-native = " opengl"\n'
+ features += 'TEST_SUITES = "ping ssh virgl"\n'
+ features += 'IMAGE_FEATURES_append = " ssh-server-dropbear"\n'
+ features += 'IMAGE_INSTALL_append = " kmscube"\n'
+ features_gtk = features + 'TEST_RUNQEMUPARAMS = "gtk gl"\n'
+ self.write_config(features_gtk)
+ bitbake('core-image-minimal')
+ bitbake('-c testimage core-image-minimal')
+ features_sdl = features + 'TEST_RUNQEMUPARAMS = "sdl gl"\n'
+ self.write_config(features_sdl)
+ bitbake('core-image-minimal')
+ bitbake('-c testimage core-image-minimal')
+
+ def test_testimage_virgl_headless(self):
+ """
+ Summary: Check host-assisted accelerate OpenGL functionality in qemu with egl-headless frontend
+ Expected: 1. Check that virgl kernel driver is loaded and 3d acceleration is enabled
+ 2. Check that kmscube demo runs without crashing.
+ Product: oe-core
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
+ """
+ import subprocess
+ try:
+ content = os.listdir("/dev/dri")
+ if len([i for i in content if i.startswith('render')]) == 0:
+ self.skipTest("No render nodes found in /dev/dri: %s" %(content))
+ except FileNotFoundError:
+ self.skipTest("/dev/dri directory does not exist; no render nodes available on this machine.")
+ try:
+ dripath = subprocess.check_output("pkg-config --variable=dridriverdir dri", shell=True)
+ except subprocess.CalledProcessError as e:
+ self.skipTest("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
+ qemu_packageconfig = get_bb_var('PACKAGECONFIG', 'qemu-system-native')
+ features = 'INHERIT += "testimage"\n'
+ if 'virglrenderer' not in qemu_packageconfig:
+ features += 'PACKAGECONFIG_append_pn-qemu-system-native = " virglrenderer"\n'
+ if 'glx' not in qemu_packageconfig:
+ features += 'PACKAGECONFIG_append_pn-qemu-system-native = " glx"\n'
+ features += 'TEST_SUITES = "ping ssh virgl"\n'
+ features += 'IMAGE_FEATURES_append = " ssh-server-dropbear"\n'
+ features += 'IMAGE_INSTALL_append = " kmscube"\n'
+ features += 'TEST_RUNQEMUPARAMS = "egl-headless"\n'
+ self.write_config(features)
+ bitbake('core-image-minimal')
+ bitbake('-c testimage core-image-minimal')
+
+class Postinst(OESelftestTestCase):
+
+ def init_manager_loop(self, init_manager):
+ import oe.path
+
+ vars = get_bb_vars(("IMAGE_ROOTFS", "sysconfdir"), "core-image-minimal")
+ rootfs = vars["IMAGE_ROOTFS"]
+ self.assertIsNotNone(rootfs)
+ sysconfdir = vars["sysconfdir"]
+ self.assertIsNotNone(sysconfdir)
+ # Need to use oe.path here as sysconfdir starts with / and
+ # os.path.join would discard rootfs
+ hosttestdir = oe.path.join(rootfs, sysconfdir, "postinst-test")
+ targettestdir = os.path.join(sysconfdir, "postinst-test")
+
+ for classes in ("package_rpm", "package_deb", "package_ipk"):
+ with self.subTest(init_manager=init_manager, package_class=classes):
+ features = 'CORE_IMAGE_EXTRA_INSTALL = "postinst-delayed-b"\n'
+ features += 'IMAGE_FEATURES += "package-management empty-root-password"\n'
+ features += 'PACKAGE_CLASSES = "%s"\n' % classes
+ if init_manager == "systemd":
+ features += 'DISTRO_FEATURES_append = " systemd"\n'
+ features += 'VIRTUAL-RUNTIME_init_manager = "systemd"\n'
+ features += 'DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit"\n'
+ features += 'VIRTUAL-RUNTIME_initscripts = ""\n'
+ self.write_config(features)
+
+ bitbake('core-image-minimal')
+
+ self.assertTrue(os.path.isfile(os.path.join(hosttestdir, "rootfs")),
+ "rootfs state file was not created")
+
+ with runqemu('core-image-minimal') as qemu:
+ # Make the test echo a string and search for that as
+ # run_serial()'s status code is useless.
+ for filename in ("rootfs", "delayed-a", "delayed-b"):
+ status, output = qemu.run_serial("test -f %s && echo found" % os.path.join(targettestdir, filename))
+ self.assertEqual(output, "found", "%s was not present on boot" % filename)
+
+
+
+ @skipIfNotQemu('qemuall', 'Test only runs in qemu')
+ def test_postinst_rootfs_and_boot_sysvinit(self):
+ """
+ Summary: The purpose of this test case is to verify that post-installation
+ scripts are called when the rootfs is created, and also that a
+ script can be delayed to run at first boot.
+ Dependencies: NA
+ Steps: 1. Add proper configuration to local.conf file
+ 2. Build a "core-image-minimal" image
+ 3. Verify that file created by postinst_rootfs recipe is
+ present on rootfs dir.
+ 4. Boot the image created on qemu and verify that the file
+ created by postinst_boot recipe is present on image.
+ Expected: The files are successfully created during rootfs and boot
+ time for 3 different package managers (rpm, ipk, deb) and
+ for the sysvinit initialization manager.
+
+ """
+ self.init_manager_loop("sysvinit")
+
+
+ @skipIfNotQemu('qemuall', 'Test only runs in qemu')
+ def test_postinst_rootfs_and_boot_systemd(self):
+ """
+ Summary: The purpose of this test case is to verify that post-installation
+ scripts are called when the rootfs is created, and also that a
+ script can be delayed to run at first boot.
+ Dependencies: NA
+ Steps: 1. Add proper configuration to local.conf file
+ 2. Build a "core-image-minimal" image
+ 3. Verify that file created by postinst_rootfs recipe is
+ present on rootfs dir.
+ 4. Boot the image created on qemu and verify that the file
+ created by postinst_boot recipe is present on image.
+ Expected: The files are successfully created during rootfs and boot
+ time for 3 different package managers (rpm, ipk, deb) and
+ for the systemd initialization manager.
+
+ """
+
+ self.init_manager_loop("systemd")
+
+
+ def test_failing_postinst(self):
+ """
+ Summary: The purpose of this test case is to verify that post-installation
+ scripts that contain errors are properly reported.
+ Expected: The scriptlet failure is properly reported.
+ The file that is created after the error in the scriptlet is not present.
+ Product: oe-core
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
+ """
+
+ import oe.path
+
+ vars = get_bb_vars(("IMAGE_ROOTFS", "sysconfdir"), "core-image-minimal")
+ rootfs = vars["IMAGE_ROOTFS"]
+ self.assertIsNotNone(rootfs)
+ sysconfdir = vars["sysconfdir"]
+ self.assertIsNotNone(sysconfdir)
+ # Need to use oe.path here as sysconfdir starts with / and
+ # os.path.join would discard rootfs
+ hosttestdir = oe.path.join(rootfs, sysconfdir, "postinst-test")
+
+ for classes in ("package_rpm", "package_deb", "package_ipk"):
+ with self.subTest(package_class=classes):
+ features = 'CORE_IMAGE_EXTRA_INSTALL = "postinst-rootfs-failing"\n'
+ features += 'PACKAGE_CLASSES = "%s"\n' % classes
+ self.write_config(features)
+ bb_result = bitbake('core-image-minimal', ignore_status=True)
+ self.assertGreaterEqual(bb_result.output.find("Postinstall scriptlets of ['postinst-rootfs-failing'] have failed."), 0,
+ "Warning about a failed scriptlet not found in bitbake output: %s" %(bb_result.output))
+
+ self.assertTrue(os.path.isfile(os.path.join(hosttestdir, "rootfs-before-failure")),
+ "rootfs-before-failure file was not created")
+ self.assertFalse(os.path.isfile(os.path.join(hosttestdir, "rootfs-after-failure")),
+ "rootfs-after-failure file was created")
+
+class SystemTap(OESelftestTestCase):
+ """
+ Summary: The purpose of this test case is to verify native crosstap
+ works while talking to a target.
+ Expected: The script should successfully connect to the qemu machine
+ and run some systemtap examples on a qemu machine.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(SystemTap, cls).setUpClass()
+ cls.image = "core-image-minimal"
+
+ def default_config(self):
+ return """
+# These aren't the actual IP addresses but testexport class needs something defined
+TEST_SERVER_IP = "192.168.7.1"
+TEST_TARGET_IP = "192.168.7.2"
+
+EXTRA_IMAGE_FEATURES += "tools-profile dbg-pkgs"
+IMAGE_FEATURES_append = " ssh-server-dropbear"
+
+# enables kernel debug symbols
+KERNEL_EXTRA_FEATURES_append = " features/debug/debug-kernel.scc"
+KERNEL_EXTRA_FEATURES_append = " features/systemtap/systemtap.scc"
+
+# add systemtap run-time into target image if it is not there yet
+IMAGE_INSTALL_append = " systemtap"
+"""
+
+ def test_crosstap_helloworld(self):
+ self.write_config(self.default_config())
+ bitbake('systemtap-native')
+ systemtap_examples = os.path.join(get_bb_var("WORKDIR","systemtap-native"), "usr/share/systemtap/examples")
+ bitbake(self.image)
+
+ with runqemu(self.image) as qemu:
+ cmd = "crosstap -r root@192.168.7.2 -s %s/general/helloworld.stp " % systemtap_examples
+ result = runCmd(cmd)
+ self.assertEqual(0, result.status, 'crosstap helloworld returned a non 0 status:%s' % result.output)
+
+ def test_crosstap_pstree(self):
+ self.write_config(self.default_config())
+
+ bitbake('systemtap-native')
+ systemtap_examples = os.path.join(get_bb_var("WORKDIR","systemtap-native"), "usr/share/systemtap/examples")
+ bitbake(self.image)
+
+ with runqemu(self.image) as qemu:
+ cmd = "crosstap -r root@192.168.7.2 -s %s/process/pstree.stp" % systemtap_examples
+ result = runCmd(cmd)
+ self.assertEqual(0, result.status, 'crosstap pstree returned a non 0 status:%s' % result.output)
+
+ def test_crosstap_syscalls_by_proc(self):
+ self.write_config(self.default_config())
+
+ bitbake('systemtap-native')
+ systemtap_examples = os.path.join(get_bb_var("WORKDIR","systemtap-native"), "usr/share/systemtap/examples")
+ bitbake(self.image)
+
+ with runqemu(self.image) as qemu:
+ cmd = "crosstap -r root@192.168.7.2 -s %s/process/ syscalls_by_proc.stp" % systemtap_examples
+ result = runCmd(cmd)
+ self.assertEqual(0, result.status, 'crosstap syscalls_by_proc returned a non 0 status:%s' % result.output)
+
+ def test_crosstap_syscalls_by_pid(self):
+ self.write_config(self.default_config())
+
+ bitbake('systemtap-native')
+ systemtap_examples = os.path.join(get_bb_var("WORKDIR","systemtap-native"), "usr/share/systemtap/examples")
+ bitbake(self.image)
+
+ with runqemu(self.image) as qemu:
+ cmd = "crosstap -r root@192.168.7.2 -s %s/process/ syscalls_by_pid.stp" % systemtap_examples
+ result = runCmd(cmd)
+ self.assertEqual(0, result.status, 'crosstap syscalls_by_pid returned a non 0 status:%s' % result.output)
+
diff --git a/meta/lib/oeqa/selftest/cases/selftest.py b/meta/lib/oeqa/selftest/cases/selftest.py
new file mode 100644
index 0000000000..af080dcf03
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/selftest.py
@@ -0,0 +1,53 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import importlib
+import os
+from oeqa.utils.commands import runCmd
+import oeqa.selftest
+from oeqa.selftest.case import OESelftestTestCase
+
+class ExternalLayer(OESelftestTestCase):
+
+ def test_list_imported(self):
+ """
+ Summary: Checks functionality to import tests from other layers.
+ Expected: 1. File "external-layer.py" must be in
+ oeqa.selftest.__path__
+ 2. test_unconditional_pass method must exist
+ in the ImportedTests class
+ Product: oe-core
+ Author: Mariano Lopez <mariano.lopez@intel.com>
+ """
+
+ test_file = "external-layer.py"
+ test_module = "oeqa.selftest.cases.external-layer"
+ method_name = "test_unconditional_pass"
+
+ # Check if "external-layer.py" is in oeqa path
+ found_file = search_test_file(test_file)
+ self.assertTrue(found_file, msg="Can't find %s in the oeqa path" % test_file)
+
+ # Import oeqa.selftest.external-layer module and search for
+ # test_unconditional_pass method of ImportedTests class
+ found_method = search_method(test_module, method_name)
+ self.assertTrue(found_method, msg="Can't find %s method" % method_name)
+
+def search_test_file(file_name):
+ for layer_path in oeqa.selftest.__path__:
+ for _, _, files in os.walk(layer_path):
+ for f in files:
+ if f == file_name:
+ return True
+ return False
+
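+# Import the named module and report whether any OESelftestTestCase
+# subclass defined in it provides the requested method.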
+def search_method(module, method):
+ modlib = importlib.import_module(module)
+ for var in vars(modlib):
+ klass = vars(modlib)[var]
+ if isinstance(klass, type(OESelftestTestCase)) and issubclass(klass, OESelftestTestCase):
+ for m in dir(klass):
+ if m == method:
+ return True
+ return False
+
diff --git a/meta/lib/oeqa/selftest/cases/signing.py b/meta/lib/oeqa/selftest/cases/signing.py
new file mode 100644
index 0000000000..93b15ae681
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/signing.py
@@ -0,0 +1,224 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars, create_temp_layer
+import os
+import oe.path
+import bb.utils
+import glob
+import re
+import shutil
+import tempfile
+from contextlib import contextmanager
+from oeqa.utils.ftools import write_file
+
+
+class Signing(OESelftestTestCase):
+
+ gpg_dir = ""
+ pub_key_path = ""
+ secret_key_path = ""
+
+ def setup_gpg(self):
+ bitbake('gnupg-native -c addto_recipe_sysroot')
+
+ self.gpg_dir = tempfile.mkdtemp(prefix="oeqa-signing-")
+ self.track_for_cleanup(self.gpg_dir)
+
+ self.pub_key_path = os.path.join(self.testlayer_path, 'files', 'signing', "key.pub")
+ self.secret_key_path = os.path.join(self.testlayer_path, 'files', 'signing', "key.secret")
+
+ nsysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "gnupg-native")
+
+ runCmd('gpg --agent-program=`which gpg-agent`\|--auto-expand-secmem --batch --homedir %s --import %s %s' % (self.gpg_dir, self.pub_key_path, self.secret_key_path), native_sysroot=nsysroot)
+ return nsysroot + get_bb_var("bindir_native")
+
+
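+ # Clone the conf/ and cache/ directories of the current build into a
+ # fresh build directory, point any environment variables that reference
+ # the old location at the new one, and restore everything on exit.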
+ @contextmanager
+ def create_new_builddir(self, builddir, newbuilddir):
+ bb.utils.mkdirhier(newbuilddir)
+ oe.path.copytree(builddir + "/conf", newbuilddir + "/conf")
+ oe.path.copytree(builddir + "/cache", newbuilddir + "/cache")
+
+ origenv = os.environ.copy()
+
+ for e in os.environ:
+ if builddir in os.environ[e]:
+ os.environ[e] = os.environ[e].replace(builddir, newbuilddir)
+
+ os.chdir(newbuilddir)
+ try:
+ yield
+ finally:
+ for e in origenv:
+ os.environ[e] = origenv[e]
+ os.chdir(builddir)
+
+ def test_signing_packages(self):
+ """
+ Summary: Test that packages can be signed in the package feed
+ Expected: Package should be signed with the correct key
+ Expected: Images can be created from signed packages
+ Product: oe-core
+ Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+ import oe.packagedata
+
+ self.setup_gpg()
+
+ package_classes = get_bb_var('PACKAGE_CLASSES')
+ if 'package_rpm' not in package_classes:
+ self.skipTest('This test requires RPM Packaging.')
+
+ test_recipe = 'ed'
+
+ feature = 'INHERIT += "sign_rpm"\n'
+ feature += 'RPM_GPG_PASSPHRASE = "test123"\n'
+ feature += 'RPM_GPG_NAME = "testuser"\n'
+ feature += 'GPG_PATH = "%s"\n' % self.gpg_dir
+
+ self.write_config(feature)
+
+ bitbake('-c clean %s' % test_recipe)
+ bitbake('-f -c package_write_rpm %s' % test_recipe)
+
+ self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
+
+ needed_vars = ['PKGDATA_DIR', 'DEPLOY_DIR_RPM', 'PACKAGE_ARCH', 'STAGING_BINDIR_NATIVE']
+ bb_vars = get_bb_vars(needed_vars, test_recipe)
+ pkgdatadir = bb_vars['PKGDATA_DIR']
+ pkgdata = oe.packagedata.read_pkgdatafile(pkgdatadir + "/runtime/ed")
+ if 'PKGE' in pkgdata:
+ pf = pkgdata['PN'] + "-" + pkgdata['PKGE'] + pkgdata['PKGV'] + '-' + pkgdata['PKGR']
+ else:
+ pf = pkgdata['PN'] + "-" + pkgdata['PKGV'] + '-' + pkgdata['PKGR']
+ deploy_dir_rpm = bb_vars['DEPLOY_DIR_RPM']
+ package_arch = bb_vars['PACKAGE_ARCH'].replace('-', '_')
+ staging_bindir_native = bb_vars['STAGING_BINDIR_NATIVE']
+
+ pkg_deploy = os.path.join(deploy_dir_rpm, package_arch, '.'.join((pf, package_arch, 'rpm')))
+
+ # Use a temporary rpmdb
+ rpmdb = tempfile.mkdtemp(prefix='oeqa-rpmdb')
+
+ runCmd('%s/rpmkeys --define "_dbpath %s" --import %s' %
+ (staging_bindir_native, rpmdb, self.pub_key_path))
+
+ ret = runCmd('%s/rpmkeys --define "_dbpath %s" --checksig %s' %
+ (staging_bindir_native, rpmdb, pkg_deploy))
+ # tmp/deploy/rpm/i586/ed-1.9-r0.i586.rpm: rsa sha1 md5 OK
+ self.assertIn('digests signatures OK', ret.output, 'Package signed incorrectly.')
+ shutil.rmtree(rpmdb)
+
+ #Check that an image can be built from signed packages
+ self.add_command_to_tearDown('bitbake -c clean core-image-minimal')
+ bitbake('-c clean core-image-minimal')
+ bitbake('core-image-minimal')
+
+
+ def test_signing_sstate_archive(self):
+ """
+ Summary: Test that sstate archives can be signed
+ Expected: Package should be signed with the correct key
+ Product: oe-core
+ Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ test_recipe = 'ed'
+
+ # Since we need gpg but we can't use gpg-native for sstate signatures, we
+ # build gpg-native in our original builddir then run the tests in a second one.
+ builddir = os.environ.get('BUILDDIR') + "-testsign"
+ sstatedir = os.path.join(builddir, 'test-sstate')
+
+ nsysroot = self.setup_gpg()
+
+ feature = 'SSTATE_SIG_KEY ?= "testuser"\n'
+ feature += 'SSTATE_SIG_PASSPHRASE ?= "test123"\n'
+ feature += 'SSTATE_VERIFY_SIG ?= "1"\n'
+ feature += 'GPG_PATH = "%s"\n' % self.gpg_dir
+ feature += 'SSTATE_DIR = "%s"\n' % sstatedir
+ # Any mirror might have partial sstate without .sig files, triggering failures
+ feature += 'SSTATE_MIRRORS_forcevariable = ""\n'
+
+ self.write_config(feature)
+
+ with self.create_new_builddir(os.environ['BUILDDIR'], builddir):
+
+ os.environ["PATH"] = nsysroot + ":" + os.environ["PATH"]
+ self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
+ self.add_command_to_tearDown('rm -rf %s' % sstatedir)
+ self.add_command_to_tearDown('rm -rf %s' % builddir)
+
+ bitbake('-c clean %s' % test_recipe)
+ bitbake('-c populate_lic %s' % test_recipe)
+
+ recipe_sig = glob.glob(sstatedir + '/*/*:ed:*_populate_lic.tgz.sig')
+ recipe_tgz = glob.glob(sstatedir + '/*/*:ed:*_populate_lic.tgz')
+
+ self.assertEqual(len(recipe_sig), 1, 'Failed to find .sig file.')
+ self.assertEqual(len(recipe_tgz), 1, 'Failed to find .tgz file.')
+
+ ret = runCmd('gpg --homedir %s --verify %s %s' % (self.gpg_dir, recipe_sig[0], recipe_tgz[0]))
+ # gpg: Signature made Thu 22 Oct 2015 01:45:09 PM EEST using RSA key ID 61EEFB30
+ # gpg: Good signature from "testuser (nocomment) <testuser@email.com>"
+ self.assertIn('gpg: Good signature from', ret.output, 'Package signed incorrectly.')
+
+
+class LockedSignatures(OESelftestTestCase):
+
+ def test_locked_signatures(self):
+ """
+ Summary: Test locked signature mechanism
+ Expected: Locked signatures will prevent the task from running
+ Product: oe-core
+ Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ import uuid
+
+ test_recipe = 'ed'
+ locked_sigs_file = 'locked-sigs.inc'
+
+ bitbake(test_recipe)
+ # Generate locked sigs include file
+ bitbake('-S none %s' % test_recipe)
+
+ feature = 'require %s\n' % locked_sigs_file
+ feature += 'SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n'
+ self.write_config(feature)
+
+ # Build a locked recipe
+ bitbake(test_recipe)
+
+ templayerdir = tempfile.mkdtemp(prefix='signingqa')
+ create_temp_layer(templayerdir, 'selftestsigning')
+ runCmd('bitbake-layers add-layer %s' % templayerdir)
+
+ # Make a change that should cause the locked task signature to change
+ # Use a uuid so the hash equivalence server isn't triggered
+ recipe_append_file = test_recipe + '_' + get_bb_var('PV', test_recipe) + '.bbappend'
+ recipe_append_path = os.path.join(templayerdir, 'recipes-test', test_recipe, recipe_append_file)
+ feature = 'SUMMARY_${PN} = "test locked signature%s"\n' % uuid.uuid4()
+
+ os.mkdir(os.path.join(templayerdir, 'recipes-test'))
+ os.mkdir(os.path.join(templayerdir, 'recipes-test', test_recipe))
+ write_file(recipe_append_path, feature)
+
+ self.add_command_to_tearDown('bitbake-layers remove-layer %s' % templayerdir)
+ self.add_command_to_tearDown('rm -f %s' % os.path.join(self.builddir, locked_sigs_file))
+ self.add_command_to_tearDown('rm -rf %s' % templayerdir)
+
+ # Build the recipe again
+ ret = bitbake(test_recipe)
+
+ # Verify you get the warning and that the real task *isn't* run (i.e. the locked signature has worked)
+ patt = r'The %s:do_package sig is computed to be \S+, but the sig is locked to \S+ in SIGGEN_LOCKEDSIGS\S+' % test_recipe
+ found_warn = re.search(patt, ret.output)
+
+ self.assertIsNotNone(found_warn, "Didn't find the expected warning message. Output: %s" % ret.output)
diff --git a/meta/lib/oeqa/selftest/sstate.py b/meta/lib/oeqa/selftest/cases/sstate.py
index 5989724432..410dec64fc 100644
--- a/meta/lib/oeqa/selftest/sstate.py
+++ b/meta/lib/oeqa/selftest/cases/sstate.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import datetime
import unittest
import os
@@ -5,17 +9,26 @@ import re
import shutil
import oeqa.utils.ftools as ftools
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_vars, get_test_layer
-class SStateBase(oeSelfTest):
+class SStateBase(OESelftestTestCase):
def setUpLocal(self):
+ super(SStateBase, self).setUpLocal()
self.temp_sstate_location = None
- self.sstate_path = get_bb_var('SSTATE_DIR')
- self.distro = get_bb_var('NATIVELSBSTRING')
- self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro)
+ needed_vars = ['SSTATE_DIR', 'NATIVELSBSTRING', 'TCLIBC', 'TUNE_ARCH',
+ 'TOPDIR', 'TARGET_VENDOR', 'TARGET_OS']
+ bb_vars = get_bb_vars(needed_vars)
+ self.sstate_path = bb_vars['SSTATE_DIR']
+ self.hostdistro = bb_vars['NATIVELSBSTRING']
+ self.tclibc = bb_vars['TCLIBC']
+ self.tune_arch = bb_vars['TUNE_ARCH']
+ self.topdir = bb_vars['TOPDIR']
+ self.target_vendor = bb_vars['TARGET_VENDOR']
+ self.target_os = bb_vars['TARGET_OS']
+ self.distro_specific_sstate = os.path.join(self.sstate_path, self.hostdistro)
# Creates a special sstate configuration with the option to add sstate mirrors
def config_sstate(self, temp_sstate_location=False, add_local_mirrors=[]):
@@ -26,9 +39,10 @@ class SStateBase(oeSelfTest):
config_temp_sstate = "SSTATE_DIR = \"%s\"" % temp_sstate_path
self.append_config(config_temp_sstate)
self.track_for_cleanup(temp_sstate_path)
- self.sstate_path = get_bb_var('SSTATE_DIR')
- self.distro = get_bb_var('NATIVELSBSTRING')
- self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro)
+ bb_vars = get_bb_vars(['SSTATE_DIR', 'NATIVELSBSTRING'])
+ self.sstate_path = bb_vars['SSTATE_DIR']
+ self.hostdistro = bb_vars['NATIVELSBSTRING']
+ self.distro_specific_sstate = os.path.join(self.sstate_path, self.hostdistro)
if add_local_mirrors:
config_set_sstate_if_not_set = 'SSTATE_MIRRORS ?= ""'
@@ -42,7 +56,7 @@ class SStateBase(oeSelfTest):
def search_sstate(self, filename_regex, distro_specific=True, distro_nonspecific=True):
result = []
for root, dirs, files in os.walk(self.sstate_path):
- if distro_specific and re.search("%s/[a-z0-9]{2}$" % self.distro, root):
+ if distro_specific and re.search("%s/[a-z0-9]{2}$" % self.hostdistro, root):
for f in files:
if re.search(filename_regex, f):
result.append(f)
diff --git a/meta/lib/oeqa/selftest/sstatetests.py b/meta/lib/oeqa/selftest/cases/sstatetests.py
index a353f67e86..6757a0ec68 100644
--- a/meta/lib/oeqa/selftest/sstatetests.py
+++ b/meta/lib/oeqa/selftest/cases/sstatetests.py
@@ -1,22 +1,59 @@
-import datetime
-import unittest
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
-import re
import shutil
import glob
import subprocess
+import tempfile
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer, create_temp_layer
+from oeqa.selftest.cases.sstate import SStateBase
-import oeqa.utils.ftools as ftools
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
-from oeqa.selftest.sstate import SStateBase
-from oeqa.utils.decorators import testcase
+import bb.siggen
class SStateTests(SStateBase):
+ def test_autorev_sstate_works(self):
+ # Test that a git repository which changes is correctly handled by SRCREV = ${AUTOREV}
+ # when PV does not contain SRCPV
+
+ tempdir = tempfile.mkdtemp(prefix='oeqa')
+ self.track_for_cleanup(tempdir)
+ create_temp_layer(tempdir, 'selftestrecipetool')
+ self.add_command_to_tearDown('bitbake-layers remove-layer %s' % tempdir)
+ runCmd('bitbake-layers add-layer %s' % tempdir)
+
+ # Use dbus-wait as a local git repo to which we can add a commit between two builds
+ pn = 'dbus-wait'
+ srcrev = '6cc6077a36fe2648a5f993fe7c16c9632f946517'
+ url = 'git://git.yoctoproject.org/dbus-wait'
+ result = runCmd('git clone %s noname' % url, cwd=tempdir)
+ srcdir = os.path.join(tempdir, 'noname')
+ result = runCmd('git reset --hard %s' % srcrev, cwd=srcdir)
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure.ac')), 'Unable to find configure script in source directory')
+
+ recipefile = os.path.join(tempdir, "recipes-test", "dbus-wait-test", 'dbus-wait-test_git.bb')
+ os.makedirs(os.path.dirname(recipefile))
+ srcuri = 'git://' + srcdir + ';protocol=file'
+ result = runCmd(['recipetool', 'create', '-o', recipefile, srcuri])
+ self.assertTrue(os.path.isfile(recipefile), 'recipetool did not create recipe file; output:\n%s' % result.output)
+
+ with open(recipefile, 'a') as f:
+ f.write('SRCREV = "${AUTOREV}"\n')
+ f.write('PV = "1.0"\n')
+
+ bitbake("dbus-wait-test -c fetch")
+ with open(os.path.join(srcdir, "bar.txt"), "w") as f:
+ f.write("foo")
+ result = runCmd('git add bar.txt; git commit -asm "add bar"', cwd=srcdir)
+ bitbake("dbus-wait-test -c unpack")
+
# Test sstate files creation and their location
def run_test_sstate_creation(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True, should_pass=True):
- self.config_sstate(temp_sstate_location)
+ self.config_sstate(temp_sstate_location, [self.sstate_path])
if self.temp_sstate_location:
bitbake(['-cclean'] + targets)
@@ -39,72 +76,64 @@ class SStateTests(SStateBase):
else:
        self.assertTrue(not file_tracker, msg="Found sstate files in the wrong place for: %s (found %s)" % (', '.join(map(str, targets)), str(file_tracker)))
- @testcase(975)
def test_sstate_creation_distro_specific_pass(self):
- targetarch = get_bb_var('TUNE_ARCH')
- self.run_test_sstate_creation(['binutils-cross-'+ targetarch, 'binutils-native'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
+ self.run_test_sstate_creation(['binutils-cross-'+ self.tune_arch, 'binutils-native'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
- @testcase(1374)
def test_sstate_creation_distro_specific_fail(self):
- targetarch = get_bb_var('TUNE_ARCH')
- self.run_test_sstate_creation(['binutils-cross-'+ targetarch, 'binutils-native'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True, should_pass=False)
+ self.run_test_sstate_creation(['binutils-cross-'+ self.tune_arch, 'binutils-native'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True, should_pass=False)
- @testcase(976)
def test_sstate_creation_distro_nonspecific_pass(self):
- self.run_test_sstate_creation(['glibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
+ self.run_test_sstate_creation(['linux-libc-headers'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
- @testcase(1375)
def test_sstate_creation_distro_nonspecific_fail(self):
- self.run_test_sstate_creation(['glibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True, should_pass=False)
-
+ self.run_test_sstate_creation(['linux-libc-headers'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True, should_pass=False)
# Test the sstate files deletion part of the do_cleansstate task
def run_test_cleansstate_task(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True):
- self.config_sstate(temp_sstate_location)
+ self.config_sstate(temp_sstate_location, [self.sstate_path])
bitbake(['-ccleansstate'] + targets)
bitbake(targets)
- tgz_created = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
+ tgz_created = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
self.assertTrue(tgz_created, msg="Could not find sstate .tgz files for: %s (%s)" % (', '.join(map(str, targets)), str(tgz_created)))
- siginfo_created = self.search_sstate('|'.join(map(str, [s + '.*?\.siginfo$' for s in targets])), distro_specific, distro_nonspecific)
+ siginfo_created = self.search_sstate('|'.join(map(str, [s + r'.*?\.siginfo$' for s in targets])), distro_specific, distro_nonspecific)
self.assertTrue(siginfo_created, msg="Could not find sstate .siginfo files for: %s (%s)" % (', '.join(map(str, targets)), str(siginfo_created)))
bitbake(['-ccleansstate'] + targets)
- tgz_removed = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
+ tgz_removed = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
self.assertTrue(not tgz_removed, msg="do_cleansstate didn't remove .tgz sstate files for: %s (%s)" % (', '.join(map(str, targets)), str(tgz_removed)))
- @testcase(977)
def test_cleansstate_task_distro_specific_nonspecific(self):
- targetarch = get_bb_var('TUNE_ARCH')
- self.run_test_cleansstate_task(['binutils-cross-' + targetarch, 'binutils-native', 'glibc-initial'], distro_specific=True, distro_nonspecific=True, temp_sstate_location=True)
+ targets = ['binutils-cross-'+ self.tune_arch, 'binutils-native']
+ targets.append('linux-libc-headers')
+ self.run_test_cleansstate_task(targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True)
- @testcase(1376)
def test_cleansstate_task_distro_nonspecific(self):
- self.run_test_cleansstate_task(['glibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
+ self.run_test_cleansstate_task(['linux-libc-headers'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
- @testcase(1377)
def test_cleansstate_task_distro_specific(self):
- targetarch = get_bb_var('TUNE_ARCH')
- self.run_test_cleansstate_task(['binutils-cross-'+ targetarch, 'binutils-native', 'glibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
+ targets = ['binutils-cross-'+ self.tune_arch, 'binutils-native']
+ targets.append('linux-libc-headers')
+ self.run_test_cleansstate_task(targets, distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
# Test rebuilding of distro-specific sstate files
def run_test_rebuild_distro_specific_sstate(self, targets, temp_sstate_location=True):
- self.config_sstate(temp_sstate_location)
+ self.config_sstate(temp_sstate_location, [self.sstate_path])
bitbake(['-ccleansstate'] + targets)
bitbake(targets)
- results = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=False, distro_nonspecific=True)
+ results = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific=False, distro_nonspecific=True)
filtered_results = []
for r in results:
if r.endswith(("_populate_lic.tgz", "_populate_lic.tgz.siginfo")):
continue
filtered_results.append(r)
self.assertTrue(filtered_results == [], msg="Found distro non-specific sstate for: %s (%s)" % (', '.join(map(str, targets)), str(filtered_results)))
- file_tracker_1 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
+ file_tracker_1 = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
        self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
self.track_for_cleanup(self.distro_specific_sstate + "_old")
@@ -113,7 +142,7 @@ class SStateTests(SStateBase):
bitbake(['-cclean'] + targets)
bitbake(targets)
- file_tracker_2 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
+ file_tracker_2 = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
        self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
not_recreated = [x for x in file_tracker_1 if x not in file_tracker_2]
@@ -122,17 +151,12 @@ class SStateTests(SStateBase):
created_once = [x for x in file_tracker_2 if x not in file_tracker_1]
        self.assertTrue(created_once == [], msg="The following sstate files were created only in the second run: %s" % ', '.join(map(str, created_once)))
- @testcase(175)
def test_rebuild_distro_specific_sstate_cross_native_targets(self):
- targetarch = get_bb_var('TUNE_ARCH')
- self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + targetarch, 'binutils-native'], temp_sstate_location=True)
+ self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + self.tune_arch, 'binutils-native'], temp_sstate_location=True)
- @testcase(1372)
def test_rebuild_distro_specific_sstate_cross_target(self):
- targetarch = get_bb_var('TUNE_ARCH')
- self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + targetarch], temp_sstate_location=True)
+ self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + self.tune_arch], temp_sstate_location=True)
- @testcase(1373)
def test_rebuild_distro_specific_sstate_native_target(self):
self.run_test_rebuild_distro_specific_sstate(['binutils-native'], temp_sstate_location=True)
@@ -145,10 +169,9 @@ class SStateTests(SStateBase):
self.assertTrue(len(global_config) == len(target_config), msg='Lists global_config and target_config should have the same number of elements')
self.config_sstate(temp_sstate_location=True, add_local_mirrors=[self.sstate_path])
- # If buildhistory is enabled, we need to disable version-going-backwards QA checks for this test. It may report errors otherwise.
- if ('buildhistory' in get_bb_var('USER_CLASSES')) or ('buildhistory' in get_bb_var('INHERIT')):
- remove_errors_config = 'ERROR_QA_remove = "version-going-backwards"'
- self.append_config(remove_errors_config)
+ # If buildhistory is enabled, we need to disable version-going-backwards
+ # QA checks for this test. It may report errors otherwise.
+ self.append_config('ERROR_QA_remove = "version-going-backwards"')
    # For now this only checks if random sstate tasks are handled correctly as a group.
# In the future we should add control over what tasks we check for.
@@ -162,25 +185,24 @@ class SStateTests(SStateBase):
if not sstate_arch in sstate_archs_list:
sstate_archs_list.append(sstate_arch)
if target_config[idx] == target_config[-1]:
- target_sstate_before_build = self.search_sstate(target + '.*?\.tgz$')
+ target_sstate_before_build = self.search_sstate(target + r'.*?\.tgz$')
bitbake("-cclean %s" % target)
result = bitbake(target, ignore_status=True)
if target_config[idx] == target_config[-1]:
- target_sstate_after_build = self.search_sstate(target + '.*?\.tgz$')
+ target_sstate_after_build = self.search_sstate(target + r'.*?\.tgz$')
expected_remaining_sstate += [x for x in target_sstate_after_build if x not in target_sstate_before_build if not any(pattern in x for pattern in ignore_patterns)]
self.remove_config(global_config[idx])
self.remove_recipeinc(target, target_config[idx])
self.assertEqual(result.status, 0, msg = "build of %s failed with %s" % (target, result.output))
runCmd("sstate-cache-management.sh -y --cache-dir=%s --remove-duplicated --extra-archs=%s" % (self.sstate_path, ','.join(map(str, sstate_archs_list))))
- actual_remaining_sstate = [x for x in self.search_sstate(target + '.*?\.tgz$') if not any(pattern in x for pattern in ignore_patterns)]
+ actual_remaining_sstate = [x for x in self.search_sstate(target + r'.*?\.tgz$') if not any(pattern in x for pattern in ignore_patterns)]
actual_not_expected = [x for x in actual_remaining_sstate if x not in expected_remaining_sstate]
        self.assertFalse(actual_not_expected, msg="Files should have been removed but were not: %s" % ', '.join(map(str, actual_not_expected)))
expected_not_actual = [x for x in expected_remaining_sstate if x not in actual_remaining_sstate]
        self.assertFalse(expected_not_actual, msg="Extra files were removed: %s" % ', '.join(map(str, expected_not_actual)))
- @testcase(973)
def test_sstate_cache_management_script_using_pr_1(self):
global_config = []
target_config = []
@@ -188,7 +210,6 @@ class SStateTests(SStateBase):
target_config.append('PR = "0"')
self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
- @testcase(978)
def test_sstate_cache_management_script_using_pr_2(self):
global_config = []
target_config = []
@@ -198,7 +219,6 @@ class SStateTests(SStateBase):
target_config.append('PR = "1"')
self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
- @testcase(979)
def test_sstate_cache_management_script_using_pr_3(self):
global_config = []
target_config = []
@@ -210,7 +230,6 @@ class SStateTests(SStateBase):
target_config.append('PR = "1"')
self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
- @testcase(974)
def test_sstate_cache_management_script_using_machine(self):
global_config = []
target_config = []
@@ -220,7 +239,6 @@ class SStateTests(SStateBase):
target_config.append('')
self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
- @testcase(1270)
def test_sstate_32_64_same_hash(self):
"""
The sstate checksums for both native and target should not vary whether
@@ -229,27 +247,29 @@ class SStateTests(SStateBase):
manually and check using bitbake -S.
"""
- topdir = get_bb_var('TOPDIR')
- targetvendor = get_bb_var('TARGET_VENDOR')
self.write_config("""
MACHINE = "qemux86"
TMPDIR = "${TOPDIR}/tmp-sstatesamehash"
+TCLIBCAPPEND = ""
BUILD_ARCH = "x86_64"
BUILD_OS = "linux"
SDKMACHINE = "x86_64"
PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
- self.track_for_cleanup(topdir + "/tmp-sstatesamehash")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
bitbake("core-image-sato -S none")
self.write_config("""
MACHINE = "qemux86"
TMPDIR = "${TOPDIR}/tmp-sstatesamehash2"
+TCLIBCAPPEND = ""
BUILD_ARCH = "i686"
BUILD_OS = "linux"
SDKMACHINE = "i686"
PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
- self.track_for_cleanup(topdir + "/tmp-sstatesamehash2")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
bitbake("core-image-sato -S none")
def get_files(d):
@@ -262,14 +282,13 @@ PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
continue
f.extend(os.path.join(root, name) for name in files)
return f
- files1 = get_files(topdir + "/tmp-sstatesamehash/stamps/")
- files2 = get_files(topdir + "/tmp-sstatesamehash2/stamps/")
- files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash").replace("i686-linux", "x86_64-linux").replace("i686" + targetvendor + "-linux", "x86_64" + targetvendor + "-linux", ) for x in files2]
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/")
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/")
+ files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash").replace("i686-linux", "x86_64-linux").replace("i686" + self.target_vendor + "-linux", "x86_64" + self.target_vendor + "-linux", ) for x in files2]
self.maxDiff = None
self.assertCountEqual(files1, files2)
- @testcase(1271)
def test_sstate_nativelsbstring_same_hash(self):
"""
The sstate checksums should be independent of whichever NATIVELSBSTRING is
@@ -277,18 +296,21 @@ PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
builds, override the variables manually and check using bitbake -S.
"""
- topdir = get_bb_var('TOPDIR')
self.write_config("""
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+TCLIBCAPPEND = \"\"
NATIVELSBSTRING = \"DistroA\"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
- self.track_for_cleanup(topdir + "/tmp-sstatesamehash")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
bitbake("core-image-sato -S none")
self.write_config("""
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+TCLIBCAPPEND = \"\"
NATIVELSBSTRING = \"DistroB\"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
- self.track_for_cleanup(topdir + "/tmp-sstatesamehash2")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
bitbake("core-image-sato -S none")
def get_files(d):
@@ -296,13 +318,12 @@ NATIVELSBSTRING = \"DistroB\"
for root, dirs, files in os.walk(d):
f.extend(os.path.join(root, name) for name in files)
return f
- files1 = get_files(topdir + "/tmp-sstatesamehash/stamps/")
- files2 = get_files(topdir + "/tmp-sstatesamehash2/stamps/")
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/")
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/")
files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash") for x in files2]
self.maxDiff = None
self.assertCountEqual(files1, files2)
- @testcase(1368)
def test_sstate_allarch_samesigs(self):
"""
The sstate checksums of allarch packages should be independent of whichever
@@ -311,20 +332,51 @@ NATIVELSBSTRING = \"DistroB\"
the two MACHINE values.
"""
- topdir = get_bb_var('TOPDIR')
- targetos = get_bb_var('TARGET_OS')
- targetvendor = get_bb_var('TARGET_VENDOR')
- self.write_config("""
+ configA = """
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
-MACHINE = \"qemux86\"
-""")
- self.track_for_cleanup(topdir + "/tmp-sstatesamehash")
- bitbake("world meta-toolchain -S none")
- self.write_config("""
+TCLIBCAPPEND = \"\"
+MACHINE = \"qemux86-64\"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
+"""
+ configB = """
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+TCLIBCAPPEND = \"\"
MACHINE = \"qemuarm\"
-""")
- self.track_for_cleanup(topdir + "/tmp-sstatesamehash2")
+BB_SIGNATURE_HANDLER = "OEBasicHash"
+"""
+ self.sstate_allarch_samesigs(configA, configB)
+
+ def test_sstate_nativesdk_samesigs_multilib(self):
+ """
+        Check that nativesdk stamps are the same between the two MACHINE values.
+ """
+
+ configA = """
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+TCLIBCAPPEND = \"\"
+MACHINE = \"qemux86-64\"
+require conf/multilib.conf
+MULTILIBS = \"multilib:lib32\"
+DEFAULTTUNE_virtclass-multilib-lib32 = \"x86\"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
+"""
+ configB = """
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+TCLIBCAPPEND = \"\"
+MACHINE = \"qemuarm\"
+require conf/multilib.conf
+MULTILIBS = \"\"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
+"""
+ self.sstate_allarch_samesigs(configA, configB)
+
+ def sstate_allarch_samesigs(self, configA, configB):
+
+ self.write_config(configA)
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
+ bitbake("world meta-toolchain -S none")
+ self.write_config(configB)
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
bitbake("world meta-toolchain -S none")
def get_files(d):
@@ -338,19 +390,14 @@ MACHINE = \"qemuarm\"
(_, task, _, shash) = name.rsplit(".", 3)
f[os.path.join(os.path.basename(root), task)] = shash
return f
- files1 = get_files(topdir + "/tmp-sstatesamehash/stamps/all" + targetvendor + "-" + targetos)
- files2 = get_files(topdir + "/tmp-sstatesamehash2/stamps/all" + targetvendor + "-" + targetos)
- self.maxDiff = None
- self.assertEqual(files1, files2)
- nativesdkdir = os.path.basename(glob.glob(topdir + "/tmp-sstatesamehash/stamps/*-nativesdk*-linux")[0])
+ nativesdkdir = os.path.basename(glob.glob(self.topdir + "/tmp-sstatesamehash/stamps/*-nativesdk*-linux")[0])
- files1 = get_files(topdir + "/tmp-sstatesamehash/stamps/" + nativesdkdir)
- files2 = get_files(topdir + "/tmp-sstatesamehash2/stamps/" + nativesdkdir)
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/" + nativesdkdir)
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/" + nativesdkdir)
self.maxDiff = None
self.assertEqual(files1, files2)
- @testcase(1369)
def test_sstate_sametune_samesigs(self):
"""
The sstate checksums of two identical machines (using the same tune) should be the
@@ -358,26 +405,27 @@ MACHINE = \"qemuarm\"
qemux86copy machine to test this. Also include multilibs in the test.
"""
- topdir = get_bb_var('TOPDIR')
- targetos = get_bb_var('TARGET_OS')
- targetvendor = get_bb_var('TARGET_VENDOR')
self.write_config("""
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+TCLIBCAPPEND = \"\"
MACHINE = \"qemux86\"
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
- self.track_for_cleanup(topdir + "/tmp-sstatesamehash")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
bitbake("world meta-toolchain -S none")
self.write_config("""
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+TCLIBCAPPEND = \"\"
MACHINE = \"qemux86copy\"
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
- self.track_for_cleanup(topdir + "/tmp-sstatesamehash2")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
bitbake("world meta-toolchain -S none")
def get_files(d):
@@ -391,8 +439,8 @@ DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
if "do_build" not in name and "do_populate_sdk" not in name:
f.append(os.path.join(root, name))
return f
- files1 = get_files(topdir + "/tmp-sstatesamehash/stamps")
- files2 = get_files(topdir + "/tmp-sstatesamehash2/stamps")
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps")
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps")
files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash") for x in files2]
self.maxDiff = None
self.assertCountEqual(files1, files2)
@@ -404,24 +452,25 @@ DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
classes inherits should be the same.
"""
- topdir = get_bb_var('TOPDIR')
- targetvendor = get_bb_var('TARGET_VENDOR')
self.write_config("""
TMPDIR = "${TOPDIR}/tmp-sstatesamehash"
-BB_NUMBER_THREADS = "1"
+TCLIBCAPPEND = ""
+BB_NUMBER_THREADS = "${@oe.utils.cpu_count()}"
PARALLEL_MAKE = "-j 1"
DL_DIR = "${TOPDIR}/download1"
TIME = "111111"
DATE = "20161111"
INHERIT_remove = "buildstats-summary buildhistory uninative"
http_proxy = ""
+BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
- self.track_for_cleanup(topdir + "/tmp-sstatesamehash")
- self.track_for_cleanup(topdir + "/download1")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
+ self.track_for_cleanup(self.topdir + "/download1")
bitbake("world meta-toolchain -S none")
self.write_config("""
TMPDIR = "${TOPDIR}/tmp-sstatesamehash2"
-BB_NUMBER_THREADS = "2"
+TCLIBCAPPEND = ""
+BB_NUMBER_THREADS = "${@oe.utils.cpu_count()+1}"
PARALLEL_MAKE = "-j 2"
DL_DIR = "${TOPDIR}/download2"
TIME = "222222"
@@ -430,9 +479,10 @@ DATE = "20161212"
INHERIT_remove = "uninative"
INHERIT += "buildstats-summary buildhistory"
http_proxy = "http://example.com/"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
- self.track_for_cleanup(topdir + "/tmp-sstatesamehash2")
- self.track_for_cleanup(topdir + "/download2")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
+ self.track_for_cleanup(self.topdir + "/download2")
bitbake("world meta-toolchain -S none")
def get_files(d):
@@ -444,8 +494,26 @@ http_proxy = "http://example.com/"
base = os.sep.join(root.rsplit(os.sep, 2)[-2:] + [name])
f[base] = shash
return f
- files1 = get_files(topdir + "/tmp-sstatesamehash/stamps/")
- files2 = get_files(topdir + "/tmp-sstatesamehash2/stamps/")
+
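+        # Report which stamps differ between the two builds; with compare=True,
+        # also diff the signature data via bb.siggen to show why they differ.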
+ def compare_sigfiles(files, files1, files2, compare=False):
+ for k in files:
+ if k in files1 and k in files2:
+ print("%s differs:" % k)
+ if compare:
+ sigdatafile1 = self.topdir + "/tmp-sstatesamehash/stamps/" + k + "." + files1[k]
+ sigdatafile2 = self.topdir + "/tmp-sstatesamehash2/stamps/" + k + "." + files2[k]
+ output = bb.siggen.compare_sigfiles(sigdatafile1, sigdatafile2)
+ if output:
+ print('\n'.join(output))
+ elif k in files1 and k not in files2:
+ print("%s in files1" % k)
+ elif k not in files1 and k in files2:
+ print("%s in files2" % k)
+ else:
+                    assert False, "shouldn't reach here"
+
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/")
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/")
# Remove items that are identical in both sets
for k,v in files1.items() & files2.items():
del files1[k]
@@ -454,16 +522,11 @@ http_proxy = "http://example.com/"
# No changes, so we're done
return
- for k in files1.keys() | files2.keys():
- if k in files1 and k in files2:
- print("%s differs:" % k)
- print(subprocess.check_output(("bitbake-diffsigs",
- topdir + "/tmp-sstatesamehash/stamps/" + k + "." + files1[k],
- topdir + "/tmp-sstatesamehash2/stamps/" + k + "." + files2[k])))
- elif k in files1 and k not in files2:
- print("%s in files1" % k)
- elif k not in files1 and k in files2:
- print("%s in files2" % k)
- else:
- assert "shouldn't reach here"
+ files = list(files1.keys() | files2.keys())
+        # This is an expensive computation, so only do the detailed comparison for the first 'max_sigfiles_to_compare' files
+ max_sigfiles_to_compare = 20
+ first, rest = files[:max_sigfiles_to_compare], files[max_sigfiles_to_compare:]
+ compare_sigfiles(first, files1, files2, compare=True)
+ compare_sigfiles(rest, files1, files2, compare=False)
+
self.fail("sstate hashes not identical.")
diff --git a/meta/lib/oeqa/selftest/cases/tinfoil.py b/meta/lib/oeqa/selftest/cases/tinfoil.py
new file mode 100644
index 0000000000..42a1b6b4f4
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/tinfoil.py
@@ -0,0 +1,224 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import re
+import time
+import logging
+import bb.tinfoil
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd
+
+class TinfoilTests(OESelftestTestCase):
+ """ Basic tests for the tinfoil API """
+
+ def test_getvar(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(True)
+ machine = tinfoil.config_data.getVar('MACHINE')
+ if not machine:
+ self.fail('Unable to get MACHINE value - returned %s' % machine)
+
+ def test_expand(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(True)
+ expr = '${@os.getpid()}'
+ pid = tinfoil.config_data.expand(expr)
+ if not pid:
+ self.fail('Unable to expand "%s" - returned %s' % (expr, pid))
+
+ def test_getvar_bb_origenv(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(True)
+ origenv = tinfoil.config_data.getVar('BB_ORIGENV', False)
+ if not origenv:
+ self.fail('Unable to get BB_ORIGENV value - returned %s' % origenv)
+ self.assertEqual(origenv.getVar('HOME', False), os.environ['HOME'])
+
+ def test_parse_recipe(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ testrecipe = 'mdadm'
+ best = tinfoil.find_best_provider(testrecipe)
+ if not best:
+ self.fail('Unable to find recipe providing %s' % testrecipe)
+ rd = tinfoil.parse_recipe_file(best[3])
+ self.assertEqual(testrecipe, rd.getVar('PN'))
+
+ def test_parse_recipe_copy_expand(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ testrecipe = 'mdadm'
+ best = tinfoil.find_best_provider(testrecipe)
+ if not best:
+ self.fail('Unable to find recipe providing %s' % testrecipe)
+ rd = tinfoil.parse_recipe_file(best[3])
+ # Check we can get variable values
+ self.assertEqual(testrecipe, rd.getVar('PN'))
+ # Check that expanding a value that includes a variable reference works
+ self.assertEqual(testrecipe, rd.getVar('BPN'))
+ # Now check that changing the referenced variable's value in a copy gives that
+ # value when expanding
+ localdata = bb.data.createCopy(rd)
+ localdata.setVar('PN', 'hello')
+ self.assertEqual('hello', localdata.getVar('BPN'))
+
+ def test_parse_recipe_initial_datastore(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ testrecipe = 'mdadm'
+ best = tinfoil.find_best_provider(testrecipe)
+ if not best:
+ self.fail('Unable to find recipe providing %s' % testrecipe)
+ dcopy = bb.data.createCopy(tinfoil.config_data)
+ dcopy.setVar('MYVARIABLE', 'somevalue')
+ rd = tinfoil.parse_recipe_file(best[3], config_data=dcopy)
+ # Check we can get variable values
+ self.assertEqual('somevalue', rd.getVar('MYVARIABLE'))
+
+ def test_list_recipes(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ # Check pkg_pn
+ checkpns = ['tar', 'automake', 'coreutils', 'm4-native', 'nativesdk-gcc']
+ pkg_pn = tinfoil.cooker.recipecaches[''].pkg_pn
+ for pn in checkpns:
+ self.assertIn(pn, pkg_pn)
+ # Check pkg_fn
+ checkfns = {'nativesdk-gcc': '^virtual:nativesdk:.*', 'coreutils': '.*/coreutils_.*.bb'}
+ for fn, pn in tinfoil.cooker.recipecaches[''].pkg_fn.items():
+ if pn in checkpns:
+ if pn in checkfns:
+ self.assertTrue(re.match(checkfns[pn], fn), 'Entry for %s: %s did not match %s' % (pn, fn, checkfns[pn]))
+ checkpns.remove(pn)
+ if checkpns:
+ self.fail('Unable to find pkg_fn entries for: %s' % ', '.join(checkpns))
+
+ def test_wait_event(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+
+ tinfoil.set_event_mask(['bb.event.FilesMatchingFound', 'bb.command.CommandCompleted'])
+
+ # Need to drain events otherwise events that were masked may still be in the queue
+ while tinfoil.wait_event():
+ pass
+
+ pattern = 'conf'
+ res = tinfoil.run_command('findFilesMatchingInDir', pattern, 'conf/machine')
+ self.assertTrue(res)
+
+ eventreceived = False
+ commandcomplete = False
+ start = time.time()
+            # Wait for the full 5s so that we would also catch spurious events such as heartbeats
+ while time.time() - start < 5:
+ event = tinfoil.wait_event(1)
+ if event:
+ if isinstance(event, bb.command.CommandCompleted):
+ commandcomplete = True
+ elif isinstance(event, bb.event.FilesMatchingFound):
+ self.assertEqual(pattern, event._pattern)
+ self.assertIn('qemuarm.conf', event._matches)
+ eventreceived = True
+ elif isinstance(event, logging.LogRecord):
+ continue
+ else:
+ self.fail('Unexpected event: %s' % event)
+
+ self.assertTrue(commandcomplete, 'Timed out waiting for CommandCompleted event from bitbake server')
+ self.assertTrue(eventreceived, 'Did not receive FilesMatchingFound event from bitbake server')
+
+ def test_setvariable_clean(self):
+ # First check that setVariable affects the datastore
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+ tinfoil.run_command('setVariable', 'TESTVAR', 'specialvalue')
+ self.assertEqual(tinfoil.config_data.getVar('TESTVAR'), 'specialvalue', 'Value set using setVariable is not reflected in client-side getVar()')
+
+ # Now check that the setVariable's effects are no longer present
+ # (this may legitimately break in future if we stop reinitialising
+ # the datastore, in which case we'll have to reconsider use of
+ # setVariable entirely)
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+ self.assertNotEqual(tinfoil.config_data.getVar('TESTVAR'), 'specialvalue', 'Value set using setVariable is still present!')
+
+ # Now check that setVar on the main datastore works (uses setVariable internally)
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+ tinfoil.config_data.setVar('TESTVAR', 'specialvalue')
+ value = tinfoil.run_command('getVariable', 'TESTVAR')
+ self.assertEqual(value, 'specialvalue', 'Value set using config_data.setVar() is not reflected in config_data.getVar()')
+
+ def test_datastore_operations(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+ # Test setVarFlag() / getVarFlag()
+ tinfoil.config_data.setVarFlag('TESTVAR', 'flagname', 'flagval')
+ value = tinfoil.config_data.getVarFlag('TESTVAR', 'flagname')
+ self.assertEqual(value, 'flagval', 'Value set using config_data.setVarFlag() is not reflected in config_data.getVarFlag()')
+ # Test delVarFlag()
+ tinfoil.config_data.setVarFlag('TESTVAR', 'otherflag', 'othervalue')
+ tinfoil.config_data.delVarFlag('TESTVAR', 'flagname')
+ value = tinfoil.config_data.getVarFlag('TESTVAR', 'flagname')
+ self.assertEqual(value, None, 'Varflag deleted using config_data.delVarFlag() is not reflected in config_data.getVarFlag()')
+ value = tinfoil.config_data.getVarFlag('TESTVAR', 'otherflag')
+ self.assertEqual(value, 'othervalue', 'Varflag deleted using config_data.delVarFlag() caused unrelated flag to be removed')
+ # Test delVar()
+ tinfoil.config_data.setVar('TESTVAR', 'varvalue')
+ value = tinfoil.config_data.getVar('TESTVAR')
+ self.assertEqual(value, 'varvalue', 'Value set using config_data.setVar() is not reflected in config_data.getVar()')
+ tinfoil.config_data.delVar('TESTVAR')
+ value = tinfoil.config_data.getVar('TESTVAR')
+ self.assertEqual(value, None, 'Variable deleted using config_data.delVar() appears to still have a value')
+ # Test renameVar()
+ tinfoil.config_data.setVar('TESTVAROLD', 'origvalue')
+ tinfoil.config_data.renameVar('TESTVAROLD', 'TESTVARNEW')
+ value = tinfoil.config_data.getVar('TESTVAROLD')
+ self.assertEqual(value, None, 'Variable renamed using config_data.renameVar() still seems to exist')
+ value = tinfoil.config_data.getVar('TESTVARNEW')
+ self.assertEqual(value, 'origvalue', 'Variable renamed using config_data.renameVar() does not appear with new name')
+ # Test overrides
+ tinfoil.config_data.setVar('TESTVAR', 'original')
+ tinfoil.config_data.setVar('TESTVAR_overrideone', 'one')
+ tinfoil.config_data.setVar('TESTVAR_overridetwo', 'two')
+ tinfoil.config_data.appendVar('OVERRIDES', ':overrideone')
+ value = tinfoil.config_data.getVar('TESTVAR')
+ self.assertEqual(value, 'one', 'Variable overrides not functioning correctly')
+
+ def test_variable_history(self):
+ # Basic test to ensure that variable history works when tracking=True
+ with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ # Note that _tracking for any datastore we get will be
+ # false here, that's currently expected - so we can't check
+ # for that
+ history = tinfoil.config_data.varhistory.variable('DL_DIR')
+ for entry in history:
+ if entry['file'].endswith('/bitbake.conf'):
+ if entry['op'] in ['set', 'set?']:
+ break
+ else:
+ self.fail('Did not find history entry setting DL_DIR in bitbake.conf. History: %s' % history)
+ # Check it works for recipes as well
+ testrecipe = 'zlib'
+ rd = tinfoil.parse_recipe(testrecipe)
+ history = rd.varhistory.variable('LICENSE')
+ bbfound = -1
+ recipefound = -1
+ for i, entry in enumerate(history):
+ if entry['file'].endswith('/bitbake.conf'):
+ if entry['detail'] == 'INVALID' and entry['op'] in ['set', 'set?']:
+ bbfound = i
+ elif entry['file'].endswith('.bb'):
+ if entry['op'] == 'set':
+ recipefound = i
+ if bbfound == -1:
+ self.fail('Did not find history entry setting LICENSE in bitbake.conf parsing %s recipe. History: %s' % (testrecipe, history))
+ if recipefound == -1:
+ self.fail('Did not find history entry setting LICENSE in %s recipe. History: %s' % (testrecipe, history))
+ if bbfound > recipefound:
+ self.fail('History entry setting LICENSE in %s recipe and in bitbake.conf in wrong order. History: %s' % (testrecipe, history))
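Every test in this new tinfoil suite follows the same session lifecycle; condensed into a minimal sketch (the variable queried is just an example):

    import bb.tinfoil

    # The context manager starts a bitbake server and shuts it down on exit;
    # prepare() parses the configuration (and, with config_only=False, the
    # recipes too), after which config_data behaves like a normal datastore.
    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=True)
        print('MACHINE =', tinfoil.config_data.getVar('MACHINE'))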
diff --git a/meta/lib/oeqa/selftest/cases/wic.py b/meta/lib/oeqa/selftest/cases/wic.py
new file mode 100644
index 0000000000..3c5be2f501
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/wic.py
@@ -0,0 +1,1051 @@
+#
+# Copyright (c) 2015, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AUTHORS
+# Ed Bartosh <ed.bartosh@linux.intel.com>
+
+"""Test cases for wic."""
+
+import os
+import sys
+import unittest
+
+from glob import glob
+from shutil import rmtree, copy
+from functools import wraps, lru_cache
+from tempfile import NamedTemporaryFile
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars, runqemu
+
+
+@lru_cache(maxsize=32)
+def get_host_arch(recipe):
+ """A cached call to get_bb_var('HOST_ARCH', <recipe>)"""
+ return get_bb_var('HOST_ARCH', recipe)
+
+
+def only_for_arch(archs, image='core-image-minimal'):
+ """Decorator for wrapping test cases that can be run only for specific target
+ architectures. A list of compatible architectures is passed in `archs`.
+    The current architecture is determined by parsing bitbake output for the
+    `image` recipe.
+ """
+ def wrapper(func):
+ @wraps(func)
+ def wrapped_f(*args, **kwargs):
+ arch = get_host_arch(image)
+ if archs and arch not in archs:
+ raise unittest.SkipTest("Testcase arch dependency not met: %s" % arch)
+ return func(*args, **kwargs)
+ wrapped_f.__name__ = func.__name__
+ return wrapped_f
+ return wrapper
+
+
+class WicTestCase(OESelftestTestCase):
+ """Wic test class."""
+
+ image_is_ready = False
+ wicenv_cache = {}
+
+ def setUpLocal(self):
+ """This code is executed before each test method."""
+ self.resultdir = self.builddir + "/wic-tmp/"
+ super(WicTestCase, self).setUpLocal()
+
+ # Do this here instead of in setUpClass as the base setUp does some
+ # clean up which can result in the native tools built earlier in
+ # setUpClass being unavailable.
+ if not WicTestCase.image_is_ready:
+ if get_bb_var('USE_NLS') == 'yes':
+ bitbake('wic-tools')
+ else:
+                self.skipTest('wic-tools cannot be built due to its (intltool|gettext)-native dependency when NLS is disabled')
+
+ bitbake('core-image-minimal')
+ WicTestCase.image_is_ready = True
+
+ rmtree(self.resultdir, ignore_errors=True)
+
+ def tearDownLocal(self):
+ """Remove resultdir as it may contain images."""
+ rmtree(self.resultdir, ignore_errors=True)
+ super(WicTestCase, self).tearDownLocal()
+
+ def _get_image_env_path(self, image):
+ """Generate and obtain the path to <image>.env"""
+ if image not in WicTestCase.wicenv_cache:
+ self.assertEqual(0, bitbake('%s -c do_rootfs_wicenv' % image).status)
+ bb_vars = get_bb_vars(['STAGING_DIR', 'MACHINE'], image)
+ stdir = bb_vars['STAGING_DIR']
+ machine = bb_vars['MACHINE']
+ WicTestCase.wicenv_cache[image] = os.path.join(stdir, machine, 'imgdata')
+ return WicTestCase.wicenv_cache[image]
+
+class Wic(WicTestCase):
+
+ def test_version(self):
+ """Test wic --version"""
+ runCmd('wic --version')
+
+ def test_help(self):
+ """Test wic --help and wic -h"""
+ runCmd('wic --help')
+ runCmd('wic -h')
+
+ def test_createhelp(self):
+ """Test wic create --help"""
+ runCmd('wic create --help')
+
+ def test_listhelp(self):
+ """Test wic list --help"""
+ runCmd('wic list --help')
+
+ def test_help_create(self):
+ """Test wic help create"""
+ runCmd('wic help create')
+
+ def test_help_list(self):
+ """Test wic help list"""
+ runCmd('wic help list')
+
+ def test_help_overview(self):
+ """Test wic help overview"""
+ runCmd('wic help overview')
+
+ def test_help_plugins(self):
+ """Test wic help plugins"""
+ runCmd('wic help plugins')
+
+ def test_help_kickstart(self):
+ """Test wic help kickstart"""
+ runCmd('wic help kickstart')
+
+ def test_list_images(self):
+ """Test wic list images"""
+ runCmd('wic list images')
+
+ def test_list_source_plugins(self):
+ """Test wic list source-plugins"""
+ runCmd('wic list source-plugins')
+
+ def test_listed_images_help(self):
+ """Test wic listed images help"""
+ output = runCmd('wic list images').output
+ imagelist = [line.split()[0] for line in output.splitlines()]
+ for image in imagelist:
+ runCmd('wic list %s help' % image)
+
+ def test_unsupported_subcommand(self):
+ """Test unsupported subcommand"""
+ self.assertNotEqual(0, runCmd('wic unsupported', ignore_status=True).status)
+
+ def test_no_command(self):
+ """Test wic without command"""
+ self.assertEqual(1, runCmd('wic', ignore_status=True).status)
+
+ def test_build_image_name(self):
+ """Test wic create wictestdisk --image-name=core-image-minimal"""
+ cmd = "wic create wictestdisk --image-name=core-image-minimal -o %s" % self.resultdir
+ runCmd(cmd)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_gpt_image(self):
+ """Test creation of core-image-minimal with gpt table and UUID boot"""
+ cmd = "wic create directdisk-gpt --image-name core-image-minimal -o %s" % self.resultdir
+ runCmd(cmd)
+ self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_iso_image(self):
+ """Test creation of hybrid iso image with legacy and EFI boot"""
+ config = 'INITRAMFS_IMAGE = "core-image-minimal-initramfs"\n'\
+ 'MACHINE_FEATURES_append = " efi"\n'\
+ 'DEPENDS_pn-core-image-minimal += "syslinux"\n'
+ self.append_config(config)
+ bitbake('core-image-minimal core-image-minimal-initramfs')
+ self.remove_config(config)
+ cmd = "wic create mkhybridiso --image-name core-image-minimal -o %s" % self.resultdir
+ runCmd(cmd)
+ self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.direct")))
+ self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.iso")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_qemux86_directdisk(self):
+ """Test creation of qemux-86-directdisk image"""
+ cmd = "wic create qemux86-directdisk -e core-image-minimal -o %s" % self.resultdir
+ runCmd(cmd)
+ self.assertEqual(1, len(glob(self.resultdir + "qemux86-directdisk-*direct")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_mkefidisk(self):
+ """Test creation of mkefidisk image"""
+ cmd = "wic create mkefidisk -e core-image-minimal -o %s" % self.resultdir
+ runCmd(cmd)
+ self.assertEqual(1, len(glob(self.resultdir + "mkefidisk-*direct")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_bootloader_config(self):
+ """Test creation of directdisk-bootloader-config image"""
+ config = 'DEPENDS_pn-core-image-minimal += "syslinux"\n'
+ self.append_config(config)
+ bitbake('core-image-minimal')
+ self.remove_config(config)
+ cmd = "wic create directdisk-bootloader-config -e core-image-minimal -o %s" % self.resultdir
+ runCmd(cmd)
+ self.assertEqual(1, len(glob(self.resultdir + "directdisk-bootloader-config-*direct")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_systemd_bootdisk(self):
+ """Test creation of systemd-bootdisk image"""
+ config = 'MACHINE_FEATURES_append = " efi"\n'
+ self.append_config(config)
+ bitbake('core-image-minimal')
+ self.remove_config(config)
+ cmd = "wic create systemd-bootdisk -e core-image-minimal -o %s" % self.resultdir
+ runCmd(cmd)
+ self.assertEqual(1, len(glob(self.resultdir + "systemd-bootdisk-*direct")))
+
+ def test_sdimage_bootpart(self):
+ """Test creation of sdimage-bootpart image"""
+ cmd = "wic create sdimage-bootpart -e core-image-minimal -o %s" % self.resultdir
+ kimgtype = get_bb_var('KERNEL_IMAGETYPE', 'core-image-minimal')
+ self.write_config('IMAGE_BOOT_FILES = "%s"\n' % kimgtype)
+ runCmd(cmd)
+ self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_default_output_dir(self):
+ """Test default output location"""
+ for fname in glob("directdisk-*.direct"):
+ os.remove(fname)
+ config = 'DEPENDS_pn-core-image-minimal += "syslinux"\n'
+ self.append_config(config)
+ bitbake('core-image-minimal')
+ self.remove_config(config)
+ cmd = "wic create directdisk -e core-image-minimal"
+ runCmd(cmd)
+ self.assertEqual(1, len(glob("directdisk-*.direct")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_build_artifacts(self):
+ """Test wic create directdisk providing all artifacts."""
+ bb_vars = get_bb_vars(['STAGING_DATADIR', 'RECIPE_SYSROOT_NATIVE'],
+ 'wic-tools')
+ bb_vars.update(get_bb_vars(['DEPLOY_DIR_IMAGE', 'IMAGE_ROOTFS'],
+ 'core-image-minimal'))
+ bbvars = {key.lower(): value for key, value in bb_vars.items()}
+ bbvars['resultdir'] = self.resultdir
+ runCmd("wic create directdisk "
+ "-b %(staging_datadir)s "
+ "-k %(deploy_dir_image)s "
+ "-n %(recipe_sysroot_native)s "
+ "-r %(image_rootfs)s "
+ "-o %(resultdir)s" % bbvars)
+ self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
+
+ def test_compress_gzip(self):
+ """Test compressing an image with gzip"""
+ runCmd("wic create wictestdisk "
+ "--image-name core-image-minimal "
+ "-c gzip -o %s" % self.resultdir)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.gz")))
+
+ def test_compress_bzip2(self):
+ """Test compressing an image with bzip2"""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-c bzip2 -o %s" % self.resultdir)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.bz2")))
+
+ def test_compress_xz(self):
+ """Test compressing an image with xz"""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "--compress-with=xz -o %s" % self.resultdir)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.xz")))
+
+ def test_wrong_compressor(self):
+ """Test how wic breaks if wrong compressor is provided"""
+ self.assertEqual(2, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-c wrong -o %s" % self.resultdir,
+ ignore_status=True).status)
+
+ def test_debug_short(self):
+ """Test -D option"""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ def test_debug_long(self):
+ """Test --debug option"""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "--debug -o %s" % self.resultdir)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ def test_skip_build_check_short(self):
+ """Test -s option"""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-s -o %s" % self.resultdir)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ def test_skip_build_check_long(self):
+ """Test --skip-build-check option"""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "--skip-build-check "
+ "--outdir %s" % self.resultdir)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ def test_build_rootfs_short(self):
+ """Test -f option"""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-f -o %s" % self.resultdir)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ def test_build_rootfs_long(self):
+ """Test --build-rootfs option"""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "--build-rootfs "
+ "--outdir %s" % self.resultdir)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_rootfs_indirect_recipes(self):
+ """Test usage of rootfs plugin with rootfs recipes"""
+ runCmd("wic create directdisk-multi-rootfs "
+ "--image-name=core-image-minimal "
+ "--rootfs rootfs1=core-image-minimal "
+ "--rootfs rootfs2=core-image-minimal "
+ "--outdir %s" % self.resultdir)
+ self.assertEqual(1, len(glob(self.resultdir + "directdisk-multi-rootfs*.direct")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_rootfs_artifacts(self):
+ """Test usage of rootfs plugin with rootfs paths"""
+ bb_vars = get_bb_vars(['STAGING_DATADIR', 'RECIPE_SYSROOT_NATIVE'],
+ 'wic-tools')
+ bb_vars.update(get_bb_vars(['DEPLOY_DIR_IMAGE', 'IMAGE_ROOTFS'],
+ 'core-image-minimal'))
+ bbvars = {key.lower(): value for key, value in bb_vars.items()}
+ bbvars['wks'] = "directdisk-multi-rootfs"
+ bbvars['resultdir'] = self.resultdir
+ runCmd("wic create %(wks)s "
+ "--bootimg-dir=%(staging_datadir)s "
+ "--kernel-dir=%(deploy_dir_image)s "
+ "--native-sysroot=%(recipe_sysroot_native)s "
+ "--rootfs-dir rootfs1=%(image_rootfs)s "
+ "--rootfs-dir rootfs2=%(image_rootfs)s "
+ "--outdir %(resultdir)s" % bbvars)
+ self.assertEqual(1, len(glob(self.resultdir + "%(wks)s-*.direct" % bbvars)))
+
+ def test_exclude_path(self):
+ """Test --exclude-path wks option."""
+
+ oldpath = os.environ['PATH']
+ os.environ['PATH'] = get_bb_var("PATH", "wic-tools")
+
+ try:
+ wks_file = 'temp.wks'
+ with open(wks_file, 'w') as wks:
+ rootfs_dir = get_bb_var('IMAGE_ROOTFS', 'core-image-minimal')
+ wks.write("""
+part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path usr
+part /usr --source rootfs --ondisk mmcblk0 --fstype=ext4 --rootfs-dir %s/usr
+part /etc --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path bin/ --rootfs-dir %s/usr"""
+ % (rootfs_dir, rootfs_dir))
+ runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir))
+
+ os.remove(wks_file)
+ wicout = glob(self.resultdir + "%s-*direct" % 'temp')
+ self.assertEqual(1, len(wicout))
+
+ wicimg = wicout[0]
+
+ # verify partition size with wic
+ res = runCmd("parted -m %s unit b p 2>/dev/null" % wicimg)
+
+ # parse parted output which looks like this:
+ # BYT;\n
+ # /var/tmp/wic/build/tmpfwvjjkf_-201611101222-hda.direct:200MiB:file:512:512:msdos::;\n
+ # 1:0.00MiB:200MiB:200MiB:ext4::;\n
+ partlns = res.output.splitlines()[2:]
+
+ self.assertEqual(3, len(partlns))
+
+ for part in [1, 2, 3]:
+ part_file = os.path.join(self.resultdir, "selftest_img.part%d" % part)
+ partln = partlns[part-1].split(":")
+ self.assertEqual(7, len(partln))
+ start = int(partln[1].rstrip("B")) / 512
+ length = int(partln[3].rstrip("B")) / 512
+ runCmd("dd if=%s of=%s skip=%d count=%d" %
+ (wicimg, part_file, start, length))
+
+ def extract_files(debugfs_output):
+ """
+ extract file names from the output of debugfs -R 'ls -p',
+ which looks like this:
+
+ /2/040755/0/0/.//\n
+ /2/040755/0/0/..//\n
+ /11/040700/0/0/lost+found^M//\n
+ /12/040755/1002/1002/run//\n
+ /13/040755/1002/1002/sys//\n
+ /14/040755/1002/1002/bin//\n
+ /80/040755/1002/1002/var//\n
+ /92/040755/1002/1002/tmp//\n
+ """
+ # NOTE the occasional ^M in file names
+ return [line.split('/')[5].strip() for line in \
+ debugfs_output.strip().split('/\n')]
+
+ # Test partition 1, should contain the normal root directories, except
+ # /usr.
+ res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \
+ os.path.join(self.resultdir, "selftest_img.part1"))
+ files = extract_files(res.output)
+ self.assertIn("etc", files)
+ self.assertNotIn("usr", files)
+
+ # Partition 2, should contain common directories for /usr, not root
+ # directories.
+ res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \
+ os.path.join(self.resultdir, "selftest_img.part2"))
+ files = extract_files(res.output)
+ self.assertNotIn("etc", files)
+ self.assertNotIn("usr", files)
+ self.assertIn("share", files)
+
+ # Partition 3, should contain the same as partition 2, including the bin
+ # directory, but not the files inside it.
+ res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \
+ os.path.join(self.resultdir, "selftest_img.part3"))
+ files = extract_files(res.output)
+ self.assertNotIn("etc", files)
+ self.assertNotIn("usr", files)
+ self.assertIn("share", files)
+ self.assertIn("bin", files)
+ res = runCmd("debugfs -R 'ls -p bin' %s 2>/dev/null" % \
+ os.path.join(self.resultdir, "selftest_img.part3"))
+ files = extract_files(res.output)
+ self.assertIn(".", files)
+ self.assertIn("..", files)
+ self.assertEqual(2, len(files))
+
+ for part in [1, 2, 3]:
+ part_file = os.path.join(self.resultdir, "selftest_img.part%d" % part)
+ os.remove(part_file)
+
+ finally:
+ os.environ['PATH'] = oldpath
+
+ def test_exclude_path_errors(self):
+ """Test --exclude-path wks option error handling."""
+ wks_file = 'temp.wks'
+
+ # Absolute argument.
+ with open(wks_file, 'w') as wks:
+ wks.write("part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path /usr")
+ self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir), ignore_status=True).status)
+ os.remove(wks_file)
+
+ # Argument pointing to parent directory.
+ with open(wks_file, 'w') as wks:
+ wks.write("part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path ././..")
+ self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir), ignore_status=True).status)
+ os.remove(wks_file)
+
+class Wic2(WicTestCase):
+
+ def test_bmap_short(self):
+ """Test generation of .bmap file -m option"""
+ cmd = "wic create wictestdisk -e core-image-minimal -m -o %s" % self.resultdir
+ runCmd(cmd)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct.bmap")))
+
+ def test_bmap_long(self):
+ """Test generation of .bmap file --bmap option"""
+ cmd = "wic create wictestdisk -e core-image-minimal --bmap -o %s" % self.resultdir
+ runCmd(cmd)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct.bmap")))
+
+ def test_image_env(self):
+ """Test generation of <image>.env files."""
+ image = 'core-image-minimal'
+ imgdatadir = self._get_image_env_path(image)
+
+ bb_vars = get_bb_vars(['IMAGE_BASENAME', 'WICVARS'], image)
+ basename = bb_vars['IMAGE_BASENAME']
+ self.assertEqual(basename, image)
+ path = os.path.join(imgdatadir, basename) + '.env'
+ self.assertTrue(os.path.isfile(path))
+
+ wicvars = set(bb_vars['WICVARS'].split())
+ # filter out optional variables
+ wicvars = wicvars.difference(('DEPLOY_DIR_IMAGE', 'IMAGE_BOOT_FILES',
+                'INITRD', 'INITRD_LIVE', 'ISODIR', 'INITRAMFS_IMAGE',
+ 'INITRAMFS_IMAGE_BUNDLE', 'INITRAMFS_LINK_NAME'))
+ with open(path) as envfile:
+ content = dict(line.split("=", 1) for line in envfile)
+ # test if variables used by wic present in the .env file
+ for var in wicvars:
+ self.assertTrue(var in content, "%s is not in .env file" % var)
+ self.assertTrue(content[var])
+
+ def test_image_vars_dir_short(self):
+ """Test image vars directory selection -v option"""
+ image = 'core-image-minimal'
+ imgenvdir = self._get_image_env_path(image)
+ native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
+
+ runCmd("wic create wictestdisk "
+ "--image-name=%s -v %s -n %s -o %s"
+ % (image, imgenvdir, native_sysroot,
+ self.resultdir))
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
+
+ def test_image_vars_dir_long(self):
+ """Test image vars directory selection --vars option"""
+ image = 'core-image-minimal'
+ imgenvdir = self._get_image_env_path(image)
+ native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
+
+ runCmd("wic create wictestdisk "
+ "--image-name=%s "
+ "--vars %s "
+ "--native-sysroot %s "
+ "--outdir %s"
+ % (image, imgenvdir, native_sysroot,
+ self.resultdir))
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_wic_image_type(self):
+ """Test building wic images by bitbake"""
+ config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "wic-image-minimal"\n'\
+ 'MACHINE_FEATURES_append = " efi"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('wic-image-minimal').status)
+ self.remove_config(config)
+
+ bb_vars = get_bb_vars(['DEPLOY_DIR_IMAGE', 'MACHINE'])
+ deploy_dir = bb_vars['DEPLOY_DIR_IMAGE']
+ machine = bb_vars['MACHINE']
+ prefix = os.path.join(deploy_dir, 'wic-image-minimal-%s.' % machine)
+        # check that the resulting image and manifest symlinks exist and
+        # point to existing files
+ for suffix in ('wic', 'manifest'):
+ path = prefix + suffix
+ self.assertTrue(os.path.islink(path))
+ self.assertTrue(os.path.isfile(os.path.realpath(path)))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_qemu(self):
+ """Test wic-image-minimal under qemu"""
+ config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "wic-image-minimal"\n'\
+ 'MACHINE_FEATURES_append = " efi"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('wic-image-minimal').status)
+ self.remove_config(config)
+
+ with runqemu('wic-image-minimal', ssh=False) as qemu:
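+            # Count how many of the four expected device/mountpoint pairs show
+            # up; the wic-image-minimal layout should provide all of them.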
+ cmd = "mount | grep '^/dev/' | cut -f1,3 -d ' ' | egrep -c -e '/dev/sda1 /boot' " \
+ "-e '/dev/root /|/dev/sda2 /' -e '/dev/sda3 /media' -e '/dev/sda4 /mnt'"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '4')
+ cmd = "grep UUID= /etc/fstab"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, 'UUID=2c71ef06-a81d-4735-9d3a-379b69c6bdba\t/media\text4\tdefaults\t0\t0')
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_qemu_efi(self):
+ """Test core-image-minimal efi image under qemu"""
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "mkefidisk.wks"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal ovmf').status)
+ self.remove_config(config)
+
+ with runqemu('core-image-minimal', ssh=False,
+ runqemuparams='ovmf', image_fstype='wic') as qemu:
+ cmd = "grep sda. /proc/partitions |wc -l"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '3')
+
+ @staticmethod
+ def _make_fixed_size_wks(size):
+ """
+        Create a wks file for an image with a single partition. The size of the
+        partition is set using the --fixed-size flag. Returns a tuple: (path to wks file, wks image name)
+ """
+ with NamedTemporaryFile("w", suffix=".wks", delete=False) as tempf:
+ wkspath = tempf.name
+ tempf.write("part " \
+ "--source rootfs --ondisk hda --align 4 --fixed-size %d "
+ "--fstype=ext4\n" % size)
+ wksname = os.path.splitext(os.path.basename(wkspath))[0]
+
+ return wkspath, wksname
+
+ def test_fixed_size(self):
+ """
+ Test creation of a simple image with partition size controlled through
+        the --fixed-size flag
+ """
+ wkspath, wksname = Wic2._make_fixed_size_wks(200)
+
+ runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wkspath, self.resultdir))
+ os.remove(wkspath)
+ wicout = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(1, len(wicout))
+
+ wicimg = wicout[0]
+
+ native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
+
+ # verify partition size with wic
+ res = runCmd("parted -m %s unit mib p 2>/dev/null" % wicimg,
+ native_sysroot=native_sysroot)
+
+ # parse parted output which looks like this:
+ # BYT;\n
+ # /var/tmp/wic/build/tmpfwvjjkf_-201611101222-hda.direct:200MiB:file:512:512:msdos::;\n
+ # 1:0.00MiB:200MiB:200MiB:ext4::;\n
+ partlns = res.output.splitlines()[2:]
+
+ self.assertEqual(1, len(partlns),
+ msg="Partition list '%s'" % res.output)
+ self.assertEqual("1:0.00MiB:200MiB:200MiB:ext4::;", partlns[0],
+ msg="Partition list '%s'" % res.output)
+
+ def test_fixed_size_error(self):
+ """
+ Test creation of a simple image with partition size controlled through
+        the --fixed-size flag. The size of the partition is intentionally set to 1MiB
+ in order to trigger an error in wic.
+ """
+ wkspath, wksname = Wic2._make_fixed_size_wks(1)
+
+ self.assertEqual(1, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wkspath, self.resultdir), ignore_status=True).status)
+ os.remove(wkspath)
+ wicout = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(0, len(wicout))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_rawcopy_plugin_qemu(self):
+ """Test rawcopy plugin in qemu"""
+ # build ext4 and wic images
+ for fstype in ("ext4", "wic"):
+ config = 'IMAGE_FSTYPES = "%s"\nWKS_FILE = "test_rawcopy_plugin.wks.in"\n' % fstype
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal').status)
+ self.remove_config(config)
+
+ with runqemu('core-image-minimal', ssh=False, image_fstype='wic') as qemu:
+ cmd = "grep sda. /proc/partitions |wc -l"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '2')
+
+ def test_rawcopy_plugin(self):
+ """Test rawcopy plugin"""
+ img = 'core-image-minimal'
+ machine = get_bb_var('MACHINE', img)
+ with NamedTemporaryFile("w", suffix=".wks") as wks:
+ wks.writelines(['part /boot --active --source bootimg-pcbios\n',
+ 'part / --source rawcopy --sourceparams="file=%s-%s.ext4" --use-uuid\n'\
+ % (img, machine),
+ 'bootloader --timeout=0 --append="console=ttyS0,115200n8"\n'])
+ wks.flush()
+ cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
+ runCmd(cmd)
+ wksname = os.path.splitext(os.path.basename(wks.name))[0]
+ out = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(1, len(out))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_biosplusefi_plugin_qemu(self):
+ """Test biosplusefi plugin in qemu"""
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_biosplusefi_plugin.wks"\nMACHINE_FEATURES_append = " efi"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal').status)
+ self.remove_config(config)
+
+ with runqemu('core-image-minimal', ssh=False, image_fstype='wic') as qemu:
+ # Check that we have ONLY two /dev/sda* partitions (/boot and /)
+ cmd = "grep sda. /proc/partitions | wc -l"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '2')
+ # Check that /dev/sda1 is /boot and that either /dev/root OR /dev/sda2 is /
+ cmd = "mount | grep '^/dev/' | cut -f1,3 -d ' ' | egrep -c -e '/dev/sda1 /boot' -e '/dev/root /|/dev/sda2 /'"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '2')
+ # Check that /boot has EFI bootx64.efi (required for EFI)
+ cmd = "ls /boot/EFI/BOOT/bootx64.efi | wc -l"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '1')
+            # Check that the "BOOTABLE" flag is set on the boot partition (required for PC-BIOS)
+            # The shell concatenates awk {print'$2'} into the awk program {print$2}
+            # Trailing "cat" seems to be required; otherwise run_serial() sends back an echo of the input command
+ cmd = "fdisk -l /dev/sda | grep /dev/sda1 | awk {print'$2'} | cat"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '*')
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_biosplusefi_plugin(self):
+ """Test biosplusefi plugin"""
+        # Wic generation below may fail depending on the order of the unittests.
+        # This is because bootimg-pcbios (which bootimg-biosplusefi uses) generates its MBR inside the STAGING_DATADIR directory,
+        # which may or may not exist depending on what was built already.
+        # If an image hasn't been built yet, the directory ${STAGING_DATADIR}/syslinux won't exist and _get_bootimg_dir()
+        # will raise with "Couldn't find correct bootimg_dir".
+        # The easiest way to work around this issue is to make sure an image has already been built, hence the bitbake call below.
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_biosplusefi_plugin.wks"\nMACHINE_FEATURES_append = " efi"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal').status)
+ self.remove_config(config)
+
+ img = 'core-image-minimal'
+ with NamedTemporaryFile("w", suffix=".wks") as wks:
+ wks.writelines(['part /boot --active --source bootimg-biosplusefi --sourceparams="loader=grub-efi"\n',
+ 'part / --source rootfs --fstype=ext4 --align 1024 --use-uuid\n'\
+ 'bootloader --timeout=0 --append="console=ttyS0,115200n8"\n'])
+ wks.flush()
+ cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
+ runCmd(cmd)
+ wksname = os.path.splitext(os.path.basename(wks.name))[0]
+ out = glob(self.resultdir + "%s-*.direct" % wksname)
+ self.assertEqual(1, len(out))
+
+ def test_fs_types(self):
+        """Test filesystem types for empty and non-empty partitions"""
+ img = 'core-image-minimal'
+ with NamedTemporaryFile("w", suffix=".wks") as wks:
+ wks.writelines(['part ext2 --fstype ext2 --source rootfs\n',
+ 'part btrfs --fstype btrfs --source rootfs --size 40M\n',
+ 'part squash --fstype squashfs --source rootfs\n',
+ 'part swap --fstype swap --size 1M\n',
+ 'part emptyvfat --fstype vfat --size 1M\n',
+ 'part emptymsdos --fstype msdos --size 1M\n',
+ 'part emptyext2 --fstype ext2 --size 1M\n',
+ 'part emptybtrfs --fstype btrfs --size 150M\n'])
+ wks.flush()
+ cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
+ runCmd(cmd)
+ wksname = os.path.splitext(os.path.basename(wks.name))[0]
+ out = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(1, len(out))
+
+ def test_kickstart_parser(self):
+ """Test wks parser options"""
+ with NamedTemporaryFile("w", suffix=".wks") as wks:
+ wks.writelines(['part / --fstype ext3 --source rootfs --system-id 0xFF '\
+ '--overhead-factor 1.2 --size 100k\n'])
+ wks.flush()
+ cmd = "wic create %s -e core-image-minimal -o %s" % (wks.name, self.resultdir)
+ runCmd(cmd)
+ wksname = os.path.splitext(os.path.basename(wks.name))[0]
+ out = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(1, len(out))
+
+ def test_image_bootpart_globbed(self):
+ """Test globbed sources with image-bootpart plugin"""
+ img = "core-image-minimal"
+ cmd = "wic create sdimage-bootpart -e %s -o %s" % (img, self.resultdir)
+ config = 'IMAGE_BOOT_FILES = "%s*"' % get_bb_var('KERNEL_IMAGETYPE', img)
+ self.append_config(config)
+ runCmd(cmd)
+ self.remove_config(config)
+ self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct")))
+
+ def test_sparse_copy(self):
+ """Test sparse_copy with FIEMAP and SEEK_HOLE filemap APIs"""
+ libpath = os.path.join(get_bb_var('COREBASE'), 'scripts', 'lib', 'wic')
+ sys.path.insert(0, libpath)
+ from filemap import FilemapFiemap, FilemapSeek, sparse_copy, ErrorNotSupp
+ with NamedTemporaryFile("w", suffix=".wic-sparse") as sparse:
+ src_name = sparse.name
+ src_size = 1024 * 10
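+            # truncate() below grows the file to 10K of logical size without
+            # allocating any blocks, so the file starts out as one big hole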
+ sparse.truncate(src_size)
+            # write a single byte in the middle of the file so one block gets mapped
+ with open(src_name, 'r+b') as sfile:
+ sfile.seek(1024 * 4)
+ sfile.write(b'\x00')
+ dest = sparse.name + '.out'
+ # copy src file to dest using different filemap APIs
+ for api in (FilemapFiemap, FilemapSeek, None):
+ if os.path.exists(dest):
+ os.unlink(dest)
+ try:
+ sparse_copy(sparse.name, dest, api=api)
+ except ErrorNotSupp:
+ continue # skip unsupported API
+ dest_stat = os.stat(dest)
+ self.assertEqual(dest_stat.st_size, src_size)
+                # st_blocks counts 512-byte units, so 8 of them make up the single 4K block written
+ self.assertEqual(dest_stat.st_blocks, 8)
+ os.unlink(dest)
+
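+    # Illustrative sketch (not part of the original test; assumes a Linux
+    # filesystem with hole support, and sparse_path names any sparse file):
+    # the lseek-based hole detection used by FilemapSeek can be exercised
+    # directly:
+    #
+    #   fd = os.open(sparse_path, os.O_RDONLY)
+    #   data_off = os.lseek(fd, 0, os.SEEK_DATA)         # first mapped byte
+    #   hole_off = os.lseek(fd, data_off, os.SEEK_HOLE)  # end of that extent
+    #   os.close(fd)
+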
+ def test_wic_ls(self):
+ """Test listing image content using 'wic ls'"""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir)
+ images = glob(self.resultdir + "wictestdisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list partitions
+ result = runCmd("wic ls %s -n %s" % (images[0], sysroot))
+ self.assertEqual(3, len(result.output.split('\n')))
+
+ # list directory content of the first partition
+ result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
+ self.assertEqual(6, len(result.output.split('\n')))
+
+ def test_wic_cp(self):
+        """Test copying files and directories to the wic image."""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir)
+ images = glob(self.resultdir + "wictestdisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list directory content of the first partition
+ result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
+ self.assertEqual(6, len(result.output.split('\n')))
+
+ with NamedTemporaryFile("w", suffix=".wic-cp") as testfile:
+            testfile.write("test")
+            # flush so the content reaches disk before wic cp reads the file
+            testfile.flush()
+
+ # copy file to the partition
+ runCmd("wic cp %s %s:1/ -n %s" % (testfile.name, images[0], sysroot))
+
+ # check if file is there
+ result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
+ self.assertEqual(7, len(result.output.split('\n')))
+ self.assertTrue(os.path.basename(testfile.name) in result.output)
+
+ # prepare directory
+ testdir = os.path.join(self.resultdir, 'wic-test-cp-dir')
+ testsubdir = os.path.join(testdir, 'subdir')
+ os.makedirs(os.path.join(testsubdir))
+ copy(testfile.name, testdir)
+
+ # copy directory to the partition
+ runCmd("wic cp %s %s:1/ -n %s" % (testdir, images[0], sysroot))
+
+ # check if directory is there
+ result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
+ self.assertEqual(8, len(result.output.split('\n')))
+ self.assertTrue(os.path.basename(testdir) in result.output)
+
+            # copy the file from the partition and check that the copy succeeded
+ dest = '%s-cp' % testfile.name
+ runCmd("wic cp %s:1/%s %s -n %s" % (images[0],
+ os.path.basename(testfile.name), dest, sysroot))
+ self.assertTrue(os.path.exists(dest))
+
+
+ def test_wic_rm(self):
+        """Test removing files and directories from the wic image."""
+ runCmd("wic create mkefidisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir)
+ images = glob(self.resultdir + "mkefidisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list directory content of the first partition
+ result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot))
+ self.assertIn('\nBZIMAGE ', result.output)
+ self.assertIn('\nEFI <DIR> ', result.output)
+
+ # remove file
+ runCmd("wic rm %s:1/bzimage -n %s" % (images[0], sysroot))
+
+ # remove directory
+ runCmd("wic rm %s:1/efi -n %s" % (images[0], sysroot))
+
+ # check if they're removed
+ result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot))
+ self.assertNotIn('\nBZIMAGE ', result.output)
+ self.assertNotIn('\nEFI <DIR> ', result.output)
+
+ def test_mkfs_extraopts(self):
+        """Test the wks option --mkfs-extraopts for empty and non-empty partitions"""
+ img = 'core-image-minimal'
+ with NamedTemporaryFile("w", suffix=".wks") as wks:
+ wks.writelines(
+ ['part ext2 --fstype ext2 --source rootfs --mkfs-extraopts "-D -F -i 8192"\n',
+ "part btrfs --fstype btrfs --source rootfs --size 40M --mkfs-extraopts='--quiet'\n",
+ 'part squash --fstype squashfs --source rootfs --mkfs-extraopts "-no-sparse -b 4096"\n',
+ 'part emptyvfat --fstype vfat --size 1M --mkfs-extraopts "-S 1024 -s 64"\n',
+ 'part emptymsdos --fstype msdos --size 1M --mkfs-extraopts "-S 1024 -s 64"\n',
+ 'part emptyext2 --fstype ext2 --size 1M --mkfs-extraopts "-D -F -i 8192"\n',
+ 'part emptybtrfs --fstype btrfs --size 100M --mkfs-extraopts "--mixed -K"\n'])
+ wks.flush()
+ cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
+ runCmd(cmd)
+ wksname = os.path.splitext(os.path.basename(wks.name))[0]
+ out = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(1, len(out))
+
+ def test_expand_mbr_image(self):
+ """Test wic write --expand command for mbr image"""
+ # build an image
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "directdisk.wks"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal').status)
+
+ # get path to the image
+ bb_vars = get_bb_vars(['DEPLOY_DIR_IMAGE', 'MACHINE'])
+ deploy_dir = bb_vars['DEPLOY_DIR_IMAGE']
+ machine = bb_vars['MACHINE']
+ image_path = os.path.join(deploy_dir, 'core-image-minimal-%s.wic' % machine)
+
+ self.remove_config(config)
+
+ try:
+ # expand image to 1G
+ new_image_path = None
+ with NamedTemporaryFile(mode='wb', suffix='.wic.exp',
+ dir=deploy_dir, delete=False) as sparse:
+ sparse.truncate(1024 ** 3)
+ new_image_path = sparse.name
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+ cmd = "wic write -n %s --expand 1:0 %s %s" % (sysroot, image_path, new_image_path)
+ runCmd(cmd)
+
+ # check if partitions are expanded
+ orig = runCmd("wic ls %s -n %s" % (image_path, sysroot))
+ exp = runCmd("wic ls %s -n %s" % (new_image_path, sysroot))
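+            # the fourth whitespace-separated field of each 'wic ls' line is the
+            # partition size; collect it for the original and the expanded image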
+ orig_sizes = [int(line.split()[3]) for line in orig.output.split('\n')[1:]]
+ exp_sizes = [int(line.split()[3]) for line in exp.output.split('\n')[1:]]
+ self.assertEqual(orig_sizes[0], exp_sizes[0]) # first partition is not resized
+ self.assertTrue(orig_sizes[1] < exp_sizes[1])
+
+ # Check if all free space is partitioned
+ result = runCmd("%s/usr/sbin/sfdisk -F %s" % (sysroot, new_image_path))
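+            # sfdisk -F lists the unpartitioned (free) areas; a fully partitioned
+            # image reports "0 B, 0 bytes, 0 sectors" of free space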
+ self.assertTrue("0 B, 0 bytes, 0 sectors" in result.output)
+
+ os.rename(image_path, image_path + '.bak')
+ os.rename(new_image_path, image_path)
+
+ # Check if it boots in qemu
+ with runqemu('core-image-minimal', ssh=False) as qemu:
+ cmd = "ls /etc/"
+                status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ finally:
+ if os.path.exists(new_image_path):
+ os.unlink(new_image_path)
+ if os.path.exists(image_path + '.bak'):
+ os.rename(image_path + '.bak', image_path)
+
+ def test_wic_ls_ext(self):
+ """Test listing content of the ext partition using 'wic ls'"""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir)
+ images = glob(self.resultdir + "wictestdisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list directory content of the second ext4 partition
+ result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot))
+ self.assertTrue(set(['bin', 'home', 'proc', 'usr', 'var', 'dev', 'lib', 'sbin']).issubset(
+ set(line.split()[-1] for line in result.output.split('\n') if line)))
+
+ def test_wic_cp_ext(self):
+        """Test copying files and directories to the ext partition."""
+ runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir)
+ images = glob(self.resultdir + "wictestdisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list directory content of the ext4 partition
+ result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot))
+ dirs = set(line.split()[-1] for line in result.output.split('\n') if line)
+ self.assertTrue(set(['bin', 'home', 'proc', 'usr', 'var', 'dev', 'lib', 'sbin']).issubset(dirs))
+
+ with NamedTemporaryFile("w", suffix=".wic-cp") as testfile:
+            testfile.write("test")
+            # flush so the content reaches disk before wic cp reads the file
+            testfile.flush()
+
+ # copy file to the partition
+ runCmd("wic cp %s %s:2/ -n %s" % (testfile.name, images[0], sysroot))
+
+ # check if file is there
+ result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot))
+ newdirs = set(line.split()[-1] for line in result.output.split('\n') if line)
+ self.assertEqual(newdirs.difference(dirs), set([os.path.basename(testfile.name)]))
+
+ # check if the file to copy is in the partition
+ result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot))
+ self.assertTrue('fstab' in [line.split()[-1] for line in result.output.split('\n') if line])
+
+ # copy file from the partition, replace the temporary file content with it and
+ # check for the file size to validate the copy
+ runCmd("wic cp %s:2/etc/fstab %s -n %s" % (images[0], testfile.name, sysroot))
+ self.assertTrue(os.stat(testfile.name).st_size > 0)
+
+
+ def test_wic_rm_ext(self):
+ """Test removing files from the ext partition."""
+ runCmd("wic create mkefidisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir)
+ images = glob(self.resultdir + "mkefidisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list directory content of the /etc directory on ext4 partition
+ result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot))
+ self.assertTrue('fstab' in [line.split()[-1] for line in result.output.split('\n') if line])
+
+ # remove file
+ runCmd("wic rm %s:2/etc/fstab -n %s" % (images[0], sysroot))
+
+ # check if it's removed
+ result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot))
+ self.assertTrue('fstab' not in [line.split()[-1] for line in result.output.split('\n') if line])
+
+ # remove non-empty directory
+ runCmd("wic rm -r %s:2/etc/ -n %s" % (images[0], sysroot))
+
+ # check if it's removed
+ result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot))
+ self.assertTrue('etc' not in [line.split()[-1] for line in result.output.split('\n') if line])
diff --git a/meta/lib/oeqa/selftest/context.py b/meta/lib/oeqa/selftest/context.py
new file mode 100644
index 0000000000..c4eb5d614e
--- /dev/null
+++ b/meta/lib/oeqa/selftest/context.py
@@ -0,0 +1,341 @@
+#
+# Copyright (C) 2017 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import time
+import glob
+import sys
+import importlib
+import signal
+from shutil import copyfile
+from random import choice
+
+import oeqa
+import oe
+import oe.lsb
+import bb.utils
+
+from oeqa.core.context import OETestContext, OETestContextExecutor
+from oeqa.core.exception import OEQAPreRun, OEQATestNotFound
+
+from oeqa.utils.commands import runCmd, get_bb_vars, get_test_layer
+
+class OESelftestTestContext(OETestContext):
+ def __init__(self, td=None, logger=None, machines=None, config_paths=None):
+ super(OESelftestTestContext, self).__init__(td, logger)
+
+ self.machines = machines
+ self.custommachine = None
+ self.config_paths = config_paths
+
+ def runTests(self, processes=None, machine=None, skips=[]):
+ if machine:
+ self.custommachine = machine
+ if machine == 'random':
+ self.custommachine = choice(self.machines)
+ self.logger.info('Run tests with custom MACHINE set to: %s' % \
+ self.custommachine)
+ return super(OESelftestTestContext, self).runTests(processes, skips)
+
+ def listTests(self, display_type, machine=None):
+ return super(OESelftestTestContext, self).listTests(display_type)
+
+class OESelftestTestContextExecutor(OETestContextExecutor):
+ _context_class = OESelftestTestContext
+ _script_executor = 'oe-selftest'
+
+ name = 'oe-selftest'
+ help = 'oe-selftest test component'
+ description = 'Executes selftest tests'
+
+ def register_commands(self, logger, parser):
+ group = parser.add_mutually_exclusive_group(required=True)
+
+ group.add_argument('-a', '--run-all-tests', default=False,
+ action="store_true", dest="run_all_tests",
+ help='Run all (unhidden) tests')
+ group.add_argument('-R', '--skip-tests', required=False, action='store',
+ nargs='+', dest="skips", default=None,
+ help='Run all (unhidden) tests except the ones specified. Format should be <module>[.<class>[.<test_method>]]')
+ group.add_argument('-r', '--run-tests', required=False, action='store',
+ nargs='+', dest="run_tests", default=None,
+ help='Select what tests to run (modules, classes or test methods). Format should be: <module>.<class>.<test_method>')
+
+ group.add_argument('-m', '--list-modules', required=False,
+ action="store_true", default=False,
+ help='List all available test modules.')
+ group.add_argument('--list-classes', required=False,
+ action="store_true", default=False,
+ help='List all available test classes.')
+ group.add_argument('-l', '--list-tests', required=False,
+ action="store_true", default=False,
+ help='List all available tests.')
+
+ parser.add_argument('-j', '--num-processes', dest='processes', action='store',
+                        type=int, help="number of processes to use when running tests in parallel")
+
+ parser.add_argument('--machine', required=False, choices=['random', 'all'],
+ help='Run tests on different machines (random/all).')
+
+ parser.add_argument('-t', '--select-tag', dest="select_tags",
+ action='append', default=None,
+ help='Filter all (unhidden) tests to any that match any of the specified tag(s).')
+ parser.add_argument('-T', '--exclude-tag', dest="exclude_tags",
+ action='append', default=None,
+ help='Exclude all (unhidden) tests that match any of the specified tag(s). (exclude applies before select)')
+
+ parser.set_defaults(func=self.run)
+
+ def _get_available_machines(self):
+ machines = []
+
+ bbpath = self.tc_kwargs['init']['td']['BBPATH'].split(':')
+
+ for path in bbpath:
+ found_machines = glob.glob(os.path.join(path, 'conf', 'machine', '*.conf'))
+ if found_machines:
+ for i in found_machines:
+ # eg: '/home/<user>/poky/meta-intel/conf/machine/intel-core2-32.conf'
+ machines.append(os.path.splitext(os.path.basename(i))[0])
+
+ return machines
+
+ def _get_cases_paths(self, bbpath):
+ cases_paths = []
+ for layer in bbpath:
+ cases_dir = os.path.join(layer, 'lib', 'oeqa', 'selftest', 'cases')
+ if os.path.isdir(cases_dir):
+ cases_paths.append(cases_dir)
+ return cases_paths
+
+ def _process_args(self, logger, args):
+ args.test_start_time = time.strftime("%Y%m%d%H%M%S")
+ args.test_data_file = None
+ args.CASES_PATHS = None
+
+ bbvars = get_bb_vars()
+ logdir = os.environ.get("BUILDDIR")
+ if 'LOG_DIR' in bbvars:
+ logdir = bbvars['LOG_DIR']
+ bb.utils.mkdirhier(logdir)
+ args.output_log = logdir + '/%s-results-%s.log' % (self.name, args.test_start_time)
+
+ super(OESelftestTestContextExecutor, self)._process_args(logger, args)
+
+ if args.list_modules:
+ args.list_tests = 'module'
+ elif args.list_classes:
+ args.list_tests = 'class'
+ elif args.list_tests:
+ args.list_tests = 'name'
+
+ self.tc_kwargs['init']['td'] = bbvars
+ self.tc_kwargs['init']['machines'] = self._get_available_machines()
+
+ builddir = os.environ.get("BUILDDIR")
+ self.tc_kwargs['init']['config_paths'] = {}
+ self.tc_kwargs['init']['config_paths']['testlayer_path'] = \
+ get_test_layer()
+ self.tc_kwargs['init']['config_paths']['builddir'] = builddir
+ self.tc_kwargs['init']['config_paths']['localconf'] = \
+ os.path.join(builddir, "conf/local.conf")
+ self.tc_kwargs['init']['config_paths']['localconf_backup'] = \
+ os.path.join(builddir, "conf/local.conf.orig")
+ self.tc_kwargs['init']['config_paths']['localconf_class_backup'] = \
+ os.path.join(builddir, "conf/local.conf.bk")
+ self.tc_kwargs['init']['config_paths']['bblayers'] = \
+ os.path.join(builddir, "conf/bblayers.conf")
+ self.tc_kwargs['init']['config_paths']['bblayers_backup'] = \
+ os.path.join(builddir, "conf/bblayers.conf.orig")
+ self.tc_kwargs['init']['config_paths']['bblayers_class_backup'] = \
+ os.path.join(builddir, "conf/bblayers.conf.bk")
+
+ copyfile(self.tc_kwargs['init']['config_paths']['localconf'],
+ self.tc_kwargs['init']['config_paths']['localconf_backup'])
+ copyfile(self.tc_kwargs['init']['config_paths']['bblayers'],
+ self.tc_kwargs['init']['config_paths']['bblayers_backup'])
+
+ def tag_filter(tags):
+ if args.exclude_tags:
+ if any(tag in args.exclude_tags for tag in tags):
+ return True
+ if args.select_tags:
+ if not tags or not any(tag in args.select_tags for tag in tags):
+ return True
+ return False
+
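+        # Illustrative sketch (not part of the original code): exclusion is
+        # applied before selection and untagged tests are dropped whenever
+        # select tags are given, e.g. with exclude_tags=['slow'] and
+        # select_tags=['wic']:
+        #
+        #   tag_filter(('wic', 'slow'))  # -> True  (filtered out: excluded)
+        #   tag_filter(('wic',))         # -> False (kept: selected)
+        #   tag_filter(())               # -> True  (filtered out: no matching tag)
+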
+ if args.select_tags or args.exclude_tags:
+ self.tc_kwargs['load']['tags_filter'] = tag_filter
+
+ self.tc_kwargs['run']['skips'] = args.skips
+ self.tc_kwargs['run']['processes'] = args.processes
+
+ def _pre_run(self):
+ def _check_required_env_variables(vars):
+ for var in vars:
+ if not os.environ.get(var):
+ self.tc.logger.error("%s is not set. Did you forget to source your build environment setup script?" % var)
+ raise OEQAPreRun
+
+ def _check_presence_meta_selftest():
+ builddir = os.environ.get("BUILDDIR")
+ if os.getcwd() != builddir:
+ self.tc.logger.info("Changing cwd to %s" % builddir)
+ os.chdir(builddir)
+
+            if "meta-selftest" not in self.tc.td["BBLAYERS"]:
+ self.tc.logger.warning("meta-selftest layer not found in BBLAYERS, adding it")
+ meta_selftestdir = os.path.join(
+ self.tc.td["BBLAYERS_FETCH_DIR"], 'meta-selftest')
+ if os.path.isdir(meta_selftestdir):
+                    runCmd("bitbake-layers add-layer %s" % meta_selftestdir)
+                    # reload the variables because the meta-selftest layer was added
+ self.tc.td = get_bb_vars()
+ self.tc.config_paths['testlayer_path'] = get_test_layer()
+ else:
+ self.tc.logger.error("could not locate meta-selftest in:\n%s" % meta_selftestdir)
+ raise OEQAPreRun
+
+ def _add_layer_libs():
+ bbpath = self.tc.td['BBPATH'].split(':')
+ layer_libdirs = [p for p in (os.path.join(l, 'lib') \
+ for l in bbpath) if os.path.exists(p)]
+ if layer_libdirs:
+ self.tc.logger.info("Adding layer libraries:")
+ for l in layer_libdirs:
+ self.tc.logger.info("\t%s" % l)
+
+ sys.path.extend(layer_libdirs)
+ importlib.reload(oeqa.selftest)
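+                # reloading oeqa.selftest re-evaluates the package with the extended
+                # sys.path so selftest cases shipped by other layers become importable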
+
+ _check_required_env_variables(["BUILDDIR"])
+ _check_presence_meta_selftest()
+
+ if "buildhistory.bbclass" in self.tc.td["BBINCLUDED"]:
+ self.tc.logger.error("You have buildhistory enabled already and this isn't recommended for selftest, please disable it first.")
+ raise OEQAPreRun
+
+ if "rm_work.bbclass" in self.tc.td["BBINCLUDED"]:
+ self.tc.logger.error("You have rm_work enabled which isn't recommended while running oe-selftest. Please disable it before continuing.")
+ raise OEQAPreRun
+
+ if "PRSERV_HOST" in self.tc.td:
+ self.tc.logger.error("Please unset PRSERV_HOST in order to run oe-selftest")
+ raise OEQAPreRun
+
+ if "SANITY_TESTED_DISTROS" in self.tc.td:
+ self.tc.logger.error("Please unset SANITY_TESTED_DISTROS in order to run oe-selftest")
+ raise OEQAPreRun
+
+ _add_layer_libs()
+
+        self.tc.logger.info("Running bitbake -e to check that the configuration is valid/parsable")
+ runCmd("bitbake -e")
+
+ def get_json_result_dir(self, args):
+ json_result_dir = os.path.join(self.tc.td["LOG_DIR"], 'oeqa')
+ if "OEQA_JSON_RESULT_DIR" in self.tc.td:
+ json_result_dir = self.tc.td["OEQA_JSON_RESULT_DIR"]
+
+ return json_result_dir
+
+ def get_configuration(self, args):
+ import platform
+ from oeqa.utils.metadata import metadata_from_bb
+ metadata = metadata_from_bb()
+ configuration = {'TEST_TYPE': 'oeselftest',
+ 'STARTTIME': args.test_start_time,
+ 'MACHINE': self.tc.td["MACHINE"],
+ 'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
+ 'HOST_NAME': metadata['hostname'],
+ 'LAYERS': metadata['layers']}
+ return configuration
+
+ def get_result_id(self, configuration):
+ return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['HOST_DISTRO'], configuration['MACHINE'], configuration['STARTTIME'])
+
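+    # For illustration (hypothetical values): a run started at 20170601120000
+    # on qemux86-64 under an Ubuntu-16.04 host yields the result id
+    # 'oeselftest_ubuntu-16.04_qemux86-64_20170601120000'
+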
+ def _internal_run(self, logger, args):
+ self.module_paths = self._get_cases_paths(
+ self.tc_kwargs['init']['td']['BBPATH'].split(':'))
+
+ self.tc = self._context_class(**self.tc_kwargs['init'])
+ try:
+ self.tc.loadTests(self.module_paths, **self.tc_kwargs['load'])
+ except OEQATestNotFound as ex:
+ logger.error(ex)
+ sys.exit(1)
+
+ if args.list_tests:
+ rc = self.tc.listTests(args.list_tests, **self.tc_kwargs['list'])
+ else:
+ self._pre_run()
+ rc = self.tc.runTests(**self.tc_kwargs['run'])
+ configuration = self.get_configuration(args)
+ rc.logDetails(self.get_json_result_dir(args),
+ configuration,
+ self.get_result_id(configuration))
+ rc.logSummary(self.name)
+
+ return rc
+
+ def _signal_clean_handler(self, signum, frame):
+ sys.exit(1)
+
+ def run(self, logger, args):
+ self._process_args(logger, args)
+
+ signal.signal(signal.SIGTERM, self._signal_clean_handler)
+
+ rc = None
+ try:
+ if args.machine:
+ logger.info('Custom machine mode enabled. MACHINE set to %s' %
+ args.machine)
+
+ if args.machine == 'all':
+ results = []
+ for m in self.tc_kwargs['init']['machines']:
+ self.tc_kwargs['run']['machine'] = m
+ results.append(self._internal_run(logger, args))
+
+                    # XXX: the oe-selftest script only needs to know whether at least
+                    # one machine run failed, so stop at the first failure
+ for r in results:
+ rc = r
+ if not r.wasSuccessful():
+ break
+
+ else:
+ self.tc_kwargs['run']['machine'] = args.machine
+ return self._internal_run(logger, args)
+
+ else:
+ self.tc_kwargs['run']['machine'] = args.machine
+ rc = self._internal_run(logger, args)
+ finally:
+ config_paths = self.tc_kwargs['init']['config_paths']
+ if os.path.exists(config_paths['localconf_backup']):
+ copyfile(config_paths['localconf_backup'],
+ config_paths['localconf'])
+ os.remove(config_paths['localconf_backup'])
+
+ if os.path.exists(config_paths['bblayers_backup']):
+ copyfile(config_paths['bblayers_backup'],
+ config_paths['bblayers'])
+ os.remove(config_paths['bblayers_backup'])
+
+ if os.path.exists(config_paths['localconf_class_backup']):
+ os.remove(config_paths['localconf_class_backup'])
+ if os.path.exists(config_paths['bblayers_class_backup']):
+ os.remove(config_paths['bblayers_class_backup'])
+
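+            # maintain a stable "<name>-results.log" symlink pointing at the most
+            # recent timestamped log file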
+ output_link = os.path.join(os.path.dirname(args.output_log),
+ "%s-results.log" % self.name)
+ if os.path.lexists(output_link):
+ os.remove(output_link)
+ os.symlink(args.output_log, output_link)
+
+ return rc
+
+_executor_class = OESelftestTestContextExecutor
diff --git a/meta/lib/oeqa/selftest/imagefeatures.py b/meta/lib/oeqa/selftest/imagefeatures.py
deleted file mode 100644
index d015c49087..0000000000
--- a/meta/lib/oeqa/selftest/imagefeatures.py
+++ /dev/null
@@ -1,127 +0,0 @@
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
-from oeqa.utils.decorators import testcase
-from oeqa.utils.sshcontrol import SSHControl
-import os
-import sys
-import logging
-
-class ImageFeatures(oeSelfTest):
-
- test_user = 'tester'
- root_user = 'root'
-
- @testcase(1107)
- def test_non_root_user_can_connect_via_ssh_without_password(self):
- """
- Summary: Check if non root user can connect via ssh without password
- Expected: 1. Connection to the image via ssh using root user without providing a password should be allowed.
- 2. Connection to the image via ssh using tester user without providing a password should be allowed.
- Product: oe-core
- Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
-
- features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh empty-root-password allow-empty-password"\n'
- features += 'INHERIT += "extrausers"\n'
- features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
- self.write_config(features)
-
- # Build a core-image-minimal
- bitbake('core-image-minimal')
-
- with runqemu("core-image-minimal") as qemu:
- # Attempt to ssh with each user into qemu with empty password
- for user in [self.root_user, self.test_user]:
- ssh = SSHControl(ip=qemu.ip, logfile=qemu.sshlog, user=user)
- status, output = ssh.run("true")
- self.assertEqual(status, 0, 'ssh to user %s failed with %s' % (user, output))
-
- @testcase(1115)
- def test_all_users_can_connect_via_ssh_without_password(self):
- """
- Summary: Check if all users can connect via ssh without password
- Expected: 1. Connection to the image via ssh using root user without providing a password should NOT be allowed.
- 2. Connection to the image via ssh using tester user without providing a password should be allowed.
- Product: oe-core
- Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
-
- features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh allow-empty-password"\n'
- features += 'INHERIT += "extrausers"\n'
- features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
- self.write_config(features)
-
- # Build a core-image-minimal
- bitbake('core-image-minimal')
-
- with runqemu("core-image-minimal") as qemu:
- # Attempt to ssh with each user into qemu with empty password
- for user in [self.root_user, self.test_user]:
- ssh = SSHControl(ip=qemu.ip, logfile=qemu.sshlog, user=user)
- status, output = ssh.run("true")
- if user == 'root':
- self.assertNotEqual(status, 0, 'ssh to user root was allowed when it should not have been')
- else:
- self.assertEqual(status, 0, 'ssh to user tester failed with %s' % output)
-
-
- @testcase(1116)
- def test_clutter_image_can_be_built(self):
- """
- Summary: Check if clutter image can be built
- Expected: 1. core-image-clutter can be built
- Product: oe-core
- Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
-
- # Build a core-image-clutter
- bitbake('core-image-clutter')
-
- @testcase(1117)
- def test_wayland_support_in_image(self):
- """
- Summary: Check Wayland support in image
- Expected: 1. Wayland image can be build
- 2. Wayland feature can be installed
- Product: oe-core
- Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
-
- features = 'DISTRO_FEATURES_append = " wayland"\n'
- features += 'CORE_IMAGE_EXTRA_INSTALL += "wayland weston"'
- self.write_config(features)
-
- # Build a core-image-weston
- bitbake('core-image-weston')
-
- def test_bmap(self):
- """
- Summary: Check bmap support
- Expected: 1. core-image-minimal can be build with bmap support
- 2. core-image-minimal is sparse
- Product: oe-core
- Author: Ed Bartosh <ed.bartosh@linux.intel.com>
- """
-
- features = 'IMAGE_FSTYPES += " ext4 ext4.bmap"'
- self.write_config(features)
-
- image_name = 'core-image-minimal'
- bitbake(image_name)
-
- deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
- link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
- image_path = os.path.join(deploy_dir_image, "%s.ext4" % link_name)
- bmap_path = "%s.bmap" % image_path
-
- # check if result image and bmap file are in deploy directory
- self.assertTrue(os.path.exists(image_path))
- self.assertTrue(os.path.exists(bmap_path))
-
- # check if result image is sparse
- image_stat = os.stat(image_path)
- self.assertTrue(image_stat.st_size > image_stat.st_blocks * 512)
diff --git a/meta/lib/oeqa/selftest/oescripts.py b/meta/lib/oeqa/selftest/oescripts.py
deleted file mode 100644
index 31cd50809c..0000000000
--- a/meta/lib/oeqa/selftest/oescripts.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import datetime
-import unittest
-import os
-import re
-import shutil
-
-import oeqa.utils.ftools as ftools
-from oeqa.selftest.base import oeSelfTest
-from oeqa.selftest.buildhistory import BuildhistoryBase
-from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer
-from oeqa.utils.decorators import testcase
-
-class TestScripts(oeSelfTest):
-
- @testcase(300)
- def test_cleanup_workdir(self):
- path = os.path.dirname(get_bb_var('WORKDIR', 'gzip'))
- old_version_recipe = os.path.join(get_bb_var('COREBASE'), 'meta/recipes-extended/gzip/gzip_1.3.12.bb')
- old_version = '1.3.12'
- bitbake("-ccleansstate gzip")
- bitbake("-ccleansstate -b %s" % old_version_recipe)
- if os.path.exists(get_bb_var('WORKDIR', "-b %s" % old_version_recipe)):
- shutil.rmtree(get_bb_var('WORKDIR', "-b %s" % old_version_recipe))
- if os.path.exists(get_bb_var('WORKDIR', 'gzip')):
- shutil.rmtree(get_bb_var('WORKDIR', 'gzip'))
-
- if os.path.exists(path):
- initial_contents = os.listdir(path)
- else:
- initial_contents = []
-
- bitbake('gzip')
- intermediary_contents = os.listdir(path)
- bitbake("-b %s" % old_version_recipe)
- runCmd('cleanup-workdir')
- remaining_contents = os.listdir(path)
-
- expected_contents = [x for x in intermediary_contents if x not in initial_contents]
- remaining_not_expected = [x for x in remaining_contents if x not in expected_contents]
- self.assertFalse(remaining_not_expected, msg="Not all necessary content has been deleted from %s: %s" % (path, ', '.join(map(str, remaining_not_expected))))
- expected_not_remaining = [x for x in expected_contents if x not in remaining_contents]
- self.assertFalse(expected_not_remaining, msg="The script removed extra contents from %s: %s" % (path, ', '.join(map(str, expected_not_remaining))))
-
-class BuildhistoryDiffTests(BuildhistoryBase):
-
- @testcase(295)
- def test_buildhistory_diff(self):
- self.add_command_to_tearDown('cleanup-workdir')
- target = 'xcursor-transparent-theme'
- self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
- self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True)
- result = runCmd("buildhistory-diff -p %s" % get_bb_var('BUILDHISTORY_DIR'))
- expected_output = 'PR changed from "r1" to "r0"'
- self.assertTrue(expected_output in result.output, msg="Did not find expected output: %s" % result.output)
diff --git a/meta/lib/oeqa/selftest/runtime-test.py b/meta/lib/oeqa/selftest/runtime-test.py
deleted file mode 100644
index c2d5b45a4b..0000000000
--- a/meta/lib/oeqa/selftest/runtime-test.py
+++ /dev/null
@@ -1,105 +0,0 @@
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
-from oeqa.utils.decorators import testcase
-import os
-
-class TestExport(oeSelfTest):
-
- def test_testexport_basic(self):
- """
- Summary: Check basic testexport functionality with only ping test enabled.
- Expected: 1. testexport directory must be created.
- 2. runexported.py must run without any error/exception.
- 3. ping test must succeed.
- Product: oe-core
- Author: Mariano Lopez <mariano.lopez@intel.com>
- """
-
- features = 'INHERIT += "testexport"\n'
- # These aren't the actual IP addresses but testexport class needs something defined
- features += 'TEST_SERVER_IP = "192.168.7.1"\n'
- features += 'TEST_TARGET_IP = "192.168.7.1"\n'
- features += 'TEST_SUITES = "ping"\n'
- self.write_config(features)
-
- # Build tesexport for core-image-minimal
- bitbake('core-image-minimal')
- bitbake('-c testexport core-image-minimal')
-
- # Verify if TEST_EXPORT_DIR was created
- testexport_dir = get_bb_var('TEST_EXPORT_DIR', 'core-image-minimal')
- isdir = os.path.isdir(testexport_dir)
- self.assertEqual(True, isdir, 'Failed to create testexport dir: %s' % testexport_dir)
-
- with runqemu('core-image-minimal') as qemu:
- # Attempt to run runexported.py to perform ping test
- runexported_path = os.path.join(testexport_dir, "runexported.py")
- testdata_path = os.path.join(testexport_dir, "testdata.json")
- cmd = "%s -t %s -s %s %s" % (runexported_path, qemu.ip, qemu.server_ip, testdata_path)
- result = runCmd(cmd)
- self.assertEqual(0, result.status, 'runexported.py returned a non 0 status')
-
- # Verify ping test was succesful
- failure = True if 'FAIL' in result.output else False
- self.assertNotEqual(True, failure, 'ping test failed')
-
- def test_testexport_sdk(self):
- """
- Summary: Check sdk functionality for testexport.
- Expected: 1. testexport directory must be created.
- 2. SDK tarball must exists.
- 3. Uncompressing of tarball must succeed.
- 4. Check if the SDK directory is added to PATH.
- 5. Run tar from the SDK directory.
- Product: oe-core
- Author: Mariano Lopez <mariano.lopez@intel.com>
- """
-
- features = 'INHERIT += "testexport"\n'
- # These aren't the actual IP addresses but testexport class needs something defined
- features += 'TEST_SERVER_IP = "192.168.7.1"\n'
- features += 'TEST_TARGET_IP = "192.168.7.1"\n'
- features += 'TEST_SUITES = "ping"\n'
- features += 'TEST_SUITES_TAGS = "selftest_sdk"\n'
- features += 'TEST_EXPORT_SDK_ENABLED = "1"\n'
- features += 'TEST_EXPORT_SDK_PACKAGES = "nativesdk-tar"\n'
- self.write_config(features)
-
- # Build tesexport for core-image-minimal
- bitbake('core-image-minimal')
- bitbake('-c testexport core-image-minimal')
-
- # Check for SDK
- testexport_dir = get_bb_var('TEST_EXPORT_DIR', 'core-image-minimal')
- sdk_dir = get_bb_var('TEST_EXPORT_SDK_DIR', 'core-image-minimal')
- tarball_name = "%s.sh" % get_bb_var('TEST_EXPORT_SDK_NAME', 'core-image-minimal')
- tarball_path = os.path.join(testexport_dir, sdk_dir, tarball_name)
- self.assertEqual(os.path.isfile(tarball_path), True, "Couldn't find SDK tarball: %s" % tarball_path)
-
- # Run runexported.py
- runexported_path = os.path.join(testexport_dir, "runexported.py")
- testdata_path = os.path.join(testexport_dir, "testdata.json")
- cmd = "%s %s" % (runexported_path, testdata_path)
- result = runCmd(cmd)
- self.assertEqual(0, result.status, 'runexported.py returned a non 0 status')
-
-
-class TestImage(oeSelfTest):
-
- def test_testimage_install(self):
- """
- Summary: Check install packages functionality for testimage/testexport.
- Expected: 1. Import tests from a directory other than meta.
- 2. Check install/unistall of socat.
- Product: oe-core
- Author: Mariano Lopez <mariano.lopez@intel.com>
- """
-
- features = 'INHERIT += "testimage"\n'
- features += 'TEST_SUITES = "ping ssh selftest"\n'
- features += 'TEST_SUITES_TAGS = "selftest_package_install"\n'
- self.write_config(features)
-
- # Build core-image-sato and testimage
- bitbake('core-image-full-cmdline socat')
- bitbake('-c testimage core-image-full-cmdline')
diff --git a/meta/lib/oeqa/selftest/signing.py b/meta/lib/oeqa/selftest/signing.py
deleted file mode 100644
index 4c12d6d940..0000000000
--- a/meta/lib/oeqa/selftest/signing.py
+++ /dev/null
@@ -1,178 +0,0 @@
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var
-import os
-import glob
-import re
-import shutil
-import tempfile
-from oeqa.utils.decorators import testcase
-from oeqa.utils.ftools import write_file
-
-
-class Signing(oeSelfTest):
-
- gpg_dir = ""
- pub_key_path = ""
- secret_key_path = ""
-
- @classmethod
- def setUpClass(cls):
- # Check that we can find the gpg binary and fail early if we can't
- if not shutil.which("gpg"):
- raise AssertionError("This test needs GnuPG")
-
- cls.gpg_home_dir = tempfile.TemporaryDirectory(prefix="oeqa-signing-")
- cls.gpg_dir = cls.gpg_home_dir.name
-
- cls.pub_key_path = os.path.join(cls.testlayer_path, 'files', 'signing', "key.pub")
- cls.secret_key_path = os.path.join(cls.testlayer_path, 'files', 'signing', "key.secret")
-
- runCmd('gpg --homedir %s --import %s %s' % (cls.gpg_dir, cls.pub_key_path, cls.secret_key_path))
-
- @testcase(1362)
- def test_signing_packages(self):
- """
- Summary: Test that packages can be signed in the package feed
- Expected: Package should be signed with the correct key
- Product: oe-core
- Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
- import oe.packagedata
-
- package_classes = get_bb_var('PACKAGE_CLASSES')
- if 'package_rpm' not in package_classes:
- self.skipTest('This test requires RPM Packaging.')
-
- test_recipe = 'ed'
-
- feature = 'INHERIT += "sign_rpm"\n'
- feature += 'RPM_GPG_PASSPHRASE = "test123"\n'
- feature += 'RPM_GPG_NAME = "testuser"\n'
- feature += 'RPM_GPG_PUBKEY = "%s"\n' % self.pub_key_path
- feature += 'GPG_PATH = "%s"\n' % self.gpg_dir
-
- self.write_config(feature)
-
- bitbake('-c cleansstate %s' % test_recipe)
- bitbake(test_recipe)
- self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
-
- pkgdatadir = get_bb_var('PKGDATA_DIR', test_recipe)
- pkgdata = oe.packagedata.read_pkgdatafile(pkgdatadir + "/runtime/ed")
- if 'PKGE' in pkgdata:
- pf = pkgdata['PN'] + "-" + pkgdata['PKGE'] + pkgdata['PKGV'] + '-' + pkgdata['PKGR']
- else:
- pf = pkgdata['PN'] + "-" + pkgdata['PKGV'] + '-' + pkgdata['PKGR']
- deploy_dir_rpm = get_bb_var('DEPLOY_DIR_RPM', test_recipe)
- package_arch = get_bb_var('PACKAGE_ARCH', test_recipe).replace('-', '_')
- staging_bindir_native = get_bb_var('STAGING_BINDIR_NATIVE')
-
- pkg_deploy = os.path.join(deploy_dir_rpm, package_arch, '.'.join((pf, package_arch, 'rpm')))
-
- # Use a temporary rpmdb
- rpmdb = tempfile.mkdtemp(prefix='oeqa-rpmdb')
-
- runCmd('%s/rpm --define "_dbpath %s" --import %s' %
- (staging_bindir_native, rpmdb, self.pub_key_path))
-
- ret = runCmd('%s/rpm --define "_dbpath %s" --checksig %s' %
- (staging_bindir_native, rpmdb, pkg_deploy))
- # tmp/deploy/rpm/i586/ed-1.9-r0.i586.rpm: rsa sha1 md5 OK
- self.assertIn('rsa sha1 md5 OK', ret.output, 'Package signed incorrectly.')
- shutil.rmtree(rpmdb)
-
- @testcase(1382)
- def test_signing_sstate_archive(self):
- """
- Summary: Test that sstate archives can be signed
- Expected: Package should be signed with the correct key
- Product: oe-core
- Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
-
- test_recipe = 'ed'
-
- builddir = os.environ.get('BUILDDIR')
- sstatedir = os.path.join(builddir, 'test-sstate')
-
- self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
- self.add_command_to_tearDown('bitbake -c cleansstate %s' % test_recipe)
- self.add_command_to_tearDown('rm -rf %s' % sstatedir)
-
- # Determine the pub key signature
- ret = runCmd('gpg --homedir %s --list-keys' % self.gpg_dir)
- pub_key = re.search(r'^pub\s+\S+/(\S+)\s+', ret.output, re.M)
- self.assertIsNotNone(pub_key, 'Failed to determine the public key signature.')
- pub_key = pub_key.group(1)
-
- feature = 'SSTATE_SIG_KEY ?= "%s"\n' % pub_key
- feature += 'SSTATE_SIG_PASSPHRASE ?= "test123"\n'
- feature += 'SSTATE_VERIFY_SIG ?= "1"\n'
- feature += 'GPG_PATH = "%s"\n' % self.gpg_dir
- feature += 'SSTATE_DIR = "%s"\n' % sstatedir
-
- self.write_config(feature)
-
- bitbake('-c cleansstate %s' % test_recipe)
- bitbake(test_recipe)
-
- recipe_sig = glob.glob(sstatedir + '/*/*:ed:*_package.tgz.sig')
- recipe_tgz = glob.glob(sstatedir + '/*/*:ed:*_package.tgz')
-
- self.assertEqual(len(recipe_sig), 1, 'Failed to find .sig file.')
- self.assertEqual(len(recipe_tgz), 1, 'Failed to find .tgz file.')
-
- ret = runCmd('gpg --homedir %s --verify %s %s' % (self.gpg_dir, recipe_sig[0], recipe_tgz[0]))
- # gpg: Signature made Thu 22 Oct 2015 01:45:09 PM EEST using RSA key ID 61EEFB30
- # gpg: Good signature from "testuser (nocomment) <testuser@email.com>"
- self.assertIn('gpg: Good signature from', ret.output, 'Package signed incorrectly.')
-
-
-class LockedSignatures(oeSelfTest):
-
- @testcase(1420)
- def test_locked_signatures(self):
- """
- Summary: Test locked signature mechanism
- Expected: Locked signatures will prevent task to run
- Product: oe-core
- Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
-
- test_recipe = 'ed'
- locked_sigs_file = 'locked-sigs.inc'
-
- self.add_command_to_tearDown('rm -f %s' % os.path.join(self.builddir, locked_sigs_file))
-
- bitbake(test_recipe)
- # Generate locked sigs include file
- bitbake('-S none %s' % test_recipe)
-
- feature = 'require %s\n' % locked_sigs_file
- feature += 'SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n'
- self.write_config(feature)
-
- # Build a locked recipe
- bitbake(test_recipe)
-
- # Make a change that should cause the locked task signature to change
- recipe_append_file = test_recipe + '_' + get_bb_var('PV', test_recipe) + '.bbappend'
- recipe_append_path = os.path.join(self.testlayer_path, 'recipes-test', test_recipe, recipe_append_file)
- feature = 'SUMMARY += "test locked signature"\n'
-
- os.mkdir(os.path.join(self.testlayer_path, 'recipes-test', test_recipe))
- write_file(recipe_append_path, feature)
-
- self.add_command_to_tearDown('rm -rf %s' % os.path.join(self.testlayer_path, 'recipes-test', test_recipe))
-
- # Build the recipe again
- ret = bitbake(test_recipe)
-
- # Verify you get the warning and that the real task *isn't* run (i.e. the locked signature has worked)
- patt = r'WARNING: The %s:do_package sig is computed to be \S+, but the sig is locked to \S+ in SIGGEN_LOCKEDSIGS\S+' % test_recipe
- found_warn = re.search(patt, ret.output)
-
- self.assertIsNotNone(found_warn, "Didn't find the expected warning message. Output: %s" % ret.output)
diff --git a/meta/lib/oeqa/selftest/wic.py b/meta/lib/oeqa/selftest/wic.py
deleted file mode 100644
index e550785fcf..0000000000
--- a/meta/lib/oeqa/selftest/wic.py
+++ /dev/null
@@ -1,286 +0,0 @@
-#!/usr/bin/env python
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# Copyright (c) 2015, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# AUTHORS
-# Ed Bartosh <ed.bartosh@linux.intel.com>
-
-"""Test cases for wic."""
-
-import os
-
-from glob import glob
-from shutil import rmtree
-
-from oeqa.selftest.base import oeSelfTest
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
-from oeqa.utils.decorators import testcase
-
-
-class Wic(oeSelfTest):
- """Wic test class."""
-
- resultdir = "/var/tmp/wic/build/"
- image_is_ready = False
-
- def setUpLocal(self):
- """This code is executed before each test method."""
- self.write_config('IMAGE_FSTYPES += " hddimg"\n'
- 'MACHINE_FEATURES_append = " efi"\n')
-
- # Do this here instead of in setUpClass as the base setUp does some
- # clean up which can result in the native tools built earlier in
- # setUpClass being unavailable.
- if not Wic.image_is_ready:
- bitbake('syslinux syslinux-native parted-native gptfdisk-native '
- 'dosfstools-native mtools-native bmap-tools-native')
- bitbake('core-image-minimal')
- Wic.image_is_ready = True
-
- rmtree(self.resultdir, ignore_errors=True)
-
- @testcase(1208)
- def test_help(self):
- """Test wic --help"""
- self.assertEqual(0, runCmd('wic --help').status)
-
- @testcase(1209)
- def test_createhelp(self):
- """Test wic create --help"""
- self.assertEqual(0, runCmd('wic create --help').status)
-
- @testcase(1210)
- def test_listhelp(self):
- """Test wic list --help"""
- self.assertEqual(0, runCmd('wic list --help').status)
-
- @testcase(1211)
- def test_build_image_name(self):
- """Test wic create directdisk --image-name core-image-minimal"""
- self.assertEqual(0, runCmd("wic create directdisk "
- "--image-name core-image-minimal").status)
- self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
-
- @testcase(1212)
- def test_build_artifacts(self):
- """Test wic create directdisk providing all artifacts."""
- bbvars = dict((var.lower(), get_bb_var(var, 'core-image-minimal')) \
- for var in ('STAGING_DATADIR', 'DEPLOY_DIR_IMAGE',
- 'STAGING_DIR_NATIVE', 'IMAGE_ROOTFS'))
- status = runCmd("wic create directdisk "
- "-b %(staging_datadir)s "
- "-k %(deploy_dir_image)s "
- "-n %(staging_dir_native)s "
- "-r %(image_rootfs)s" % bbvars).status
- self.assertEqual(0, status)
- self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
-
- @testcase(1157)
- def test_gpt_image(self):
- """Test creation of core-image-minimal with gpt table and UUID boot"""
- self.assertEqual(0, runCmd("wic create directdisk-gpt "
- "--image-name core-image-minimal").status)
- self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
-
- @testcase(1213)
- def test_unsupported_subcommand(self):
- """Test unsupported subcommand"""
- self.assertEqual(1, runCmd('wic unsupported',
- ignore_status=True).status)
-
- @testcase(1214)
- def test_no_command(self):
- """Test wic without command"""
- self.assertEqual(1, runCmd('wic', ignore_status=True).status)
-
- @testcase(1215)
- def test_help_overview(self):
- """Test wic help overview"""
- self.assertEqual(0, runCmd('wic help overview').status)
-
- @testcase(1216)
- def test_help_plugins(self):
- """Test wic help plugins"""
- self.assertEqual(0, runCmd('wic help plugins').status)
-
- @testcase(1217)
- def test_help_kickstart(self):
- """Test wic help kickstart"""
- self.assertEqual(0, runCmd('wic help kickstart').status)
-
- @testcase(1264)
- def test_compress_gzip(self):
- """Test compressing an image with gzip"""
- self.assertEqual(0, runCmd("wic create directdisk "
- "--image-name core-image-minimal "
- "-c gzip").status)
- self.assertEqual(1, len(glob(self.resultdir + \
- "directdisk-*.direct.gz")))
-
- @testcase(1265)
- def test_compress_bzip2(self):
- """Test compressing an image with bzip2"""
- self.assertEqual(0, runCmd("wic create directdisk "
- "--image-name core-image-minimal "
- "-c bzip2").status)
- self.assertEqual(1, len(glob(self.resultdir + \
- "directdisk-*.direct.bz2")))
-
- @testcase(1266)
- def test_compress_xz(self):
- """Test compressing an image with xz"""
- self.assertEqual(0, runCmd("wic create directdisk "
- "--image-name core-image-minimal "
- "-c xz").status)
- self.assertEqual(1, len(glob(self.resultdir + \
- "directdisk-*.direct.xz")))
-
- @testcase(1267)
- def test_wrong_compressor(self):
- """Test how wic breaks if wrong compressor is provided"""
- self.assertEqual(2, runCmd("wic create directdisk "
- "--image-name core-image-minimal "
- "-c wrong", ignore_status=True).status)
-
- @testcase(1268)
- def test_rootfs_indirect_recipes(self):
- """Test usage of rootfs plugin with rootfs recipes"""
- wks = "directdisk-multi-rootfs"
- self.assertEqual(0, runCmd("wic create %s "
- "--image-name core-image-minimal "
- "--rootfs rootfs1=core-image-minimal "
- "--rootfs rootfs2=core-image-minimal" \
- % wks).status)
- self.assertEqual(1, len(glob(self.resultdir + "%s*.direct" % wks)))
-
- @testcase(1269)
- def test_rootfs_artifacts(self):
- """Test usage of rootfs plugin with rootfs paths"""
- bbvars = dict((var.lower(), get_bb_var(var, 'core-image-minimal')) \
- for var in ('STAGING_DATADIR', 'DEPLOY_DIR_IMAGE',
- 'STAGING_DIR_NATIVE', 'IMAGE_ROOTFS'))
- bbvars['wks'] = "directdisk-multi-rootfs"
- status = runCmd("wic create %(wks)s "
- "-b %(staging_datadir)s "
- "-k %(deploy_dir_image)s "
- "-n %(staging_dir_native)s "
- "--rootfs-dir rootfs1=%(image_rootfs)s "
- "--rootfs-dir rootfs2=%(image_rootfs)s" \
- % bbvars).status
- self.assertEqual(0, status)
- self.assertEqual(1, len(glob(self.resultdir + \
- "%(wks)s-*.direct" % bbvars)))
-
- @testcase(1346)
- def test_iso_image(self):
- """Test creation of hybrid iso image with legacy and EFI boot"""
- self.assertEqual(0, runCmd("wic create mkhybridiso "
- "--image-name core-image-minimal").status)
- self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.direct")))
- self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.iso")))
-
- @testcase(1347)
- def test_image_env(self):
- """Test generation of <image>.env files."""
- image = 'core-image-minimal'
- self.assertEqual(0, bitbake('%s -c do_rootfs_wicenv' % image).status)
- stdir = get_bb_var('STAGING_DIR_TARGET', image)
- imgdatadir = os.path.join(stdir, 'imgdata')
-
- basename = get_bb_var('IMAGE_BASENAME', image)
- self.assertEqual(basename, image)
- path = os.path.join(imgdatadir, basename) + '.env'
- self.assertTrue(os.path.isfile(path))
-
- wicvars = set(get_bb_var('WICVARS', image).split())
- # filter out optional variables
- wicvars = wicvars.difference(('HDDDIR', 'IMAGE_BOOT_FILES',
- 'INITRD', 'ISODIR'))
- with open(path) as envfile:
- content = dict(line.split("=", 1) for line in envfile)
- # test if variables used by wic present in the .env file
- for var in wicvars:
- self.assertTrue(var in content, "%s is not in .env file" % var)
- self.assertTrue(content[var])
-
- @testcase(1351)
- def test_wic_image_type(self):
- """Test building wic images by bitbake"""
- self.assertEqual(0, bitbake('wic-image-minimal').status)
-
- deploy_dir = get_bb_var('DEPLOY_DIR_IMAGE')
- machine = get_bb_var('MACHINE')
- prefix = os.path.join(deploy_dir, 'wic-image-minimal-%s.' % machine)
- # check if we have result image and manifests symlinks
- # pointing to existing files
- for suffix in ('wic', 'manifest'):
- path = prefix + suffix
- self.assertTrue(os.path.islink(path))
- self.assertTrue(os.path.isfile(os.path.realpath(path)))
-
- @testcase(1348)
- def test_qemux86_directdisk(self):
- """Test creation of qemux-86-directdisk image"""
- image = "qemux86-directdisk"
- self.assertEqual(0, runCmd("wic create %s -e core-image-minimal" \
- % image).status)
- self.assertEqual(1, len(glob(self.resultdir + "%s-*direct" % image)))
-
- @testcase(1349)
- def test_mkgummidisk(self):
- """Test creation of mkgummidisk image"""
- image = "mkgummidisk"
- self.assertEqual(0, runCmd("wic create %s -e core-image-minimal" \
- % image).status)
- self.assertEqual(1, len(glob(self.resultdir + "%s-*direct" % image)))
-
- @testcase(1350)
- def test_mkefidisk(self):
- """Test creation of mkefidisk image"""
- image = "mkefidisk"
- self.assertEqual(0, runCmd("wic create %s -e core-image-minimal" \
- % image).status)
- self.assertEqual(1, len(glob(self.resultdir + "%s-*direct" % image)))
-
- @testcase(1385)
- def test_directdisk_bootloader_config(self):
- """Test creation of directdisk-bootloader-config image"""
- image = "directdisk-bootloader-config"
- self.assertEqual(0, runCmd("wic create %s -e core-image-minimal" \
- % image).status)
- self.assertEqual(1, len(glob(self.resultdir + "%s-*direct" % image)))
-
- @testcase(1422)
- def test_qemu(self):
- """Test wic-image-minimal under qemu"""
- self.assertEqual(0, bitbake('wic-image-minimal').status)
-
- with runqemu('wic-image-minimal', ssh=False) as qemu:
- command = "mount |grep '^/dev/' | cut -f1,3 -d ' '"
- status, output = qemu.run_serial(command)
- self.assertEqual(1, status, 'Failed to run command "%s": %s' % (command, output))
- self.assertEqual(output, '/dev/root /\r\n/dev/vda3 /mnt')
-
- def test_bmap(self):
- """Test generation of .bmap file"""
- image = "directdisk"
- status = runCmd("wic create %s -e core-image-minimal --bmap" % image).status
- self.assertEqual(0, status)
- self.assertEqual(1, len(glob(self.resultdir + "%s-*direct" % image)))
- self.assertEqual(1, len(glob(self.resultdir + "%s-*direct.bmap" % image)))
diff --git a/meta/lib/oeqa/targetcontrol.py b/meta/lib/oeqa/targetcontrol.py
index 24669f461d..0b2915d5cc 100644
--- a/meta/lib/oeqa/targetcontrol.py
+++ b/meta/lib/oeqa/targetcontrol.py
@@ -1,6 +1,8 @@
+#
# Copyright (C) 2013 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
# This module is used by testimage.bbclass for setting up and controlling a target machine.
@@ -18,42 +20,18 @@ from oeqa.utils.dump import TargetDumper
from oeqa.controllers.testtargetloader import TestTargetLoader
from abc import ABCMeta, abstractmethod
-def get_target_controller(d):
- testtarget = d.getVar("TEST_TARGET", True)
- # old, simple names
- if testtarget == "qemu":
- return QemuTarget(d)
- elif testtarget == "simpleremote":
- return SimpleRemoteTarget(d)
- else:
- # use the class name
- try:
- # is it a core class defined here?
- controller = getattr(sys.modules[__name__], testtarget)
- except AttributeError:
- # nope, perhaps a layer defined one
- try:
- bbpath = d.getVar("BBPATH", True).split(':')
- testtargetloader = TestTargetLoader()
- controller = testtargetloader.get_controller_module(testtarget, bbpath)
- except ImportError as e:
- bb.fatal("Failed to import {0} from available controller modules:\n{1}".format(testtarget,traceback.format_exc()))
- except AttributeError as e:
- bb.fatal("Invalid TEST_TARGET - " + str(e))
- return controller(d)
-
-
class BaseTarget(object, metaclass=ABCMeta):
supported_image_fstypes = []
- def __init__(self, d):
+ def __init__(self, d, logger):
self.connection = None
self.ip = None
self.server_ip = None
- self.datetime = d.getVar('DATETIME', True)
- self.testdir = d.getVar("TEST_LOG_DIR", True)
- self.pn = d.getVar("PN", True)
+ self.datetime = d.getVar('DATETIME')
+ self.testdir = d.getVar("TEST_LOG_DIR")
+ self.pn = d.getVar("PN")
+ self.logger = logger
@abstractmethod
def deploy(self):
@@ -63,7 +41,7 @@ class BaseTarget(object, metaclass=ABCMeta):
if os.path.islink(sshloglink):
os.unlink(sshloglink)
os.symlink(self.sshlog, sshloglink)
- bb.note("SSH log file: %s" % self.sshlog)
+ self.logger.info("SSH log file: %s" % self.sshlog)
@abstractmethod
def start(self, params=None, ssh=True, extra_bootparams=None):
@@ -80,7 +58,7 @@ class BaseTarget(object, metaclass=ABCMeta):
@classmethod
def match_image_fstype(self, d, image_fstypes=None):
if not image_fstypes:
- image_fstypes = d.getVar('IMAGE_FSTYPES', True).split(' ')
+ image_fstypes = d.getVar('IMAGE_FSTYPES').split(' ')
possible_image_fstypes = [fstype for fstype in self.supported_image_fstypes if fstype in image_fstypes]
if possible_image_fstypes:
return possible_image_fstypes[0]
@@ -113,54 +91,60 @@ class QemuTarget(BaseTarget):
supported_image_fstypes = ['ext3', 'ext4', 'cpio.gz', 'wic']
- def __init__(self, d):
+ def __init__(self, d, logger, image_fstype=None):
+
+ import oe.types
- super(QemuTarget, self).__init__(d)
+ super(QemuTarget, self).__init__(d, logger)
- self.image_fstype = self.get_image_fstype(d)
+ self.rootfs = ''
+ self.kernel = ''
+ self.image_fstype = ''
+
+ if d.getVar('FIND_ROOTFS') == '1':
+ self.image_fstype = image_fstype or self.get_image_fstype(d)
+ self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("IMAGE_LINK_NAME") + '.' + self.image_fstype)
+ self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("KERNEL_IMAGETYPE", False) + '-' + d.getVar('MACHINE', False) + '.bin')
self.qemulog = os.path.join(self.testdir, "qemu_boot_log.%s" % self.datetime)
- self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.' + self.image_fstype)
- self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("KERNEL_IMAGETYPE", False) + '-' + d.getVar('MACHINE', False) + '.bin')
- dump_target_cmds = d.getVar("testimage_dump_target", True)
- dump_host_cmds = d.getVar("testimage_dump_host", True)
- dump_dir = d.getVar("TESTIMAGE_DUMP_DIR", True)
- if d.getVar("QEMU_USE_KVM", False) is not None \
- and d.getVar("QEMU_USE_KVM", False) == "True" \
- and "x86" in d.getVar("MACHINE", True):
- use_kvm = True
- else:
- use_kvm = False
+ dump_target_cmds = d.getVar("testimage_dump_target")
+ dump_host_cmds = d.getVar("testimage_dump_host")
+ dump_dir = d.getVar("TESTIMAGE_DUMP_DIR")
+ if not dump_dir:
+ dump_dir = os.path.join(d.getVar('LOG_DIR'), 'runtime-hostdump')
+ use_kvm = oe.types.qemu_use_kvm(d.getVar('QEMU_USE_KVM'), d.getVar('TARGET_ARCH'))
# Log QemuRunner log output to a file
import oe.path
bb.utils.mkdirhier(self.testdir)
self.qemurunnerlog = os.path.join(self.testdir, 'qemurunner_log.%s' % self.datetime)
- logger = logging.getLogger('BitBake.QemuRunner')
loggerhandler = logging.FileHandler(self.qemurunnerlog)
loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
- logger.addHandler(loggerhandler)
+ self.logger.addHandler(loggerhandler)
oe.path.symlink(os.path.basename(self.qemurunnerlog), os.path.join(self.testdir, 'qemurunner_log'), force=True)
- if d.getVar("DISTRO", True) == "poky-tiny":
- self.runner = QemuTinyRunner(machine=d.getVar("MACHINE", True),
+ if d.getVar("DISTRO") == "poky-tiny":
+ self.runner = QemuTinyRunner(machine=d.getVar("MACHINE"),
rootfs=self.rootfs,
- tmpdir = d.getVar("TMPDIR", True),
- deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE", True),
- display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True),
+ tmpdir = d.getVar("TMPDIR"),
+ deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE"),
+ display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY"),
logfile = self.qemulog,
kernel = self.kernel,
- boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT", True)))
+ boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT")),
+ logger = logger)
else:
- self.runner = QemuRunner(machine=d.getVar("MACHINE", True),
+ self.runner = QemuRunner(machine=d.getVar("MACHINE"),
rootfs=self.rootfs,
- tmpdir = d.getVar("TMPDIR", True),
- deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE", True),
- display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True),
+ tmpdir = d.getVar("TMPDIR"),
+ deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE"),
+ display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY"),
logfile = self.qemulog,
- boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT", True)),
+ boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT")),
use_kvm = use_kvm,
dump_dir = dump_dir,
- dump_host_cmds = d.getVar("testimage_dump_host", True))
+ dump_host_cmds = d.getVar("testimage_dump_host"),
+ logger = logger,
+ serial_ports = len(d.getVar("SERIAL_CONSOLES").split()))
self.target_dumper = TargetDumper(dump_target_cmds, dump_dir, self.runner)
@@ -172,12 +156,17 @@ class QemuTarget(BaseTarget):
os.unlink(qemuloglink)
os.symlink(self.qemulog, qemuloglink)
- bb.note("rootfs file: %s" % self.rootfs)
- bb.note("Qemu log file: %s" % self.qemulog)
+ self.logger.info("rootfs file: %s" % self.rootfs)
+ self.logger.info("Qemu log file: %s" % self.qemulog)
super(QemuTarget, self).deploy()
- def start(self, params=None, ssh=True, extra_bootparams=None):
- if self.runner.start(params, get_ip=ssh, extra_bootparams=extra_bootparams):
+ def start(self, params=None, ssh=True, extra_bootparams='', runqemuparams='', launch_cmd='', discard_writes=True):
+ if launch_cmd:
+ start = self.runner.launch(get_ip=ssh, launch_cmd=launch_cmd, qemuparams=params)
+ else:
+ start = self.runner.start(params, get_ip=ssh, extra_bootparams=extra_bootparams, runqemuparams=runqemuparams, discard_writes=discard_writes)
+
+ if start:
if ssh:
self.ip = self.runner.ip
self.server_ip = self.runner.server_ip
@@ -187,7 +176,7 @@ class QemuTarget(BaseTarget):
if os.path.exists(self.qemulog):
with open(self.qemulog, 'r') as f:
bb.error("Qemu log output from %s:\n%s" % (self.qemulog, f.read()))
- raise bb.build.FuncFailed("%s - FAILED to start qemu - check the task log and the boot log" % self.pn)
+ raise RuntimeError("%s - FAILED to start qemu - check the task log and the boot log" % self.pn)
def check(self):
return self.runner.is_alive()
@@ -204,30 +193,30 @@ class QemuTarget(BaseTarget):
self.server_ip = self.runner.server_ip
self.connection = SSHControl(ip=self.ip, logfile=self.sshlog)
else:
- raise bb.build.FuncFailed("%s - FAILED to re-start qemu - check the task log and the boot log" % self.pn)
+ raise RuntimeError("%s - FAILED to re-start qemu - check the task log and the boot log" % self.pn)
- def run_serial(self, command):
- return self.runner.run_serial(command)
+ def run_serial(self, command, timeout=60):
+ return self.runner.run_serial(command, timeout=timeout)
class SimpleRemoteTarget(BaseTarget):
def __init__(self, d, logger):
super(SimpleRemoteTarget, self).__init__(d, logger)
- addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
+ addr = d.getVar("TEST_TARGET_IP") or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
self.ip = addr.split(":")[0]
try:
self.port = addr.split(":")[1]
except IndexError:
self.port = None
- bb.note("Target IP: %s" % self.ip)
- self.server_ip = d.getVar("TEST_SERVER_IP", True)
+ self.logger.info("Target IP: %s" % self.ip)
+ self.server_ip = d.getVar("TEST_SERVER_IP")
if not self.server_ip:
try:
self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).decode('utf-8').split("\n")[0].split()[-1]
except Exception as e:
bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
- bb.note("Server IP: %s" % self.server_ip)
+ self.logger.info("Server IP: %s" % self.server_ip)
def deploy(self):
super(SimpleRemoteTarget, self).deploy()
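
A minimal sketch of driving the reworked QemuTarget interface above; here d stands for a populated bitbake datastore with FIND_ROOTFS set (an assumption: it would be obtained elsewhere, e.g. via tinfoil as runqemu() does further below), and the logger is a plain Python logger:

    import logging
    from oeqa.targetcontrol import QemuTarget

    logger = logging.getLogger("TargetRunner")
    logger.setLevel(logging.DEBUG)

    target = QemuTarget(d, logger)   # d: bitbake datastore (assumed available)
    target.deploy()
    target.start(runqemuparams="nographic")
    status, output = target.run_serial("uname -a", timeout=120)
    target.stop()
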
diff --git a/meta/lib/oeqa/utils/__init__.py b/meta/lib/oeqa/utils/__init__.py
index 8f706f3637..70fbe7b552 100644
--- a/meta/lib/oeqa/utils/__init__.py
+++ b/meta/lib/oeqa/utils/__init__.py
@@ -1,8 +1,10 @@
+#
+# SPDX-License-Identifier: MIT
+#
# Enable other layers to have modules in the same named directory
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
-
# Borrowed from CalledProcessError
class CommandError(Exception):
@@ -36,3 +38,69 @@ def avoid_paths_in_environ(paths):
new_path = new_path[:-1]
return new_path
+
+def make_logger_bitbake_compatible(logger):
+ import logging
+
+ """
+ Bitbake logger redifines debug() in order to
+ set a level within debug, this breaks compatibility
+ with vainilla logging, so we neeed to redifine debug()
+ method again also add info() method with INFO + 1 level.
+ """
+ def _bitbake_log_debug(*args, **kwargs):
+ lvl = logging.DEBUG
+
+ if isinstance(args[0], int):
+ lvl = args[0]
+ msg = args[1]
+ args = args[2:]
+ else:
+ msg = args[0]
+ args = args[1:]
+
+ logger.log(lvl, msg, *args, **kwargs)
+
+ def _bitbake_log_info(msg, *args, **kwargs):
+ logger.log(logging.INFO + 1, msg, *args, **kwargs)
+
+ logger.debug = _bitbake_log_debug
+ logger.info = _bitbake_log_info
+
+ return logger
+
+def load_test_components(logger, executor):
+ import sys
+ import os
+ import importlib
+
+ from oeqa.core.context import OETestContextExecutor
+
+ components = {}
+
+ for path in sys.path:
+ base_dir = os.path.join(path, 'oeqa')
+ if os.path.exists(base_dir) and os.path.isdir(base_dir):
+ for file in os.listdir(base_dir):
+ comp_name = file
+ comp_context = os.path.join(base_dir, file, 'context.py')
+ if os.path.exists(comp_context):
+ comp_plugin = importlib.import_module('oeqa.%s.%s' % \
+ (comp_name, 'context'))
+ try:
+ if not issubclass(comp_plugin._executor_class,
+ OETestContextExecutor):
+ raise TypeError("Component %s in %s, _executor_class "\
+ "isn't derived from OETestContextExecutor."\
+ % (comp_name, comp_context))
+
+ if comp_plugin._executor_class._script_executor \
+ != executor:
+ continue
+
+ components[comp_name] = comp_plugin._executor_class()
+ except AttributeError:
+ raise AttributeError("Component %s in %s doesn't have "\
+ "_executor_class defined." % (comp_name, comp_context))
+
+ return components
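
A small usage sketch of the make_logger_bitbake_compatible() helper added above; the logger name is arbitrary, since only the debug()/info() methods of the instance passed in are rebound:

    import logging
    from oeqa.utils import make_logger_bitbake_compatible

    logger = make_logger_bitbake_compatible(logging.getLogger("oeqa"))

    logger.info("plain message")                      # logged at INFO + 1
    logger.debug("value is %s", 42)                   # plain logging.DEBUG
    logger.debug(logging.DEBUG - 1, "finer message")  # BitBake-style level-first call
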
diff --git a/meta/lib/oeqa/utils/buildproject.py b/meta/lib/oeqa/utils/buildproject.py
new file mode 100644
index 0000000000..e6d80cc8dc
--- /dev/null
+++ b/meta/lib/oeqa/utils/buildproject.py
@@ -0,0 +1,63 @@
+#
+# Copyright (C) 2013-2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+# Provides a class for automating build tests for projects
+
+import os
+import re
+import subprocess
+import shutil
+import tempfile
+
+from abc import ABCMeta, abstractmethod
+
+class BuildProject(metaclass=ABCMeta):
+ def __init__(self, uri, foldername=None, tmpdir=None, dl_dir=None):
+ self.uri = uri
+ self.archive = os.path.basename(uri)
+ if not tmpdir:
+ self.tempdirobj = tempfile.TemporaryDirectory(prefix='buildproject-')
+ tmpdir = self.tempdirobj.name
+ self.localarchive = os.path.join(tmpdir, self.archive)
+ self.dl_dir = dl_dir
+ if foldername:
+ self.fname = foldername
+ else:
+ self.fname = re.sub(r'\.tar\.bz2$|\.tar\.gz$|\.tar\.xz$', '', self.archive)
+ self.needclean = False
+
+ # Download self.archive to self.localarchive
+ def _download_archive(self):
+
+ self.needclean = True
+ if self.dl_dir and os.path.exists(os.path.join(self.dl_dir, self.archive)):
+ shutil.copyfile(os.path.join(self.dl_dir, self.archive), self.localarchive)
+ return
+
+ cmd = "wget -O %s %s" % (self.localarchive, self.uri)
+ subprocess.check_output(cmd, shell=True)
+
+ # This method should provide a way to run a command in the desired environment.
+ @abstractmethod
+ def _run(self, cmd):
+ pass
+
+ # The timeout parameter of target.run is set to 0 to make the ssh command
+ # run with no timeout.
+ def run_configure(self, configure_args='', extra_cmds=''):
+ return self._run('cd %s; gnu-configize; %s ./configure %s' % (self.targetdir, extra_cmds, configure_args))
+
+ def run_make(self, make_args=''):
+ return self._run('cd %s; make %s' % (self.targetdir, make_args))
+
+ def run_install(self, install_args=''):
+ return self._run('cd %s; make install %s' % (self.targetdir, install_args))
+
+ def clean(self):
+ if not self.needclean:
+ return
+ self._run('rm -rf %s' % self.targetdir)
+ subprocess.check_call('rm -f %s' % self.localarchive, shell=True)
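
BuildProject is abstract: a concrete class only has to supply _run() (plus a targetdir for the configure/make/install steps). A hypothetical local-host subclass, for illustration only:

    import subprocess
    from oeqa.utils.buildproject import BuildProject

    class LocalBuildProject(BuildProject):
        """Illustrative only: runs the build steps on the local host."""
        def __init__(self, uri, targetdir, **kwargs):
            super().__init__(uri, **kwargs)
            self.targetdir = targetdir    # where the unpacked sources live

        def _run(self, cmd):
            # Return the exit status, mirroring what a target's run() returns.
            return subprocess.call(cmd, shell=True)

    project = LocalBuildProject("https://example.com/src/hello-1.0.tar.gz",
                                "/tmp/hello-1.0", dl_dir="/tmp/downloads")
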
diff --git a/meta/lib/oeqa/utils/commands.py b/meta/lib/oeqa/utils/commands.py
index 5cd0f7477b..dc1e286dac 100644
--- a/meta/lib/oeqa/utils/commands.py
+++ b/meta/lib/oeqa/utils/commands.py
@@ -1,6 +1,8 @@
+#
# Copyright (c) 2013-2014 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
# DESCRIPTION
# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest
@@ -13,6 +15,7 @@ import sys
import signal
import subprocess
import threading
+import time
import logging
from oeqa.utils import CommandError
from oeqa.utils import ftools
@@ -25,7 +28,7 @@ except ImportError:
pass
class Command(object):
- def __init__(self, command, bg=False, timeout=None, data=None, **options):
+ def __init__(self, command, bg=False, timeout=None, data=None, output_log=None, **options):
self.defaultopts = {
"stdout": subprocess.PIPE,
@@ -48,41 +51,106 @@ class Command(object):
self.options.update(options)
self.status = None
+ # We collect chunks of output before joining them at the end.
+ self._output_chunks = []
+ self._error_chunks = []
self.output = None
self.error = None
- self.thread = None
+ self.threads = []
+ self.output_log = output_log
self.log = logging.getLogger("utils.commands")
def run(self):
self.process = subprocess.Popen(self.cmd, **self.options)
- def commThread():
- self.output, self.error = self.process.communicate(self.data)
-
- self.thread = threading.Thread(target=commThread)
- self.thread.start()
+ def readThread(output, stream, logfunc):
+ if logfunc:
+ for line in stream:
+ output.append(line)
+ logfunc(line.decode("utf-8", errors='replace').rstrip())
+ else:
+ output.append(stream.read())
+
+ def readStderrThread():
+ readThread(self._error_chunks, self.process.stderr, self.output_log.error if self.output_log else None)
+
+ def readStdoutThread():
+ readThread(self._output_chunks, self.process.stdout, self.output_log.info if self.output_log else None)
+
+ def writeThread():
+ try:
+ self.process.stdin.write(self.data)
+ self.process.stdin.close()
+ except OSError as ex:
+ # It's not an error when the command does not consume all
+ # of our data. subprocess.communicate() also ignores that.
+ if ex.errno != EPIPE:
+ raise
+
+ # We write in a separate thread because then we can read
+ # without worrying about deadlocks. The additional thread is
+ # expected to terminate by itself and we mark it as a daemon,
+ # so even it should happen to not terminate for whatever
+ # reason, the main process will still exit, which will then
+ # kill the write thread.
+ if self.data:
+ threading.Thread(target=writeThread, daemon=True).start()
+ if self.process.stderr:
+ thread = threading.Thread(target=readStderrThread)
+ thread.start()
+ self.threads.append(thread)
+ if self.output_log:
+ self.output_log.info('Running: %s' % self.cmd)
+ thread = threading.Thread(target=readStdoutThread)
+ thread.start()
+ self.threads.append(thread)
self.log.debug("Running command '%s'" % self.cmd)
if not self.bg:
- self.thread.join(self.timeout)
+ if self.timeout is None:
+ for thread in self.threads:
+ thread.join()
+ else:
+ deadline = time.time() + self.timeout
+ for thread in self.threads:
+ timeout = deadline - time.time()
+ if timeout < 0:
+ timeout = 0
+ thread.join(timeout)
self.stop()
def stop(self):
- if self.thread.isAlive():
- self.process.terminate()
+ for thread in self.threads:
+ if thread.isAlive():
+ self.process.terminate()
# let's give it more time to terminate gracefully before killing it
- self.thread.join(5)
- if self.thread.isAlive():
+ thread.join(5)
+ if thread.isAlive():
self.process.kill()
- self.thread.join()
+ thread.join()
- if not self.output:
- self.output = ""
- else:
- self.output = self.output.decode("utf-8", errors='replace').rstrip()
- self.status = self.process.poll()
+ def finalize_output(data):
+ if not data:
+ data = ""
+ else:
+ data = b"".join(data)
+ data = data.decode("utf-8", errors='replace').rstrip()
+ return data
+
+ self.output = finalize_output(self._output_chunks)
+ self._output_chunks = None
+ # self.error used to be a byte string earlier, probably unintentionally.
+ # Now it is a normal string, just like self.output.
+ self.error = finalize_output(self._error_chunks)
+ self._error_chunks = None
+ # At this point we know that the process has closed stdout/stderr, so
+ # it is safe and necessary to wait for the actual process completion.
+ self.status = self.process.wait()
+ self.process.stdout.close()
+ if self.process.stderr:
+ self.process.stderr.close()
self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
# logging the complete output is insane
@@ -97,10 +165,21 @@ class Result(object):
pass
-def runCmd(command, ignore_status=False, timeout=None, assert_error=True, **options):
+def runCmd(command, ignore_status=False, timeout=None, assert_error=True,
+ native_sysroot=None, limit_exc_output=0, output_log=None, **options):
result = Result()
- cmd = Command(command, timeout=timeout, **options)
+ if native_sysroot:
+ extra_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
+ (native_sysroot, native_sysroot, native_sysroot)
+ extra_libpaths = "%s/lib:%s/usr/lib" % \
+ (native_sysroot, native_sysroot)
+ nenv = dict(options.get('env', os.environ))
+ nenv['PATH'] = extra_paths + ':' + nenv.get('PATH', '')
+ nenv['LD_LIBRARY_PATH'] = extra_libpaths + ':' + nenv.get('LD_LIBRARY_PATH', '')
+ options['env'] = nenv
+
+ cmd = Command(command, timeout=timeout, output_log=output_log, **options)
cmd.run()
result.command = command
@@ -110,15 +189,21 @@ def runCmd(command, ignore_status=False, timeout=None, assert_error=True, **opti
result.pid = cmd.process.pid
if result.status and not ignore_status:
+ exc_output = result.output
+ if limit_exc_output > 0:
+ split = result.output.splitlines()
+ if len(split) > limit_exc_output:
+ exc_output = "\n... (last %d lines of output)\n" % limit_exc_output + \
+ '\n'.join(split[-limit_exc_output:])
if assert_error:
- raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, result.output))
+ raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, exc_output))
else:
- raise CommandError(result.status, command, result.output)
+ raise CommandError(result.status, command, exc_output)
return result
-def bitbake(command, ignore_status=False, timeout=None, postconfig=None, **options):
+def bitbake(command, ignore_status=False, timeout=None, postconfig=None, output_log=None, **options):
if postconfig:
postconfig_file = os.path.join(os.environ.get('BUILDDIR'), 'oeqa-post.conf')
@@ -133,7 +218,7 @@ def bitbake(command, ignore_status=False, timeout=None, postconfig=None, **optio
cmd = [ "bitbake" ] + [a for a in (command + extra_args.split(" ")) if a not in [""]]
try:
- return runCmd(cmd, ignore_status, timeout, **options)
+ return runCmd(cmd, ignore_status, timeout, output_log=output_log, **options)
finally:
if postconfig:
os.remove(postconfig_file)
@@ -149,7 +234,9 @@ def get_bb_vars(variables=None, target=None, postconfig=None):
"""Get values of multiple bitbake variables"""
bbenv = get_bb_env(target, postconfig=postconfig)
- var_re = re.compile(r'^(export )?(?P<var>\w+)="(?P<value>.*)"$')
+ if variables is not None:
+ variables = list(variables)
+ var_re = re.compile(r'^(export )?(?P<var>\w+(_.*)?)="(?P<value>.*)"$')
unset_re = re.compile(r'^unset (?P<var>\w+)$')
lastline = None
values = {}
@@ -206,60 +293,64 @@ def create_temp_layer(templayerdir, templayername, priority=999, recipepathspec=
f.write('BBFILE_PATTERN_%s = "^${LAYERDIR}/"\n' % templayername)
f.write('BBFILE_PRIORITY_%s = "%d"\n' % (templayername, priority))
f.write('BBFILE_PATTERN_IGNORE_EMPTY_%s = "1"\n' % templayername)
-
+ f.write('LAYERSERIES_COMPAT_%s = "${LAYERSERIES_COMPAT_core}"\n' % templayername)
@contextlib.contextmanager
-def runqemu(pn, ssh=True):
+def runqemu(pn, ssh=True, runqemuparams='', image_fstype=None, launch_cmd=None, qemuparams=None, overrides={}, discard_writes=True):
+ """
+ If launch_cmd is given, it is run directly; there is no need to set up a rootfs or environment variables.
+ """
import bb.tinfoil
import bb.build
+ # Need a non-'BitBake' logger to capture the runner output
+ targetlogger = logging.getLogger('TargetRunner')
+ targetlogger.setLevel(logging.DEBUG)
+ handler = logging.StreamHandler(sys.stdout)
+ targetlogger.addHandler(handler)
+
tinfoil = bb.tinfoil.Tinfoil()
- tinfoil.prepare(False)
+ tinfoil.prepare(config_only=False, quiet=True)
try:
tinfoil.logger.setLevel(logging.WARNING)
import oeqa.targetcontrol
tinfoil.config_data.setVar("TEST_LOG_DIR", "${WORKDIR}/testimage")
tinfoil.config_data.setVar("TEST_QEMUBOOT_TIMEOUT", "1000")
- import oe.recipeutils
- recipefile = oe.recipeutils.pn_to_recipe(tinfoil.cooker, pn)
- recipedata = oe.recipeutils.parse_recipe(tinfoil.cooker, recipefile, [])
-
- # The QemuRunner log is saved out, but we need to ensure it is at the right
- # log level (and then ensure that since it's a child of the BitBake logger,
- # we disable propagation so we don't then see the log events on the console)
- logger = logging.getLogger('BitBake.QemuRunner')
- logger.setLevel(logging.DEBUG)
- logger.propagate = False
- logdir = recipedata.getVar("TEST_LOG_DIR", True)
-
- qemu = oeqa.targetcontrol.QemuTarget(recipedata)
+ # Tell QemuTarget() whether it needs to find the rootfs/kernel or not
+ if launch_cmd:
+ tinfoil.config_data.setVar("FIND_ROOTFS", '0')
+ else:
+ tinfoil.config_data.setVar("FIND_ROOTFS", '1')
+
+ recipedata = tinfoil.parse_recipe(pn)
+ for key, value in overrides.items():
+ recipedata.setVar(key, value)
+
+ logdir = recipedata.getVar("TEST_LOG_DIR")
+
+ qemu = oeqa.targetcontrol.QemuTarget(recipedata, targetlogger, image_fstype)
finally:
# We need to shut down tinfoil early here in case we actually want
# to run tinfoil-using utilities with the running QEMU instance.
# Luckily QemuTarget doesn't need it after the constructor.
tinfoil.shutdown()
- # Setup bitbake logger as console handler is removed by tinfoil.shutdown
- bblogger = logging.getLogger('BitBake')
- bblogger.setLevel(logging.INFO)
- console = logging.StreamHandler(sys.stdout)
- bbformat = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
- if sys.stdout.isatty():
- bbformat.enable_color()
- console.setFormatter(bbformat)
- bblogger.addHandler(console)
-
try:
qemu.deploy()
try:
- qemu.start(ssh=ssh)
- except bb.build.FuncFailed:
- raise Exception('Failed to start QEMU - see the logs in %s' % logdir)
+ qemu.start(params=qemuparams, ssh=ssh, runqemuparams=runqemuparams, launch_cmd=launch_cmd, discard_writes=discard_writes)
+ except Exception as e:
+ msg = str(e) + '\nFailed to start QEMU - see the logs in %s' % logdir
+ if os.path.exists(qemu.qemurunnerlog):
+ with open(qemu.qemurunnerlog, 'r') as f:
+ msg = msg + "\nQemurunner log output from %s:\n%s" % (qemu.qemurunnerlog, f.read())
+ raise Exception(msg)
yield qemu
finally:
+ targetlogger.removeHandler(handler)
try:
qemu.stop()
except:
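
Note the new runCmd() keywords above: native_sysroot prepends the sysroot's sbin/bin directories to PATH and LD_LIBRARY_PATH, and limit_exc_output truncates the output embedded in a raised AssertionError to the last N lines. A sketch of the extended runqemu() context manager, assuming core-image-minimal has already been built:

    from oeqa.utils.commands import runqemu

    with runqemu("core-image-minimal", ssh=True,
                 overrides={"TEST_QEMUBOOT_TIMEOUT": "1500"}) as qemu:
        status, output = qemu.run_serial("uname -r", timeout=120)
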
diff --git a/meta/lib/oeqa/utils/decorators.py b/meta/lib/oeqa/utils/decorators.py
index 25f9c54e6b..aabf4110cb 100644
--- a/meta/lib/oeqa/utils/decorators.py
+++ b/meta/lib/oeqa/utils/decorators.py
@@ -1,6 +1,8 @@
+#
# Copyright (C) 2013 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
# Some custom decorators that can be used by unittests
# Most useful is skipUnlessPassed which can be used for
@@ -172,18 +174,19 @@ def LogResults(original_class):
#check status of tests and record it
+ tcid = self.id()
for (name, msg) in result.errors:
- if (self._testMethodName == str(name).split(' ')[0]) and (class_name in str(name).split(' ')[1]):
+ if tcid == name.id():
local_log.results("Testcase "+str(test_case)+": ERROR")
local_log.results("Testcase "+str(test_case)+":\n"+msg)
passed = False
for (name, msg) in result.failures:
- if (self._testMethodName == str(name).split(' ')[0]) and (class_name in str(name).split(' ')[1]):
+ if tcid == name.id():
local_log.results("Testcase "+str(test_case)+": FAILED")
local_log.results("Testcase "+str(test_case)+":\n"+msg)
passed = False
for (name, msg) in result.skipped:
- if (self._testMethodName == str(name).split(' ')[0]) and (class_name in str(name).split(' ')[1]):
+ if tcid == name.id():
local_log.results("Testcase "+str(test_case)+": SKIPPED")
passed = False
if passed:
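
The rewritten matching above compares full unittest IDs instead of parsing the string form of the test. For reference, result.errors/failures/skipped hold (TestCase, message) pairs, so the comparison reduces to id() equality:

    import unittest

    class ExampleTest(unittest.TestCase):
        def test_something(self):
            self.fail("boom")

    result = unittest.TestResult()
    case = ExampleTest("test_something")
    case.run(result)
    failed_case, msg = result.failures[0]
    # Each entry pairs the failing TestCase with its traceback string.
    assert failed_case.id() == case.id()
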
diff --git a/meta/lib/oeqa/utils/dump.py b/meta/lib/oeqa/utils/dump.py
index 71422a9aea..09a44329e0 100644
--- a/meta/lib/oeqa/utils/dump.py
+++ b/meta/lib/oeqa/utils/dump.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
import sys
import errno
@@ -5,12 +9,6 @@ import datetime
import itertools
from .commands import runCmd
-def get_host_dumper(d):
- cmds = d.getVar("testimage_dump_host", True)
- parent_dir = d.getVar("TESTIMAGE_DUMP_DIR", True)
- return HostDumper(cmds, parent_dir)
-
-
class BaseDumper(object):
""" Base class to dump commands from host/target """
@@ -18,7 +16,7 @@ class BaseDumper(object):
self.cmds = []
# Some testing doesn't inherit testimage, so it is needed
# to set some defaults.
- self.parent_dir = parent_dir or "/tmp/oe-saved-tests"
+ self.parent_dir = parent_dir
dft_cmds = """ top -bn1
iostat -x -z -N -d -p ALL 20 2
ps -ef
@@ -73,17 +71,19 @@ class HostDumper(BaseDumper):
def dump_host(self, dump_dir=""):
if dump_dir:
self.dump_dir = dump_dir
+ env = os.environ.copy()
+ env['PATH'] = '/usr/sbin:/sbin:/usr/bin:/bin'
+ env['COLUMNS'] = '9999'
for cmd in self.cmds:
- result = runCmd(cmd, ignore_status=True)
+ result = runCmd(cmd, ignore_status=True, env=env)
self._write_dump(cmd.split()[0], result.output)
-
class TargetDumper(BaseDumper):
""" Class to get dumps from target, it only works with QemuRunner """
- def __init__(self, cmds, parent_dir, qemurunner):
+ def __init__(self, cmds, parent_dir, runner):
super(TargetDumper, self).__init__(cmds, parent_dir)
- self.runner = qemurunner
+ self.runner = runner
def dump_target(self, dump_dir=""):
if dump_dir:
diff --git a/meta/lib/oeqa/utils/ftools.py b/meta/lib/oeqa/utils/ftools.py
index a7233d4ca6..3093419cc7 100644
--- a/meta/lib/oeqa/utils/ftools.py
+++ b/meta/lib/oeqa/utils/ftools.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import os
import re
import errno
diff --git a/meta/lib/oeqa/utils/git.py b/meta/lib/oeqa/utils/git.py
index ae85d27663..ea35a766eb 100644
--- a/meta/lib/oeqa/utils/git.py
+++ b/meta/lib/oeqa/utils/git.py
@@ -1,7 +1,7 @@
#
# Copyright (C) 2016 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
#
"""Git repository interactions"""
import os
@@ -16,8 +16,17 @@ class GitError(Exception):
class GitRepo(object):
"""Class representing a Git repository clone"""
def __init__(self, path, is_topdir=False):
- self.top_dir = self._run_git_cmd_at(['rev-parse', '--show-toplevel'],
- path)
+ git_dir = self._run_git_cmd_at(['rev-parse', '--git-dir'], path)
+ git_dir = git_dir if os.path.isabs(git_dir) else os.path.join(path, git_dir)
+ self.git_dir = os.path.realpath(git_dir)
+
+ if self._run_git_cmd_at(['rev-parse', '--is-bare-repository'], path) == 'true':
+ self.bare = True
+ self.top_dir = self.git_dir
+ else:
+ self.bare = False
+ self.top_dir = self._run_git_cmd_at(['rev-parse', '--show-toplevel'],
+ path)
realpath = os.path.realpath(path)
if is_topdir and realpath != self.top_dir:
raise GitError("{} is not a Git top directory".format(realpath))
@@ -36,9 +45,12 @@ class GitRepo(object):
return ret.output.strip()
@staticmethod
- def init(path):
+ def init(path, bare=False):
"""Initialize a new Git repository"""
- GitRepo._run_git_cmd_at('init', cwd=path)
+ cmd = ['init']
+ if bare:
+ cmd.append('--bare')
+ GitRepo._run_git_cmd_at(cmd, cwd=path)
return GitRepo(path, is_topdir=True)
def run_cmd(self, git_args, env_update=None):
@@ -52,7 +64,7 @@ class GitRepo(object):
def rev_parse(self, revision):
"""Do git rev-parse"""
try:
- return self.run_cmd(['rev-parse', revision])
+ return self.run_cmd(['rev-parse', '--verify', revision])
except GitError:
# Revision does not exist
return None
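
A short sketch of the new bare-repository support; the path is hypothetical and must exist before init() is called:

    import os
    from oeqa.utils.git import GitRepo

    path = "/tmp/results.git"          # hypothetical location
    os.makedirs(path, exist_ok=True)
    repo = GitRepo.init(path, bare=True)
    assert repo.bare and repo.top_dir == repo.git_dir
    # With --verify, rev_parse() returns None for anything that does not
    # resolve to a single known object.
    assert repo.rev_parse("no-such-branch") is None
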
diff --git a/meta/lib/oeqa/utils/gitarchive.py b/meta/lib/oeqa/utils/gitarchive.py
new file mode 100644
index 0000000000..6e8040eb5c
--- /dev/null
+++ b/meta/lib/oeqa/utils/gitarchive.py
@@ -0,0 +1,237 @@
+#
+# Helper functions for committing data to git and pushing upstream
+#
+# Copyright (c) 2017, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import re
+import sys
+from operator import attrgetter
+from collections import namedtuple
+from oeqa.utils.git import GitRepo, GitError
+
+class ArchiveError(Exception):
+ """Internal error handling of this script"""
+
+def format_str(string, fields):
+ """Format string using the given fields (dict)"""
+ try:
+ return string.format(**fields)
+ except KeyError as err:
+ raise ArchiveError("Unable to expand string '{}': unknown field {} "
+ "(valid fields are: {})".format(
+ string, err, ', '.join(sorted(fields.keys()))))
+
+
+def init_git_repo(path, no_create, bare, log):
+ """Initialize local Git repository"""
+ path = os.path.abspath(path)
+ if os.path.isfile(path):
+ raise ArchiveError("Invalid Git repo at {}: path exists but is not a "
+ "directory".format(path))
+ if not os.path.isdir(path) or not os.listdir(path):
+ if no_create:
+ raise ArchiveError("No git repo at {}, refusing to create "
+ "one".format(path))
+ if not os.path.isdir(path):
+ try:
+ os.mkdir(path)
+ except (FileNotFoundError, PermissionError) as err:
+ raise ArchiveError("Failed to mkdir {}: {}".format(path, err))
+ if not os.listdir(path):
+ log.info("Initializing a new Git repo at %s", path)
+ repo = GitRepo.init(path, bare)
+ try:
+ repo = GitRepo(path, is_topdir=True)
+ except GitError:
+ raise ArchiveError("Non-empty directory that is not a Git repository "
+ "at {}\nPlease specify an existing Git repository, "
+ "an empty directory or a non-existing directory "
+ "path.".format(path))
+ return repo
+
+
+def git_commit_data(repo, data_dir, branch, message, exclude, notes, log):
+ """Commit data into a Git repository"""
+ log.info("Committing data to branch %s", branch)
+ tmp_index = os.path.join(repo.git_dir, 'index.oe-git-archive')
+ try:
+ # Create new tree object from the data
+ env_update = {'GIT_INDEX_FILE': tmp_index,
+ 'GIT_WORK_TREE': os.path.abspath(data_dir)}
+ repo.run_cmd('add .', env_update)
+
+ # Remove files that are excluded
+ if exclude:
+ repo.run_cmd(['rm', '--cached'] + [f for f in exclude], env_update)
+
+ tree = repo.run_cmd('write-tree', env_update)
+
+ # Create new commit object from the tree
+ parent = repo.rev_parse(branch)
+ if not parent:
+ parent = repo.rev_parse("origin/" + branch)
+ git_cmd = ['commit-tree', tree, '-m', message]
+ if parent:
+ git_cmd += ['-p', parent]
+ commit = repo.run_cmd(git_cmd, env_update)
+
+ # Create git notes
+ for ref, filename in notes:
+ ref = ref.format(branch_name=branch)
+ repo.run_cmd(['notes', '--ref', ref, 'add',
+ '-F', os.path.abspath(filename), commit])
+
+ # Update branch head
+ git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
+ repo.run_cmd(git_cmd)
+
+ # Update current HEAD, if we're on branch 'branch'
+ if not repo.bare and repo.get_current_branch() == branch:
+ log.info("Updating %s HEAD to latest commit", repo.top_dir)
+ repo.run_cmd('reset --hard')
+
+ return commit
+ finally:
+ if os.path.exists(tmp_index):
+ os.unlink(tmp_index)
+
+
+def expand_tag_strings(repo, name_pattern, msg_subj_pattern, msg_body_pattern,
+ keywords):
+ """Generate tag name and message, with support for running id number"""
+ keyws = keywords.copy()
+ # Tag number is handled specially: if not defined, we autoincrement it
+ if 'tag_number' not in keyws:
+ # Fill in all other fields than 'tag_number'
+ keyws['tag_number'] = '{tag_number}'
+ tag_re = format_str(name_pattern, keyws)
+ # Escape parentheses so they match literally in the regex
+ tag_re = tag_re.replace('(', r'\(').replace(')', r'\)') + '$'
+ # Inject regex group pattern for 'tag_number'
+ tag_re = tag_re.format(tag_number='(?P<tag_number>[0-9]{1,5})')
+
+ keyws['tag_number'] = 0
+ for existing_tag in repo.run_cmd('tag').splitlines():
+ match = re.match(tag_re, existing_tag)
+
+ if match and int(match.group('tag_number')) >= keyws['tag_number']:
+ keyws['tag_number'] = int(match.group('tag_number')) + 1
+
+ tag_name = format_str(name_pattern, keyws)
+ msg_subj = format_str(msg_subj_pattern.strip(), keyws)
+ msg_body = format_str(msg_body_pattern, keyws)
+ return tag_name, msg_subj + '\n\n' + msg_body
+
+def gitarchive(data_dir, git_dir, no_create, bare, commit_msg_subject, commit_msg_body, branch_name, no_tag, tagname, tag_msg_subject, tag_msg_body, exclude, notes, push, keywords, log):
+
+ if not os.path.isdir(data_dir):
+ raise ArchiveError("Not a directory: {}".format(data_dir))
+
+ data_repo = init_git_repo(git_dir, no_create, bare, log)
+
+ # Expand strings early in order to avoid getting into inconsistent
+ # state (e.g. no tag even if data was committed)
+ commit_msg = format_str(commit_msg_subject.strip(), keywords)
+ commit_msg += '\n\n' + format_str(commit_msg_body, keywords)
+ branch_name = format_str(branch_name, keywords)
+ tag_name = None
+ if not no_tag and tagname:
+ tag_name, tag_msg = expand_tag_strings(data_repo, tagname,
+ tag_msg_subject,
+ tag_msg_body, keywords)
+
+ # Commit data
+ commit = git_commit_data(data_repo, data_dir, branch_name,
+ commit_msg, exclude, notes, log)
+
+ # Create tag
+ if tag_name:
+ log.info("Creating tag %s", tag_name)
+ data_repo.run_cmd(['tag', '-a', '-m', tag_msg, tag_name, commit])
+
+ # Push data to remote
+ if push:
+ cmd = ['push', '--tags']
+ # If no remote is given we push with the default settings from
+ # gitconfig
+ if push is not True:
+ notes_refs = ['refs/notes/' + ref.format(branch_name=branch_name)
+ for ref, _ in notes]
+ cmd.extend([push, branch_name] + notes_refs)
+ log.info("Pushing data to remote")
+ data_repo.run_cmd(cmd)
+
+# Container class for tester revisions
+TestedRev = namedtuple('TestedRev', 'commit commit_number tags')
+
+def get_test_runs(log, repo, tag_name, **kwargs):
+ """Get a sorted list of test runs, matching given pattern"""
+ # First, get field names from the tag name pattern
+ field_names = [m.group(1) for m in re.finditer(r'{(\w+)}', tag_name)]
+ undef_fields = [f for f in field_names if f not in kwargs.keys()]
+
+ # Fields for formatting tag name pattern
+ str_fields = dict([(f, '*') for f in field_names])
+ str_fields.update(kwargs)
+
+ # Get a list of all matching tags
+ tag_pattern = tag_name.format(**str_fields)
+ tags = repo.run_cmd(['tag', '-l', tag_pattern]).splitlines()
+ log.debug("Found %d tags matching pattern '%s'", len(tags), tag_pattern)
+
+ # Parse undefined fields from tag names
+ str_fields = dict([(f, r'(?P<{}>[\w\-.()]+)'.format(f)) for f in field_names])
+ str_fields['branch'] = r'(?P<branch>[\w\-.()/]+)'
+ str_fields['commit'] = '(?P<commit>[0-9a-f]{7,40})'
+ str_fields['commit_number'] = '(?P<commit_number>[0-9]{1,7})'
+ str_fields['tag_number'] = '(?P<tag_number>[0-9]{1,5})'
+ # escape parentheses in fields so they do not mess up the regexp
+ fixed_fields = dict([(k, v.replace('(', r'\(').replace(')', r'\)')) for k, v in kwargs.items()])
+ str_fields.update(fixed_fields)
+ tag_re = re.compile(tag_name.format(**str_fields))
+
+ # Parse fields from tags
+ revs = []
+ for tag in tags:
+ m = tag_re.match(tag)
+ groups = m.groupdict()
+ revs.append([groups[f] for f in undef_fields] + [tag])
+
+ # Return field names and a sorted list of revs
+ return undef_fields, sorted(revs)
+
+def get_test_revs(log, repo, tag_name, **kwargs):
+ """Get list of all tested revisions"""
+ fields, runs = get_test_runs(log, repo, tag_name, **kwargs)
+
+ revs = {}
+ commit_i = fields.index('commit')
+ commit_num_i = fields.index('commit_number')
+ for run in runs:
+ commit = run[commit_i]
+ commit_num = run[commit_num_i]
+ tag = run[-1]
+ if commit not in revs:
+ revs[commit] = TestedRev(commit, commit_num, [tag])
+ else:
+ assert commit_num == revs[commit].commit_number, "Commit numbers do not match"
+ revs[commit].tags.append(tag)
+
+ # Return in sorted table
+ revs = sorted(revs.values(), key=attrgetter('commit_number'))
+ log.debug("Found %d tested revisions:\n %s", len(revs),
+ "\n ".join(['{} ({})'.format(rev.commit_number, rev.commit) for rev in revs]))
+ return revs
+
+def rev_find(revs, attr, val):
+ """Search from a list of TestedRev"""
+ for i, rev in enumerate(revs):
+ if getattr(rev, attr) == val:
+ return i
+ raise ValueError("Unable to find '{}' value '{}'".format(attr, val))
+
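
The helpers above expand {field} placeholders from a keyword dict; a quick sketch of format_str() behaviour (field values are illustrative):

    from oeqa.utils.gitarchive import ArchiveError, format_str

    fields = {"hostname": "builder1", "branch": "master", "machine": "qemux86"}
    assert format_str("{hostname}/{machine}/{branch}", fields) == "builder1/qemux86/master"

    try:
        format_str("{distro}", fields)   # unknown field
    except ArchiveError as err:
        print(err)                       # lists the valid field names
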
diff --git a/meta/lib/oeqa/utils/httpserver.py b/meta/lib/oeqa/utils/httpserver.py
index 7d12331453..58d3c3b3f8 100644
--- a/meta/lib/oeqa/utils/httpserver.py
+++ b/meta/lib/oeqa/utils/httpserver.py
@@ -1,13 +1,17 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import http.server
import multiprocessing
import os
+import traceback
+import signal
from socketserver import ThreadingMixIn
class HTTPServer(ThreadingMixIn, http.server.HTTPServer):
- def server_start(self, root_dir):
- import signal
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ def server_start(self, root_dir, logger):
os.chdir(root_dir)
self.serve_forever()
@@ -18,19 +22,40 @@ class HTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
class HTTPService(object):
- def __init__(self, root_dir, host=''):
+ def __init__(self, root_dir, host='', port=0, logger=None):
self.root_dir = root_dir
self.host = host
- self.port = 0
+ self.port = port
+ self.logger = logger
def start(self):
+ if not os.path.exists(self.root_dir):
+ self.logger.info("Not starting HTTPService for directory %s which doesn't exist" % (self.root_dir))
+ return
+
self.server = HTTPServer((self.host, self.port), HTTPRequestHandler)
if self.port == 0:
self.port = self.server.server_port
- self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir])
+ self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir, self.logger])
+
+ # The signal handler from testimage.bbclass can cause deadlocks here
+ # if the HTTPServer is terminated before it can restore the standard
+ # signal behaviour
+ orig = signal.getsignal(signal.SIGTERM)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.process.start()
+ signal.signal(signal.SIGTERM, orig)
+
+ if self.logger:
+ self.logger.info("Started HTTPService on %s:%s" % (self.host, self.port))
+
def stop(self):
- self.server.server_close()
- self.process.terminate()
- self.process.join()
+ if hasattr(self, "server"):
+ self.server.server_close()
+ if hasattr(self, "process"):
+ self.process.terminate()
+ self.process.join()
+ if self.logger:
+ self.logger.info("Stopped HTTPService on %s:%s" % (self.host, self.port))
+
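
A usage sketch of the revised HTTPService: port 0 still means "pick a free port", and the caller now supplies the logger (the served directory is hypothetical):

    import logging
    from oeqa.utils.httpserver import HTTPService

    logging.basicConfig(level=logging.INFO)
    service = HTTPService("/tmp/www", logger=logging.getLogger("HTTPService"))
    service.start()
    print("serving on port %d" % service.port)
    service.stop()
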
diff --git a/meta/lib/oeqa/utils/logparser.py b/meta/lib/oeqa/utils/logparser.py
index b377dcd271..7313df8ec3 100644
--- a/meta/lib/oeqa/utils/logparser.py
+++ b/meta/lib/oeqa/utils/logparser.py
@@ -1,125 +1,152 @@
-#!/usr/bin/env python
+#
+# SPDX-License-Identifier: MIT
+#
import sys
import os
import re
-from . import ftools
-
# A parser that can be used to identify whether a line is a test result or a section statement.
-class Lparser(object):
-
- def __init__(self, test_0_pass_regex, test_0_fail_regex, section_0_begin_regex=None, section_0_end_regex=None, **kwargs):
- # Initialize the arguments dictionary
- if kwargs:
- self.args = kwargs
- else:
- self.args = {}
-
- # Add the default args to the dictionary
- self.args['test_0_pass_regex'] = test_0_pass_regex
- self.args['test_0_fail_regex'] = test_0_fail_regex
- if section_0_begin_regex:
- self.args['section_0_begin_regex'] = section_0_begin_regex
- if section_0_end_regex:
- self.args['section_0_end_regex'] = section_0_end_regex
-
- self.test_possible_status = ['pass', 'fail', 'error']
- self.section_possible_status = ['begin', 'end']
-
- self.initialized = False
-
-
- # Initialize the parser with the current configuration
- def init(self):
-
- # extra arguments can be added by the user to define new test and section categories. They must follow a pre-defined pattern: <type>_<category_name>_<status>_regex
- self.test_argument_pattern = "^test_(.+?)_(%s)_regex" % '|'.join(map(str, self.test_possible_status))
- self.section_argument_pattern = "^section_(.+?)_(%s)_regex" % '|'.join(map(str, self.section_possible_status))
-
- # Initialize the test and section regex dictionaries
- self.test_regex = {}
- self.section_regex ={}
-
- for arg, value in self.args.items():
- if not value:
- raise Exception('The value of provided argument %s is %s. Should have a valid value.' % (key, value))
- is_test = re.search(self.test_argument_pattern, arg)
- is_section = re.search(self.section_argument_pattern, arg)
- if is_test:
- if not is_test.group(1) in self.test_regex:
- self.test_regex[is_test.group(1)] = {}
- self.test_regex[is_test.group(1)][is_test.group(2)] = re.compile(value)
- elif is_section:
- if not is_section.group(1) in self.section_regex:
- self.section_regex[is_section.group(1)] = {}
- self.section_regex[is_section.group(1)][is_section.group(2)] = re.compile(value)
- else:
- # TODO: Make these call a traceback instead of a simple exception..
- raise Exception("The provided argument name does not correspond to any valid type. Please give one of the following types:\nfor tests: %s\nfor sections: %s" % (self.test_argument_pattern, self.section_argument_pattern))
-
- self.initialized = True
-
- # Parse a line and return a tuple containing the type of result (test/section) and its category, status and name
- def parse_line(self, line):
- if not self.initialized:
- raise Exception("The parser is not initialized..")
-
- for test_category, test_status_list in self.test_regex.items():
- for test_status, status_regex in test_status_list.items():
- test_name = status_regex.search(line)
- if test_name:
- return ['test', test_category, test_status, test_name.group(1)]
-
- for section_category, section_status_list in self.section_regex.items():
- for section_status, status_regex in section_status_list.items():
- section_name = status_regex.search(line)
- if section_name:
- return ['section', section_category, section_status, section_name.group(1)]
- return None
-
-
-class Result(object):
-
+class PtestParser(object):
def __init__(self):
- self.result_dict = {}
-
- def store(self, section, test, status):
- if not section in self.result_dict:
- self.result_dict[section] = []
-
- self.result_dict[section].append((test, status))
-
- # sort tests by the test name(the first element of the tuple), for each section. This can be helpful when using git to diff for changes by making sure they are always in the same order.
- def sort_tests(self):
- for package in self.result_dict:
- sorted_results = sorted(self.result_dict[package], key=lambda tup: tup[0])
- self.result_dict[package] = sorted_results
+ self.results = {}
+ self.sections = {}
+
+ def parse(self, logfile):
+ test_regex = {}
+ test_regex['PASSED'] = re.compile(r"^PASS:(.+)")
+ test_regex['FAILED'] = re.compile(r"^FAIL:([^(]+)")
+ test_regex['SKIPPED'] = re.compile(r"^SKIP:(.+)")
+
+ section_regex = {}
+ section_regex['begin'] = re.compile(r"^BEGIN: .*/(.+)/ptest")
+ section_regex['end'] = re.compile(r"^END: .*/(.+)/ptest")
+ section_regex['duration'] = re.compile(r"^DURATION: (.+)")
+ section_regex['exitcode'] = re.compile(r"^ERROR: Exit status is (.+)")
+ section_regex['timeout'] = re.compile(r"^TIMEOUT: .*/(.+)/ptest")
+
+ def newsection():
+ return { 'name': "No-section", 'log': "" }
+
+ current_section = newsection()
+
+ with open(logfile, errors='replace') as f:
+ for line in f:
+ result = section_regex['begin'].search(line)
+ if result:
+ current_section['name'] = result.group(1)
+ continue
+
+ result = section_regex['end'].search(line)
+ if result:
+ if current_section['name'] != result.group(1):
+ bb.warn("Ptest END log section mismatch %s vs. %s" % (current_section['name'], result.group(1)))
+ if current_section['name'] in self.sections:
+ bb.warn("Ptest duplicate section for %s" % (current_section['name']))
+ self.sections[current_section['name']] = current_section
+ del self.sections[current_section['name']]['name']
+ current_section = newsection()
+ continue
+
+ result = section_regex['timeout'].search(line)
+ if result:
+ if current_section['name'] != result.group(1):
+ bb.warn("Ptest TIMEOUT log section mismatch %s vs. %s" % (current_section['name'], result.group(1)))
+ current_section['timeout'] = True
+ continue
+
+ for t in ['duration', 'exitcode']:
+ result = section_regex[t].search(line)
+ if result:
+ current_section[t] = result.group(1)
+ continue
+
+ current_section['log'] = current_section['log'] + line
+
+ for t in test_regex:
+ result = test_regex[t].search(line)
+ if result:
+ if current_section['name'] not in self.results:
+ self.results[current_section['name']] = {}
+ self.results[current_section['name']][result.group(1).strip()] = t
+
+ return self.results, self.sections
# Log the results as files. The file name is the section name and the contents are the tests in that section.
- def log_as_files(self, target_dir, test_status):
- status_regex = re.compile('|'.join(map(str, test_status)))
- if not type(test_status) == type([]):
- raise Exception("test_status should be a list. Got " + str(test_status) + " instead.")
+ def results_as_files(self, target_dir):
if not os.path.exists(target_dir):
raise Exception("Target directory does not exist: %s" % target_dir)
- for section, test_results in self.result_dict.items():
- prefix = ''
- for x in test_status:
- prefix +=x+'.'
- if (section != ''):
- prefix += section
+ for section in self.results:
+ prefix = 'No-section'
+ if section:
+ prefix = section
section_file = os.path.join(target_dir, prefix)
# purge the file contents if it exists
- open(section_file, 'w').close()
- for test_result in test_results:
- (test_name, status) = test_result
- # we log only the tests with status in the test_status list
- match_status = status_regex.search(status)
- if match_status:
- ftools.append_file(section_file, status + ": " + test_name)
-
- # Not yet implemented!
- def log_to_lava(self):
- pass
+ with open(section_file, 'w') as f:
+ for test_name in sorted(self.results[section]):
+ status = self.results[section][test_name]
+ f.write(status + ": " + test_name + "\n")
+
+
+# ltp log parsing
+class LtpParser(object):
+ def __init__(self):
+ self.results = {}
+ self.section = {'duration': "", 'log': ""}
+
+ def parse(self, logfile):
+ test_regex = {}
+ test_regex['PASSED'] = re.compile(r"PASS")
+ test_regex['FAILED'] = re.compile(r"FAIL")
+ test_regex['SKIPPED'] = re.compile(r"SKIP")
+
+ with open(logfile, errors='replace') as f:
+ for line in f:
+ for t in test_regex:
+ result = test_regex[t].search(line)
+ if result:
+ self.results[line.split()[0].strip()] = t
+
+ for test in self.results:
+ result = self.results[test]
+ self.section['log'] = self.section['log'] + ("%s: %s\n" % (result.strip()[:-2], test.strip()))
+
+ return self.results, self.section
+
+
+# ltp Compliance log parsing
+class LtpComplianceParser(object):
+ def __init__(self):
+ self.results = {}
+ self.section = {'duration': "", 'log': ""}
+
+ def parse(self, logfile):
+ test_regex = {}
+ test_regex['PASSED'] = re.compile(r"^PASS")
+ test_regex['FAILED'] = re.compile(r"^FAIL")
+ test_regex['SKIPPED'] = re.compile(r"(?:UNTESTED)|(?:UNSUPPORTED)")
+
+ section_regex = {}
+ section_regex['test'] = re.compile(r"^Testing")
+
+ with open(logfile, errors='replace') as f:
+ for line in f:
+ result = section_regex['test'].search(line)
+ if result:
+ self.name = ""
+ self.name = line.split()[1].strip()
+ self.results[self.name] = "PASSED"
+ failed = 0
+
+ failed_result = test_regex['FAILED'].search(line)
+ if failed_result:
+ failed = line.split()[1].strip()
+ if int(failed) > 0:
+ self.results[self.name] = "FAILED"
+
+ for test in self.results:
+ result = self.results[test]
+ self.section['log'] = self.section['log'] + ("%s: %s\n" % (result.strip()[:-2], test.strip()))
+
+ return self.results, self.section
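
A sketch of the new parser API, assuming a ptest console log saved at a hypothetical path; results maps each section name to a {test: status} dict:

    from oeqa.utils.logparser import PtestParser

    parser = PtestParser()
    results, sections = parser.parse("/tmp/ptest.log")
    for section, tests in results.items():
        failed = [t for t, status in tests.items() if status == "FAILED"]
        print("%s: %d tests, %d failed" % (section, len(tests), len(failed)))
    # One file per section; the target directory must already exist.
    parser.results_as_files("/tmp/ptest-results")
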
diff --git a/meta/lib/oeqa/utils/metadata.py b/meta/lib/oeqa/utils/metadata.py
new file mode 100644
index 0000000000..8013aa684d
--- /dev/null
+++ b/meta/lib/oeqa/utils/metadata.py
@@ -0,0 +1,124 @@
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+# Functions to get metadata from the testing host used
+# for analytics of test results.
+
+
+import os
+import bb
+
+from collections.abc import MutableMapping
+from xml.dom.minidom import parseString
+from xml.etree.ElementTree import Element, tostring
+
+from oe.lsb import get_os_release
+from oeqa.utils.commands import runCmd, get_bb_vars
+
+
+def metadata_from_bb():
+ """ Returns test's metadata as OrderedDict.
+
+ Data will be gathered using bitbake -e thanks to get_bb_vars.
+ """
+ metadata_config_vars = ('MACHINE', 'BB_NUMBER_THREADS', 'PARALLEL_MAKE')
+
+ info_dict = OrderedDict()
+ hostname = runCmd('hostname')
+ info_dict['hostname'] = hostname.output
+ data_dict = get_bb_vars()
+
+ # Distro information
+ info_dict['distro'] = {'id': data_dict['DISTRO'],
+ 'version_id': data_dict['DISTRO_VERSION'],
+ 'pretty_name': '%s %s' % (data_dict['DISTRO'], data_dict['DISTRO_VERSION'])}
+
+ # Host distro information
+ os_release = get_os_release()
+ if os_release:
+ info_dict['host_distro'] = OrderedDict()
+ for key in ('ID', 'VERSION_ID', 'PRETTY_NAME'):
+ if key in os_release:
+ info_dict['host_distro'][key.lower()] = os_release[key]
+
+ info_dict['layers'] = get_layers(data_dict['BBLAYERS'])
+ info_dict['bitbake'] = git_rev_info(os.path.dirname(bb.__file__))
+
+ info_dict['config'] = OrderedDict()
+ for var in sorted(metadata_config_vars):
+ info_dict['config'][var] = data_dict[var]
+ return info_dict
+
+def metadata_from_data_store(d):
+ """ Returns test's metadata as OrderedDict.
+
+ Data will be collected from the provided data store.
+ """
+ # TODO: Getting metadata from the data store would
+ # be useful when running within bitbake.
+ pass
+
+def git_rev_info(path):
+ """Get git revision information as a dict"""
+ info = OrderedDict()
+
+ try:
+ from git import Repo, InvalidGitRepositoryError, NoSuchPathError
+ except ImportError:
+ import subprocess
+ try:
+ info['branch'] = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=path).decode('utf-8').strip()
+ except subprocess.CalledProcessError:
+ pass
+ try:
+ info['commit'] = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=path).decode('utf-8').strip()
+ except subprocess.CalledProcessError:
+ pass
+ try:
+ info['commit_count'] = int(subprocess.check_output(["git", "rev-list", "--count", "HEAD"], cwd=path).decode('utf-8').strip())
+ except subprocess.CalledProcessError:
+ pass
+ return info
+ try:
+ repo = Repo(path, search_parent_directories=True)
+ except (InvalidGitRepositoryError, NoSuchPathError):
+ return info
+ info['commit'] = repo.head.commit.hexsha
+ info['commit_count'] = repo.head.commit.count()
+ try:
+ info['branch'] = repo.active_branch.name
+ except TypeError:
+ info['branch'] = '(nobranch)'
+ return info
+
+def get_layers(layers):
+ """Returns layer information in dict format"""
+ layer_dict = OrderedDict()
+ for layer in layers.split():
+ layer_name = os.path.basename(layer)
+ layer_dict[layer_name] = git_rev_info(layer)
+ return layer_dict
+
+def write_metadata_file(file_path, metadata):
+ """ Writes metadata to a XML file in directory. """
+
+ xml = dict_to_XML('metadata', metadata)
+ xml_doc = parseString(tostring(xml).decode('UTF-8'))
+ with open(file_path, 'w') as f:
+ f.write(xml_doc.toprettyxml())
+
+def dict_to_XML(tag, dictionary, **kwargs):
+ """ Return XML element converting dicts recursively. """
+
+ elem = Element(tag, **kwargs)
+ for key, val in dictionary.items():
+ if tag == 'layers':
+ child = (dict_to_XML('layer', val, name=key))
+ elif isinstance(val, MutableMapping):
+ child = (dict_to_XML(key, val))
+ else:
+ if tag == 'config':
+ child = Element('variable', name=key)
+ else:
+ child = Element(key)
+ child.text = str(val)
+ elem.append(child)
+ return elem
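
A quick sketch of the dict_to_XML() conversion used when writing the metadata file (values illustrative; output shown without pretty-printing):

    from collections import OrderedDict
    from xml.etree.ElementTree import tostring
    from oeqa.utils.metadata import dict_to_XML

    meta = OrderedDict([("hostname", "builder1"),
                        ("config", OrderedDict([("MACHINE", "qemux86")]))])
    print(tostring(dict_to_XML("metadata", meta)).decode())
    # <metadata><hostname>builder1</hostname><config><variable name="MACHINE">qemux86</variable></config></metadata>
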
diff --git a/meta/lib/oeqa/utils/network.py b/meta/lib/oeqa/utils/network.py
index 2768f6c5db..59d01723a1 100644
--- a/meta/lib/oeqa/utils/network.py
+++ b/meta/lib/oeqa/utils/network.py
@@ -1,7 +1,11 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
import socket
-def get_free_port():
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+def get_free_port(udp = False):
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM if not udp else socket.SOCK_DGRAM)
s.bind(('', 0))
addr = s.getsockname()
s.close()
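
get_free_port() binds to port 0 so the kernel picks an unused port; note the socket is closed before the number is handed back, so the port is only probably still free by the time the caller binds it:

    from oeqa.utils.network import get_free_port

    tcp_port = get_free_port()
    udp_port = get_free_port(udp=True)
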
diff --git a/meta/lib/oeqa/utils/nfs.py b/meta/lib/oeqa/utils/nfs.py
new file mode 100644
index 0000000000..a37686c914
--- /dev/null
+++ b/meta/lib/oeqa/utils/nfs.py
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: MIT
+import os
+import sys
+import tempfile
+import contextlib
+import socket
+from oeqa.utils.commands import bitbake, get_bb_var, Command
+from oeqa.utils.network import get_free_port
+
+@contextlib.contextmanager
+def unfs_server(directory, logger = None):
+ unfs_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "unfs3-native")
+ if not os.path.exists(os.path.join(unfs_sysroot, "usr", "bin", "unfsd")):
+ # build native tool
+ bitbake("unfs3-native -c addto_recipe_sysroot")
+
+ exports = None
+ cmd = None
+ try:
+ # create the exports file
+ with tempfile.NamedTemporaryFile(delete = False) as exports:
+ exports.write("{0} (rw,no_root_squash,no_all_squash,insecure)\n".format(directory).encode())
+
+ # find some ports for the server
+ nfsport, mountport = get_free_port(udp = True), get_free_port(udp = True)
+
+ nenv = dict(os.environ)
+ nenv['PATH'] = "{0}/sbin:{0}/usr/sbin:{0}/usr/bin:".format(unfs_sysroot) + nenv.get('PATH', '')
+ cmd = Command(["unfsd", "-d", "-p", "-N", "-e", exports.name, "-n", str(nfsport), "-m", str(mountport)],
+ bg = True, env = nenv, output_log = logger)
+ cmd.run()
+ yield nfsport, mountport
+ finally:
+ if cmd is not None:
+ cmd.stop()
+ if exports is not None:
+ # clean up exports file
+ os.unlink(exports.name)
+
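A sketch of how a test might consume this context manager, assuming it runs in
a bitbake environment (so bitbake() and get_bb_var() work); the client-side
mount options are indicative only:

    with unfs_server("/path/to/export") as (nfsport, mountport):
        # on the target, something along the lines of:
        #   mount -o port=<nfsport>,mountport=<mountport>,udp,nolock \
        #         <host>:/path/to/export /mnt
        run_checks_against_mount()   # hypothetical test body
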
diff --git a/meta/lib/oeqa/utils/package_manager.py b/meta/lib/oeqa/utils/package_manager.py
index 099ecc9728..2d358f7172 100644
--- a/meta/lib/oeqa/utils/package_manager.py
+++ b/meta/lib/oeqa/utils/package_manager.py
@@ -1,29 +1,217 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import json
+import shutil
+
+from oeqa.core.utils.test import getCaseFile, getCaseMethod
+
def get_package_manager(d, root_path):
"""
Returns an OE package manager that can install packages in root_path.
"""
from oe.package_manager import RpmPM, OpkgPM, DpkgPM
- pkg_class = d.getVar("IMAGE_PKGTYPE", True)
+ pkg_class = d.getVar("IMAGE_PKGTYPE")
if pkg_class == "rpm":
pm = RpmPM(d,
root_path,
- d.getVar('TARGET_VENDOR', True))
+ d.getVar('TARGET_VENDOR'),
+ filterbydependencies=False)
pm.create_configs()
elif pkg_class == "ipk":
pm = OpkgPM(d,
root_path,
- d.getVar("IPKGCONF_TARGET", True),
- d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
+ d.getVar("IPKGCONF_TARGET"),
+ d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
+ filterbydependencies=False)
elif pkg_class == "deb":
pm = DpkgPM(d,
root_path,
- d.getVar('PACKAGE_ARCHS', True),
- d.getVar('DPKG_ARCH', True))
+ d.getVar('PACKAGE_ARCHS'),
+ d.getVar('DPKG_ARCH'),
+ filterbydependencies=False)
pm.write_index()
pm.update()
return pm
+
+def find_packages_to_extract(test_suite):
+ """
+    Returns the packages that the runtime tests require to be extracted.
+ """
+ from oeqa.core.utils.test import getSuiteCasesFiles
+
+ needed_packages = {}
+ files = getSuiteCasesFiles(test_suite)
+
+ for f in set(files):
+ json_file = _get_json_file(f)
+ if json_file:
+ needed_packages.update(_get_needed_packages(json_file))
+
+ return needed_packages
+
+def _get_json_file(module_path):
+ """
+    Returns the path of the JSON file for a module, or an empty string if it doesn't exist.
+ """
+
+ json_file = '%s.json' % module_path.rsplit('.', 1)[0]
+ if os.path.isfile(module_path) and os.path.isfile(json_file):
+ return json_file
+ else:
+ return ''
+
+def _get_needed_packages(json_file, test=None):
+ """
+ Returns a dict with needed packages based on a JSON file.
+
+    If a test is specified, only the dict for that test is returned.
+ """
+ needed_packages = {}
+
+ with open(json_file) as f:
+ test_packages = json.load(f)
+ for key,value in test_packages.items():
+ needed_packages[key] = value
+
+ if test:
+ if test in needed_packages:
+ needed_packages = needed_packages[test]
+ else:
+ needed_packages = {}
+
+ return needed_packages
+
+def extract_packages(d, needed_packages):
+ """
+ Extract packages that will be needed during runtime.
+ """
+
+ import bb
+ import oe.path
+
+ extracted_path = d.getVar('TEST_EXTRACTED_DIR')
+
+ for key,value in needed_packages.items():
+ packages = ()
+ if isinstance(value, dict):
+ packages = (value, )
+ elif isinstance(value, list):
+ packages = value
+ else:
+ bb.fatal('Failed to process needed packages for %s; '
+ 'Value must be a dict or list' % key)
+
+ for package in packages:
+ pkg = package['pkg']
+ rm = package.get('rm', False)
+ extract = package.get('extract', True)
+
+ if extract:
+ #logger.debug(1, 'Extracting %s' % pkg)
+ dst_dir = os.path.join(extracted_path, pkg)
+                # The same package may be used by more than one test;
+                # no need to extract it again.
+ if os.path.exists(dst_dir):
+ continue
+
+ # Extract package and copy it to TEST_EXTRACTED_DIR
+ pkg_dir = _extract_in_tmpdir(d, pkg)
+ oe.path.copytree(pkg_dir, dst_dir)
+ shutil.rmtree(pkg_dir)
+
+ else:
+ #logger.debug(1, 'Copying %s' % pkg)
+ _copy_package(d, pkg)
+
+def _extract_in_tmpdir(d, pkg):
+    """
+ Returns path to a temp directory where the package was
+ extracted without dependencies.
+ """
+
+ from oeqa.utils.package_manager import get_package_manager
+
+ pkg_path = os.path.join(d.getVar('TEST_INSTALL_TMP_DIR'), pkg)
+ pm = get_package_manager(d, pkg_path)
+ extract_dir = pm.extract(pkg)
+ shutil.rmtree(pkg_path)
+
+ return extract_dir
+
+def _copy_package(d, pkg):
+ """
+    Copies the RPM, DEB or IPK package to TEST_PACKAGED_DIR.
+ """
+
+ from oeqa.utils.package_manager import get_package_manager
+
+ pkg_path = os.path.join(d.getVar('TEST_INSTALL_TMP_DIR'), pkg)
+ dst_dir = d.getVar('TEST_PACKAGED_DIR')
+ pm = get_package_manager(d, pkg_path)
+ pkg_info = pm.package_info(pkg)
+ file_path = pkg_info[pkg]['filepath']
+ shutil.copy2(file_path, dst_dir)
+ shutil.rmtree(pkg_path)
+
+def install_package(test_case):
+ """
+ Installs package in DUT if required.
+    Installs packages on the DUT if required.
+ needed_packages = test_needs_package(test_case)
+ if needed_packages:
+ _install_uninstall_packages(needed_packages, test_case, True)
+
+def uninstall_package(test_case):
+ """
+    Uninstalls packages from the DUT if required.
+ """
+ needed_packages = test_needs_package(test_case)
+ if needed_packages:
+ _install_uninstall_packages(needed_packages, test_case, False)
+
+def test_needs_package(test_case):
+ """
+    Checks whether a test case requires packages to be installed/uninstalled.
+ """
+ test_file = getCaseFile(test_case)
+ json_file = _get_json_file(test_file)
+
+ if json_file:
+ test_method = getCaseMethod(test_case)
+ needed_packages = _get_needed_packages(json_file, test_method)
+ if needed_packages:
+ return needed_packages
+
+ return None
+
+def _install_uninstall_packages(needed_packages, test_case, install=True):
+ """
+    Install/uninstall packages on the DUT without using a package manager.
+ """
+
+ if isinstance(needed_packages, dict):
+ packages = [needed_packages]
+ elif isinstance(needed_packages, list):
+ packages = needed_packages
+
+ for package in packages:
+ pkg = package['pkg']
+ rm = package.get('rm', False)
+ extract = package.get('extract', True)
+ src_dir = os.path.join(test_case.tc.extract_dir, pkg)
+
+ # Install package
+ if install and extract:
+ test_case.tc.target.copyDirTo(src_dir, '/')
+
+ # Uninstall package
+ elif not install and rm:
+ test_case.tc.target.deleteDirStructure(src_dir, '/')
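
The per-module JSON file consumed by _get_needed_packages() maps a test method
name to either a single package dict or a list of them; per the .get() calls
above, 'rm' defaults to false and 'extract' to true. A hypothetical
<module>.json illustrating the shape:

    {
        "test_ping": {"pkg": "iputils", "rm": true},
        "test_socat": [
            {"pkg": "socat"},
            {"pkg": "socat-dbg", "extract": false}
        ]
    }
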
diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py
index 1f98682f75..2cada35d48 100644
--- a/meta/lib/oeqa/utils/qemurunner.py
+++ b/meta/lib/oeqa/utils/qemurunner.py
@@ -1,12 +1,15 @@
+#
# Copyright (C) 2013 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
# This module provides a class for starting qemu images using runqemu.
# It's used by testimage.bbclass.
import subprocess
import os
+import sys
import time
import signal
import re
@@ -16,10 +19,8 @@ import errno
import string
import threading
import codecs
-from oeqa.utils.dump import HostDumper
-
import logging
-logger = logging.getLogger("BitBake.QemuRunner")
+from oeqa.utils.dump import HostDumper
# Get Unicode non printable control chars
control_range = list(range(0,32))+list(range(127,160))
@@ -29,16 +30,19 @@ re_control_char = re.compile('[%s]' % re.escape("".join(control_chars)))
class QemuRunner:
- def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime, dump_dir, dump_host_cmds, use_kvm):
+ def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime, dump_dir, dump_host_cmds,
+ use_kvm, logger, use_slirp=False, serial_ports=2):
# Popen object for runqemu
self.runqemu = None
# pid of the qemu process that runqemu will start
self.qemupid = None
- # target ip - from the command line
+ # target ip - from the command line or runqemu output
self.ip = None
# host ip - where qemu is running
self.server_ip = None
+ # target ip netmask
+ self.netmask = None
self.machine = machine
self.rootfs = rootfs
@@ -50,9 +54,16 @@ class QemuRunner:
self.logged = False
self.thread = None
self.use_kvm = use_kvm
+ self.use_slirp = use_slirp
+ self.serial_ports = serial_ports
+ self.msg = ''
- self.runqemutime = 60
+ self.runqemutime = 120
+ self.qemu_pidfile = 'pidfile_'+str(os.getpid())
self.host_dumper = HostDumper(dump_host_cmds, dump_dir)
+ self.monitorpipe = None
+
+ self.logger = logger
def create_socket(self):
try:
@@ -61,7 +72,7 @@ class QemuRunner:
sock.bind(("127.0.0.1",0))
sock.listen(2)
port = sock.getsockname()[1]
- logger.info("Created listening socket for qemu serial console on: 127.0.0.1:%s" % port)
+ self.logger.debug("Created listening socket for qemu serial console on: 127.0.0.1:%s" % port)
return (sock, port)
except socket.error:
@@ -72,8 +83,9 @@ class QemuRunner:
if self.logfile:
# It is needed to sanitize the data received from qemu
# because is possible to have control characters
- msg = msg.decode("utf-8")
+ msg = msg.decode("utf-8", errors='ignore')
msg = re_control_char.sub('', msg)
+ self.msg += msg
with codecs.open(self.logfile, "a", encoding="utf-8") as f:
f.write("%s" % msg)
@@ -87,67 +99,84 @@ class QemuRunner:
def handleSIGCHLD(self, signum, frame):
if self.runqemu and self.runqemu.poll():
if self.runqemu.returncode:
- logger.info('runqemu exited with code %d' % self.runqemu.returncode)
- logger.info("Output from runqemu:\n%s" % self.getOutput(self.runqemu.stdout))
+ self.logger.error('runqemu exited with code %d' % self.runqemu.returncode)
+ self.logger.error('Output from runqemu:\n%s' % self.getOutput(self.runqemu.stdout))
self.stop()
self._dump_host()
raise SystemExit
- def start(self, qemuparams = None, get_ip = True, extra_bootparams = None):
+ def start(self, qemuparams = None, get_ip = True, extra_bootparams = None, runqemuparams='', launch_cmd=None, discard_writes=True):
+ env = os.environ.copy()
if self.display:
- os.environ["DISPLAY"] = self.display
+ env["DISPLAY"] = self.display
# Set this flag so that Qemu doesn't do any grabs as SDL grabs
# interact badly with screensavers.
- os.environ["QEMU_DONT_GRAB"] = "1"
+ env["QEMU_DONT_GRAB"] = "1"
if not os.path.exists(self.rootfs):
- logger.error("Invalid rootfs %s" % self.rootfs)
+ self.logger.error("Invalid rootfs %s" % self.rootfs)
return False
if not os.path.exists(self.tmpdir):
- logger.error("Invalid TMPDIR path %s" % self.tmpdir)
+ self.logger.error("Invalid TMPDIR path %s" % self.tmpdir)
return False
else:
- os.environ["OE_TMPDIR"] = self.tmpdir
+ env["OE_TMPDIR"] = self.tmpdir
if not os.path.exists(self.deploy_dir_image):
- logger.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
+ self.logger.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
return False
else:
- os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
+ env["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
+ if not launch_cmd:
+ launch_cmd = 'runqemu %s' % ('snapshot' if discard_writes else '')
+ if self.use_kvm:
+ self.logger.debug('Using kvm for runqemu')
+ launch_cmd += ' kvm'
+ else:
+ self.logger.debug('Not using kvm for runqemu')
+ if not self.display:
+ launch_cmd += ' nographic'
+ if self.use_slirp:
+ launch_cmd += ' slirp'
+ launch_cmd += ' %s %s %s' % (runqemuparams, self.machine, self.rootfs)
+
+ return self.launch(launch_cmd, qemuparams=qemuparams, get_ip=get_ip, extra_bootparams=extra_bootparams, env=env)
+
+ def launch(self, launch_cmd, get_ip = True, qemuparams = None, extra_bootparams = None, env = None):
try:
- threadsock, threadport = self.create_socket()
+ if self.serial_ports >= 2:
+ self.threadsock, threadport = self.create_socket()
self.server_socket, self.serverport = self.create_socket()
except socket.error as msg:
- logger.error("Failed to create listening socket: %s" % msg[1])
+ self.logger.error("Failed to create listening socket: %s" % msg[1])
return False
-
bootparams = 'console=tty1 console=ttyS0,115200n8 printk.time=1'
if extra_bootparams:
bootparams = bootparams + ' ' + extra_bootparams
- self.qemuparams = 'bootparams="{0}" qemuparams="-serial tcp:127.0.0.1:{1}"'.format(bootparams, threadport)
- if not self.display:
- self.qemuparams = 'nographic ' + self.qemuparams
+        # Ask QEMU to store its process PID in a file, so we don't have to
+        # parse the running processes and analyze descendants to determine it.
+ if os.path.exists(self.qemu_pidfile):
+ os.remove(self.qemu_pidfile)
+ self.qemuparams = 'bootparams="{0}" qemuparams="-pidfile {1}"'.format(bootparams, self.qemu_pidfile)
if qemuparams:
self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'
+ if self.serial_ports >= 2:
+ launch_cmd += ' tcpserial=%s:%s %s' % (threadport, self.serverport, self.qemuparams)
+ else:
+ launch_cmd += ' tcpserial=%s %s' % (self.serverport, self.qemuparams)
+
self.origchldhandler = signal.getsignal(signal.SIGCHLD)
signal.signal(signal.SIGCHLD, self.handleSIGCHLD)
- launch_cmd = 'runqemu snapshot '
- if self.use_kvm:
- logger.info('Using kvm for runqemu')
- launch_cmd += 'kvm '
- else:
- logger.info('Not using kvm for runqemu')
- launch_cmd += 'tcpserial=%s %s %s %s' % (self.serverport, self.machine, self.rootfs, self.qemuparams)
- logger.info('launchcmd=%s'%(launch_cmd))
+ self.logger.debug('launchcmd=%s'%(launch_cmd))
# FIXME: We pass in stdin=subprocess.PIPE here to work around stty
# blocking at the end of the runqemu script when using this within
# oe-selftest (this makes stty error out immediately). There ought
# to be a proper fix but this will suffice for now.
- self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, preexec_fn=os.setpgrp)
+ self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, preexec_fn=os.setpgrp, env=env)
output = self.runqemu.stdout
#
@@ -176,130 +205,183 @@ class QemuRunner:
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
sys.exit(0)
- logger.info("runqemu started, pid is %s" % self.runqemu.pid)
- logger.info("waiting at most %s seconds for qemu pid" % self.runqemutime)
+ self.logger.debug("runqemu started, pid is %s" % self.runqemu.pid)
+ self.logger.debug("waiting at most %s seconds for qemu pid (%s)" %
+ (self.runqemutime, time.strftime("%D %H:%M:%S")))
endtime = time.time() + self.runqemutime
while not self.is_alive() and time.time() < endtime:
if self.runqemu.poll():
if self.runqemu.returncode:
# No point waiting any longer
- logger.info('runqemu exited with code %d' % self.runqemu.returncode)
+ self.logger.warning('runqemu exited with code %d' % self.runqemu.returncode)
self._dump_host()
+ self.logger.warning("Output from runqemu:\n%s" % self.getOutput(output))
self.stop()
- logger.info("Output from runqemu:\n%s" % self.getOutput(output))
return False
- time.sleep(1)
-
- if self.is_alive():
- logger.info("qemu started - qemu procces pid is %s" % self.qemupid)
- if get_ip:
- cmdline = ''
- with open('/proc/%s/cmdline' % self.qemupid) as p:
- cmdline = p.read()
- # It is needed to sanitize the data received
- # because is possible to have control characters
- cmdline = re_control_char.sub('', cmdline)
- try:
- ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
- if not ips or len(ips) != 3:
- raise ValueError
- else:
- self.ip = ips[0]
- self.server_ip = ips[1]
- except (IndexError, ValueError):
- logger.info("Couldn't get ip from qemu process arguments! Here is the qemu command line used:\n%s\nand output from runqemu:\n%s" % (cmdline, self.getOutput(output)))
+ time.sleep(0.5)
+
+ if not self.is_alive():
+ self.logger.error("Qemu pid didn't appear in %s seconds (%s)" %
+ (self.runqemutime, time.strftime("%D %H:%M:%S")))
+ # Dump all processes to help us to figure out what is going on...
+ ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command '], stdout=subprocess.PIPE).communicate()[0]
+ processes = ps.decode("utf-8")
+ self.logger.debug("Running processes:\n%s" % processes)
+ self._dump_host()
+ op = self.getOutput(output)
+ self.stop()
+ if op:
+ self.logger.error("Output from runqemu:\n%s" % op)
+ else:
+ self.logger.error("No output from runqemu.\n")
+ return False
+
+ # We are alive: qemu is running
+ out = self.getOutput(output)
+ netconf = False # network configuration is not required by default
+        self.logger.debug("qemu started in %s seconds - qemu process pid is %s (%s)" %
+ (time.time() - (endtime - self.runqemutime),
+ self.qemupid, time.strftime("%D %H:%M:%S")))
+ cmdline = ''
+ if get_ip:
+ with open('/proc/%s/cmdline' % self.qemupid) as p:
+ cmdline = p.read()
+            # The data received needs to be sanitized because it
+            # may contain control characters
+ cmdline = re_control_char.sub(' ', cmdline)
+ try:
+ if self.use_slirp:
+ tcp_ports = cmdline.split("hostfwd=tcp::")[1]
+ host_port = tcp_ports[:tcp_ports.find('-')]
+ self.ip = "localhost:%s" % host_port
+ else:
+ ips = re.findall(r"((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
+ self.ip = ips[0]
+ self.server_ip = ips[1]
+ self.logger.debug("qemu cmdline used:\n{}".format(cmdline))
+ except (IndexError, ValueError):
+ # Try to get network configuration from runqemu output
+ match = re.match(r'.*Network configuration: ([0-9.]+)::([0-9.]+):([0-9.]+)$.*',
+ out, re.MULTILINE|re.DOTALL)
+ if match:
+ self.ip, self.server_ip, self.netmask = match.groups()
+                    # network configuration is required as we couldn't get it
+                    # from the runqemu command line: qemu isn't booting the
+                    # kernel directly (no ip= kernel parameter), so guest
+                    # networking is not configured yet
+ netconf = True
+ else:
+ self.logger.error("Couldn't get ip from qemu command line and runqemu output! "
+ "Here is the qemu command line used:\n%s\n"
+ "and output from runqemu:\n%s" % (cmdline, out))
self._dump_host()
self.stop()
return False
- logger.info("qemu cmdline used:\n{}".format(cmdline))
- logger.info("Target IP: %s" % self.ip)
- logger.info("Server IP: %s" % self.server_ip)
- self.thread = LoggingThread(self.log, threadsock, logger)
+ self.logger.debug("Target IP: %s" % self.ip)
+ self.logger.debug("Server IP: %s" % self.server_ip)
+
+ if self.serial_ports >= 2:
+ self.thread = LoggingThread(self.log, self.threadsock, self.logger)
self.thread.start()
if not self.thread.connection_established.wait(self.boottime):
- logger.error("Didn't receive a console connection from qemu. "
+ self.logger.error("Didn't receive a console connection from qemu. "
"Here is the qemu command line used:\n%s\nand "
- "output from runqemu:\n%s" % (cmdline,
- self.getOutput(output)))
+ "output from runqemu:\n%s" % (cmdline, out))
self.stop_thread()
return False
- logger.info("Output from runqemu:\n%s", self.getOutput(output))
- logger.info("Waiting at most %d seconds for login banner" % self.boottime)
- endtime = time.time() + self.boottime
- socklist = [self.server_socket]
- reachedlogin = False
- stopread = False
- qemusock = None
- bootlog = ''
- data = b''
- while time.time() < endtime and not stopread:
- sread, swrite, serror = select.select(socklist, [], [], 5)
- for sock in sread:
- if sock is self.server_socket:
- qemusock, addr = self.server_socket.accept()
- qemusock.setblocking(0)
- socklist.append(qemusock)
- socklist.remove(self.server_socket)
- logger.info("Connection from %s:%s" % addr)
- else:
- data = data + sock.recv(1024)
- if data:
- try:
- data = data.decode("utf-8", errors="surrogateescape")
- bootlog += data
- data = b''
- if re.search(".* login:", bootlog):
- self.server_socket = qemusock
- stopread = True
- reachedlogin = True
- logger.info("Reached login banner")
- except UnicodeDecodeError:
- continue
- else:
- socklist.remove(sock)
- sock.close()
- stopread = True
-
- if not reachedlogin:
- logger.info("Target didn't reached login boot in %d seconds" % self.boottime)
- lines = "\n".join(bootlog.splitlines()[-25:])
- logger.info("Last 25 lines of text:\n%s" % lines)
- logger.info("Check full boot log: %s" % self.logfile)
- self._dump_host()
- self.stop()
- return False
-
- # If we are not able to login the tests can continue
+ self.logger.debug("Output from runqemu:\n%s", out)
+ self.logger.debug("Waiting at most %d seconds for login banner (%s)" %
+ (self.boottime, time.strftime("%D %H:%M:%S")))
+ endtime = time.time() + self.boottime
+ socklist = [self.server_socket]
+ reachedlogin = False
+ stopread = False
+ qemusock = None
+ bootlog = b''
+ data = b''
+ while time.time() < endtime and not stopread:
try:
- (status, output) = self.run_serial("root\n", raw=True)
- if re.search("root@[a-zA-Z0-9\-]+:~#", output):
- self.logged = True
- logger.info("Logged as root in serial console")
+ sread, swrite, serror = select.select(socklist, [], [], 5)
+ except InterruptedError:
+ continue
+ for sock in sread:
+ if sock is self.server_socket:
+ qemusock, addr = self.server_socket.accept()
+ qemusock.setblocking(0)
+ socklist.append(qemusock)
+ socklist.remove(self.server_socket)
+ self.logger.debug("Connection from %s:%s" % addr)
else:
- logger.info("Couldn't login into serial console"
- " as root using blank password")
- except:
- logger.info("Serial console failed while trying to login")
-
- else:
- logger.info("Qemu pid didn't appeared in %s seconds" % self.runqemutime)
+ data = data + sock.recv(1024)
+ if data:
+ bootlog += data
+ if self.serial_ports < 2:
+ # this socket has mixed console/kernel data, log it to logfile
+ self.log(data)
+
+ data = b''
+ if b' login:' in bootlog:
+ self.server_socket = qemusock
+ stopread = True
+ reachedlogin = True
+ self.logger.debug("Reached login banner in %s seconds (%s)" %
+ (time.time() - (endtime - self.boottime),
+ time.strftime("%D %H:%M:%S")))
+ else:
+ # no need to check if reachedlogin unless we support multiple connections
+ self.logger.debug("QEMU socket disconnected before login banner reached. (%s)" %
+ time.strftime("%D %H:%M:%S"))
+ socklist.remove(sock)
+ sock.close()
+ stopread = True
+
+ if not reachedlogin:
+ if time.time() >= endtime:
+ self.logger.warning("Target didn't reach login banner in %d seconds (%s)" %
+ (self.boottime, time.strftime("%D %H:%M:%S")))
+ tail = lambda l: "\n".join(l.splitlines()[-25:])
+ bootlog = bootlog.decode("utf-8")
+                # in case bootlog is empty, use the tail of the qemu log stored in self.msg
+ lines = tail(bootlog if bootlog else self.msg)
+ self.logger.warning("Last 25 lines of text:\n%s" % lines)
+ self.logger.warning("Check full boot log: %s" % self.logfile)
self._dump_host()
self.stop()
- logger.info("Output from runqemu:\n%s" % self.getOutput(output))
return False
- return self.is_alive()
+ # If we are not able to login the tests can continue
+ try:
+ (status, output) = self.run_serial("root\n", raw=True)
+ if re.search(r"root@[a-zA-Z0-9\-]+:~#", output):
+ self.logged = True
+ self.logger.debug("Logged as root in serial console")
+ if netconf:
+ # configure guest networking
+ cmd = "ifconfig eth0 %s netmask %s up\n" % (self.ip, self.netmask)
+ output = self.run_serial(cmd, raw=True)[1]
+ if re.search(r"root@[a-zA-Z0-9\-]+:~#", output):
+ self.logger.debug("configured ip address %s", self.ip)
+ else:
+ self.logger.debug("Couldn't configure guest networking")
+ else:
+ self.logger.warning("Couldn't login into serial console"
+ " as root using blank password")
+ self.logger.warning("The output:\n%s" % output)
+ except:
+ self.logger.warning("Serial console failed while trying to login")
+ return True
def stop(self):
- self.stop_thread()
if hasattr(self, "origchldhandler"):
signal.signal(signal.SIGCHLD, self.origchldhandler)
+ self.stop_thread()
+ self.stop_qemu_system()
if self.runqemu:
if hasattr(self, "monitorpid"):
os.kill(self.monitorpid, signal.SIGKILL)
- logger.info("Sending SIGTERM to runqemu")
+ self.logger.debug("Sending SIGTERM to runqemu")
try:
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
except OSError as e:
@@ -309,14 +391,32 @@ class QemuRunner:
while self.runqemu.poll() is None and time.time() < endtime:
time.sleep(1)
if self.runqemu.poll() is None:
- logger.info("Sending SIGKILL to runqemu")
+ self.logger.debug("Sending SIGKILL to runqemu")
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL)
+ self.runqemu.stdin.close()
+ self.runqemu.stdout.close()
self.runqemu = None
+
if hasattr(self, 'server_socket') and self.server_socket:
self.server_socket.close()
self.server_socket = None
+ if hasattr(self, 'threadsock') and self.threadsock:
+ self.threadsock.close()
+ self.threadsock = None
self.qemupid = None
self.ip = None
+ if os.path.exists(self.qemu_pidfile):
+ os.remove(self.qemu_pidfile)
+ if self.monitorpipe:
+ self.monitorpipe.close()
+
+ def stop_qemu_system(self):
+ if self.qemupid:
+ try:
+ # qemu-system behaves well and a SIGTERM is enough
+ os.kill(self.qemupid, signal.SIGTERM)
+ except ProcessLookupError as e:
+ self.logger.warning('qemu-system ended unexpectedly')
def stop_thread(self):
if self.thread and self.thread.is_alive():
@@ -324,7 +424,7 @@ class QemuRunner:
self.thread.join()
def restart(self, qemuparams = None):
- logger.info("Restarting qemu process")
+ self.logger.warning("Restarting qemu process")
if self.runqemu.poll() is None:
self.stop()
if self.start(qemuparams):
@@ -332,59 +432,26 @@ class QemuRunner:
return False
def is_alive(self):
- if not self.runqemu:
+ if not self.runqemu or self.runqemu.poll() is not None:
return False
- qemu_child = self.find_child(str(self.runqemu.pid))
- if qemu_child:
- self.qemupid = qemu_child[0]
- if os.path.exists("/proc/" + str(self.qemupid)):
- return True
+ if os.path.isfile(self.qemu_pidfile):
+ # when handling pidfile, qemu creates the file, stat it, lock it and then write to it
+ # so it's possible that the file has been created but the content is empty
+ pidfile_timeout = time.time() + 3
+ while time.time() < pidfile_timeout:
+ with open(self.qemu_pidfile, 'r') as f:
+ qemu_pid = f.read().strip()
+ # file created but not yet written contents
+ if not qemu_pid:
+ time.sleep(0.5)
+ continue
+ else:
+ if os.path.exists("/proc/" + qemu_pid):
+ self.qemupid = int(qemu_pid)
+ return True
return False
- def find_child(self,parent_pid):
- #
- # Walk the process tree from the process specified looking for a qemu-system. Return its [pid'cmd]
- #
- ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0]
- processes = ps.decode("utf-8").split('\n')
- nfields = len(processes[0].split()) - 1
- pids = {}
- commands = {}
- for row in processes[1:]:
- data = row.split(None, nfields)
- if len(data) != 3:
- continue
- if data[1] not in pids:
- pids[data[1]] = []
-
- pids[data[1]].append(data[0])
- commands[data[0]] = data[2]
-
- if parent_pid not in pids:
- return []
-
- parents = []
- newparents = pids[parent_pid]
- while newparents:
- next = []
- for p in newparents:
- if p in pids:
- for n in pids[p]:
- if n not in parents and n not in next:
- next.append(n)
- if p not in parents:
- parents.append(p)
- newparents = next
- #print("Children matching %s:" % str(parents))
- for p in parents:
- # Need to be careful here since runqemu runs "ldd qemu-system-xxxx"
- # Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx"
- basecmd = commands[p].split()[0]
- basecmd = os.path.basename(basecmd)
- if "qemu-system" in basecmd and "-serial tcp" in commands[p]:
- return [int(p),commands[p]]
-
- def run_serial(self, command, raw=False):
+ def run_serial(self, command, raw=False, timeout=60):
# We assume target system have echo to get command status
if not raw:
command = "%s; echo $?\n" % command
@@ -392,20 +459,26 @@ class QemuRunner:
data = ''
status = 0
self.server_socket.sendall(command.encode('utf-8'))
- keepreading = True
- while keepreading:
- sread, _, _ = select.select([self.server_socket],[],[],5)
+ start = time.time()
+ end = start + timeout
+ while True:
+ now = time.time()
+ if now >= end:
+ data += "<<< run_serial(): command timed out after %d seconds without output >>>\r\n\r\n" % timeout
+ break
+ try:
+ sread, _, _ = select.select([self.server_socket],[],[], end - now)
+ except InterruptedError:
+ continue
if sread:
answer = self.server_socket.recv(1024)
if answer:
data += answer.decode('utf-8')
# Search the prompt to stop
- if re.search("[a-zA-Z0-9]+@[a-zA-Z0-9\-]+:~#", data):
- keepreading = False
+ if re.search(r"[a-zA-Z0-9]+@[a-zA-Z0-9\-]+:~#", data):
+ break
else:
raise Exception("No data on serial console socket")
- else:
- keepreading = False
if data:
if raw:
@@ -427,7 +500,7 @@ class QemuRunner:
def _dump_host(self):
self.host_dumper.create_dir("qemu")
- logger.warn("Qemu ended unexpectedly, dump data from host"
+ self.logger.warning("Qemu ended unexpectedly, dump data from host"
" is in %s" % self.host_dumper.dump_dir)
self.host_dumper.dump_host()
@@ -456,17 +529,17 @@ class LoggingThread(threading.Thread):
self.teardown()
def run(self):
- self.logger.info("Starting logging thread")
+ self.logger.debug("Starting logging thread")
self.readpipe, self.writepipe = os.pipe()
threading.Thread.run(self)
def stop(self):
- self.logger.info("Stopping logging thread")
+ self.logger.debug("Stopping logging thread")
if self.running:
os.write(self.writepipe, bytes("stop", "utf-8"))
def teardown(self):
- self.logger.info("Tearing down logging thread")
+ self.logger.debug("Tearing down logging thread")
self.close_socket(self.serversock)
if self.readsock is not None:
@@ -484,7 +557,7 @@ class LoggingThread(threading.Thread):
breakout = False
self.running = True
- self.logger.info("Starting thread event loop")
+ self.logger.debug("Starting thread event loop")
while not breakout:
events = poll.poll()
for event in events:
@@ -494,19 +567,19 @@ class LoggingThread(threading.Thread):
# Event to stop the thread
if self.readpipe == event[0]:
- self.logger.info("Stop event received")
+ self.logger.debug("Stop event received")
breakout = True
break
# A connection request was received
elif self.serversock.fileno() == event[0]:
- self.logger.info("Connection request received")
+ self.logger.debug("Connection request received")
self.readsock, _ = self.serversock.accept()
self.readsock.setblocking(0)
poll.unregister(self.serversock.fileno())
poll.register(self.readsock.fileno(), event_read_mask)
- self.logger.info("Setting connection established event")
+ self.logger.debug("Setting connection established event")
self.connection_established.set()
# Actual data to be logged
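
Pulling the qemurunner changes together, a hedged sketch of driving the
reworked class (all paths and values are placeholders; the keyword arguments
follow the constructor signature above):

    import logging

    logger = logging.getLogger("BitBake.QemuRunner")
    runner = QemuRunner(machine="qemux86-64", rootfs="/path/to/image.ext4",
                        display=None, tmpdir="/path/to/tmp",
                        deploy_dir_image="/path/to/deploy",
                        logfile="qemu_boot.log", boottime=300,
                        dump_dir="/tmp/qemu-dumps", dump_host_cmds="",
                        use_kvm=True, logger=logger)
    if runner.start():
        # run_serial() now takes a timeout (default 60s) instead of
        # polling forever
        status, output = runner.run_serial("uname -a", timeout=30)
    runner.stop()
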
diff --git a/meta/lib/oeqa/utils/qemutinyrunner.py b/meta/lib/oeqa/utils/qemutinyrunner.py
index d554f0dbcd..364005bd2d 100644
--- a/meta/lib/oeqa/utils/qemutinyrunner.py
+++ b/meta/lib/oeqa/utils/qemutinyrunner.py
@@ -1,6 +1,8 @@
+#
# Copyright (C) 2015 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
# This module provides a class for starting qemu images of poky tiny.
# It's used by testimage.bbclass.
@@ -17,7 +19,7 @@ from .qemurunner import QemuRunner
class QemuTinyRunner(QemuRunner):
- def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, kernel, boottime):
+ def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, kernel, boottime, logger):
# Popen object for runqemu
self.runqemu = None
@@ -40,6 +42,7 @@ class QemuTinyRunner(QemuRunner):
self.socketfile = "console.sock"
self.server_socket = None
self.kernel = kernel
+ self.logger = logger
def create_socket(self):
@@ -60,7 +63,7 @@ class QemuTinyRunner(QemuRunner):
with open(self.logfile, "a") as f:
f.write("%s" % msg)
- def start(self, qemuparams = None, ssh=True, extra_bootparams=None):
+ def start(self, qemuparams = None, ssh=True, extra_bootparams=None, runqemuparams='', discard_writes=True):
if self.display:
os.environ["DISPLAY"] = self.display
@@ -107,14 +110,17 @@ class QemuTinyRunner(QemuRunner):
return self.is_alive()
- def run_serial(self, command):
+ def run_serial(self, command, timeout=60):
self.server_socket.sendall(command+'\n')
data = ''
status = 0
stopread = False
- endtime = time.time()+5
+ endtime = time.time()+timeout
while time.time()<endtime and not stopread:
- sread, _, _ = select.select([self.server_socket],[],[],5)
+ try:
+ sread, _, _ = select.select([self.server_socket],[],[],1)
+ except InterruptedError:
+ continue
for sock in sread:
answer = sock.recv(1024)
if answer:
@@ -124,6 +130,8 @@ class QemuTinyRunner(QemuRunner):
stopread = True
if not data:
status = 1
+ if not stopread:
+ data += "<<< run_serial(): command timed out after %d seconds without output >>>\r\n\r\n" % timeout
return (status, str(data))
def find_child(self,parent_pid):
diff --git a/meta/lib/oeqa/utils/sshcontrol.py b/meta/lib/oeqa/utils/sshcontrol.py
index da485ee408..49a07264c6 100644
--- a/meta/lib/oeqa/utils/sshcontrol.py
+++ b/meta/lib/oeqa/utils/sshcontrol.py
@@ -1,7 +1,8 @@
-# -*- coding: utf-8 -*-
+#
# Copyright (C) 2013 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
# Provides a class for setting up ssh connections,
# running commands and copying files to/from a target.
@@ -52,17 +53,19 @@ class SSHProcess(object):
endtime = self.starttime + timeout
eof = False
while time.time() < endtime and not eof:
- if select.select([self.process.stdout], [], [], 5)[0] != []:
- data = os.read(self.process.stdout.fileno(), 1024)
- if not data:
- self.process.stdout.close()
- eof = True
- else:
- data = data.decode("utf-8")
- output += data
- self.log(data)
- endtime = time.time() + timeout
-
+ try:
+ if select.select([self.process.stdout], [], [], 5)[0] != []:
+ data = os.read(self.process.stdout.fileno(), 1024)
+ if not data:
+ self.process.stdout.close()
+ eof = True
+ else:
+ data = data.decode("utf-8")
+ output += data
+ self.log(data)
+ endtime = time.time() + timeout
+ except InterruptedError:
+ continue
# process hasn't returned yet
if not eof:
@@ -148,12 +151,9 @@ class SSHControl(object):
def copy_to(self, localpath, remotepath):
if os.path.islink(localpath):
- link = os.readlink(localpath)
- dst_dir, dst_base = os.path.split(remotepath)
- return self.run("cd %s; ln -s %s %s" % (dst_dir, link, dst_base))
- else:
- command = self.scp + [localpath, '%s@%s:%s' % (self.user, self.ip, remotepath)]
- return self._internal_run(command, ignore_status=False)
+ localpath = os.path.dirname(localpath) + "/" + os.readlink(localpath)
+ command = self.scp + [localpath, '%s@%s:%s' % (self.user, self.ip, remotepath)]
+ return self._internal_run(command, ignore_status=False)
def copy_from(self, remotepath, localpath):
command = self.scp + ['%s@%s:%s' % (self.user, self.ip, remotepath), localpath]
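
With this change copy_to() dereferences a symlink locally and scps the file it
points to, rather than recreating the link on the target. A usage sketch,
assuming the constructor keeps its usual ip/logfile/timeout/user parameters
(the IP is a placeholder):

    ssh = SSHControl(ip="192.168.7.2", logfile="ssh_target_log", user="root")
    status, output = ssh.run("uname -a")
    ssh.copy_to("./hello", "/usr/bin/hello")   # symlinks dereferenced locally
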
diff --git a/meta/lib/oeqa/utils/subprocesstweak.py b/meta/lib/oeqa/utils/subprocesstweak.py
new file mode 100644
index 0000000000..b47975a4bc
--- /dev/null
+++ b/meta/lib/oeqa/utils/subprocesstweak.py
@@ -0,0 +1,22 @@
+#
+# SPDX-License-Identifier: MIT
+#
+import subprocess
+
+class OETestCalledProcessError(subprocess.CalledProcessError):
+ def __str__(self):
+ def strify(o):
+ if isinstance(o, bytes):
+ return o.decode("utf-8", errors="replace")
+ else:
+ return o
+
+ s = "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
+ if hasattr(self, "output") and self.output:
+ s = s + "\nStandard Output: " + strify(self.output)
+ if hasattr(self, "stderr") and self.stderr:
+ s = s + "\nStandard Error: " + strify(self.stderr)
+ return s
+
+def errors_have_output():
+ subprocess.CalledProcessError = OETestCalledProcessError
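
The intended use is to call errors_have_output() once at test-module import
time; any CalledProcessError subsequently raised by the subprocess module then
carries its captured output in the traceback. A sketch:

    import subprocess
    from oeqa.utils.subprocesstweak import errors_have_output

    errors_have_output()
    try:
        subprocess.run(["ls", "/nonexistent"], check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        print(e)   # now ends with "Standard Error: ls: cannot access ..."
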
diff --git a/meta/lib/oeqa/utils/targetbuild.py b/meta/lib/oeqa/utils/targetbuild.py
index 59593f5ef3..1055810ca3 100644
--- a/meta/lib/oeqa/utils/targetbuild.py
+++ b/meta/lib/oeqa/utils/targetbuild.py
@@ -1,6 +1,8 @@
+#
# Copyright (C) 2013 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
# Provides a class for automating build tests for projects
@@ -8,15 +10,21 @@ import os
import re
import bb.utils
import subprocess
+import tempfile
from abc import ABCMeta, abstractmethod
class BuildProject(metaclass=ABCMeta):
- def __init__(self, d, uri, foldername=None, tmpdir="/tmp/"):
+ def __init__(self, d, uri, foldername=None, tmpdir=None):
self.d = d
self.uri = uri
self.archive = os.path.basename(uri)
- self.localarchive = os.path.join(tmpdir,self.archive)
+ if not tmpdir:
+ tmpdir = self.d.getVar('WORKDIR')
+ if not tmpdir:
+ self.tempdirobj = tempfile.TemporaryDirectory(prefix='buildproject-')
+ tmpdir = self.tempdirobj.name
+ self.localarchive = os.path.join(tmpdir, self.archive)
if foldername:
self.fname = foldername
else:
@@ -24,8 +32,7 @@ class BuildProject(metaclass=ABCMeta):
# Download self.archive to self.localarchive
def _download_archive(self):
-
- dl_dir = self.d.getVar("DL_DIR", True)
+ dl_dir = self.d.getVar("DL_DIR")
if dl_dir and os.path.exists(os.path.join(dl_dir, self.archive)):
bb.utils.copyfile(os.path.join(dl_dir, self.archive), self.localarchive)
return
@@ -40,12 +47,12 @@ class BuildProject(metaclass=ABCMeta):
cmd = ''
for var in exportvars:
- val = self.d.getVar(var, True)
+ val = self.d.getVar(var)
if val:
cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
cmd = cmd + "wget -O %s %s" % (self.localarchive, self.uri)
- subprocess.check_call(cmd, shell=True)
+ subprocess.check_output(cmd, shell=True)
# This method should provide a way to run a command in the desired environment.
@abstractmethod
@@ -65,7 +72,7 @@ class BuildProject(metaclass=ABCMeta):
def clean(self):
self._run('rm -rf %s' % self.targetdir)
- subprocess.call('rm -f %s' % self.localarchive, shell=True)
+ subprocess.check_call('rm -f %s' % self.localarchive, shell=True)
pass
class TargetBuildProject(BuildProject):
@@ -73,7 +80,7 @@ class TargetBuildProject(BuildProject):
def __init__(self, target, d, uri, foldername=None):
self.target = target
self.targetdir = "~/"
- BuildProject.__init__(self, d, uri, foldername, tmpdir="/tmp")
+ BuildProject.__init__(self, d, uri, foldername)
def download_archive(self):
@@ -103,8 +110,8 @@ class SDKBuildProject(BuildProject):
self.testdir = testpath
self.targetdir = testpath
bb.utils.mkdirhier(testpath)
- self.datetime = d.getVar('DATETIME', True)
- self.testlogdir = d.getVar("TEST_LOG_DIR", True)
+ self.datetime = d.getVar('DATETIME')
+ self.testlogdir = d.getVar("TEST_LOG_DIR")
bb.utils.mkdirhier(self.testlogdir)
self.logfile = os.path.join(self.testlogdir, "sdk_target_log.%s" % self.datetime)
BuildProject.__init__(self, d, uri, foldername, tmpdir=testpath)
@@ -114,7 +121,7 @@ class SDKBuildProject(BuildProject):
self._download_archive()
cmd = 'tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir)
- subprocess.check_call(cmd, shell=True)
+ subprocess.check_output(cmd, shell=True)
#Change targetdir to project folder
self.targetdir = os.path.join(self.targetdir, self.fname)
@@ -132,4 +139,4 @@ class SDKBuildProject(BuildProject):
def _run(self, cmd):
self.log("Running . %s; " % self.sdkenv + cmd)
- return subprocess.call(". %s; " % self.sdkenv + cmd, shell=True)
+ return subprocess.check_call(". %s; " % self.sdkenv + cmd, shell=True)
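
A hedged sketch of exercising SDKBuildProject with the new defaults (testpath,
sdkenv and the URI are placeholders; run_configure()/run_make() are provided by
BuildProject outside this hunk):

    proj = SDKBuildProject("/path/to/testdir/", "/path/to/environment-setup-sdk",
                           d, "https://example.com/project-1.0.tar.gz")
    proj.download_archive()
    proj.run_configure()
    proj.run_make()
    proj.clean()
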
diff --git a/meta/lib/oeqa/utils/testexport.py b/meta/lib/oeqa/utils/testexport.py
index 57be2ca449..e89d130a9c 100644
--- a/meta/lib/oeqa/utils/testexport.py
+++ b/meta/lib/oeqa/utils/testexport.py
@@ -1,6 +1,8 @@
+#
# Copyright (C) 2015 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
# Provides functions to help with exporting binaries obtained from built targets
@@ -72,9 +74,9 @@ def process_binaries(d, params):
return extract_bin_command
if determine_if_poky_env(): # machine with poky environment
- exportpath = d.getVar("TEST_EXPORT_DIR", True) if export_env else d.getVar("DEPLOY_DIR", True)
- rpm_deploy_dir = d.getVar("DEPLOY_DIR_RPM", True)
- arch = get_dest_folder(d.getVar("TUNE_FEATURES", True), os.listdir(rpm_deploy_dir))
+ exportpath = d.getVar("TEST_EXPORT_DIR") if export_env else d.getVar("DEPLOY_DIR")
+ rpm_deploy_dir = d.getVar("DEPLOY_DIR_RPM")
+ arch = get_dest_folder(d.getVar("TUNE_FEATURES"), os.listdir(rpm_deploy_dir))
arch_rpm_dir = os.path.join(rpm_deploy_dir, arch)
extracted_bin_dir = os.path.join(exportpath,"binaries", arch, "extracted_binaries")
packaged_bin_dir = os.path.join(exportpath,"binaries", arch, "packaged_binaries")
@@ -92,7 +94,7 @@ def process_binaries(d, params):
return ""
for item in native_rpm_file_list:# will copy all versions of package. Used version will be selected on remote machine
bb.plain("Copying native package file: %s" % item)
- sh.copy(os.path.join(rpm_deploy_dir, native_rpm_dir, item), os.path.join(d.getVar("TEST_EXPORT_DIR", True), "binaries", "native"))
+ sh.copy(os.path.join(rpm_deploy_dir, native_rpm_dir, item), os.path.join(d.getVar("TEST_EXPORT_DIR"), "binaries", "native"))
else: # nothing to do here; running tests under bitbake, so we asume native binaries are in sysroots dir.
if param_list[1] or param_list[4]:
bb.warn("Native binary %s %s%s. Running tests under bitbake environment. Version can't be checked except when the test itself does it"
@@ -148,7 +150,7 @@ def process_binaries(d, params):
else: # this is for target device
if param_list[2] == "rpm":
return "No need to extract, this is an .rpm file"
- arch = get_dest_folder(d.getVar("TUNE_FEATURES", True), os.listdir(binaries_path))
+ arch = get_dest_folder(d.getVar("TUNE_FEATURES"), os.listdir(binaries_path))
extracted_bin_path = os.path.join(binaries_path, arch, "extracted_binaries")
extracted_bin_list = [item for item in os.listdir(extracted_bin_path)]
packaged_bin_path = os.path.join(binaries_path, arch, "packaged_binaries")
@@ -206,9 +208,9 @@ def send_bin_to_DUT(d,params):
from oeqa.oetest import oeRuntimeTest
param_list = params
cleanup_list = list()
- bins_dir = os.path.join(d.getVar("TEST_EXPORT_DIR", True), "binaries") if determine_if_poky_env() \
+ bins_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "binaries") if determine_if_poky_env() \
else os.getenv("bin_dir")
- arch = get_dest_folder(d.getVar("TUNE_FEATURES", True), os.listdir(bins_dir))
+ arch = get_dest_folder(d.getVar("TUNE_FEATURES"), os.listdir(bins_dir))
arch_rpms_dir = os.path.join(bins_dir, arch, "packaged_binaries")
extracted_bin_dir = os.path.join(bins_dir, arch, "extracted_binaries", param_list[0])
diff --git a/meta/lib/rootfspostcommands.py b/meta/lib/rootfspostcommands.py
new file mode 100644
index 0000000000..fdb9f5b850
--- /dev/null
+++ b/meta/lib/rootfspostcommands.py
@@ -0,0 +1,60 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+
+def sort_file(filename, mapping):
+ """
+ Sorts a passwd or group file based on the numeric ID in the third column.
+ If a mapping is given, the name from the first column is mapped via that
+ dictionary instead (necessary for /etc/shadow and /etc/gshadow). If not,
+ a new mapping is created on the fly and returned.
+ """
+ new_mapping = {}
+ with open(filename, 'rb+') as f:
+ lines = f.readlines()
+ # No explicit error checking for the sake of simplicity. /etc
+ # files are assumed to be well-formed, causing exceptions if
+ # not.
+ for line in lines:
+ entries = line.split(b':')
+ name = entries[0]
+ if mapping is None:
+ id = int(entries[2])
+ else:
+ id = mapping[name]
+ new_mapping[name] = id
+ # Sort by numeric id first, with entire line as secondary key
+        # (just in case there is more than one entry for the same id).
+ lines.sort(key=lambda line: (new_mapping[line.split(b':')[0]], line))
+ # We overwrite the entire file, i.e. no truncate() necessary.
+ f.seek(0)
+ f.write(b''.join(lines))
+ return new_mapping
+
+def remove_backup(filename):
+ """
+ Removes the backup file for files like /etc/passwd.
+ """
+ backup_filename = filename + '-'
+ if os.path.exists(backup_filename):
+ os.unlink(backup_filename)
+
+def sort_passwd(sysconfdir):
+ """
+ Sorts passwd and group files in a rootfs /etc directory by ID.
+ Backup files are sometimes are inconsistent and then cannot be
+    Backup files are sometimes inconsistent and then cannot be
+ the initial rootfs, so they get deleted.
+ """
+ for main, shadow in (('passwd', 'shadow'),
+ ('group', 'gshadow')):
+ filename = os.path.join(sysconfdir, main)
+ remove_backup(filename)
+ if os.path.exists(filename):
+ mapping = sort_file(filename, None)
+ filename = os.path.join(sysconfdir, shadow)
+ remove_backup(filename)
+ if os.path.exists(filename):
+ sort_file(filename, mapping)
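
A sketch of the expected entry point, as a rootfs post-processing hook would
invoke it (rootfs_dir stands in for ${IMAGE_ROOTFS}):

    import os
    import rootfspostcommands

    rootfspostcommands.sort_passwd(os.path.join(rootfs_dir, 'etc'))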