# Copyright (c) 2013-2014 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)

# DESCRIPTION
# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest
# It provides a class and methods for running commands on the host in a convenient way for tests.
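#
# Typical usage from a selftest (illustrative only; the command, recipe and
# variable names below are just examples):
#
#   from oeqa.utils.commands import runCmd, bitbake, get_bb_var
#
#   result = runCmd('uname -a')          # run a host command
#   bitbake('core-image-minimal')        # run a bitbake build
#   machine = get_bb_var('MACHINE')      # query a bitbake variable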



import os
import sys
import signal
import subprocess
import threading
import time
import logging
from oeqa.utils import CommandError
from oeqa.utils import ftools
import re
import contextlib
import errno
# Export test doesn't require bb
try:
    import bb
except ImportError:
    pass

class Command(object):
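    """Run a host command, optionally in the background and with a timeout.

    stdout/stderr are drained in reader threads so the child process cannot
    block on a full pipe; stop() (called automatically by run() for
    foreground commands) fills in self.status, self.output and self.error.
    """
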
    def __init__(self, command, bg=False, timeout=None, data=None, output_log=None, **options):

        self.defaultopts = {
            "stdout": subprocess.PIPE,
            "stderr": subprocess.STDOUT,
            "stdin": None,
            "shell": False,
            "bufsize": -1,
        }

        self.cmd = command
        self.bg = bg
        self.timeout = timeout
        self.data = data

        self.options = dict(self.defaultopts)
        if isinstance(self.cmd, str):
            self.options["shell"] = True
        if self.data:
            self.options['stdin'] = subprocess.PIPE
        self.options.update(options)

        self.status = None
        # We collect chunks of output before joining them at the end.
        self._output_chunks = []
        self._error_chunks = []
        self.output = None
        self.error = None
        self.threads = []

        self.output_log = output_log
        self.log = logging.getLogger("utils.commands")

    def run(self):
        self.process = subprocess.Popen(self.cmd, **self.options)

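        # Helper used by the reader threads below: drain 'stream' into the
        # 'output' list and, if a log function was supplied, also log each
        # line as it is read.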
        def readThread(output, stream, logfunc):
            if logfunc:
                for line in stream:
                    output.append(line)
                    logfunc(line.decode("utf-8", errors='replace').rstrip())
            else:
                output.append(stream.read())

        def readStderrThread():
            readThread(self._error_chunks, self.process.stderr, self.output_log.error if self.output_log else None)

        def readStdoutThread():
            readThread(self._output_chunks, self.process.stdout, self.output_log.info if self.output_log else None)

        def writeThread():
            try:
                self.process.stdin.write(self.data)
                self.process.stdin.close()
            except OSError as ex:
                # It's not an error when the command does not consume all
                # of our data. subprocess.communicate() also ignores that.
                if ex.errno != errno.EPIPE:
                    raise

        # We write in a separate thread so that we can read the output
        # without worrying about deadlocks. The write thread is expected to
        # terminate by itself; it is marked as a daemon so that even if it
        # fails to terminate for whatever reason, the main process will
        # still exit, which will then kill the write thread.
        if self.data:
            threading.Thread(target=writeThread, daemon=True).start()
        if self.process.stderr:
            thread = threading.Thread(target=readStderrThread)
            thread.start()
            self.threads.append(thread)
        if self.output_log:
            self.output_log.info('Running: %s' % self.cmd)
        thread = threading.Thread(target=readStdoutThread)
        thread.start()
        self.threads.append(thread)

        self.log.debug("Running command '%s'" % self.cmd)

        if not self.bg:
            if self.timeout is None:
                for thread in self.threads:
                    thread.join()
            else:
                deadline = time.time() + self.timeout
                for thread in self.threads:
                    timeout = deadline - time.time()
                    if timeout < 0:
                        timeout = 0
                    thread.join(timeout)
            self.stop()

    def stop(self):
        for thread in self.threads:
            if thread.is_alive():
                self.process.terminate()
            # let's give it more time to terminate gracefully before killing it
            thread.join(5)
            if thread.is_alive():
                self.process.kill()
                thread.join()

        def finalize_output(data):
            if not data:
                data = ""
            else:
                data = b"".join(data)
                data = data.decode("utf-8", errors='replace').rstrip()
            return data

        self.output = finalize_output(self._output_chunks)
        self._output_chunks = None
        # self.error used to be a byte string earlier, probably unintentionally.
        # Now it is a normal string, just like self.output.
        self.error = finalize_output(self._error_chunks)
        self._error_chunks = None
        # At this point we know that the process has closed stdout/stderr, so
        # it is safe and necessary to wait for the actual process completion.
        self.status = self.process.wait()

        self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
        # Logging the complete output would make the log file unusable
        # (e.g. "bitbake -e" output is huge), so only log the tail on failure.
        if self.status:
            lout = "\n".join(self.output.splitlines()[-20:])
            self.log.debug("Last 20 lines:\n%s" % lout)


class Result(object):
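    """Plain container for runCmd() results; command, status, output, error
    and pid are set as attributes."""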
    pass


def runCmd(command, ignore_status=False, timeout=None, assert_error=True,
          native_sysroot=None, limit_exc_output=0, output_log=None, **options):
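    """Run a command on the host and return a Result with command, status,
    output, error and pid set.

    A non-zero exit status raises AssertionError (or CommandError if
    assert_error is False) unless ignore_status is set. Minimal illustrative
    example:

        result = runCmd('ls /tmp')
        print(result.status, result.output)
    """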
    result = Result()

    if native_sysroot:
        extra_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
                      (native_sysroot, native_sysroot, native_sysroot)
        nenv = dict(options.get('env', os.environ))
        nenv['PATH'] = extra_paths + ':' + nenv.get('PATH', '')
        options['env'] = nenv

    cmd = Command(command, timeout=timeout, output_log=output_log, **options)
    cmd.run()

    result.command = command
    result.status = cmd.status
    result.output = cmd.output
    result.error = cmd.error
    result.pid = cmd.process.pid

    if result.status and not ignore_status:
        exc_output = result.output
        if limit_exc_output > 0:
            split = result.output.splitlines()
            if len(split) > limit_exc_output:
                exc_output = "\n... (last %d lines of output)\n" % limit_exc_output + \
                             '\n'.join(split[-limit_exc_output:])
        if assert_error:
            raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, exc_output))
        else:
            raise CommandError(result.status, command, exc_output)

    return result


def bitbake(command, ignore_status=False, timeout=None, postconfig=None, output_log=None, **options):
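    """Run a bitbake command via runCmd().

    'command' may be a string or a list of arguments; if postconfig is given
    it is written to a temporary file and passed to bitbake with -R.
    Illustrative call (the target name is only an example):

        bitbake('core-image-minimal')
    """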

    if postconfig:
        postconfig_file = os.path.join(os.environ.get('BUILDDIR'), 'oeqa-post.conf')
        ftools.write_file(postconfig_file, postconfig)
        extra_args = "-R %s" % postconfig_file
    else:
        extra_args = ""

    if isinstance(command, str):
        cmd = "bitbake " + extra_args + " " + command
    else:
        cmd = [ "bitbake" ] + [a for a in (command + extra_args.split(" ")) if a not in [""]]

    try:
        return runCmd(cmd, ignore_status, timeout, output_log=output_log, **options)
    finally:
        if postconfig:
            os.remove(postconfig_file)


def get_bb_env(target=None, postconfig=None):
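    """Return the output of 'bitbake -e' (optionally for a specific target)."""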
    if target:
        return bitbake("-e %s" % target, postconfig=postconfig).output
    else:
        return bitbake("-e", postconfig=postconfig).output

def get_bb_vars(variables=None, target=None, postconfig=None):
    """Get values of multiple bitbake variables"""
    bbenv = get_bb_env(target, postconfig=postconfig)

    if variables is not None:
        variables = variables.copy()
    var_re = re.compile(r'^(export )?(?P<var>\w+(_.*)?)="(?P<value>.*)"$')
    unset_re = re.compile(r'^unset (?P<var>\w+)$')
    lastline = None
    values = {}
    for line in bbenv.splitlines():
        match = var_re.match(line)
        val = None
        if match:
            val = match.group('value')
        else:
            match = unset_re.match(line)
            if match:
                # Handle [unexport] variables
                if lastline.startswith('#   "'):
                    val = lastline.split('"')[1]
        if val:
            var = match.group('var')
            if variables is None:
                values[var] = val
            else:
                if var in variables:
                    values[var] = val
                    variables.remove(var)
                # Stop after all required variables have been found
                if not variables:
                    break
        lastline = line
    if variables:
        # Fill in missing values
        for var in variables:
            values[var] = None
    return values

def get_bb_var(var, target=None, postconfig=None):
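    """Return the value of a single bitbake variable (None if not found)."""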
    return get_bb_vars([var], target, postconfig)[var]

def get_test_layer():
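    """Return the path to the meta-selftest layer from BBLAYERS, or None."""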
    layers = get_bb_var("BBLAYERS").split()
    testlayer = None
    for l in layers:
        if '~' in l:
            l = os.path.expanduser(l)
        if "/meta-selftest" in l and os.path.isdir(l):
            testlayer = l
            break
    return testlayer

def create_temp_layer(templayerdir, templayername, priority=999, recipepathspec='recipes-*/*'):
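    """Create a minimal conf/layer.conf under templayerdir so the directory
    can be added to BBLAYERS as a temporary test layer."""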
    os.makedirs(os.path.join(templayerdir, 'conf'))
    with open(os.path.join(templayerdir, 'conf', 'layer.conf'), 'w') as f:
        f.write('BBPATH .= ":${LAYERDIR}"\n')
        f.write('BBFILES += "${LAYERDIR}/%s/*.bb \\\n' % recipepathspec)
        f.write('            ${LAYERDIR}/%s/*.bbappend"\n' % recipepathspec)
        f.write('BBFILE_COLLECTIONS += "%s"\n' % templayername)
        f.write('BBFILE_PATTERN_%s = "^${LAYERDIR}/"\n' % templayername)
        f.write('BBFILE_PRIORITY_%s = "%d"\n' % (templayername, priority))
        f.write('BBFILE_PATTERN_IGNORE_EMPTY_%s = "1"\n' % templayername)


@contextlib.contextmanager
def runqemu(pn, ssh=True, runqemuparams='', image_fstype=None, launch_cmd=None, qemuparams=None, overrides={}, discard_writes=True):
    """
    If launch_cmd is given, run it directly; there is no need to set up a
    rootfs or environment variables.
    """

    import bb.tinfoil
    import bb.build

    # Need a non-'BitBake' logger to capture the runner output
    targetlogger = logging.getLogger('TargetRunner')
    targetlogger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler(sys.stdout)
    targetlogger.addHandler(handler)

    tinfoil = bb.tinfoil.Tinfoil()
    tinfoil.prepare(config_only=False, quiet=True)
    try:
        tinfoil.logger.setLevel(logging.WARNING)
        import oeqa.targetcontrol
        tinfoil.config_data.setVar("TEST_LOG_DIR", "${WORKDIR}/testimage")
        tinfoil.config_data.setVar("TEST_QEMUBOOT_TIMEOUT", "1000")
        # Tell QemuTarget() whether it needs to find a rootfs/kernel or not
        if launch_cmd:
            tinfoil.config_data.setVar("FIND_ROOTFS", '0')
        else:
            tinfoil.config_data.setVar("FIND_ROOTFS", '1')

        recipedata = tinfoil.parse_recipe(pn)
        for key, value in overrides.items():
            recipedata.setVar(key, value)

        logdir = recipedata.getVar("TEST_LOG_DIR")

        qemu = oeqa.targetcontrol.QemuTarget(recipedata, targetlogger, image_fstype)
    finally:
        # We need to shut down tinfoil early here in case we actually want
        # to run tinfoil-using utilities with the running QEMU instance.
        # Luckily QemuTarget doesn't need it after the constructor.
        tinfoil.shutdown()

    try:
        qemu.deploy()
        try:
            qemu.start(params=qemuparams, ssh=ssh, runqemuparams=runqemuparams, launch_cmd=launch_cmd, discard_writes=discard_writes)
        except bb.build.FuncFailed:
            raise Exception('Failed to start QEMU - see the logs in %s' % logdir)

        yield qemu

    finally:
        try:
            qemu.stop()
        except:
            pass
    targetlogger.removeHandler(handler)

def updateEnv(env_file):
    """
    Source a file and update environment.
    """

    cmd = ". %s; env -0" % env_file
    result = runCmd(cmd)

    for line in result.output.split("\0"):
        # env -0 output ends with a NUL, so skip the empty trailing entry
        if not line:
            continue
        (key, _, value) = line.partition("=")
        os.environ[key] = value