author     thead_admin <occ_thead@service.alibaba.com>  2022-09-13 11:04:33 +0800
committer  thead_admin <occ_thead@service.alibaba.com>  2022-09-13 11:04:33 +0800
commit     43db9e00d5837c100c0b2fbbee64a08ab807d1e0 (patch)
tree       b40c0eed02935b6682e8c5c975e3016b6b2f55fe /test/py
ref        Linux_SDK_V0.9.5
Diffstat (limited to 'test/py')
-rw-r--r--  test/py/.gitignore | 1
-rw-r--r--  test/py/README.md | 370
-rw-r--r--  test/py/conftest.py | 596
-rw-r--r--  test/py/multiplexed_log.css | 108
-rw-r--r--  test/py/multiplexed_log.py | 709
-rw-r--r--  test/py/pytest.ini | 13
-rw-r--r--  test/py/requirements.txt | 22
-rwxr-xr-x  test/py/test.py | 20
-rw-r--r--  test/py/tests/test_000_version.py | 19
-rw-r--r--  test/py/tests/test_android/test_ab.py | 75
-rw-r--r--  test/py/tests/test_android/test_avb.py | 137
-rw-r--r--  test/py/tests/test_bind.py | 179
-rw-r--r--  test/py/tests/test_dfu.py | 320
-rw-r--r--  test/py/tests/test_efi_loader.py | 196
-rw-r--r--  test/py/tests/test_efi_selftest.py | 198
-rw-r--r--  test/py/tests/test_env.py | 338
-rwxr-xr-x  test/py/tests/test_fit.py | 457
-rw-r--r--  test/py/tests/test_fpga.py | 565
-rw-r--r--  test/py/tests/test_fs/conftest.py | 602
-rw-r--r--  test/py/tests/test_fs/fstest_defs.py | 16
-rw-r--r--  test/py/tests/test_fs/fstest_helpers.py | 13
-rw-r--r--  test/py/tests/test_fs/test_basic.py | 292
-rw-r--r--  test/py/tests/test_fs/test_ext.py | 319
-rw-r--r--  test/py/tests/test_fs/test_mkdir.py | 121
-rw-r--r--  test/py/tests/test_fs/test_symlink.py | 130
-rw-r--r--  test/py/tests/test_fs/test_unlink.py | 118
-rw-r--r--  test/py/tests/test_gpt.py | 178
-rw-r--r--  test/py/tests/test_handoff.py | 15
-rw-r--r--  test/py/tests/test_help.py | 8
-rw-r--r--  test/py/tests/test_hush_if_test.py | 161
-rw-r--r--  test/py/tests/test_log.py | 132
-rw-r--r--  test/py/tests/test_md.py | 36
-rw-r--r--  test/py/tests/test_mmc_rd.py | 286
-rw-r--r--  test/py/tests/test_mmc_wr.py | 105
-rw-r--r--  test/py/tests/test_net.py | 208
-rw-r--r--  test/py/tests/test_ofplatdata.py | 70
-rw-r--r--  test/py/tests/test_pinmux.py | 66
-rw-r--r--  test/py/tests/test_sandbox_exit.py | 21
-rw-r--r--  test/py/tests/test_sf.py | 217
-rw-r--r--  test/py/tests/test_shell_basics.py | 45
-rw-r--r--  test/py/tests/test_sleep.py | 35
-rw-r--r--  test/py/tests/test_tpm2.py | 233
-rw-r--r--  test/py/tests/test_ums.py | 236
-rw-r--r--  test/py/tests/test_unknown_cmd.py | 13
-rw-r--r--  test/py/tests/test_ut.py | 28
-rw-r--r--  test/py/tests/test_vboot.py | 293
-rw-r--r--  test/py/tests/vboot/sandbox-kernel.dts | 7
-rw-r--r--  test/py/tests/vboot/sandbox-u-boot.dts | 10
-rw-r--r--  test/py/tests/vboot/sign-configs-sha1-pss.its | 46
-rw-r--r--  test/py/tests/vboot/sign-configs-sha1.its | 45
-rw-r--r--  test/py/tests/vboot/sign-configs-sha256-pss-prod.its | 46
-rw-r--r--  test/py/tests/vboot/sign-configs-sha256-pss.its | 46
-rw-r--r--  test/py/tests/vboot/sign-configs-sha256.its | 45
-rw-r--r--  test/py/tests/vboot/sign-images-sha1-pss.its | 44
-rw-r--r--  test/py/tests/vboot/sign-images-sha1.its | 42
-rw-r--r--  test/py/tests/vboot/sign-images-sha256-pss.its | 44
-rw-r--r--  test/py/tests/vboot/sign-images-sha256.its | 42
-rw-r--r--  test/py/u_boot_console_base.py | 468
-rw-r--r--  test/py/u_boot_console_exec_attach.py | 70
-rw-r--r--  test/py/u_boot_console_sandbox.py | 108
-rw-r--r--  test/py/u_boot_spawn.py | 209
-rw-r--r--  test/py/u_boot_utils.py | 340
62 files changed, 9932 insertions, 0 deletions
diff --git a/test/py/.gitignore b/test/py/.gitignore
new file mode 100644
index 00000000..0d20b648
--- /dev/null
+++ b/test/py/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/test/py/README.md b/test/py/README.md
new file mode 100644
index 00000000..3cbe01b7
--- /dev/null
+++ b/test/py/README.md
@@ -0,0 +1,370 @@
+# U-Boot pytest suite
+
+## Introduction
+
+This tool aims to test U-Boot by executing U-Boot shell commands using the
+console interface. A single top-level script exists to execute or attach to the
+U-Boot console, run the entire suite of tests against it, and summarize the
+results. Advantages of this approach are:
+
+- Testing is performed in the same way a user or script would interact with
+ U-Boot; there can be no disconnect.
+- There is no need to write or embed test-related code into U-Boot itself.
+ It is asserted that writing test-related code in Python is simpler and more
+ flexible than writing it all in C.
+- It is reasonably simple to interact with U-Boot in this way.
+
+## Requirements
+
+The test suite is implemented using pytest. Interaction with the U-Boot console
+involves executing some binary and interacting with its stdin/stdout. You will
+need to implement various "hook" scripts that are called by the test suite at
+the appropriate time.
+
+To run the test suite, at a minimum both python3 and pip for python3 must be
+installed. All of the required Python modules are described in the
+requirements.txt file in this directory and can be installed with the command
+```pip install -r requirements.txt```.
+
+In order to execute certain tests on their supported platforms, other tools
+will be required. The following is an incomplete list:
+
+| Package |
+| -------------- |
+| gdisk |
+| dfu-util |
+| dtc |
+| openssl |
+| sudo OR guestmount |
+| e2fsprogs |
+| dosfstools |
+
+Please use the appropriate commands for your distribution to match these tools
+with the packages that provide them.
+
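+For example, on a Debian- or Ubuntu-derived host, something like the following
+should install most of the tools listed above (the package names are an
+assumption and may differ on other distributions):
+
+```bash
+$ sudo apt-get install gdisk dfu-util device-tree-compiler openssl \
+      e2fsprogs dosfstools
+```
+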
+The test script supports either:
+
+- Executing a sandbox port of U-Boot on the local machine as a sub-process,
+ and interacting with it over stdin/stdout.
+- Executing external "hook" scripts to flash a U-Boot binary onto a
+ physical board, attach to the board's console stream, and reset the board.
+ Further details are described later.
+
+### Using `virtualenv` to provide requirements
+
+The recommended way to run the test suite, in order to ensure reproducibility,
+is to use `virtualenv` to set up the necessary environment. This can be done
+via the following commands:
+
+```bash
+$ cd /path/to/u-boot
+$ sudo apt-get install python3 python3-virtualenv
+$ virtualenv -p /usr/bin/python3 venv
+$ . ./venv/bin/activate
+$ pip install -r test/py/requirements.txt
+```
+
+## Testing sandbox
+
+To run the testsuite on the sandbox port (U-Boot built as a native user-space
+application), simply execute:
+
+```
+./test/py/test.py --bd sandbox --build
+```
+
+The `--bd` option tells the test suite which board type is being tested. This
+lets the test suite know which features the board has, and hence exactly what
+can be tested.
+
+The `--build` option tells the test script to compile U-Boot itself.
+Alternatively, you may omit this option and build U-Boot yourself, in whatever
+way you choose, before running the test script.
+
+The test script will attach to U-Boot, execute all valid tests for the board,
+then print a summary of the test process. A complete log of the test session
+will be written to `${build_dir}/test-log.html`. This is best viewed in a web
+browser, but may be read directly as plain text, perhaps with the aid of the
+`html2text` utility.
+
+### Testing under a debugger
+
+If you need to run sandbox under a debugger, you may pass the command-line
+option `--gdbserver COMM`. This causes two things to happen:
+
+- Instead of running U-Boot directly, it will be run under gdbserver, with
+ debug communication via the channel `COMM`. You can attach a debugger to the
+ sandbox process in order to debug it. See `man gdbserver` and the example
+ below for details of valid values for `COMM`.
+- All timeouts in tests are disabled, allowing U-Boot an arbitrary amount of
+ time to execute commands. This is useful if U-Boot is stopped at a breakpoint
+ during debugging.
+
+A usage example is:
+
+Window 1:
+```shell
+./test/py/test.py --bd sandbox --gdbserver localhost:1234
+```
+
+Window 2:
+```shell
+gdb ./build-sandbox/u-boot -ex 'target remote localhost:1234'
+```
+
+Alternatively, you could leave off the `-ex` option and type the command
+manually into gdb once it starts.
+
+You can use any debugger you wish, so long as it speaks the gdb remote
+protocol, or any graphical wrapper around gdb.
+
+Some tests deliberately cause the sandbox process to exit, e.g. to test the
+reset command, or sandbox's CTRL-C handling. When this happens, you will need
+to attach the debugger to the new sandbox instance. If these tests are not
+relevant to your debugging session, you can skip them using pytest's -k
+command-line option; see the next section.
+
+## Command-line options
+
+- `--board-type`, `--bd`, `-B` set the type of the board to be tested. For
+ example, `sandbox` or `seaboard`.
+- `--board-identity`, `--id` set the identity of the board to be tested.
+ This allows differentiation between multiple instances of the same type of
+ physical board that are attached to the same host machine. This parameter is
+ not interpreted by the test script in any way, but rather is simply passed
+ to the hook scripts described below, and may be used in any site-specific
+ way deemed necessary.
+- `--build` indicates that the test script should compile U-Boot itself
+ before running the tests. If using this option, make sure that any
+ environment variables required by the build process are already set, such as
+ `$CROSS_COMPILE`.
+- `--build-dir` sets the directory containing the compiled U-Boot binaries.
+ If omitted, this is `${source_dir}/build-${board_type}`.
+- `--result-dir` sets the directory to write results, such as log files,
+ into. If omitted, the build directory is used.
+- `--persistent-data-dir` sets the directory used to store persistent test
+  data. This is test data that may be re-used across test runs, such as
+  filesystem images.
+
+`pytest` also implements a number of its own command-line options. Commonly used
+options are mentioned below. Please see `pytest` documentation for complete
+details. Execute `py.test --version` for a brief summary. Note that U-Boot's
+test.py script passes all command-line arguments directly to `pytest` for
+processing.
+
+- `-k` selects which tests to run. The default is to run all known tests. This
+ option takes a single argument which is used to filter test names. Simple
+ logical operators are supported. For example:
+ - `'ums'` runs only tests with "ums" in their name.
+ - `'ut_dm'` runs only tests with "ut_dm" in their name. Note that in this
+ case, "ut_dm" is a parameter to a test rather than the test name. The full
+ test name is e.g. "test_ut[ut_dm_leak]".
+ - `'not reset'` runs everything except tests with "reset" in their name.
+ - `'ut or hush'` runs only tests with "ut" or "hush" in their name.
+ - `'not (ut or hush)'` runs everything except tests with "ut" or "hush" in
+ their name.
+- `-s` prevents pytest from hiding a test's stdout. This allows you to see
+ U-Boot's console log in real time on pytest's stdout.
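+
+As a combined example, the following invocation (assuming the sandbox board)
+runs only the "ut" and "hush" tests while showing the console log live:
+
+```bash
+./test/py/test.py --bd sandbox -k 'ut or hush' -s
+```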
+
+## Testing real hardware
+
+The tools and techniques used to interact with real hardware will vary
+radically between different host and target systems, and the whims of the user.
+For this reason, the test suite does not attempt to directly interact with real
+hardware in any way. Rather, it executes a standardized set of "hook" scripts
+via `$PATH`. These scripts implement certain actions on behalf of the test
+suite. This keeps the test suite simple and isolated from system variances
+unrelated to U-Boot features.
+
+### Hook scripts
+
+#### Environment variables
+
+The following environment variables are set when running hook scripts:
+
+- `UBOOT_BOARD_TYPE` the board type being tested.
+- `UBOOT_BOARD_IDENTITY` the board identity being tested, or `na` if none was
+ specified.
+- `UBOOT_SOURCE_DIR` the U-Boot source directory.
+- `UBOOT_TEST_PY_DIR` the full path to `test/py/` in the source directory.
+- `UBOOT_BUILD_DIR` the U-Boot build directory.
+- `UBOOT_RESULT_DIR` the test result directory.
+- `UBOOT_PERSISTENT_DATA_DIR` the test persistent data directory.
+
+#### `u-boot-test-console`
+
+This script provides access to the U-Boot console. The script's stdin/stdout
+should be connected to the board's console. This process should continue to run
+indefinitely, until killed. The test suite will run this script in parallel
+with all other hooks.
+
+This script may be implemented e.g. by exec()ing `cu`, `kermit`, `conmux`, etc.
+
+If you are able to run U-Boot under a hardware simulator such as qemu, then
+you would likely spawn that simulator from this script. However, note that
+`u-boot-test-reset` may be called multiple times per test script run, and must
+cause U-Boot to start execution from scratch each time. Hopefully your
+simulator includes a virtual reset button! If not, you can launch the
+simulator from `u-boot-test-reset` instead, while arranging for this console
+process to always communicate with the current simulator instance.
+
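+A minimal sketch of such a script, assuming a USB serial adapter and `cu`
+(the device path and baud rate are site-specific assumptions):
+
+```bash
+#!/bin/sh
+# Hypothetical u-boot-test-console hook: attach stdin/stdout to the board's
+# serial console and keep running until killed by the test suite.
+exec cu -l /dev/ttyUSB0 -s 115200
+```
+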
+#### `u-boot-test-flash`
+
+Prior to running the test suite against a board, some arrangement must be made
+so that the board executes the particular U-Boot binary to be tested. Often,
+this involves writing the U-Boot binary to the board's flash ROM. The test
+suite calls this hook script for that purpose.
+
+This script should perform the entire flashing process synchronously; the
+script should only exit once flashing is complete, and a board reset will
+cause the newly flashed U-Boot binary to be executed.
+
+It is conceivable that this script will do nothing. This might be useful in
+the following cases:
+
+- Some other process has already written the desired U-Boot binary into the
+ board's flash prior to running the test suite.
+- The board allows U-Boot to be downloaded directly into RAM, and executed
+ from there. Use of this feature will reduce wear on the board's flash, so
+ may be preferable if available, and if cold boot testing of U-Boot is not
+ required. If this feature is used, the `u-boot-test-reset` script should
+ perform this download, since the board could conceivably be reset multiple
+ times in a single test run.
+
+It is up to the user to determine if those situations exist, and to code this
+hook script appropriately.
+
+This script will typically be implemented by calling out to some SoC- or
+board-specific vendor flashing utility.
+
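+For instance, a hypothetical script might look like this ("vendor-flash-tool"
+and its arguments are placeholders for whatever your board requires):
+
+```bash
+#!/bin/sh
+# Hypothetical u-boot-test-flash hook: write the freshly built binary to the
+# board, exiting only once flashing has completed.
+exec vendor-flash-tool --board "${UBOOT_BOARD_IDENTITY}" \
+    --image "${UBOOT_BUILD_DIR}/u-boot.bin"
+```
+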
+#### `u-boot-test-reset`
+
+Whenever the test suite needs to reset the target board, this script is
+executed. This is guaranteed to happen at least once, prior to executing the
+first test function. If any test fails, the test infrastructure will execute
+this script again to restore U-Boot to an operational state before running the
+next test function.
+
+This script will likely be implemented by communicating with some form of
+relay or electronic switch attached to the board's reset signal.
+
+The semantics of this script require that when it is executed, U-Boot will
+start running from scratch. If the U-Boot binary to be tested has been written
+to flash, pulsing the board's reset signal is likely all this script need do.
+However, in some scenarios, this script may perform other actions. For
+example, it may call out to some SoC- or board-specific vendor utility in order
+to download the U-Boot binary directly into RAM and execute it. This would
+avoid the need for `u-boot-test-flash` to actually write U-Boot to flash, thus
+saving wear on the flash chip(s).
+
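+A hypothetical sketch, assuming a site-specific `relay-ctl` utility that can
+pulse the board's reset line:
+
+```bash
+#!/bin/sh
+# Hypothetical u-boot-test-reset hook: pulse the reset signal so that U-Boot
+# starts running from scratch.
+exec relay-ctl --board "${UBOOT_BOARD_IDENTITY}" pulse reset
+```
+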
+#### Examples
+
+https://github.com/swarren/uboot-test-hooks contains some working example hook
+scripts, and may be useful as a reference when implementing hook scripts for
+your platform. These scripts are not considered part of U-Boot itself.
+
+### Board-type-specific configuration
+
+Each board has a different configuration and behaviour. Many of these
+differences can be automatically detected by parsing the `.config` file in the
+build directory. However, some differences can't yet be handled automatically.
+
+For each board, an optional Python module `u_boot_board_${board_type}` may exist
+to provide board-specific information to the test script. Any global value
+defined in these modules is available for use by any test function. The data
+contained in these scripts must be purely derived from U-Boot source code.
+Hence, these configuration files are part of the U-Boot source tree too.
+
+### Execution environment configuration
+
+Each user's hardware setup may enable testing different subsets of the features
+implemented by a particular board's configuration of U-Boot. For example, a
+U-Boot configuration may support USB device mode and USB Mass Storage, but this
+can only be tested if a USB cable is connected between the board and the host
+machine running the test script.
+
+For each board, optional Python modules `u_boot_boardenv_${board_type}` and
+`u_boot_boardenv_${board_type}_${board_identity}` may exist to provide
+board-specific and board-identity-specific information to the test script. Any
+global value defined in these modules is available for use by any test
+function. The data contained in these is specific to a particular user's
+hardware configuration. Hence, these configuration files are not part of the
+U-Boot source tree, and should be installed outside of the source tree. Users
+should set `$PYTHONPATH` prior to running the test script to allow these
+modules to be loaded.
+
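+As an illustrative sketch (the variable names below are hypothetical, not
+values any real test looks up), such a module might contain:
+
+```python
+# Hypothetical u_boot_boardenv_seaboard.py. A test declaring a fixture
+# parameter named "env__example_device" runs once per entry in this list;
+# "fixture_id" names each parametrized instance (see conftest.py).
+env__example_devices = [
+    {'fixture_id': 'usb0', 'devnum': 0},
+    {'fixture_id': 'usb1', 'devnum': 1},
+]
+```
+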
+### Board module parameter usage
+
+The test scripts rely on the following variables being defined by the board
+module:
+
+- None at present.
+
+### U-Boot `.config` feature usage
+
+The test scripts rely on various U-Boot `.config` features, either directly in
+order to test those features, or indirectly in order to query information from
+the running U-Boot instance in order to test other features.
+
+One example is that testing of the `md` command requires knowledge of a RAM
+address to use for the test. This data is parsed from the output of the
+`bdinfo` command, and hence relies on CONFIG_CMD_BDI being enabled.
+
+For a complete list of dependencies, please search the test scripts for
+instances of:
+
+- `buildconfig.get(...`
+- `@pytest.mark.buildconfigspec(...`
+- `@pytest.mark.notbuildconfigspec(...`
+
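+As a sketch of how such a dependency is declared (the test body below is a
+placeholder, not a real test):
+
+```python
+import pytest
+
+# Hypothetical example: skip this test unless CONFIG_CMD_BDI is enabled in the
+# build being tested. The marker argument is the Kconfig name without the
+# CONFIG_ prefix.
+@pytest.mark.buildconfigspec('cmd_bdi')
+def test_example_bdinfo(u_boot_console):
+    response = u_boot_console.run_command('bdinfo')
+    assert response  # placeholder assertion
+```
+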
+### Complete invocation example
+
+Assuming that you have installed the hook scripts into $HOME/ubtest/bin, and
+any required environment configuration Python modules into $HOME/ubtest/py,
+then you would likely invoke the test script as follows:
+
+If U-Boot has already been built:
+
+```bash
+PATH=$HOME/ubtest/bin:$PATH \
+ PYTHONPATH=${HOME}/ubtest/py/${HOSTNAME}:${PYTHONPATH} \
+ ./test/py/test.py --bd seaboard
+```
+
+If you want the test script to compile U-Boot for you too, then you likely
+need to set `$CROSS_COMPILE` to allow this, and invoke the test script as
+follows:
+
+```bash
+CROSS_COMPILE=arm-none-eabi- \
+ PATH=$HOME/ubtest/bin:$PATH \
+ PYTHONPATH=${HOME}/ubtest/py/${HOSTNAME}:${PYTHONPATH} \
+ ./test/py/test.py --bd seaboard --build
+```
+
+## Writing tests
+
+Please refer to the pytest documentation for details of writing pytest tests.
+Details specific to the U-Boot test suite are described below.
+
+A test fixture named `u_boot_console` should be used by each test function. This
+provides the means to interact with the U-Boot console, and retrieve board and
+environment configuration information.
+
+The function `u_boot_console.run_command()` executes a shell command on the
+U-Boot console, and returns all output from that command. This allows
+validation or interpretation of the command output. This function validates
+that certain strings are not seen on the U-Boot console. These include shell
+error messages and the U-Boot sign-on message (in order to detect unexpected
+board resets). See the source of `u_boot_console_base.py` for a complete list of
+"bad" strings. Some test scenarios are expected to trigger these strings. Use
+`u_boot_console.disable_check()` to temporarily disable checking for specific
+strings. See `test_unknown_cmd.py` for an example.
+
+Board- and board-environment configuration values may be accessed as sub-fields
+of the `u_boot_console.config` object, for example
+`u_boot_console.config.ram_base`.
+
+Build configuration values (from `.config`) may be accessed via the dictionary
+`u_boot_console.config.buildconfig`, with keys equal to the Kconfig variable
+names.
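+
+Putting this together, a minimal hypothetical test might look like the
+following (the command and expected output are illustrative only):
+
+```python
+def test_example_echo(u_boot_console):
+    """Check that the U-Boot shell echoes back a literal string."""
+    response = u_boot_console.run_command('echo hello')
+    assert 'hello' in response
+```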
diff --git a/test/py/conftest.py b/test/py/conftest.py
new file mode 100644
index 00000000..bffee6b8
--- /dev/null
+++ b/test/py/conftest.py
@@ -0,0 +1,596 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Implementation of pytest run-time hook functions. These are invoked by
+# pytest at certain points during operation, e.g. startup, for each executed
+# test, at shutdown etc. These hooks perform functions such as:
+# - Parsing custom command-line options.
+# - Pulling in user-specified board configuration.
+# - Creating the U-Boot console test fixture.
+# - Creating the HTML log file.
+# - Monitoring each test's results.
+# - Implementing custom pytest markers.
+
+import atexit
+import configparser
+import errno
+import io
+import os
+import os.path
+import pytest
+import re
+from _pytest.runner import runtestprotocol
+import sys
+
+# Globals: The HTML log file, and the connection to the U-Boot console.
+log = None
+console = None
+
+def mkdir_p(path):
+ """Create a directory path.
+
+    This includes creating any intermediate/parent directories. Errors caused
+    by directories that already exist are ignored.
+
+ Args:
+ path: The directory path to create.
+
+ Returns:
+ Nothing.
+ """
+
+ try:
+ os.makedirs(path)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST and os.path.isdir(path):
+ pass
+ else:
+ raise
+
+def pytest_addoption(parser):
+ """pytest hook: Add custom command-line options to the cmdline parser.
+
+ Args:
+ parser: The pytest command-line parser.
+
+ Returns:
+ Nothing.
+ """
+
+ parser.addoption('--build-dir', default=None,
+ help='U-Boot build directory (O=)')
+ parser.addoption('--result-dir', default=None,
+ help='U-Boot test result/tmp directory')
+ parser.addoption('--persistent-data-dir', default=None,
+ help='U-Boot test persistent generated data directory')
+ parser.addoption('--board-type', '--bd', '-B', default='sandbox',
+ help='U-Boot board type')
+ parser.addoption('--board-identity', '--id', default='na',
+ help='U-Boot board identity/instance')
+ parser.addoption('--build', default=False, action='store_true',
+ help='Compile U-Boot before running tests')
+ parser.addoption('--gdbserver', default=None,
+ help='Run sandbox under gdbserver. The argument is the channel '+
+ 'over which gdbserver should communicate, e.g. localhost:1234')
+
+def pytest_configure(config):
+ """pytest hook: Perform custom initialization at startup time.
+
+ Args:
+ config: The pytest configuration.
+
+ Returns:
+ Nothing.
+ """
+
+ global log
+ global console
+ global ubconfig
+
+ test_py_dir = os.path.dirname(os.path.abspath(__file__))
+ source_dir = os.path.dirname(os.path.dirname(test_py_dir))
+
+ board_type = config.getoption('board_type')
+ board_type_filename = board_type.replace('-', '_')
+
+ board_identity = config.getoption('board_identity')
+ board_identity_filename = board_identity.replace('-', '_')
+
+ build_dir = config.getoption('build_dir')
+ if not build_dir:
+ build_dir = source_dir + '/build-' + board_type
+ mkdir_p(build_dir)
+
+ result_dir = config.getoption('result_dir')
+ if not result_dir:
+ result_dir = build_dir
+ mkdir_p(result_dir)
+
+ persistent_data_dir = config.getoption('persistent_data_dir')
+ if not persistent_data_dir:
+ persistent_data_dir = build_dir + '/persistent-data'
+ mkdir_p(persistent_data_dir)
+
+ gdbserver = config.getoption('gdbserver')
+ if gdbserver and not board_type.startswith('sandbox'):
+ raise Exception('--gdbserver only supported with sandbox targets')
+
+ import multiplexed_log
+ log = multiplexed_log.Logfile(result_dir + '/test-log.html')
+
+ if config.getoption('build'):
+ if build_dir != source_dir:
+ o_opt = 'O=%s' % build_dir
+ else:
+ o_opt = ''
+ cmds = (
+ ['make', o_opt, '-s', board_type + '_defconfig'],
+ ['make', o_opt, '-s', '-j8'],
+ )
+ with log.section('make'):
+ runner = log.get_runner('make', sys.stdout)
+ for cmd in cmds:
+ runner.run(cmd, cwd=source_dir)
+ runner.close()
+ log.status_pass('OK')
+
+ class ArbitraryAttributeContainer(object):
+ pass
+
+ ubconfig = ArbitraryAttributeContainer()
+ ubconfig.brd = dict()
+ ubconfig.env = dict()
+
+ modules = [
+ (ubconfig.brd, 'u_boot_board_' + board_type_filename),
+ (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
+ (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
+ board_identity_filename),
+ ]
+ for (dict_to_fill, module_name) in modules:
+ try:
+ module = __import__(module_name)
+ except ImportError:
+ continue
+ dict_to_fill.update(module.__dict__)
+
+ ubconfig.buildconfig = dict()
+
+ for conf_file in ('.config', 'include/autoconf.mk'):
+ dot_config = build_dir + '/' + conf_file
+ if not os.path.exists(dot_config):
+ raise Exception(conf_file + ' does not exist; ' +
+ 'try passing --build option?')
+
+ with open(dot_config, 'rt') as f:
+ ini_str = '[root]\n' + f.read()
+ ini_sio = io.StringIO(ini_str)
+ parser = configparser.RawConfigParser()
+ parser.read_file(ini_sio)
+ ubconfig.buildconfig.update(parser.items('root'))
+
+ ubconfig.test_py_dir = test_py_dir
+ ubconfig.source_dir = source_dir
+ ubconfig.build_dir = build_dir
+ ubconfig.result_dir = result_dir
+ ubconfig.persistent_data_dir = persistent_data_dir
+ ubconfig.board_type = board_type
+ ubconfig.board_identity = board_identity
+ ubconfig.gdbserver = gdbserver
+ ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'
+
+ env_vars = (
+ 'board_type',
+ 'board_identity',
+ 'source_dir',
+ 'test_py_dir',
+ 'build_dir',
+ 'result_dir',
+ 'persistent_data_dir',
+ )
+ for v in env_vars:
+ os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)
+
+ if board_type.startswith('sandbox'):
+ import u_boot_console_sandbox
+ console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
+ else:
+ import u_boot_console_exec_attach
+ console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
+
+re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
+def generate_ut_subtest(metafunc, fixture_name):
+ """Provide parametrization for a ut_subtest fixture.
+
+ Determines the set of unit tests built into a U-Boot binary by parsing the
+ list of symbols generated by the build process. Provides this information
+ to test functions by parameterizing their ut_subtest fixture parameter.
+
+ Args:
+ metafunc: The pytest test function.
+ fixture_name: The fixture name to test.
+
+ Returns:
+ Nothing.
+ """
+
+ fn = console.config.build_dir + '/u-boot.sym'
+ try:
+ with open(fn, 'rt') as f:
+ lines = f.readlines()
+ except:
+ lines = []
+ lines.sort()
+
+ vals = []
+ for l in lines:
+ m = re_ut_test_list.search(l)
+ if not m:
+ continue
+ vals.append(m.group(1) + ' ' + m.group(2))
+
+ ids = ['ut_' + s.replace(' ', '_') for s in vals]
+ metafunc.parametrize(fixture_name, vals, ids=ids)
+
+def generate_config(metafunc, fixture_name):
+ """Provide parametrization for {env,brd}__ fixtures.
+
+ If a test function takes parameter(s) (fixture names) of the form brd__xxx
+ or env__xxx, the brd and env configuration dictionaries are consulted to
+ find the list of values to use for those parameters, and the test is
+ parametrized so that it runs once for each combination of values.
+
+ Args:
+ metafunc: The pytest test function.
+ fixture_name: The fixture name to test.
+
+ Returns:
+ Nothing.
+ """
+
+ subconfigs = {
+ 'brd': console.config.brd,
+ 'env': console.config.env,
+ }
+ parts = fixture_name.split('__')
+ if len(parts) < 2:
+ return
+ if parts[0] not in subconfigs:
+ return
+ subconfig = subconfigs[parts[0]]
+ vals = []
+ val = subconfig.get(fixture_name, [])
+ # If that exact name is a key in the data source:
+ if val:
+ # ... use the dict value as a single parameter value.
+ vals = (val, )
+ else:
+ # ... otherwise, see if there's a key that contains a list of
+ # values to use instead.
+        vals = subconfig.get(fixture_name + 's', [])
+ def fixture_id(index, val):
+ try:
+ return val['fixture_id']
+ except:
+ return fixture_name + str(index)
+ ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
+ metafunc.parametrize(fixture_name, vals, ids=ids)
+
+def pytest_generate_tests(metafunc):
+ """pytest hook: parameterize test functions based on custom rules.
+
+ Check each test function parameter (fixture name) to see if it is one of
+ our custom names, and if so, provide the correct parametrization for that
+ parameter.
+
+ Args:
+ metafunc: The pytest test function.
+
+ Returns:
+ Nothing.
+ """
+
+ for fn in metafunc.fixturenames:
+ if fn == 'ut_subtest':
+ generate_ut_subtest(metafunc, fn)
+ continue
+ generate_config(metafunc, fn)
+
+@pytest.fixture(scope='session')
+def u_boot_log(request):
+ """Generate the value of a test's log fixture.
+
+ Args:
+ request: The pytest request.
+
+ Returns:
+ The fixture value.
+ """
+
+ return console.log
+
+@pytest.fixture(scope='session')
+def u_boot_config(request):
+ """Generate the value of a test's u_boot_config fixture.
+
+ Args:
+ request: The pytest request.
+
+ Returns:
+ The fixture value.
+ """
+
+ return console.config
+
+@pytest.fixture(scope='function')
+def u_boot_console(request):
+ """Generate the value of a test's u_boot_console fixture.
+
+ Args:
+ request: The pytest request.
+
+ Returns:
+ The fixture value.
+ """
+
+ console.ensure_spawned()
+ return console
+
+anchors = {}
+tests_not_run = []
+tests_failed = []
+tests_xpassed = []
+tests_xfailed = []
+tests_skipped = []
+tests_warning = []
+tests_passed = []
+
+def pytest_itemcollected(item):
+ """pytest hook: Called once for each test found during collection.
+
+ This enables our custom result analysis code to see the list of all tests
+ that should eventually be run.
+
+ Args:
+ item: The item that was collected.
+
+ Returns:
+ Nothing.
+ """
+
+ tests_not_run.append(item.name)
+
+def cleanup():
+ """Clean up all global state.
+
+ Executed (via atexit) once the entire test process is complete. This
+ includes logging the status of all tests, and the identity of any failed
+ or skipped tests.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ if console:
+ console.close()
+ if log:
+ with log.section('Status Report', 'status_report'):
+ log.status_pass('%d passed' % len(tests_passed))
+ if tests_warning:
+ log.status_warning('%d passed with warning' % len(tests_warning))
+ for test in tests_warning:
+ anchor = anchors.get(test, None)
+ log.status_warning('... ' + test, anchor)
+ if tests_skipped:
+ log.status_skipped('%d skipped' % len(tests_skipped))
+ for test in tests_skipped:
+ anchor = anchors.get(test, None)
+ log.status_skipped('... ' + test, anchor)
+ if tests_xpassed:
+ log.status_xpass('%d xpass' % len(tests_xpassed))
+ for test in tests_xpassed:
+ anchor = anchors.get(test, None)
+ log.status_xpass('... ' + test, anchor)
+ if tests_xfailed:
+ log.status_xfail('%d xfail' % len(tests_xfailed))
+ for test in tests_xfailed:
+ anchor = anchors.get(test, None)
+ log.status_xfail('... ' + test, anchor)
+ if tests_failed:
+ log.status_fail('%d failed' % len(tests_failed))
+ for test in tests_failed:
+ anchor = anchors.get(test, None)
+ log.status_fail('... ' + test, anchor)
+ if tests_not_run:
+ log.status_fail('%d not run' % len(tests_not_run))
+ for test in tests_not_run:
+ anchor = anchors.get(test, None)
+ log.status_fail('... ' + test, anchor)
+ log.close()
+atexit.register(cleanup)
+
+def setup_boardspec(item):
+ """Process any 'boardspec' marker for a test.
+
+ Such a marker lists the set of board types that a test does/doesn't
+ support. If tests are being executed on an unsupported board, the test is
+ marked to be skipped.
+
+ Args:
+ item: The pytest test item.
+
+ Returns:
+ Nothing.
+ """
+
+ required_boards = []
+ for boards in item.iter_markers('boardspec'):
+ board = boards.args[0]
+ if board.startswith('!'):
+ if ubconfig.board_type == board[1:]:
+ pytest.skip('board "%s" not supported' % ubconfig.board_type)
+ return
+ else:
+ required_boards.append(board)
+ if required_boards and ubconfig.board_type not in required_boards:
+ pytest.skip('board "%s" not supported' % ubconfig.board_type)
+
+def setup_buildconfigspec(item):
+ """Process any 'buildconfigspec' marker for a test.
+
+ Such a marker lists some U-Boot configuration feature that the test
+    requires. If tests are being executed on a U-Boot build that doesn't
+ have the required feature, the test is marked to be skipped.
+
+ Args:
+ item: The pytest test item.
+
+ Returns:
+ Nothing.
+ """
+
+ for options in item.iter_markers('buildconfigspec'):
+ option = options.args[0]
+ if not ubconfig.buildconfig.get('config_' + option.lower(), None):
+ pytest.skip('.config feature "%s" not enabled' % option.lower())
+    for options in item.iter_markers('notbuildconfigspec'):
+ option = options.args[0]
+ if ubconfig.buildconfig.get('config_' + option.lower(), None):
+ pytest.skip('.config feature "%s" enabled' % option.lower())
+
+def tool_is_in_path(tool):
+ for path in os.environ["PATH"].split(os.pathsep):
+ fn = os.path.join(path, tool)
+ if os.path.isfile(fn) and os.access(fn, os.X_OK):
+ return True
+ return False
+
+def setup_requiredtool(item):
+ """Process any 'requiredtool' marker for a test.
+
+ Such a marker lists some external tool (binary, executable, application)
+ that the test requires. If tests are being executed on a system that
+ doesn't have the required tool, the test is marked to be skipped.
+
+ Args:
+ item: The pytest test item.
+
+ Returns:
+ Nothing.
+ """
+
+ for tools in item.iter_markers('requiredtool'):
+ tool = tools.args[0]
+ if not tool_is_in_path(tool):
+ pytest.skip('tool "%s" not in $PATH' % tool)
+
+def start_test_section(item):
+ anchors[item.name] = log.start_section(item.name)
+
+def pytest_runtest_setup(item):
+ """pytest hook: Configure (set up) a test item.
+
+ Called once for each test to perform any custom configuration. This hook
+ is used to skip the test if certain conditions apply.
+
+ Args:
+ item: The pytest test item.
+
+ Returns:
+ Nothing.
+ """
+
+ start_test_section(item)
+ setup_boardspec(item)
+ setup_buildconfigspec(item)
+ setup_requiredtool(item)
+
+def pytest_runtest_protocol(item, nextitem):
+ """pytest hook: Called to execute a test.
+
+ This hook wraps the standard pytest runtestprotocol() function in order
+ to acquire visibility into, and record, each test function's result.
+
+ Args:
+ item: The pytest test item to execute.
+ nextitem: The pytest test item that will be executed after this one.
+
+ Returns:
+ A list of pytest reports (test result data).
+ """
+
+ log.get_and_reset_warning()
+ reports = runtestprotocol(item, nextitem=nextitem)
+ was_warning = log.get_and_reset_warning()
+
+ # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
+ # the test is skipped. That call is required to create the test's section
+ # in the log file. The call to log.end_section() requires that the log
+ # contain a section for this test. Create a section for the test if it
+ # doesn't already exist.
+ if not item.name in anchors:
+ start_test_section(item)
+
+ failure_cleanup = False
+ if not was_warning:
+ test_list = tests_passed
+ msg = 'OK'
+ msg_log = log.status_pass
+ else:
+ test_list = tests_warning
+ msg = 'OK (with warning)'
+ msg_log = log.status_warning
+ for report in reports:
+ if report.outcome == 'failed':
+ if hasattr(report, 'wasxfail'):
+ test_list = tests_xpassed
+ msg = 'XPASSED'
+ msg_log = log.status_xpass
+ else:
+ failure_cleanup = True
+ test_list = tests_failed
+ msg = 'FAILED:\n' + str(report.longrepr)
+ msg_log = log.status_fail
+ break
+ if report.outcome == 'skipped':
+ if hasattr(report, 'wasxfail'):
+ failure_cleanup = True
+ test_list = tests_xfailed
+ msg = 'XFAILED:\n' + str(report.longrepr)
+ msg_log = log.status_xfail
+ break
+ test_list = tests_skipped
+ msg = 'SKIPPED:\n' + str(report.longrepr)
+ msg_log = log.status_skipped
+
+ if failure_cleanup:
+ console.drain_console()
+
+ test_list.append(item.name)
+ tests_not_run.remove(item.name)
+
+ try:
+ msg_log(msg)
+ except:
+ # If something went wrong with logging, it's better to let the test
+ # process continue, which may report other exceptions that triggered
+ # the logging issue (e.g. console.log wasn't created). Hence, just
+ # squash the exception. If the test setup failed due to e.g. syntax
+ # error somewhere else, this won't be seen. However, once that issue
+ # is fixed, if this exception still exists, it will then be logged as
+ # part of the test's stdout.
+ import traceback
+ print('Exception occurred while logging runtest status:')
+ traceback.print_exc()
+ # FIXME: Can we force a test failure here?
+
+ log.end_section(item.name)
+
+ if failure_cleanup:
+ console.cleanup_spawn()
+
+ return reports
diff --git a/test/py/multiplexed_log.css b/test/py/multiplexed_log.css
new file mode 100644
index 00000000..3db99272
--- /dev/null
+++ b/test/py/multiplexed_log.css
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015 Stephen Warren
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ */
+
+/*
+ * This provides pretty formatting of the HTML log file, e.g.
+ * - colored bars beside/above log sections for easily parsed delineation.
+ * - color highlighting of various messages.
+ */
+
+body {
+ background-color: black;
+ color: #ffffff;
+}
+
+pre {
+ margin-top: 0px;
+ margin-bottom: 0px;
+}
+
+.implicit {
+ color: #808080;
+}
+
+.block {
+ border-style: solid;
+ border-color: #303030;
+ border-width: 0px 0px 0px 5px;
+ padding-left: 5px
+}
+
+.block-header {
+ background-color: #303030;
+ margin-left: -5px;
+ margin-top: 5px;
+}
+
+.block-header:hover {
+ text-decoration: underline;
+}
+
+.block-trailer {
+ display: none;
+}
+
+.error {
+ color: #ff0000
+}
+
+.warning {
+ color: #ffff00
+}
+
+.info {
+ color: #808080
+}
+
+.action {
+ color: #8080ff
+}
+
+.timestamp {
+ color: #8080ff
+}
+
+.status-pass {
+ color: #00ff00
+}
+
+.status-warning {
+ color: #ffff00
+}
+
+.status-skipped {
+ color: #ffff00
+}
+
+.status-xfail {
+ color: #ff7f00
+}
+
+.status-xpass {
+ color: #ff7f00
+}
+
+.status-fail {
+ color: #ff0000
+}
+
+.hidden {
+ display: none;
+}
+
+a:link {
+ text-decoration: inherit;
+ color: inherit;
+}
+
+a:visited {
+ text-decoration: inherit;
+ color: inherit;
+}
+
+a:hover {
+ text-decoration: underline;
+}
diff --git a/test/py/multiplexed_log.py b/test/py/multiplexed_log.py
new file mode 100644
index 00000000..545a7743
--- /dev/null
+++ b/test/py/multiplexed_log.py
@@ -0,0 +1,709 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Generate an HTML-formatted log file containing multiple streams of data,
+# each represented in a well-delineated/-structured fashion.
+
+import datetime
+import html
+import os.path
+import shutil
+import subprocess
+
+mod_dir = os.path.dirname(os.path.abspath(__file__))
+
+class LogfileStream(object):
+ """A file-like object used to write a single logical stream of data into
+ a multiplexed log file. Objects of this type should be created by factory
+ functions in the Logfile class rather than directly."""
+
+ def __init__(self, logfile, name, chained_file):
+ """Initialize a new object.
+
+ Args:
+ logfile: The Logfile object to log to.
+ name: The name of this log stream.
+ chained_file: The file-like object to which all stream data should be
+ logged to in addition to logfile. Can be None.
+
+ Returns:
+ Nothing.
+ """
+
+ self.logfile = logfile
+ self.name = name
+ self.chained_file = chained_file
+
+ def close(self):
+ """Dummy function so that this class is "file-like".
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ pass
+
+ def write(self, data, implicit=False):
+ """Write data to the log stream.
+
+ Args:
+ data: The data to write to the file.
+ implicit: Boolean indicating whether data actually appeared in the
+ stream, or was implicitly generated. A valid use-case is to
+ repeat a shell prompt at the start of each separate log
+ section, which makes the log sections more readable in
+ isolation.
+
+ Returns:
+ Nothing.
+ """
+
+ self.logfile.write(self, data, implicit)
+ if self.chained_file:
+ # Chained file is console, convert things a little
+ self.chained_file.write((data.encode('ascii', 'replace')).decode())
+
+ def flush(self):
+ """Flush the log stream, to ensure correct log interleaving.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ self.logfile.flush()
+ if self.chained_file:
+ self.chained_file.flush()
+
+class RunAndLog(object):
+ """A utility object used to execute sub-processes and log their output to
+ a multiplexed log file. Objects of this type should be created by factory
+ functions in the Logfile class rather than directly."""
+
+ def __init__(self, logfile, name, chained_file):
+ """Initialize a new object.
+
+ Args:
+ logfile: The Logfile object to log to.
+ name: The name of this log stream or sub-process.
+ chained_file: The file-like object to which all stream data should
+ be logged to in addition to logfile. Can be None.
+
+ Returns:
+ Nothing.
+ """
+
+ self.logfile = logfile
+ self.name = name
+ self.chained_file = chained_file
+ self.output = None
+ self.exit_status = None
+
+ def close(self):
+ """Clean up any resources managed by this object."""
+ pass
+
+ def run(self, cmd, cwd=None, ignore_errors=False):
+ """Run a command as a sub-process, and log the results.
+
+ The output is available at self.output which can be useful if there is
+ an exception.
+
+ Args:
+ cmd: The command to execute.
+ cwd: The directory to run the command in. Can be None to use the
+ current directory.
+ ignore_errors: Indicate whether to ignore errors. If True, the
+ function will simply return if the command cannot be executed
+ or exits with an error code, otherwise an exception will be
+ raised if such problems occur.
+
+ Returns:
+ The output as a string.
+ """
+
+ msg = '+' + ' '.join(cmd) + '\n'
+ if self.chained_file:
+ self.chained_file.write(msg)
+ self.logfile.write(self, msg)
+
+ try:
+ p = subprocess.Popen(cmd, cwd=cwd,
+ stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ (stdout, stderr) = p.communicate()
+ if stdout is not None:
+ stdout = stdout.decode('utf-8')
+ if stderr is not None:
+ stderr = stderr.decode('utf-8')
+ output = ''
+ if stdout:
+ if stderr:
+ output += 'stdout:\n'
+ output += stdout
+ if stderr:
+ if stdout:
+ output += 'stderr:\n'
+ output += stderr
+ exit_status = p.returncode
+ exception = None
+ except subprocess.CalledProcessError as cpe:
+ output = cpe.output
+ exit_status = cpe.returncode
+ exception = cpe
+ except Exception as e:
+ output = ''
+ exit_status = 0
+ exception = e
+ if output and not output.endswith('\n'):
+ output += '\n'
+ if exit_status and not exception and not ignore_errors:
+ exception = Exception('Exit code: ' + str(exit_status))
+ if exception:
+ output += str(exception) + '\n'
+ self.logfile.write(self, output)
+ if self.chained_file:
+ self.chained_file.write(output)
+ self.logfile.timestamp()
+
+ # Store the output so it can be accessed if we raise an exception.
+ self.output = output
+ self.exit_status = exit_status
+ if exception:
+ raise exception
+ return output
+
+class SectionCtxMgr(object):
+ """A context manager for Python's "with" statement, which allows a certain
+ portion of test code to be logged to a separate section of the log file.
+ Objects of this type should be created by factory functions in the Logfile
+ class rather than directly."""
+
+ def __init__(self, log, marker, anchor):
+ """Initialize a new object.
+
+ Args:
+ log: The Logfile object to log to.
+ marker: The name of the nested log section.
+ anchor: The anchor value to pass to start_section().
+
+ Returns:
+ Nothing.
+ """
+
+ self.log = log
+ self.marker = marker
+ self.anchor = anchor
+
+ def __enter__(self):
+ self.anchor = self.log.start_section(self.marker, self.anchor)
+
+ def __exit__(self, extype, value, traceback):
+ self.log.end_section(self.marker)
+
+class Logfile(object):
+ """Generates an HTML-formatted log file containing multiple streams of
+ data, each represented in a well-delineated/-structured fashion."""
+
+ def __init__(self, fn):
+ """Initialize a new object.
+
+ Args:
+ fn: The filename to write to.
+
+ Returns:
+ Nothing.
+ """
+
+ self.f = open(fn, 'wt', encoding='utf-8')
+ self.last_stream = None
+ self.blocks = []
+ self.cur_evt = 1
+ self.anchor = 0
+ self.timestamp_start = self._get_time()
+ self.timestamp_prev = self.timestamp_start
+ self.timestamp_blocks = []
+ self.seen_warning = False
+
+ shutil.copy(mod_dir + '/multiplexed_log.css', os.path.dirname(fn))
+ self.f.write('''\
+<html>
+<head>
+<link rel="stylesheet" type="text/css" href="multiplexed_log.css">
+<script src="http://code.jquery.com/jquery.min.js"></script>
+<script>
+$(document).ready(function () {
+ // Copy status report HTML to start of log for easy access
+ sts = $(".block#status_report")[0].outerHTML;
+ $("tt").prepend(sts);
+
+ // Add expand/contract buttons to all block headers
+ btns = "<span class=\\\"block-expand hidden\\\">[+] </span>" +
+ "<span class=\\\"block-contract\\\">[-] </span>";
+ $(".block-header").prepend(btns);
+
+ // Pre-contract all blocks which passed, leaving only problem cases
+ // expanded, to highlight issues the user should look at.
+ // Only top-level blocks (sections) should have any status
+ passed_bcs = $(".block-content:has(.status-pass)");
+ // Some blocks might have multiple status entries (e.g. the status
+ // report), so take care not to hide blocks with partial success.
+ passed_bcs = passed_bcs.not(":has(.status-fail)");
+ passed_bcs = passed_bcs.not(":has(.status-xfail)");
+ passed_bcs = passed_bcs.not(":has(.status-xpass)");
+ passed_bcs = passed_bcs.not(":has(.status-skipped)");
+ passed_bcs = passed_bcs.not(":has(.status-warning)");
+ // Hide the passed blocks
+ passed_bcs.addClass("hidden");
+ // Flip the expand/contract button hiding for those blocks.
+ bhs = passed_bcs.parent().children(".block-header")
+ bhs.children(".block-expand").removeClass("hidden");
+ bhs.children(".block-contract").addClass("hidden");
+
+ // Add click handler to block headers.
+ // The handler expands/contracts the block.
+ $(".block-header").on("click", function (e) {
+ var header = $(this);
+ var content = header.next(".block-content");
+ var expanded = !content.hasClass("hidden");
+ if (expanded) {
+ content.addClass("hidden");
+ header.children(".block-expand").first().removeClass("hidden");
+ header.children(".block-contract").first().addClass("hidden");
+ } else {
+ header.children(".block-contract").first().removeClass("hidden");
+ header.children(".block-expand").first().addClass("hidden");
+ content.removeClass("hidden");
+ }
+ });
+
+ // When clicking on a link, expand the target block
+ $("a").on("click", function (e) {
+ var block = $($(this).attr("href"));
+ var header = block.children(".block-header");
+ var content = block.children(".block-content").first();
+ header.children(".block-contract").first().removeClass("hidden");
+ header.children(".block-expand").first().addClass("hidden");
+ content.removeClass("hidden");
+ });
+});
+</script>
+</head>
+<body>
+<tt>
+''')
+
+ def close(self):
+ """Close the log file.
+
+ After calling this function, no more data may be written to the log.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ self.f.write('''\
+</tt>
+</body>
+</html>
+''')
+ self.f.close()
+
+ # The set of characters that should be represented as hexadecimal codes in
+ # the log file.
+ _nonprint = {ord('%')}
+ _nonprint.update({c for c in range(0, 32) if c not in (9, 10)})
+ _nonprint.update({c for c in range(127, 256)})
+
+ def _escape(self, data):
+        """Render data into a form suitable for inclusion in an HTML document.
+
+ This includes HTML-escaping certain characters, and translating
+ control characters to a hexadecimal representation.
+
+ Args:
+ data: The raw string data to be escaped.
+
+ Returns:
+ An escaped version of the data.
+ """
+
+ data = data.replace(chr(13), '')
+ data = ''.join((ord(c) in self._nonprint) and ('%%%02x' % ord(c)) or
+ c for c in data)
+ data = html.escape(data)
+ return data
+
+ def _terminate_stream(self):
+ """Write HTML to the log file to terminate the current stream's data.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ self.cur_evt += 1
+ if not self.last_stream:
+ return
+ self.f.write('</pre>\n')
+ self.f.write('<div class="stream-trailer block-trailer">End stream: ' +
+ self.last_stream.name + '</div>\n')
+ self.f.write('</div>\n')
+ self.f.write('</div>\n')
+ self.last_stream = None
+
+ def _note(self, note_type, msg, anchor=None):
+ """Write a note or one-off message to the log file.
+
+ Args:
+ note_type: The type of note. This must be a value supported by the
+ accompanying multiplexed_log.css.
+ msg: The note/message to log.
+ anchor: Optional internal link target.
+
+ Returns:
+ Nothing.
+ """
+
+ self._terminate_stream()
+ self.f.write('<div class="' + note_type + '">\n')
+ self.f.write('<pre>')
+ if anchor:
+ self.f.write('<a href="#%s">' % anchor)
+ self.f.write(self._escape(msg))
+ if anchor:
+ self.f.write('</a>')
+ self.f.write('\n</pre>\n')
+ self.f.write('</div>\n')
+
+ def start_section(self, marker, anchor=None):
+ """Begin a new nested section in the log file.
+
+ Args:
+ marker: The name of the section that is starting.
+ anchor: The value to use for the anchor. If None, a unique value
+ will be calculated and used
+
+ Returns:
+ Name of the HTML anchor emitted before section.
+ """
+
+ self._terminate_stream()
+ self.blocks.append(marker)
+ self.timestamp_blocks.append(self._get_time())
+ if not anchor:
+ self.anchor += 1
+ anchor = str(self.anchor)
+ blk_path = '/'.join(self.blocks)
+ self.f.write('<div class="section block" id="' + anchor + '">\n')
+ self.f.write('<div class="section-header block-header">Section: ' +
+ blk_path + '</div>\n')
+ self.f.write('<div class="section-content block-content">\n')
+ self.timestamp()
+
+ return anchor
+
+ def end_section(self, marker):
+ """Terminate the current nested section in the log file.
+
+ This function validates proper nesting of start_section() and
+ end_section() calls. If a mismatch is found, an exception is raised.
+
+ Args:
+ marker: The name of the section that is ending.
+
+ Returns:
+ Nothing.
+ """
+
+ if (not self.blocks) or (marker != self.blocks[-1]):
+ raise Exception('Block nesting mismatch: "%s" "%s"' %
+ (marker, '/'.join(self.blocks)))
+ self._terminate_stream()
+ timestamp_now = self._get_time()
+ timestamp_section_start = self.timestamp_blocks.pop()
+ delta_section = timestamp_now - timestamp_section_start
+ self._note("timestamp",
+ "TIME: SINCE-SECTION: " + str(delta_section))
+ blk_path = '/'.join(self.blocks)
+ self.f.write('<div class="section-trailer block-trailer">' +
+ 'End section: ' + blk_path + '</div>\n')
+ self.f.write('</div>\n')
+ self.f.write('</div>\n')
+ self.blocks.pop()
+
+ def section(self, marker, anchor=None):
+ """Create a temporary section in the log file.
+
+ This function creates a context manager for Python's "with" statement,
+ which allows a certain portion of test code to be logged to a separate
+ section of the log file.
+
+ Usage:
+ with log.section("somename"):
+ some test code
+
+ Args:
+ marker: The name of the nested section.
+ anchor: The anchor value to pass to start_section().
+
+ Returns:
+ A context manager object.
+ """
+
+ return SectionCtxMgr(self, marker, anchor)
+
+ def error(self, msg):
+ """Write an error note to the log file.
+
+ Args:
+ msg: A message describing the error.
+
+ Returns:
+ Nothing.
+ """
+
+ self._note("error", msg)
+
+ def warning(self, msg):
+        """Write a warning note to the log file.
+
+ Args:
+ msg: A message describing the warning.
+
+ Returns:
+ Nothing.
+ """
+
+ self.seen_warning = True
+ self._note("warning", msg)
+
+ def get_and_reset_warning(self):
+ """Get and reset the log warning flag.
+
+ Args:
+ None
+
+ Returns:
+ Whether a warning was seen since the last call.
+ """
+
+ ret = self.seen_warning
+ self.seen_warning = False
+ return ret
+
+ def info(self, msg):
+ """Write an informational note to the log file.
+
+ Args:
+ msg: An informational message.
+
+ Returns:
+ Nothing.
+ """
+
+ self._note("info", msg)
+
+ def action(self, msg):
+ """Write an action note to the log file.
+
+ Args:
+ msg: A message describing the action that is being logged.
+
+ Returns:
+ Nothing.
+ """
+
+ self._note("action", msg)
+
+ def _get_time(self):
+ return datetime.datetime.now()
+
+ def timestamp(self):
+ """Write a timestamp to the log file.
+
+ Args:
+ None
+
+ Returns:
+ Nothing.
+ """
+
+ timestamp_now = self._get_time()
+ delta_prev = timestamp_now - self.timestamp_prev
+ delta_start = timestamp_now - self.timestamp_start
+ self.timestamp_prev = timestamp_now
+
+ self._note("timestamp",
+ "TIME: NOW: " + timestamp_now.strftime("%Y/%m/%d %H:%M:%S.%f"))
+ self._note("timestamp",
+ "TIME: SINCE-PREV: " + str(delta_prev))
+ self._note("timestamp",
+ "TIME: SINCE-START: " + str(delta_start))
+
+ def status_pass(self, msg, anchor=None):
+ """Write a note to the log file describing test(s) which passed.
+
+ Args:
+ msg: A message describing the passed test(s).
+ anchor: Optional internal link target.
+
+ Returns:
+ Nothing.
+ """
+
+ self._note("status-pass", msg, anchor)
+
+ def status_warning(self, msg, anchor=None):
+        """Write a note to the log file describing test(s) which warned.
+
+        Args:
+            msg: A message describing the test(s) that passed with a warning.
+ anchor: Optional internal link target.
+
+ Returns:
+ Nothing.
+ """
+
+ self._note("status-warning", msg, anchor)
+
+ def status_skipped(self, msg, anchor=None):
+ """Write a note to the log file describing skipped test(s).
+
+ Args:
+ msg: A message describing the skipped test(s).
+ anchor: Optional internal link target.
+
+ Returns:
+ Nothing.
+ """
+
+ self._note("status-skipped", msg, anchor)
+
+ def status_xfail(self, msg, anchor=None):
+ """Write a note to the log file describing xfailed test(s).
+
+ Args:
+ msg: A message describing the xfailed test(s).
+ anchor: Optional internal link target.
+
+ Returns:
+ Nothing.
+ """
+
+ self._note("status-xfail", msg, anchor)
+
+ def status_xpass(self, msg, anchor=None):
+ """Write a note to the log file describing xpassed test(s).
+
+ Args:
+ msg: A message describing the xpassed test(s).
+ anchor: Optional internal link target.
+
+ Returns:
+ Nothing.
+ """
+
+ self._note("status-xpass", msg, anchor)
+
+ def status_fail(self, msg, anchor=None):
+ """Write a note to the log file describing failed test(s).
+
+ Args:
+ msg: A message describing the failed test(s).
+ anchor: Optional internal link target.
+
+ Returns:
+ Nothing.
+ """
+
+ self._note("status-fail", msg, anchor)
+
+ def get_stream(self, name, chained_file=None):
+ """Create an object to log a single stream's data into the log file.
+
+ This creates a "file-like" object that can be written to in order to
+ write a single stream's data to the log file. The implementation will
+ handle any required interleaving of data (from multiple streams) in
+ the log, in a way that makes it obvious which stream each bit of data
+ came from.
+
+ Args:
+ name: The name of the stream.
+ chained_file: The file-like object to which all stream data should
+ be logged to in addition to this log. Can be None.
+
+ Returns:
+ A file-like object.
+ """
+
+ return LogfileStream(self, name, chained_file)
+
+ def get_runner(self, name, chained_file=None):
+ """Create an object that executes processes and logs their output.
+
+ Args:
+ name: The name of this sub-process.
+ chained_file: The file-like object to which all stream data should
+ be logged to in addition to logfile. Can be None.
+
+ Returns:
+ A RunAndLog object.
+ """
+
+ return RunAndLog(self, name, chained_file)
+
+ def write(self, stream, data, implicit=False):
+ """Write stream data into the log file.
+
+ This function should only be used by instances of LogfileStream or
+ RunAndLog.
+
+ Args:
+ stream: The stream whose data is being logged.
+ data: The data to log.
+ implicit: Boolean indicating whether data actually appeared in the
+ stream, or was implicitly generated. A valid use-case is to
+ repeat a shell prompt at the start of each separate log
+ section, which makes the log sections more readable in
+ isolation.
+
+ Returns:
+ Nothing.
+ """
+
+ if stream != self.last_stream:
+ self._terminate_stream()
+ self.f.write('<div class="stream block">\n')
+ self.f.write('<div class="stream-header block-header">Stream: ' +
+ stream.name + '</div>\n')
+ self.f.write('<div class="stream-content block-content">\n')
+ self.f.write('<pre>')
+ if implicit:
+ self.f.write('<span class="implicit">')
+ self.f.write(self._escape(data))
+ if implicit:
+ self.f.write('</span>')
+ self.last_stream = stream
+
+ def flush(self):
+ """Flush the log stream, to ensure correct log interleaving.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ self.f.flush()
diff --git a/test/py/pytest.ini b/test/py/pytest.ini
new file mode 100644
index 00000000..e93d010f
--- /dev/null
+++ b/test/py/pytest.ini
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Static configuration data for pytest. pytest reads this at startup time.
+
+[pytest]
+markers =
+ boardspec: U-Boot: Describes the set of boards a test can/can't run on.
+ buildconfigspec: U-Boot: Describes Kconfig/config-header constraints.
+ notbuildconfigspec: U-Boot: Describes required disabled Kconfig options.
+ requiredtool: U-Boot: Required host tools for a test.
+ slow: U-Boot: Specific test will run slowly.
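+
+# As an illustration (not part of the configuration itself), a hypothetical
+# test would typically combine these markers on its definition, e.g.:
+#
+#   @pytest.mark.boardspec('sandbox')
+#   @pytest.mark.buildconfigspec('cmd_memory')
+#   @pytest.mark.requiredtool('dtc')
+#   def test_example(u_boot_console):
+#       ...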
diff --git a/test/py/requirements.txt b/test/py/requirements.txt
new file mode 100644
index 00000000..cf251186
--- /dev/null
+++ b/test/py/requirements.txt
@@ -0,0 +1,22 @@
+atomicwrites==1.3.0
+attrs==19.3.0
+coverage==4.5.4
+extras==1.0.0
+fixtures==3.0.0
+importlib-metadata==0.23
+linecache2==1.0.0
+more-itertools==7.2.0
+packaging==19.2
+pbr==5.4.3
+pluggy==0.13.0
+py==1.8.0
+pyparsing==2.4.2
+pytest==5.2.1
+python-mimeparse==1.6.0
+python-subunit==1.3.0
+six==1.12.0
+testtools==2.3.0
+traceback2==1.4.0
+unittest2==1.1.0
+wcwidth==0.1.7
+zipp==0.6.0
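+
+# These pinned versions are typically installed into a virtualenv before
+# running the suite, for example (the venv path is illustrative only):
+#   python3 -m venv /tmp/uboot-test-venv
+#   . /tmp/uboot-test-venv/bin/activate
+#   pip install -r test/py/requirements.txt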
diff --git a/test/py/test.py b/test/py/test.py
new file mode 100755
index 00000000..bee88d96
--- /dev/null
+++ b/test/py/test.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Wrapper script to invoke pytest with the directory name that contains the
+# U-Boot tests.
+
+import os
+import os.path
+import sys
+from pkg_resources import load_entry_point
+
+# argv: py.test test_directory_name user-supplied-arguments
+args = [os.path.dirname(__file__) + '/tests']
+args.extend(sys.argv)
+
+if __name__ == '__main__':
+ sys.exit(load_entry_point('pytest', 'console_scripts', 'pytest')(args))
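+
+# A typical invocation from the top of the U-Boot tree might look like the
+# following sketch (the --bd and --build options are handled by conftest.py;
+# the board name and -k filter are illustrative):
+#
+#   ./test/py/test.py --bd sandbox --build -k test_version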
diff --git a/test/py/tests/test_000_version.py b/test/py/tests/test_000_version.py
new file mode 100644
index 00000000..bd089ab5
--- /dev/null
+++ b/test/py/tests/test_000_version.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+
+# pytest runs tests in the order of their module path, which is related to
+# the name of the file containing the test. This file is named so that it
+# sorts first, as a very basic sanity check of the functionality of the U-Boot
+# command prompt.
+
+def test_version(u_boot_console):
+ """Test that the "version" command prints the U-Boot version."""
+
+ # "version" prints the U-Boot sign-on message. This is usually considered
+ # an error, so that any unexpected reboot causes an error. Here, this
+ # error detection is disabled since the sign-on message is expected.
+ with u_boot_console.disable_check('main_signon'):
+ response = u_boot_console.run_command('version')
+ # Ensure "version" printed what we expected.
+ u_boot_console.validate_version_string_in_text(response)
diff --git a/test/py/tests/test_android/test_ab.py b/test/py/tests/test_android/test_ab.py
new file mode 100644
index 00000000..c79cb07f
--- /dev/null
+++ b/test/py/tests/test_android/test_ab.py
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0
+# (C) Copyright 2018 Texas Instruments, <www.ti.com>
+
+# Test A/B update commands.
+
+import os
+import pytest
+import u_boot_utils
+
+class ABTestDiskImage(object):
+ """Disk Image used by the A/B tests."""
+
+ def __init__(self, u_boot_console):
+ """Initialize a new ABTestDiskImage object.
+
+ Args:
+ u_boot_console: A U-Boot console.
+
+ Returns:
+ Nothing.
+ """
+
+ filename = 'test_ab_disk_image.bin'
+
+ persistent = u_boot_console.config.persistent_data_dir + '/' + filename
+ self.path = u_boot_console.config.result_dir + '/' + filename
+
+ with u_boot_utils.persistent_file_helper(u_boot_console.log, persistent):
+ if os.path.exists(persistent):
+ u_boot_console.log.action('Disk image file ' + persistent +
+ ' already exists')
+ else:
+ u_boot_console.log.action('Generating ' + persistent)
+ fd = os.open(persistent, os.O_RDWR | os.O_CREAT)
+ os.ftruncate(fd, 524288)
+ os.close(fd)
+ cmd = ('sgdisk', persistent)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+
+ cmd = ('sgdisk', '--new=1:64:512', '--change-name=1:misc',
+ persistent)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+ cmd = ('sgdisk', '--load-backup=' + persistent)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+
+ cmd = ('cp', persistent, self.path)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+
+di = None
+@pytest.fixture(scope='function')
+def ab_disk_image(u_boot_console):
+ global di
+ if not di:
+ di = ABTestDiskImage(u_boot_console)
+ return di
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('android_ab')
+@pytest.mark.buildconfigspec('cmd_ab_select')
+@pytest.mark.requiredtool('sgdisk')
+def test_ab(ab_disk_image, u_boot_console):
+ """Test the 'ab_select' command."""
+
+ u_boot_console.run_command('host bind 0 ' + ab_disk_image.path)
+
+ output = u_boot_console.run_command('ab_select slot_name host 0#misc')
+ assert 're-initializing A/B metadata' in output
+ assert 'Attempting slot a, tries remaining 7' in output
+ output = u_boot_console.run_command('printenv slot_name')
+ assert 'slot_name=a' in output
+
+ output = u_boot_console.run_command('ab_select slot_name host 0:1')
+ assert 'Attempting slot b, tries remaining 7' in output
+ output = u_boot_console.run_command('printenv slot_name')
+ assert 'slot_name=b' in output
diff --git a/test/py/tests/test_android/test_avb.py b/test/py/tests/test_android/test_avb.py
new file mode 100644
index 00000000..20ccaf67
--- /dev/null
+++ b/test/py/tests/test_android/test_avb.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2018, Linaro Limited
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Android Verified Boot 2.0 Test
+
+"""
+This tests Android Verified Boot 2.0 support in U-Boot.
+
+For additional details about how to build a proper vbmeta partition,
+see doc/android/avb2.txt.
+
+For configuration verification:
+- Corrupt boot partition and check for failure
+- Corrupt vbmeta partition and check for failure
+"""
+
+import pytest
+import u_boot_utils as util
+
+# default mmc id
+mmc_dev = 1
+temp_addr = 0x90000000
+temp_addr2 = 0x90002000
+
+@pytest.mark.buildconfigspec('cmd_avb')
+@pytest.mark.buildconfigspec('cmd_mmc')
+def test_avb_verify(u_boot_console):
+ """Run AVB 2.0 boot verification chain with avb subset of commands
+ """
+
+ success_str = "Verification passed successfully"
+
+ response = u_boot_console.run_command('avb init %s' %str(mmc_dev))
+ assert response == ''
+ response = u_boot_console.run_command('avb verify')
+ assert success_str in response
+
+
+@pytest.mark.buildconfigspec('cmd_avb')
+@pytest.mark.buildconfigspec('cmd_mmc')
+def test_avb_mmc_uuid(u_boot_console):
+ """Check if 'avb get_uuid' works, compare results with
+ 'part list mmc 1' output
+ """
+
+ response = u_boot_console.run_command('avb init %s' % str(mmc_dev))
+ assert response == ''
+
+ response = u_boot_console.run_command('mmc rescan; mmc dev %s' %
+ str(mmc_dev))
+ assert 'is current device' in response
+
+ part_lines = u_boot_console.run_command('mmc part').splitlines()
+ part_list = {}
+ cur_partname = ''
+
+ for line in part_lines:
+ if '"' in line:
+ start_pt = line.find('"')
+ end_pt = line.find('"', start_pt + 1)
+ cur_partname = line[start_pt + 1: end_pt]
+
+ if 'guid:' in line:
+ guid_to_check = line.split('guid:\t')
+ part_list[cur_partname] = guid_to_check[1]
+
+ # let's check all GUIDs with 'avb get_uuid'
+ for part, guid in part_list.items():
+ avb_guid_resp = u_boot_console.run_command('avb get_uuid %s' % part)
+ assert guid == avb_guid_resp.split('UUID: ')[1]
+
+
+@pytest.mark.buildconfigspec('cmd_avb')
+def test_avb_read_rb(u_boot_console):
+ """Test reading rollback indexes
+ """
+
+ response = u_boot_console.run_command('avb init %s' % str(mmc_dev))
+ assert response == ''
+
+ response = u_boot_console.run_command('avb read_rb 1')
+ assert response == 'Rollback index: 0'
+
+
+@pytest.mark.buildconfigspec('cmd_avb')
+def test_avb_is_unlocked(u_boot_console):
+ """Test if device is in the unlocked state
+ """
+
+ response = u_boot_console.run_command('avb init %s' % str(mmc_dev))
+ assert response == ''
+
+ response = u_boot_console.run_command('avb is_unlocked')
+ assert response == 'Unlocked = 1'
+
+
+@pytest.mark.buildconfigspec('cmd_avb')
+@pytest.mark.buildconfigspec('cmd_mmc')
+def test_avb_mmc_read(u_boot_console):
+ """Test mmc read operation
+ """
+
+ response = u_boot_console.run_command('mmc rescan; mmc dev %s 0' %
+ str(mmc_dev))
+ assert 'is current device' in response
+
+ response = u_boot_console.run_command('mmc read 0x%x 0x100 0x1' % temp_addr)
+ assert 'read: OK' in response
+
+ response = u_boot_console.run_command('avb init %s' % str(mmc_dev))
+ assert response == ''
+
+ response = u_boot_console.run_command('avb read_part xloader 0 100 0x%x' %
+ temp_addr2)
+ assert 'Read 512 bytes' in response
+
+ # Now let's compare the two buffers
+ response = u_boot_console.run_command('cmp 0x%x 0x%x 40' %
+ (temp_addr, temp_addr2))
+ assert '64 word' in response
+
+
+@pytest.mark.buildconfigspec('cmd_avb')
+@pytest.mark.buildconfigspec('optee_ta_avb')
+def test_avb_persistent_values(u_boot_console):
+ """Test reading/writing persistent storage to avb
+ """
+
+ response = u_boot_console.run_command('avb init %s' % str(mmc_dev))
+ assert response == ''
+
+ response = u_boot_console.run_command('avb write_pvalue test value_value')
+ assert response == 'Wrote 12 bytes'
+
+ response = u_boot_console.run_command('avb read_pvalue test 12')
+ assert response == 'Read 12 bytes, value = value_value'
diff --git a/test/py/tests/test_bind.py b/test/py/tests/test_bind.py
new file mode 100644
index 00000000..20c60503
--- /dev/null
+++ b/test/py/tests/test_bind.py
@@ -0,0 +1,179 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+
+import os.path
+import pytest
+import re
+
+def in_tree(response, name, uclass, drv, depth, last_child):
+ lines = [x.strip() for x in response.splitlines()]
+ leaf = ' ' * 4 * depth
+ if not last_child:
+ leaf = leaf + r'\|'
+ else:
+ leaf = leaf + '`'
+ leaf = leaf + '-- ' + name
+ line = (r' *{:10.10} [0-9]* \[ [ +] \] {:20.20} {}$'
+ .format(uclass, drv, leaf))
+ prog = re.compile(line)
+ for l in lines:
+ if prog.match(l):
+ return True
+ return False
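+
+# For reference, the kind of 'dm tree' line the regular expression above is
+# meant to match looks roughly like the following (column widths and the
+# exact values are illustrative, not taken from a real run):
+#
+#  simple_bus    0  [ + ]   generic_simple_bus    `-- bind-test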
+
+
+@pytest.mark.buildconfigspec('cmd_bind')
+def test_bind_unbind_with_node(u_boot_console):
+
+ #bind /bind-test. Device should come up as well as its children
+ response = u_boot_console.run_command('bind /bind-test generic_simple_bus')
+ assert response == ''
+ tree = u_boot_console.run_command('dm tree')
+ assert in_tree(tree, 'bind-test', 'simple_bus', 'generic_simple_bus', 0, True)
+ assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, False)
+ assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'generic_simple_bus', 1, True)
+
+ #Unbind child #1. No error expected and all devices should be there except for bind-test-child1
+ response = u_boot_console.run_command('unbind /bind-test/bind-test-child1')
+ assert response == ''
+ tree = u_boot_console.run_command('dm tree')
+ assert in_tree(tree, 'bind-test', 'simple_bus', 'generic_simple_bus', 0, True)
+ assert 'bind-test-child1' not in tree
+ assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'generic_simple_bus', 1, True)
+
+ #bind child #1. No error expected and all devices should be there
+ response = u_boot_console.run_command('bind /bind-test/bind-test-child1 phy_sandbox')
+ assert response == ''
+ tree = u_boot_console.run_command('dm tree')
+ assert in_tree(tree, 'bind-test', 'simple_bus', 'generic_simple_bus', 0, True)
+ assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, True)
+ assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'generic_simple_bus', 1, False)
+
+ #Unbind child #2. No error expected and all devices should be there except for bind-test-child2
+ response = u_boot_console.run_command('unbind /bind-test/bind-test-child2')
+ assert response == ''
+ tree = u_boot_console.run_command('dm tree')
+ assert in_tree(tree, 'bind-test', 'simple_bus', 'generic_simple_bus', 0, True)
+ assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, True)
+ assert 'bind-test-child2' not in tree
+
+
+ #Bind child #2. No error expected and all devices should be there
+ response = u_boot_console.run_command('bind /bind-test/bind-test-child2 generic_simple_bus')
+ assert response == ''
+ tree = u_boot_console.run_command('dm tree')
+ assert in_tree(tree, 'bind-test', 'simple_bus', 'generic_simple_bus', 0, True)
+ assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, False)
+ assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'generic_simple_bus', 1, True)
+
+ #Unbind parent. No error expected. All devices should be removed and unbound
+ response = u_boot_console.run_command('unbind /bind-test')
+ assert response == ''
+ tree = u_boot_console.run_command('dm tree')
+ assert 'bind-test' not in tree
+ assert 'bind-test-child1' not in tree
+ assert 'bind-test-child2' not in tree
+
+ #try binding invalid node with valid driver
+ response = u_boot_console.run_command('bind /not-a-valid-node generic_simple_bus')
+ assert response != ''
+ tree = u_boot_console.run_command('dm tree')
+ assert 'not-a-valid-node' not in tree
+
+ #try binding valid node with invalid driver
+ response = u_boot_console.run_command('bind /bind-test not_a_driver')
+ assert response != ''
+ tree = u_boot_console.run_command('dm tree')
+ assert 'bind-test' not in tree
+
+ #bind /bind-test. Device should come up as well as its children
+ response = u_boot_console.run_command('bind /bind-test generic_simple_bus')
+ assert response == ''
+ tree = u_boot_console.run_command('dm tree')
+ assert in_tree(tree, 'bind-test', 'simple_bus', 'generic_simple_bus', 0, True)
+ assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, False)
+ assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'generic_simple_bus', 1, True)
+
+ response = u_boot_console.run_command('unbind /bind-test')
+ assert response == ''
+
+def get_next_line(tree, name):
+ treelines = [x.strip() for x in tree.splitlines() if x.strip()]
+ child_line = ''
+ for idx, line in enumerate(treelines):
+ if ('-- ' + name) in line:
+ try:
+ child_line = treelines[idx+1]
+ except:
+ pass
+ break
+ return child_line
+
+@pytest.mark.buildconfigspec('cmd_bind')
+def test_bind_unbind_with_uclass(u_boot_console):
+ #bind /bind-test
+ response = u_boot_console.run_command('bind /bind-test generic_simple_bus')
+ assert response == ''
+
+ #make sure bind-test-child2 is there and get its uclass/index pair
+ tree = u_boot_console.run_command('dm tree')
+ child2_line = [x.strip() for x in tree.splitlines() if '-- bind-test-child2' in x]
+ assert len(child2_line) == 1
+
+ child2_uclass = child2_line[0].split()[0]
+ child2_index = int(child2_line[0].split()[1])
+
+ #bind generic_simple_bus as a child of bind-test-child2
+ response = u_boot_console.run_command('bind {} {} generic_simple_bus'.format(child2_uclass, child2_index))
+
+ #check that the child is there and its uclass/index pair is right
+ tree = u_boot_console.run_command('dm tree')
+
+ child_of_child2_line = get_next_line(tree, 'bind-test-child2')
+ assert child_of_child2_line
+ child_of_child2_index = int(child_of_child2_line.split()[1])
+ assert in_tree(tree, 'generic_simple_bus', 'simple_bus', 'generic_simple_bus', 2, True)
+ assert child_of_child2_index == child2_index + 1
+
+ #unbind the child and check it has been removed
+ response = u_boot_console.run_command('unbind simple_bus {}'.format(child_of_child2_index))
+ assert response == ''
+ tree = u_boot_console.run_command('dm tree')
+ assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'generic_simple_bus', 1, True)
+ assert not in_tree(tree, 'generic_simple_bus', 'simple_bus', 'generic_simple_bus', 2, True)
+ child_of_child2_line = get_next_line(tree, 'bind-test-child2')
+ assert child_of_child2_line == ''
+
+ #bind generic_simple_bus as a child of bind-test-child2
+ response = u_boot_console.run_command('bind {} {} generic_simple_bus'.format(child2_uclass, child2_index))
+
+ #check that the child is there and its uclass/index pair is right
+ tree = u_boot_console.run_command('dm tree')
+ treelines = [x.strip() for x in tree.splitlines() if x.strip()]
+
+ child_of_child2_line = get_next_line(tree, 'bind-test-child2')
+ assert child_of_child2_line
+ child_of_child2_index = int(child_of_child2_line.split()[1])
+ assert in_tree(tree, 'generic_simple_bus', 'simple_bus', 'generic_simple_bus', 2, True)
+ assert child_of_child2_index == child2_index + 1
+
+ #unbind the child and check it has been removed
+ response = u_boot_console.run_command('unbind {} {} generic_simple_bus'.format(child2_uclass, child2_index))
+ assert response == ''
+
+ tree = u_boot_console.run_command('dm tree')
+ assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'generic_simple_bus', 1, True)
+
+ child_of_child2_line = get_next_line(tree, 'bind-test-child2')
+ assert child_of_child2_line == ''
+
+ #unbind the child again and check it doesn't change the tree
+ tree_old = u_boot_console.run_command('dm tree')
+ response = u_boot_console.run_command('unbind {} {} generic_simple_bus'.format(child2_uclass, child2_index))
+ tree_new = u_boot_console.run_command('dm tree')
+
+ assert response == ''
+ assert tree_old == tree_new
+
+ response = u_boot_console.run_command('unbind /bind-test')
+ assert response == ''
diff --git a/test/py/tests/test_dfu.py b/test/py/tests/test_dfu.py
new file mode 100644
index 00000000..5d87eb34
--- /dev/null
+++ b/test/py/tests/test_dfu.py
@@ -0,0 +1,320 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+
+# Test U-Boot's "dfu" command. The test starts DFU in U-Boot, waits for USB
+# device enumeration on the host, executes dfu-util multiple times to test
+# various transfer sizes, many of which trigger USB driver edge cases, and
+# finally aborts the "dfu" command in U-Boot.
+
+import os
+import os.path
+import pytest
+import u_boot_utils
+
+"""
+Note: This test relies on:
+
+a) boardenv_* to contain configuration values to define which USB ports are
+available for testing. Without this, this test will be automatically skipped.
+For example:
+
+env__usb_dev_ports = (
+ {
+ 'fixture_id': 'micro_b',
+ 'tgt_usb_ctlr': '0',
+ 'host_usb_dev_node': '/dev/usbdev-p2371-2180',
+ # This parameter is optional /if/ you only have a single board
+ # attached to your host at a time.
+ 'host_usb_port_path': '3-13',
+ },
+)
+
+# Optional entries (required only when 'alt_id_test_file' and
+# 'alt_id_dummy_file' are specified).
+test_file_name = '/dfu_test.bin'
+dummy_file_name = '/dfu_dummy.bin'
+# The above files are used to generate a proper 'alt_info' entry
+'alt_info': '/%s ext4 0 2;/%s ext4 0 2' % (test_file_name, dummy_file_name),
+
+env__dfu_configs = (
+ # eMMC, partition 1
+ {
+ 'fixture_id': 'emmc',
+ 'alt_info': '/dfu_test.bin ext4 0 1;/dfu_dummy.bin ext4 0 1',
+ 'cmd_params': 'mmc 0',
+ # This value is optional.
+ # If present, it specifies the set of transfer sizes tested.
+ # If missing, a default list of sizes will be used, which covers
+ # various useful corner cases.
+ # Manually specifying test sizes is useful if you wish to test 4 DFU
+ # configurations, but don't want to test every single transfer size
+ # on each, to avoid bloating the overall time taken by testing.
+ 'test_sizes': (63, 64, 65),
+ # This value is optional.
+ # The name of the environment variable that the dfu command reads
+ # alt info from. If unspecified, this defaults to dfu_alt_info, which is
+ # valid for most systems. Some systems use a different variable name.
+ # One example is the Odroid XU3, which automatically generates
+ # $dfu_alt_info, each time the dfu command is run, by concatenating
+ # $dfu_alt_boot and $dfu_alt_system.
+ 'alt_info_env_name': 'dfu_alt_system',
+ # This value is optional.
+ # For boards which require a 'test file' alt setting other than the
+ # default (0), the exact alt setting (e.g. a file name) to use can be
+ # specified with this parameter.
+ 'alt_id_test_file': test_file_name,
+ # This value is optional.
+ # For boards which require a 'dummy file' alt setting other than the
+ # default (1), the exact alt setting (e.g. a file name) to use can be
+ # specified with this parameter.
+ 'alt_id_dummy_file': dummy_file_name,
+ },
+)
+
+b) udev rules to set permissions on devices nodes, so that sudo is not
+required. For example:
+
+ACTION=="add", SUBSYSTEM=="block", SUBSYSTEMS=="usb", KERNELS=="3-13", MODE:="666"
+
+(You may wish to change the group ID instead of setting the permissions wide
+open. All that matters is that the user ID running the test can access the
+device.)
+
+c) An optional udev rule to give you a persistent value to use in
+host_usb_dev_node. For example:
+
+IMPORT{builtin}="path_id"
+ENV{ID_PATH}=="?*", ENV{.ID_PORT}=="", SYMLINK+="bus/usb/by-path/$env{ID_PATH}"
+ENV{ID_PATH}=="?*", ENV{.ID_PORT}=="?*", SYMLINK+="bus/usb/by-path/$env{ID_PATH}-port$env{.ID_PORT}"
+"""
+
+# The set of file sizes to test. These values trigger various edge-cases such
+# as one less than, equal to, and one greater than typical USB max packet
+# sizes, and similar boundary conditions.
+test_sizes_default = (
+ 64 - 1,
+ 64,
+ 64 + 1,
+ 128 - 1,
+ 128,
+ 128 + 1,
+ 960 - 1,
+ 960,
+ 960 + 1,
+ 4096 - 1,
+ 4096,
+ 4096 + 1,
+ 1024 * 1024 - 1,
+ 1024 * 1024,
+ 8 * 1024 * 1024,
+)
+
+first_usb_dev_port = None
+
+@pytest.mark.buildconfigspec('cmd_dfu')
+@pytest.mark.requiredtool('dfu-util')
+def test_dfu(u_boot_console, env__usb_dev_port, env__dfu_config):
+ """Test the "dfu" command; the host system must be able to enumerate a USB
+ device when "dfu" is running, various DFU transfers are tested, and the
+ USB device must disappear when "dfu" is aborted.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ env__usb_dev_port: The single USB device-mode port specification on
+ which to run the test. See the file-level comment above for
+ details of the format.
+ env__dfu_config: The single DFU (memory region) configuration on which
+ to run the test. See the file-level comment above for details
+ of the format.
+
+ Returns:
+ Nothing.
+ """
+
+ def start_dfu():
+ """Start U-Boot's dfu shell command.
+
+ This also waits for the host-side USB enumeration process to complete.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ u_boot_utils.wait_until_file_open_fails(
+ env__usb_dev_port['host_usb_dev_node'], True)
+ fh = u_boot_utils.attempt_to_open_file(
+ env__usb_dev_port['host_usb_dev_node'])
+ if fh:
+ fh.close()
+ raise Exception('USB device present before dfu command invoked')
+
+ u_boot_console.log.action(
+ 'Starting long-running U-Boot dfu shell command')
+
+ dfu_alt_info_env = env__dfu_config.get('alt_info_env_name', \
+ 'dfu_alt_info')
+
+ cmd = 'setenv "%s" "%s"' % (dfu_alt_info_env,
+ env__dfu_config['alt_info'])
+ u_boot_console.run_command(cmd)
+
+ cmd = 'dfu 0 ' + env__dfu_config['cmd_params']
+ u_boot_console.run_command(cmd, wait_for_prompt=False)
+ u_boot_console.log.action('Waiting for DFU USB device to appear')
+ fh = u_boot_utils.wait_until_open_succeeds(
+ env__usb_dev_port['host_usb_dev_node'])
+ fh.close()
+
+ def stop_dfu(ignore_errors):
+ """Stop U-Boot's dfu shell command from executing.
+
+ This also waits for the host-side USB de-enumeration process to
+ complete.
+
+ Args:
+ ignore_errors: Ignore any errors. This is useful if an error has
+ already been detected, and the code is performing best-effort
+ cleanup. In this case, we do not want to mask the original
+ error by "honoring" any new errors.
+
+ Returns:
+ Nothing.
+ """
+
+ try:
+ u_boot_console.log.action(
+ 'Stopping long-running U-Boot dfu shell command')
+ u_boot_console.ctrlc()
+ u_boot_console.log.action(
+ 'Waiting for DFU USB device to disappear')
+ u_boot_utils.wait_until_file_open_fails(
+ env__usb_dev_port['host_usb_dev_node'], ignore_errors)
+ except:
+ if not ignore_errors:
+ raise
+
+ def run_dfu_util(alt_setting, fn, up_dn_load_arg):
+ """Invoke dfu-util on the host.
+
+ Args:
+ alt_setting: The DFU "alternate setting" identifier to interact
+ with.
+ fn: The host-side file name to transfer.
+ up_dn_load_arg: '-U' or '-D' depending on whether a DFU upload or
+ download operation should be performed.
+
+ Returns:
+ Nothing.
+ """
+
+ cmd = ['dfu-util', '-a', alt_setting, up_dn_load_arg, fn]
+ if 'host_usb_port_path' in env__usb_dev_port:
+ cmd += ['-p', env__usb_dev_port['host_usb_port_path']]
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+ u_boot_console.wait_for('Ctrl+C to exit ...')
+
+ def dfu_write(alt_setting, fn):
+ """Write a file to the target board using DFU.
+
+ Args:
+ alt_setting: The DFU "alternate setting" identifier to interact
+ with.
+ fn: The host-side file name to transfer.
+
+ Returns:
+ Nothing.
+ """
+
+ run_dfu_util(alt_setting, fn, '-D')
+
+ def dfu_read(alt_setting, fn):
+ """Read a file from the target board using DFU.
+
+ Args:
+ alt_setting: The DFU "alternate setting" identifier to interact
+ with.
+ fn: The host-side file name to transfer.
+
+ Returns:
+ Nothing.
+ """
+
+ # dfu-util fails reads/uploads if the host file already exists
+ if os.path.exists(fn):
+ os.remove(fn)
+ run_dfu_util(alt_setting, fn, '-U')
+
+ def dfu_write_read_check(size):
+ """Test DFU transfers of a specific size of data
+
+ This function first writes data to the board then reads it back and
+ compares the written and read back data. Measures are taken to avoid
+ certain types of false positives.
+
+ Args:
+ size: The data size to test.
+
+ Returns:
+ Nothing.
+ """
+
+ test_f = u_boot_utils.PersistentRandomFile(u_boot_console,
+ 'dfu_%d.bin' % size, size)
+ readback_fn = u_boot_console.config.result_dir + '/dfu_readback.bin'
+
+ u_boot_console.log.action('Writing test data to DFU primary ' +
+ 'altsetting')
+ dfu_write(alt_setting_test_file, test_f.abs_fn)
+
+ u_boot_console.log.action('Writing dummy data to DFU secondary ' +
+ 'altsetting to clear DFU buffers')
+ dfu_write(alt_setting_dummy_file, dummy_f.abs_fn)
+
+ u_boot_console.log.action('Reading DFU primary altsetting for ' +
+ 'comparison')
+ dfu_read(alt_setting_test_file, readback_fn)
+
+ u_boot_console.log.action('Comparing written and read data')
+ written_hash = test_f.content_hash
+ read_back_hash = u_boot_utils.md5sum_file(readback_fn, size)
+ assert(written_hash == read_back_hash)
+
+ # This test may be executed against multiple USB ports. The test takes a
+ # long time, so we don't want to do the whole thing each time. Instead,
+ # execute the full test on the first USB port, and perform a very limited
+ # test on other ports. In the limited case, we solely validate that the
+ # host PC can enumerate the U-Boot USB device.
+ global first_usb_dev_port
+ if not first_usb_dev_port:
+ first_usb_dev_port = env__usb_dev_port
+ if env__usb_dev_port == first_usb_dev_port:
+ sizes = env__dfu_config.get('test_sizes', test_sizes_default)
+ else:
+ sizes = []
+
+ dummy_f = u_boot_utils.PersistentRandomFile(u_boot_console,
+ 'dfu_dummy.bin', 1024)
+
+ alt_setting_test_file = env__dfu_config.get('alt_id_test_file', '0')
+ alt_setting_dummy_file = env__dfu_config.get('alt_id_dummy_file', '1')
+
+ ignore_cleanup_errors = True
+ try:
+ start_dfu()
+
+ u_boot_console.log.action(
+ 'Overwriting DFU primary altsetting with dummy data')
+ dfu_write(alt_setting_test_file, dummy_f.abs_fn)
+
+ for size in sizes:
+ with u_boot_console.log.section('Data size %d' % size):
+ dfu_write_read_check(size)
+ # Make the status of each sub-test obvious. If the test didn't
+ # pass, an exception was thrown so this code isn't executed.
+ u_boot_console.log.status_pass('OK')
+ ignore_cleanup_errors = False
+ finally:
+ stop_dfu(ignore_cleanup_errors)
diff --git a/test/py/tests/test_efi_loader.py b/test/py/tests/test_efi_loader.py
new file mode 100644
index 00000000..d6b214f8
--- /dev/null
+++ b/test/py/tests/test_efi_loader.py
@@ -0,0 +1,196 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2016, Alexander Graf <agraf@suse.de>
+#
+# based on test_net.py.
+
+# Test efi loader implementation
+
+import pytest
+import u_boot_utils
+
+"""
+Note: This test relies on boardenv_* containing configuration values to define
+which network environment is available for testing. Without this, the parts
+that rely on network will be automatically skipped.
+
+For example:
+
+# Boolean indicating whether the Ethernet device is attached to USB, and hence
+# USB enumeration needs to be performed prior to network tests.
+# This variable may be omitted if its value is False.
+env__net_uses_usb = False
+
+# Boolean indicating whether the Ethernet device is attached to PCI, and hence
+# PCI enumeration needs to be performed prior to network tests.
+# This variable may be omitted if its value is False.
+env__net_uses_pci = True
+
+# True if a DHCP server is attached to the network, and should be tested.
+# If DHCP testing is not possible or desired, this variable may be omitted or
+# set to False.
+env__net_dhcp_server = True
+
+# A list of environment variables that should be set in order to configure a
+# static IP. If solely relying on DHCP, this variable may be omitted or set to
+# an empty list.
+env__net_static_env_vars = [
+ ('ipaddr', '10.0.0.100'),
+ ('netmask', '255.255.255.0'),
+ ('serverip', '10.0.0.1'),
+]
+
+# Details regarding a file that may be read from a TFTP server. This variable
+# may be omitted or set to None if TFTP testing is not possible or desired.
+env__efi_loader_helloworld_file = {
+ 'fn': 'lib/efi_loader/helloworld.efi',
+ 'size': 5058624,
+ 'crc32': 'c2244b26',
+}
+"""
+
+net_set_up = False
+
+def test_efi_pre_commands(u_boot_console):
+ """Execute any commands required to enable network hardware.
+
+ These commands are provided by the boardenv_* file; see the comment at the
+ beginning of this file.
+ """
+
+ init_usb = u_boot_console.config.env.get('env__net_uses_usb', False)
+ if init_usb:
+ u_boot_console.run_command('usb start')
+
+ init_pci = u_boot_console.config.env.get('env__net_uses_pci', False)
+ if init_pci:
+ u_boot_console.run_command('pci enum')
+
+@pytest.mark.buildconfigspec('cmd_dhcp')
+def test_efi_dhcp(u_boot_console):
+ """Test the dhcp command.
+
+ The boardenv_* file may be used to enable/disable this test; see the
+ comment at the beginning of this file.
+ """
+
+ test_dhcp = u_boot_console.config.env.get('env__net_dhcp_server', False)
+ if not test_dhcp:
+ pytest.skip('No DHCP server available')
+
+ u_boot_console.run_command('setenv autoload no')
+ output = u_boot_console.run_command('dhcp')
+ assert 'DHCP client bound to address ' in output
+
+ global net_set_up
+ net_set_up = True
+
+@pytest.mark.buildconfigspec('net')
+def test_efi_setup_static(u_boot_console):
+ """Set up a static IP configuration.
+
+ The configuration is provided by the boardenv_* file; see the comment at
+ the beginning of this file.
+ """
+
+ env_vars = u_boot_console.config.env.get('env__net_static_env_vars', None)
+ if not env_vars:
+ pytest.skip('No static network configuration is defined')
+
+ for (var, val) in env_vars:
+ u_boot_console.run_command('setenv %s %s' % (var, val))
+
+ global net_set_up
+ net_set_up = True
+
+def fetch_tftp_file(u_boot_console, env_conf):
+ """Grab an env described file via TFTP and return its address
+
+ A file as described by an env config <env_conf> is downloaded from the TFTP
+ server. The address to that file is returned.
+ """
+ if not net_set_up:
+ pytest.skip('Network not initialized')
+
+ f = u_boot_console.config.env.get(env_conf, None)
+ if not f:
+ pytest.skip('No %s binary specified in environment' % env_conf)
+
+ addr = f.get('addr', None)
+ if not addr:
+ addr = u_boot_utils.find_ram_base(u_boot_console)
+
+ fn = f['fn']
+ output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn))
+ expected_text = 'Bytes transferred = '
+ sz = f.get('size', None)
+ if sz:
+ expected_text += '%d' % sz
+ assert expected_text in output
+
+ expected_crc = f.get('crc32', None)
+ if not expected_crc:
+ return addr
+
+ if u_boot_console.config.buildconfig.get('config_cmd_crc32', 'n') != 'y':
+ return addr
+
+ output = u_boot_console.run_command('crc32 %x $filesize' % addr)
+ assert expected_crc in output
+
+ return addr
+
+@pytest.mark.buildconfigspec('cmd_bootefi_hello_compile')
+def test_efi_helloworld_net(u_boot_console):
+ """Run the helloworld.efi binary via TFTP.
+
+ The helloworld.efi file is downloaded from the TFTP server and gets
+ executed.
+ """
+
+ addr = fetch_tftp_file(u_boot_console, 'env__efi_loader_helloworld_file')
+
+ output = u_boot_console.run_command('bootefi %x' % addr)
+ expected_text = 'Hello, world'
+ assert expected_text in output
+ expected_text = '## Application terminated, r = 0'
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_bootefi_hello')
+def test_efi_helloworld_builtin(u_boot_console):
+ """Run the builtin helloworld.efi binary.
+
+ The helloworld.efi file is included in U-Boot, execute it using the
+ special "bootefi hello" command.
+ """
+
+ output = u_boot_console.run_command('bootefi hello')
+ expected_text = 'Hello, world'
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_bootefi')
+def test_efi_grub_net(u_boot_console):
+ """Run the grub.efi binary via TFTP.
+
+ The grub.efi file is downloaded from the TFTP server and gets
+ executed.
+ """
+
+ addr = fetch_tftp_file(u_boot_console, 'env__efi_loader_grub_file')
+
+ u_boot_console.run_command('bootefi %x' % addr, wait_for_prompt=False)
+
+ # Verify that we have an SMBIOS table
+ check_smbios = u_boot_console.config.env.get('env__efi_loader_check_smbios', False)
+ if check_smbios:
+ u_boot_console.wait_for('grub>')
+ output = u_boot_console.run_command('lsefisystab', wait_for_prompt=False, wait_for_echo=False)
+ u_boot_console.wait_for('SMBIOS')
+
+ # Then exit cleanly
+ u_boot_console.wait_for('grub>')
+ output = u_boot_console.run_command('exit', wait_for_prompt=False, wait_for_echo=False)
+ u_boot_console.wait_for('r = 0')
+
+ # And give us our U-Boot prompt back
+ u_boot_console.run_command('')
diff --git a/test/py/tests/test_efi_selftest.py b/test/py/tests/test_efi_selftest.py
new file mode 100644
index 00000000..ca015420
--- /dev/null
+++ b/test/py/tests/test_efi_selftest.py
@@ -0,0 +1,198 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2017, Heinrich Schuchardt <xypron.glpk@gmx.de>
+
+# Test efi API implementation
+
+import pytest
+import u_boot_utils
+
+@pytest.mark.buildconfigspec('cmd_bootefi_selftest')
+def test_efi_selftest(u_boot_console):
+ """Test the UEFI implementation
+
+ :param u_boot_console: U-Boot console
+
+ This function executes all selftests that are not marked as on request.
+ """
+ u_boot_console.run_command(cmd='setenv efi_selftest')
+ u_boot_console.run_command(cmd='bootefi selftest', wait_for_prompt=False)
+ m = u_boot_console.p.expect(['Summary: 0 failures', 'Press any key'])
+ if m != 0:
+ raise Exception('Failures occurred during the EFI selftest')
+ u_boot_console.restart_uboot()
+
+@pytest.mark.buildconfigspec('cmd_bootefi_selftest')
+@pytest.mark.buildconfigspec('of_control')
+@pytest.mark.notbuildconfigspec('generate_acpi_table')
+def test_efi_selftest_device_tree(u_boot_console):
+ u_boot_console.run_command(cmd='setenv efi_selftest list')
+ output = u_boot_console.run_command('bootefi selftest')
+ assert '\'device tree\'' in output
+ u_boot_console.run_command(cmd='setenv efi_selftest device tree')
+ u_boot_console.run_command(cmd='setenv -f serial# Testing DT')
+ u_boot_console.run_command(cmd='bootefi selftest ${fdtcontroladdr}', wait_for_prompt=False)
+ m = u_boot_console.p.expect(['serial-number: Testing DT', 'U-Boot'])
+ if m != 0:
+ raise Exception('serial-number missing in device tree')
+ u_boot_console.restart_uboot()
+
+@pytest.mark.buildconfigspec('cmd_bootefi_selftest')
+def test_efi_selftest_watchdog_reboot(u_boot_console):
+ u_boot_console.run_command(cmd='setenv efi_selftest list')
+ output = u_boot_console.run_command('bootefi selftest')
+ assert '\'watchdog reboot\'' in output
+ u_boot_console.run_command(cmd='setenv efi_selftest watchdog reboot')
+ u_boot_console.run_command(cmd='bootefi selftest', wait_for_prompt=False)
+ m = u_boot_console.p.expect(['resetting', 'U-Boot'])
+ if m != 0:
+ raise Exception('Reset failed in \'watchdog reboot\' test')
+ u_boot_console.restart_uboot()
+
+@pytest.mark.buildconfigspec('cmd_bootefi_selftest')
+def test_efi_selftest_text_input(u_boot_console):
+ """Test the EFI_SIMPLE_TEXT_INPUT_PROTOCOL
+
+ :param u_boot_console: U-Boot console
+
+ This function calls the text input EFI selftest.
+ """
+ u_boot_console.run_command(cmd='setenv efi_selftest text input')
+ output = u_boot_console.run_command(cmd='bootefi selftest',
+ wait_for_prompt=False)
+ m = u_boot_console.p.expect([r'To terminate type \'x\''])
+ if m != 0:
+ raise Exception('No prompt for \'text input\' test')
+ u_boot_console.drain_console()
+ u_boot_console.p.timeout = 500
+ # EOT
+ u_boot_console.run_command(cmd=chr(4), wait_for_echo=False,
+ send_nl=False, wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 4 \(unknown\), scan code 0 \(Null\)'])
+ if m != 0:
+ raise Exception('EOT failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # BS
+ u_boot_console.run_command(cmd=chr(8), wait_for_echo=False,
+ send_nl=False, wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 8 \(BS\), scan code 0 \(Null\)'])
+ if m != 0:
+ raise Exception('BS failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # TAB
+ u_boot_console.run_command(cmd=chr(9), wait_for_echo=False,
+ send_nl=False, wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 9 \(TAB\), scan code 0 \(Null\)'])
+ if m != 0:
+ raise Exception('TAB failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # a
+ u_boot_console.run_command(cmd='a', wait_for_echo=False, send_nl=False,
+ wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 97 \(\'a\'\), scan code 0 \(Null\)'])
+ if m != 0:
+ raise Exception('\'a\' failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # UP escape sequence
+ u_boot_console.run_command(cmd=chr(27) + '[A', wait_for_echo=False,
+ send_nl=False, wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 0 \(Null\), scan code 1 \(Up\)'])
+ if m != 0:
+ raise Exception('UP failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # Euro sign
+ u_boot_console.run_command(cmd=b'\xe2\x82\xac'.decode(), wait_for_echo=False,
+ send_nl=False, wait_for_prompt=False)
+ m = u_boot_console.p.expect([r'Unicode char 8364 \(\''])
+ if m != 0:
+ raise Exception('Euro sign failed in \'text input\' test')
+ u_boot_console.drain_console()
+ u_boot_console.run_command(cmd='x', wait_for_echo=False, send_nl=False,
+ wait_for_prompt=False)
+ m = u_boot_console.p.expect(['Summary: 0 failures', 'Press any key'])
+ if m != 0:
+ raise Exception('Failures occurred during the EFI selftest')
+ u_boot_console.restart_uboot()
+
+@pytest.mark.buildconfigspec('cmd_bootefi_selftest')
+def test_efi_selftest_text_input_ex(u_boot_console):
+ """Test the EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL
+
+ :param u_boot_console: U-Boot console
+
+ This function calls the extended text input EFI selftest.
+ """
+ u_boot_console.run_command(cmd='setenv efi_selftest extended text input')
+ output = u_boot_console.run_command(cmd='bootefi selftest',
+ wait_for_prompt=False)
+ m = u_boot_console.p.expect([r'To terminate type \'CTRL\+x\''])
+ if m != 0:
+ raise Exception('No prompt for \'text input\' test')
+ u_boot_console.drain_console()
+ u_boot_console.p.timeout = 500
+ # EOT
+ u_boot_console.run_command(cmd=chr(4), wait_for_echo=False,
+ send_nl=False, wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 100 \(\'d\'\), scan code 0 \(CTRL\+Null\)'])
+ if m != 0:
+ raise Exception('EOT failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # BS
+ u_boot_console.run_command(cmd=chr(8), wait_for_echo=False,
+ send_nl=False, wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 8 \(BS\), scan code 0 \(\+Null\)'])
+ if m != 0:
+ raise Exception('BS failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # TAB
+ u_boot_console.run_command(cmd=chr(9), wait_for_echo=False,
+ send_nl=False, wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 9 \(TAB\), scan code 0 \(\+Null\)'])
+ if m != 0:
+ raise Exception('TAB failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # a
+ u_boot_console.run_command(cmd='a', wait_for_echo=False, send_nl=False,
+ wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 97 \(\'a\'\), scan code 0 \(Null\)'])
+ if m != 0:
+ raise Exception('\'a\' failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # UP escape sequence
+ u_boot_console.run_command(cmd=chr(27) + '[A', wait_for_echo=False,
+ send_nl=False, wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 0 \(Null\), scan code 1 \(\+Up\)'])
+ if m != 0:
+ raise Exception('UP failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # Euro sign
+ u_boot_console.run_command(cmd=b'\xe2\x82\xac'.decode(), wait_for_echo=False,
+ send_nl=False, wait_for_prompt=False)
+ m = u_boot_console.p.expect([r'Unicode char 8364 \(\''])
+ if m != 0:
+ raise Exception('Euro sign failed in \'text input\' test')
+ u_boot_console.drain_console()
+ # SHIFT+ALT+FN 5
+ u_boot_console.run_command(cmd=b'\x1b\x5b\x31\x35\x3b\x34\x7e'.decode(),
+ wait_for_echo=False, send_nl=False,
+ wait_for_prompt=False)
+ m = u_boot_console.p.expect(
+ [r'Unicode char 0 \(Null\), scan code 15 \(SHIFT\+ALT\+FN 5\)'])
+ if m != 0:
+ raise Exception('SHIFT+ALT+FN 5 failed in \'text input\' test')
+ u_boot_console.drain_console()
+ u_boot_console.run_command(cmd=chr(24), wait_for_echo=False, send_nl=False,
+ wait_for_prompt=False)
+ m = u_boot_console.p.expect(['Summary: 0 failures', 'Press any key'])
+ if m != 0:
+ raise Exception('Failures occurred during the EFI selftest')
+ u_boot_console.restart_uboot();
diff --git a/test/py/tests/test_env.py b/test/py/tests/test_env.py
new file mode 100644
index 00000000..6ff38f10
--- /dev/null
+++ b/test/py/tests/test_env.py
@@ -0,0 +1,338 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Test operation of shell commands relating to environment variables.
+
+import pytest
+import u_boot_utils
+
+# FIXME: This might be useful for other tests;
+# perhaps refactor it into ConsoleBase or some other state object?
+class StateTestEnv(object):
+ """Container that represents the state of all U-Boot environment variables.
+ This enables quick determination of existent/non-existent variable
+ names.
+ """
+
+ def __init__(self, u_boot_console):
+ """Initialize a new StateTestEnv object.
+
+ Args:
+ u_boot_console: A U-Boot console.
+
+ Returns:
+ Nothing.
+ """
+
+ self.u_boot_console = u_boot_console
+ self.get_env()
+ self.set_var = self.get_non_existent_var()
+
+ def get_env(self):
+ """Read all current environment variables from U-Boot.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ if self.u_boot_console.config.buildconfig.get(
+ 'config_version_variable', 'n') == 'y':
+ with self.u_boot_console.disable_check('main_signon'):
+ response = self.u_boot_console.run_command('printenv')
+ else:
+ response = self.u_boot_console.run_command('printenv')
+ self.env = {}
+ for l in response.splitlines():
+ if not '=' in l:
+ continue
+ (var, value) = l.split('=', 1)
+ self.env[var] = value
+
+ def get_existent_var(self):
+ """Return the name of an environment variable that exists.
+
+ Args:
+ None.
+
+ Returns:
+ The name of an environment variable.
+ """
+
+ for var in self.env:
+ return var
+
+ def get_non_existent_var(self):
+ """Return the name of an environment variable that does not exist.
+
+ Args:
+ None.
+
+ Returns:
+ The name of an environment variable.
+ """
+
+ n = 0
+ while True:
+ var = 'test_env_' + str(n)
+ if var not in self.env:
+ return var
+ n += 1
+
+ste = None
+@pytest.fixture(scope='function')
+def state_test_env(u_boot_console):
+ """pytest fixture to provide a StateTestEnv object to tests."""
+
+ global ste
+ if not ste:
+ ste = StateTestEnv(u_boot_console)
+ return ste
+
+def unset_var(state_test_env, var):
+ """Unset an environment variable.
+
+ This both executes a U-Boot shell command and updates a StateTestEnv
+ object.
+
+ Args:
+ state_test_env: The StateTestEnv object to update.
+ var: The variable name to unset.
+
+ Returns:
+ Nothing.
+ """
+
+ state_test_env.u_boot_console.run_command('setenv %s' % var)
+ if var in state_test_env.env:
+ del state_test_env.env[var]
+
+def set_var(state_test_env, var, value):
+ """Set an environment variable.
+
+ This both executes a U-Boot shell command and updates a StateTestEnv
+ object.
+
+ Args:
+ state_test_env: The StateTestEnv object to update.
+ var: The variable name to set.
+ value: The value to set the variable to.
+
+ Returns:
+ Nothing.
+ """
+
+ bc = state_test_env.u_boot_console.config.buildconfig
+ if bc.get('config_hush_parser', None):
+ quote = '"'
+ else:
+ quote = ''
+ if ' ' in value:
+ pytest.skip('Space in variable value on non-Hush shell')
+
+ state_test_env.u_boot_console.run_command(
+ 'setenv %s %s%s%s' % (var, quote, value, quote))
+ state_test_env.env[var] = value
+
+def validate_empty(state_test_env, var):
+ """Validate that a variable is not set, using U-Boot shell commands.
+
+ Args:
+ var: The variable name to test.
+
+ Returns:
+ Nothing.
+ """
+
+ response = state_test_env.u_boot_console.run_command('echo $%s' % var)
+ assert response == ''
+
+def validate_set(state_test_env, var, value):
+ """Validate that a variable is set, using U-Boot shell commands.
+
+ Args:
+ var: The variable name to test.
+ value: The value the variable is expected to have.
+
+ Returns:
+ Nothing.
+ """
+
+ # echo does not preserve leading, internal, or trailing whitespace in the
+ # value. printenv does, and hence allows more complete testing.
+ response = state_test_env.u_boot_console.run_command('printenv %s' % var)
+ assert response == ('%s=%s' % (var, value))
+
+def test_env_echo_exists(state_test_env):
+ """Test echoing a variable that exists."""
+
+ var = state_test_env.get_existent_var()
+ value = state_test_env.env[var]
+ validate_set(state_test_env, var, value)
+
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_env_echo_non_existent(state_test_env):
+ """Test echoing a variable that doesn't exist."""
+
+ var = state_test_env.set_var
+ validate_empty(state_test_env, var)
+
+def test_env_printenv_non_existent(state_test_env):
+ """Test printenv error message for non-existant variables."""
+
+ var = state_test_env.set_var
+ c = state_test_env.u_boot_console
+ with c.disable_check('error_notification'):
+ response = c.run_command('printenv %s' % var)
+ assert(response == '## Error: "%s" not defined' % var)
+
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_env_unset_non_existent(state_test_env):
+ """Test unsetting a nonexistent variable."""
+
+ var = state_test_env.get_non_existent_var()
+ unset_var(state_test_env, var)
+ validate_empty(state_test_env, var)
+
+def test_env_set_non_existent(state_test_env):
+ """Test set a non-existant variable."""
+
+ var = state_test_env.set_var
+ value = 'foo'
+ set_var(state_test_env, var, value)
+ validate_set(state_test_env, var, value)
+
+def test_env_set_existing(state_test_env):
+ """Test setting an existant variable."""
+
+ var = state_test_env.set_var
+ value = 'bar'
+ set_var(state_test_env, var, value)
+ validate_set(state_test_env, var, value)
+
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_env_unset_existing(state_test_env):
+ """Test unsetting a variable."""
+
+ var = state_test_env.set_var
+ unset_var(state_test_env, var)
+ validate_empty(state_test_env, var)
+
+def test_env_expansion_spaces(state_test_env):
+ """Test expanding a variable that contains a space in its value."""
+
+ var_space = None
+ var_test = None
+ try:
+ var_space = state_test_env.get_non_existent_var()
+ set_var(state_test_env, var_space, ' ')
+
+ var_test = state_test_env.get_non_existent_var()
+ value = ' 1${%(var_space)s}${%(var_space)s} 2 ' % locals()
+ set_var(state_test_env, var_test, value)
+ value = ' 1 2 '
+ validate_set(state_test_env, var_test, value)
+ finally:
+ if var_space:
+ unset_var(state_test_env, var_space)
+ if var_test:
+ unset_var(state_test_env, var_test)
+
+@pytest.mark.buildconfigspec('cmd_importenv')
+def test_env_import_checksum_no_size(state_test_env):
+ """Test that omitted ('-') size parameter with checksum validation fails the
+ env import function.
+ """
+ c = state_test_env.u_boot_console
+ ram_base = u_boot_utils.find_ram_base(state_test_env.u_boot_console)
+ addr = '%08x' % ram_base
+
+ with c.disable_check('error_notification'):
+ response = c.run_command('env import -c %s -' % addr)
+ assert(response == '## Error: external checksum format must pass size')
+
+@pytest.mark.buildconfigspec('cmd_importenv')
+def test_env_import_whitelist_checksum_no_size(state_test_env):
+ """Test that omitted ('-') size parameter with checksum validation fails the
+ env import function when variables are passed as parameters.
+ """
+ c = state_test_env.u_boot_console
+ ram_base = u_boot_utils.find_ram_base(state_test_env.u_boot_console)
+ addr = '%08x' % ram_base
+
+ with c.disable_check('error_notification'):
+ response = c.run_command('env import -c %s - foo1 foo2 foo4' % addr)
+ assert(response == '## Error: external checksum format must pass size')
+
+@pytest.mark.buildconfigspec('cmd_exportenv')
+@pytest.mark.buildconfigspec('cmd_importenv')
+def test_env_import_whitelist(state_test_env):
+ """Test importing only a handful of env variables from an environment."""
+ c = state_test_env.u_boot_console
+ ram_base = u_boot_utils.find_ram_base(state_test_env.u_boot_console)
+ addr = '%08x' % ram_base
+
+ set_var(state_test_env, 'foo1', 'bar1')
+ set_var(state_test_env, 'foo2', 'bar2')
+ set_var(state_test_env, 'foo3', 'bar3')
+
+ c.run_command('env export %s' % addr)
+
+ unset_var(state_test_env, 'foo1')
+ set_var(state_test_env, 'foo2', 'test2')
+ set_var(state_test_env, 'foo4', 'bar4')
+
+ # Importing restores foo1 and foo2 from the exported environment. foo3 is
+ # not in the whitelist, so it keeps the value it had before exporting, and
+ # foo4 is not in the exported environment, so it keeps the value it had
+ # before importing.
+ c.run_command('env import %s - foo1 foo2 foo4' % addr)
+
+ validate_set(state_test_env, 'foo1', 'bar1')
+ validate_set(state_test_env, 'foo2', 'bar2')
+ validate_set(state_test_env, 'foo3', 'bar3')
+ validate_set(state_test_env, 'foo4', 'bar4')
+
+ # Cleanup test environment
+ unset_var(state_test_env, 'foo1')
+ unset_var(state_test_env, 'foo2')
+ unset_var(state_test_env, 'foo3')
+ unset_var(state_test_env, 'foo4')
+
+@pytest.mark.buildconfigspec('cmd_exportenv')
+@pytest.mark.buildconfigspec('cmd_importenv')
+def test_env_import_whitelist_delete(state_test_env):
+
+ """Test importing only a handful of env variables from an environment, with.
+ deletion if a var A that is passed to env import is not in the
+ environment to be imported.
+ """
+ c = state_test_env.u_boot_console
+ ram_base = u_boot_utils.find_ram_base(state_test_env.u_boot_console)
+ addr = '%08x' % ram_base
+
+ set_var(state_test_env, 'foo1', 'bar1')
+ set_var(state_test_env, 'foo2', 'bar2')
+ set_var(state_test_env, 'foo3', 'bar3')
+
+ c.run_command('env export %s' % addr)
+
+ unset_var(state_test_env, 'foo1')
+ set_var(state_test_env, 'foo2', 'test2')
+ set_var(state_test_env, 'foo4', 'bar4')
+
+ # Importing restores foo1 and foo2 from the exported environment. foo3 is
+ # not in the whitelist, so it keeps the value it had before exporting, and
+ # foo4 is not in the exported environment, so -d deletes it.
+ c.run_command('env import -d %s - foo1 foo2 foo4' % addr)
+
+ validate_set(state_test_env, 'foo1', 'bar1')
+ validate_set(state_test_env, 'foo2', 'bar2')
+ validate_set(state_test_env, 'foo3', 'bar3')
+ validate_empty(state_test_env, 'foo4')
+
+ # Cleanup test environment
+ unset_var(state_test_env, 'foo1')
+ unset_var(state_test_env, 'foo2')
+ unset_var(state_test_env, 'foo3')
+ unset_var(state_test_env, 'foo4')
diff --git a/test/py/tests/test_fit.py b/test/py/tests/test_fit.py
new file mode 100755
index 00000000..356d9a20
--- /dev/null
+++ b/test/py/tests/test_fit.py
@@ -0,0 +1,457 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2013, Google Inc.
+#
+# Sanity check of the FIT handling in U-Boot
+
+import os
+import pytest
+import struct
+import u_boot_utils as util
+
+# Define a base ITS which we can adjust using % and a dictionary
+base_its = '''
+/dts-v1/;
+
+/ {
+ description = "Chrome OS kernel image with one or more FDT blobs";
+ #address-cells = <1>;
+
+ images {
+ kernel@1 {
+ data = /incbin/("%(kernel)s");
+ type = "kernel";
+ arch = "sandbox";
+ os = "linux";
+ compression = "%(compression)s";
+ load = <0x40000>;
+ entry = <0x8>;
+ };
+ kernel@2 {
+ data = /incbin/("%(loadables1)s");
+ type = "kernel";
+ arch = "sandbox";
+ os = "linux";
+ compression = "none";
+ %(loadables1_load)s
+ entry = <0x0>;
+ };
+ fdt@1 {
+ description = "snow";
+ data = /incbin/("%(fdt)s");
+ type = "flat_dt";
+ arch = "sandbox";
+ %(fdt_load)s
+ compression = "%(compression)s";
+ signature@1 {
+ algo = "sha1,rsa2048";
+ key-name-hint = "dev";
+ };
+ };
+ ramdisk@1 {
+ description = "snow";
+ data = /incbin/("%(ramdisk)s");
+ type = "ramdisk";
+ arch = "sandbox";
+ os = "linux";
+ %(ramdisk_load)s
+ compression = "%(compression)s";
+ };
+ ramdisk@2 {
+ description = "snow";
+ data = /incbin/("%(loadables2)s");
+ type = "ramdisk";
+ arch = "sandbox";
+ os = "linux";
+ %(loadables2_load)s
+ compression = "none";
+ };
+ };
+ configurations {
+ default = "conf@1";
+ conf@1 {
+ kernel = "kernel@1";
+ fdt = "fdt@1";
+ %(ramdisk_config)s
+ %(loadables_config)s
+ };
+ };
+};
+'''
+
+# Define a base FDT - currently we don't use anything in this
+base_fdt = '''
+/dts-v1/;
+
+/ {
+ model = "Sandbox Verified Boot Test";
+ compatible = "sandbox";
+
+ reset@0 {
+ compatible = "sandbox,reset";
+ };
+
+};
+'''
+
+# This is the U-Boot script that is run for each test. First load the FIT,
+# then run the 'bootm' command, then save out memory from the places where
+# we expect 'bootm' to write things. Then quit.
+base_script = '''
+host load hostfs 0 %(fit_addr)x %(fit)s
+fdt addr %(fit_addr)x
+bootm start %(fit_addr)x
+bootm loados
+host save hostfs 0 %(kernel_addr)x %(kernel_out)s %(kernel_size)x
+host save hostfs 0 %(fdt_addr)x %(fdt_out)s %(fdt_size)x
+host save hostfs 0 %(ramdisk_addr)x %(ramdisk_out)s %(ramdisk_size)x
+host save hostfs 0 %(loadables1_addr)x %(loadables1_out)s %(loadables1_size)x
+host save hostfs 0 %(loadables2_addr)x %(loadables2_out)s %(loadables2_size)x
+'''
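+
+# The templates above are filled in with Python '%' string formatting. As a
+# rough sketch (the values below are placeholders, not the ones computed by
+# the test):
+#   base_script % {'fit_addr': 0x1000, 'fit': 'test.fit',
+#                  'kernel_addr': 0x40000, 'kernel_out': 'kernel.out', ...}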
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('fit_signature')
+@pytest.mark.requiredtool('dtc')
+def test_fit(u_boot_console):
+ def make_fname(leaf):
+ """Make a temporary filename
+
+ Args:
+ leaf: Leaf name of file to create (within temporary directory)
+ Return:
+ Temporary filename
+ """
+
+ return os.path.join(cons.config.build_dir, leaf)
+
+ def filesize(fname):
+ """Get the size of a file
+
+ Args:
+ fname: Filename to check
+ Return:
+ Size of file in bytes
+ """
+ return os.stat(fname).st_size
+
+ def read_file(fname):
+ """Read the contents of a file
+
+ Args:
+ fname: Filename to read
+ Returns:
+ Contents of file as a string
+ """
+ with open(fname, 'rb') as fd:
+ return fd.read()
+
+ def make_dtb():
+ """Make a sample .dts file and compile it to a .dtb
+
+ Returns:
+ Filename of .dtb file created
+ """
+ src = make_fname('u-boot.dts')
+ dtb = make_fname('u-boot.dtb')
+ with open(src, 'w') as fd:
+ fd.write(base_fdt)
+ util.run_and_log(cons, ['dtc', src, '-O', 'dtb', '-o', dtb])
+ return dtb
+
+ def make_its(params):
+ """Make a sample .its file with parameters embedded
+
+ Args:
+ params: Dictionary containing parameters to embed in the %() strings
+ Returns:
+ Filename of .its file created
+ """
+ its = make_fname('test.its')
+ with open(its, 'w') as fd:
+ print(base_its % params, file=fd)
+ return its
+
+ def make_fit(mkimage, params):
+ """Make a sample .fit file ready for loading
+
+ This creates a .its script with the selected parameters and uses mkimage to
+ turn this into a .fit image.
+
+ Args:
+ mkimage: Filename of 'mkimage' utility
+ params: Dictionary containing parameters to embed in the %() strings
+ Return:
+ Filename of .fit file created
+ """
+ fit = make_fname('test.fit')
+ its = make_its(params)
+ util.run_and_log(cons, [mkimage, '-f', its, fit])
+ with open(make_fname('u-boot.dts'), 'w') as fd:
+ fd.write(base_fdt)
+ return fit
+
+ def make_kernel(filename, text):
+ """Make a sample kernel with test data
+
+        Args:
+            filename: Name of the file to create (within the build directory)
+            text: Marker text to embed in each line of test data
+        Returns:
+            Full path and filename of the kernel created
+ """
+ fname = make_fname(filename)
+ data = ''
+ for i in range(100):
+ data += 'this %s %d is unlikely to boot\n' % (text, i)
+ with open(fname, 'w') as fd:
+ print(data, file=fd)
+ return fname
+
+ def make_ramdisk(filename, text):
+ """Make a sample ramdisk with test data
+
+        Args:
+            filename: Name of the file to create (within the build directory)
+            text: Marker text to embed in each line of test data
+        Returns:
+            Filename of ramdisk created
+ """
+ fname = make_fname(filename)
+ data = ''
+ for i in range(100):
+ data += '%s %d was seldom used in the middle ages\n' % (text, i)
+ with open(fname, 'w') as fd:
+ print(data, file=fd)
+ return fname
+
+ def make_compressed(filename):
+ util.run_and_log(cons, ['gzip', '-f', '-k', filename])
+ return filename + '.gz'
+
+ def find_matching(text, match):
+ """Find a match in a line of text, and return the unmatched line portion
+
+ This is used to extract a part of a line from some text. The match string
+ is used to locate the line - we use the first line that contains that
+ match text.
+
+ Once we find a match, we discard the match string itself from the line,
+ and return what remains.
+
+ TODO: If this function becomes more generally useful, we could change it
+ to use regex and return groups.
+
+ Args:
+ text: Text to check (list of strings, one for each command issued)
+ match: String to search for
+ Return:
+ String containing unmatched portion of line
+ Exceptions:
+ ValueError: If match is not found
+
+ >>> find_matching(['first line:10', 'second_line:20'], 'first line:')
+ '10'
+ >>> find_matching(['first line:10', 'second_line:20'], 'second line')
+ Traceback (most recent call last):
+ ...
+ ValueError: Test aborted
+    >>> find_matching(['first line:10', 'second_line:20'], 'second_line:')
+    '20'
+    >>> find_matching(['first line:10', 'second_line:20\nthird_line:30'],
+    ...               'third_line:')
+ '30'
+ """
+ __tracebackhide__ = True
+ for line in '\n'.join(text).splitlines():
+ pos = line.find(match)
+ if pos != -1:
+ return line[:pos] + line[pos + len(match):]
+
+ pytest.fail("Expected '%s' but not found in output")
+
+ def check_equal(expected_fname, actual_fname, failure_msg):
+ """Check that a file matches its expected contents
+
+ This is always used on out-buffers whose size is decided by the test
+ script anyway, which in some cases may be larger than what we're
+ actually looking for. So it's safe to truncate it to the size of the
+ expected data.
+
+ Args:
+ expected_fname: Filename containing expected contents
+ actual_fname: Filename containing actual contents
+ failure_msg: Message to print on failure
+ """
+ expected_data = read_file(expected_fname)
+ actual_data = read_file(actual_fname)
+ if len(expected_data) < len(actual_data):
+ actual_data = actual_data[:len(expected_data)]
+ assert expected_data == actual_data, failure_msg
+
+ def check_not_equal(expected_fname, actual_fname, failure_msg):
+ """Check that a file does not match its expected contents
+
+ Args:
+ expected_fname: Filename containing expected contents
+ actual_fname: Filename containing actual contents
+ failure_msg: Message to print on failure
+ """
+ expected_data = read_file(expected_fname)
+ actual_data = read_file(actual_fname)
+ assert expected_data != actual_data, failure_msg
+
+ def run_fit_test(mkimage):
+ """Basic sanity check of FIT loading in U-Boot
+
+ TODO: Almost everything:
+ - hash algorithms - invalid hash/contents should be detected
+ - signature algorithms - invalid sig/contents should be detected
+ - compression
+ - checking that errors are detected like:
+ - image overwriting
+ - missing images
+ - invalid configurations
+ - incorrect os/arch/type fields
+ - empty data
+ - images too large/small
+ - invalid FDT (e.g. putting a random binary in instead)
+ - default configuration selection
+ - bootm command line parameters should have desired effect
+ - run code coverage to make sure we are testing all the code
+ """
+ # Set up invariant files
+ control_dtb = make_dtb()
+ kernel = make_kernel('test-kernel.bin', 'kernel')
+ ramdisk = make_ramdisk('test-ramdisk.bin', 'ramdisk')
+ loadables1 = make_kernel('test-loadables1.bin', 'lenrek')
+ loadables2 = make_ramdisk('test-loadables2.bin', 'ksidmar')
+ kernel_out = make_fname('kernel-out.bin')
+ fdt = make_fname('u-boot.dtb')
+ fdt_out = make_fname('fdt-out.dtb')
+ ramdisk_out = make_fname('ramdisk-out.bin')
+ loadables1_out = make_fname('loadables1-out.bin')
+ loadables2_out = make_fname('loadables2-out.bin')
+
+ # Set up basic parameters with default values
+ params = {
+ 'fit_addr' : 0x1000,
+
+ 'kernel' : kernel,
+ 'kernel_out' : kernel_out,
+ 'kernel_addr' : 0x40000,
+ 'kernel_size' : filesize(kernel),
+
+ 'fdt' : fdt,
+ 'fdt_out' : fdt_out,
+ 'fdt_addr' : 0x80000,
+ 'fdt_size' : filesize(control_dtb),
+ 'fdt_load' : '',
+
+ 'ramdisk' : ramdisk,
+ 'ramdisk_out' : ramdisk_out,
+ 'ramdisk_addr' : 0xc0000,
+ 'ramdisk_size' : filesize(ramdisk),
+ 'ramdisk_load' : '',
+ 'ramdisk_config' : '',
+
+ 'loadables1' : loadables1,
+ 'loadables1_out' : loadables1_out,
+ 'loadables1_addr' : 0x100000,
+ 'loadables1_size' : filesize(loadables1),
+ 'loadables1_load' : '',
+
+ 'loadables2' : loadables2,
+ 'loadables2_out' : loadables2_out,
+ 'loadables2_addr' : 0x140000,
+ 'loadables2_size' : filesize(loadables2),
+ 'loadables2_load' : '',
+
+ 'loadables_config' : '',
+ 'compression' : 'none',
+ }
+
+ # Make a basic FIT and a script to load it
+ fit = make_fit(mkimage, params)
+ params['fit'] = fit
+ cmd = base_script % params
+
+ # First check that we can load a kernel
+ # We could perhaps reduce duplication with some loss of readability
+ cons.config.dtb = control_dtb
+ cons.restart_uboot()
+ with cons.log.section('Kernel load'):
+ output = cons.run_command_list(cmd.splitlines())
+ check_equal(kernel, kernel_out, 'Kernel not loaded')
+ check_not_equal(control_dtb, fdt_out,
+ 'FDT loaded but should be ignored')
+ check_not_equal(ramdisk, ramdisk_out,
+ 'Ramdisk loaded but should not be')
+
+ # Find out the offset in the FIT where U-Boot has found the FDT
+ line = find_matching(output, 'Booting using the fdt blob at ')
+ fit_offset = int(line, 16) - params['fit_addr']
+ fdt_magic = struct.pack('>L', 0xd00dfeed)
+ data = read_file(fit)
+
+ # Now find where it actually is in the FIT (skip the first word)
+ real_fit_offset = data.find(fdt_magic, 4)
+ assert fit_offset == real_fit_offset, (
+ 'U-Boot loaded FDT from offset %#x, FDT is actually at %#x' %
+ (fit_offset, real_fit_offset))
+
+ # Now a kernel and an FDT
+ with cons.log.section('Kernel + FDT load'):
+ params['fdt_load'] = 'load = <%#x>;' % params['fdt_addr']
+ fit = make_fit(mkimage, params)
+ cons.restart_uboot()
+ output = cons.run_command_list(cmd.splitlines())
+ check_equal(kernel, kernel_out, 'Kernel not loaded')
+ check_equal(control_dtb, fdt_out, 'FDT not loaded')
+ check_not_equal(ramdisk, ramdisk_out,
+ 'Ramdisk loaded but should not be')
+
+ # Try a ramdisk
+ with cons.log.section('Kernel + FDT + Ramdisk load'):
+ params['ramdisk_config'] = 'ramdisk = "ramdisk@1";'
+ params['ramdisk_load'] = 'load = <%#x>;' % params['ramdisk_addr']
+ fit = make_fit(mkimage, params)
+ cons.restart_uboot()
+ output = cons.run_command_list(cmd.splitlines())
+ check_equal(ramdisk, ramdisk_out, 'Ramdisk not loaded')
+
+ # Configuration with some Loadables
+ with cons.log.section('Kernel + FDT + Ramdisk load + Loadables'):
+ params['loadables_config'] = 'loadables = "kernel@2", "ramdisk@2";'
+ params['loadables1_load'] = ('load = <%#x>;' %
+ params['loadables1_addr'])
+ params['loadables2_load'] = ('load = <%#x>;' %
+ params['loadables2_addr'])
+ fit = make_fit(mkimage, params)
+ cons.restart_uboot()
+ output = cons.run_command_list(cmd.splitlines())
+ check_equal(loadables1, loadables1_out,
+ 'Loadables1 (kernel) not loaded')
+ check_equal(loadables2, loadables2_out,
+ 'Loadables2 (ramdisk) not loaded')
+
+ # Kernel, FDT and Ramdisk all compressed
+ with cons.log.section('(Kernel + FDT + Ramdisk) compressed'):
+ params['compression'] = 'gzip'
+ params['kernel'] = make_compressed(kernel)
+ params['fdt'] = make_compressed(fdt)
+ params['ramdisk'] = make_compressed(ramdisk)
+ fit = make_fit(mkimage, params)
+ cons.restart_uboot()
+ output = cons.run_command_list(cmd.splitlines())
+ check_equal(kernel, kernel_out, 'Kernel not loaded')
+ check_equal(control_dtb, fdt_out, 'FDT not loaded')
+ check_not_equal(ramdisk, ramdisk_out, 'Ramdisk got decompressed?')
+            check_equal(ramdisk + '.gz', ramdisk_out, 'Ramdisk not loaded')
+
+
+ cons = u_boot_console
+ try:
+ # We need to use our own device tree file. Remember to restore it
+ # afterwards.
+ old_dtb = cons.config.dtb
+ mkimage = cons.config.build_dir + '/tools/mkimage'
+ run_fit_test(mkimage)
+ finally:
+ # Go back to the original U-Boot with the correct dtb.
+ cons.config.dtb = old_dtb
+ cons.restart_uboot()
diff --git a/test/py/tests/test_fpga.py b/test/py/tests/test_fpga.py
new file mode 100644
index 00000000..ca7ef8ea
--- /dev/null
+++ b/test/py/tests/test_fpga.py
@@ -0,0 +1,565 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2018, Xilinx Inc.
+#
+# Michal Simek
+# Siva Durga Prasad Paladugu
+
+import pytest
+import re
+import random
+import u_boot_utils
+
+"""
+Note: This test relies on boardenv_* containing configuration values to define
+the network available and files to be used for testing. Without this, this test
+will be automatically skipped.
+
+For example:
+
+# True if a DHCP server is attached to the network, and should be tested.
+env__net_dhcp_server = True
+
+# A list of environment variables that should be set in order to configure a
+# static IP. In this test case we at least need 'serverip' so that tftpboot
+# can fetch the required files.
+env__net_static_env_vars = [
+ ('ipaddr', '10.0.0.100'),
+ ('netmask', '255.255.255.0'),
+ ('serverip', '10.0.0.1'),
+]
+
+# Details regarding the files that may be read from a TFTP server.
+env__fpga_secure_readable_file = {
+ 'fn': 'auth_bhdr_ppk1_bit.bin',
+ 'enckupfn': 'auth_bhdr_enc_kup_load_bit.bin',
+ 'addr': 0x1000000,
+ 'keyaddr': 0x100000,
+ 'keyfn': 'key.txt',
+}
+
+env__fpga_under_test = {
+ 'dev': 0,
+ 'addr' : 0x1000000,
+ 'bitstream_load': 'compress.bin',
+ 'bitstream_load_size': 1831960,
+ 'bitstream_loadp': 'compress_pr.bin',
+ 'bitstream_loadp_size': 423352,
+ 'bitstream_loadb': 'compress.bit',
+ 'bitstream_loadb_size': 1832086,
+ 'bitstream_loadbp': 'compress_pr.bit',
+ 'bitstream_loadbp_size': 423491,
+ 'mkimage_legacy': 'download.ub',
+ 'mkimage_legacy_size': 13321468,
+ 'mkimage_legacy_gz': 'download.gz.ub',
+ 'mkimage_legacy_gz_size': 53632,
+ 'mkimage_fit': 'download-fit.ub',
+ 'mkimage_fit_size': 13322784,
+ 'loadfs': 'mmc 0 compress.bin',
+ 'loadfs_size': 1831960,
+ 'loadfs_block_size': 0x10000,
+}
+"""
+
+import test_net
+
+def check_dev(u_boot_console):
+ f = u_boot_console.config.env.get('env__fpga_under_test', None)
+ if not f:
+ pytest.skip('No FPGA to test')
+
+ dev = f.get('dev', -1)
+ if dev < 0:
+ pytest.fail('No dev specified via env__fpga_under_test')
+
+ return dev, f
+
+def load_file_from_var(u_boot_console, name):
+ dev, f = check_dev(u_boot_console)
+
+ addr = f.get('addr', -1)
+ if addr < 0:
+ pytest.fail('No address specified via env__fpga_under_test')
+
+ test_net.test_net_dhcp(u_boot_console)
+ test_net.test_net_setup_static(u_boot_console)
+ bit = f['%s' % (name)]
+ bit_size = f['%s_size' % (name)]
+
+ expected_tftp = 'Bytes transferred = %d' % bit_size
+ output = u_boot_console.run_command('tftpboot %x %s' % (addr, bit))
+ assert expected_tftp in output
+
+ return f, dev, addr, bit, bit_size
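+
+# With the example environment above, load_file_from_var(u_boot_console,
+# 'bitstream_load') would tftp 'compress.bin' to 0x1000000 and expect
+# 'Bytes transferred = 1831960' in the output.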
+
+###### FPGA FAIL test ######
+expected_usage = 'fpga - loadable FPGA image support'
+
+@pytest.mark.xfail
+@pytest.mark.buildconfigspec('cmd_fpga')
+def test_fpga_fail(u_boot_console):
+    # Test an invalid fpga subcommand
+ expected = 'fpga: non existing command'
+ output = u_boot_console.run_command('fpga broken 0')
+ #assert expected in output
+ assert expected_usage in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+def test_fpga_help(u_boot_console):
+ # Just show help
+ output = u_boot_console.run_command('fpga')
+ assert expected_usage in output
+
+
+###### FPGA DUMP tests ######
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+def test_fpga_dump(u_boot_console):
+ pytest.skip('Not implemented now')
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+def test_fpga_dump_variable(u_boot_console):
+ # Same as above but via "fpga" variable
+ pytest.skip('Not implemented now')
+
+###### FPGA INFO tests ######
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+def test_fpga_info_fail(u_boot_console):
+ # Maybe this can be skipped completely
+ dev, f = check_dev(u_boot_console)
+
+ # Multiple parameters to fpga info should fail
+ expected = 'fpga: more parameters passed'
+ output = u_boot_console.run_command('fpga info 0 0')
+ #assert expected in output
+ assert expected_usage in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+def test_fpga_info_list(u_boot_console):
+ # Maybe this can be skipped completely
+ dev, f = check_dev(u_boot_console)
+
+    # The code is designed such that if no fpga dev is passed, it should
+    # return the list of all fpga devices in the system
+ u_boot_console.run_command('setenv fpga')
+ output = u_boot_console.run_command('fpga info')
+ assert expected_usage not in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+def test_fpga_info(u_boot_console):
+ dev, f = check_dev(u_boot_console)
+
+ output = u_boot_console.run_command('fpga info %x' % (dev))
+ assert expected_usage not in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+def test_fpga_info_variable(u_boot_console):
+ dev, f = check_dev(u_boot_console)
+
+ #
+    # The fpga variable stores the device number, so it doesn't need to be passed
+ #
+ u_boot_console.run_command('setenv fpga %x' % (dev))
+
+ output = u_boot_console.run_command('fpga info')
+ # Variable cleanup
+ u_boot_console.run_command('setenv fpga')
+ assert expected_usage not in output
+
+###### FPGA LOAD tests ######
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_load_fail(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_load')
+
+ for cmd in ['dump', 'load', 'loadb']:
+ # missing dev parameter
+ expected = 'fpga: incorrect parameters passed'
+ output = u_boot_console.run_command('fpga %s %x $filesize' % (cmd, addr))
+ #assert expected in output
+ assert expected_usage in output
+
+ # more parameters - 0 at the end
+ expected = 'fpga: more parameters passed'
+ output = u_boot_console.run_command('fpga %s %x %x $filesize 0' % (cmd, dev, addr))
+ #assert expected in output
+ assert expected_usage in output
+
+ # 0 address
+ expected = 'fpga: zero fpga_data address'
+ output = u_boot_console.run_command('fpga %s %x 0 $filesize' % (cmd, dev))
+ #assert expected in output
+ assert expected_usage in output
+
+ # 0 filesize
+ expected = 'fpga: zero size'
+ output = u_boot_console.run_command('fpga %s %x %x 0' % (cmd, dev, addr))
+ #assert expected in output
+ assert expected_usage in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_load(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_load')
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga load %x %x $filesize && echo %s' % (dev, addr, expected_text))
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadp')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_loadp(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_load')
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga load %x %x $filesize && echo %s' % (dev, addr, expected_text))
+ assert expected_text in output
+
+    # Also load a partial bitstream
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_loadp')
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadp %x %x $filesize && echo %s' % (dev, addr, expected_text))
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_loadb(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_loadb')
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadb %x %x $filesize && echo %s' % (dev, addr, expected_text))
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadbp')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_loadbp(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_loadb')
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadb %x %x $filesize && echo %s' % (dev, addr, expected_text))
+ assert expected_text in output
+
+    # Also load a partial bitstream in .bit format
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_loadbp')
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadbp %x %x $filesize && echo %s' % (dev, addr, expected_text))
+ assert expected_text in output
+
+###### FPGA LOADMK tests ######
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('cmd_echo')
+@pytest.mark.buildconfigspec('image_format_legacy')
+def test_fpga_loadmk_fail(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy')
+
+ u_boot_console.run_command('imi %x' % (addr))
+
+ # load image but pass incorrect address to show error message
+ expected = 'Unknown image type'
+ output = u_boot_console.run_command('fpga loadmk %x %x' % (dev, addr + 0x10))
+ assert expected in output
+
+    # Pass more parameters than the command expects - 0 at the end
+ output = u_boot_console.run_command('fpga loadmk %x %x 0' % (dev, addr))
+ #assert expected in output
+ assert expected_usage in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('cmd_echo')
+@pytest.mark.buildconfigspec('image_format_legacy')
+def test_fpga_loadmk_legacy(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy')
+
+ u_boot_console.run_command('imi %x' % (addr))
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadmk %x %x && echo %s' % (dev, addr, expected_text))
+ assert expected_text in output
+
+@pytest.mark.xfail
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('cmd_echo')
+@pytest.mark.buildconfigspec('image_format_legacy')
+def test_fpga_loadmk_legacy_variable_fpga(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy')
+
+ u_boot_console.run_command('imi %x' % (addr))
+
+ u_boot_console.run_command('setenv fpga %x' % (dev))
+
+    # This test case should cover a case which looks supported, but where the
+    # dev pointer is broken by loading the mkimage address
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadmk %x && echo %s' % (addr, expected_text))
+ u_boot_console.run_command('setenv fpga')
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('cmd_echo')
+@pytest.mark.buildconfigspec('image_format_legacy')
+def test_fpga_loadmk_legacy_variable_fpgadata(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy')
+
+ u_boot_console.run_command('imi %x' % (addr))
+
+ u_boot_console.run_command('setenv fpgadata %x' % (addr))
+
+    # This test case should cover a case which looks supported, but where the
+    # dev pointer is broken by loading the mkimage address
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadmk %x && echo %s' % (dev, expected_text))
+ u_boot_console.run_command('setenv fpgadata')
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('cmd_echo')
+@pytest.mark.buildconfigspec('image_format_legacy')
+def test_fpga_loadmk_legacy_variable(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy')
+
+ u_boot_console.run_command('imi %x' % (addr))
+
+ u_boot_console.run_command('setenv fpga %x' % (dev))
+ u_boot_console.run_command('setenv fpgadata %x' % (addr))
+
+    # This test case should cover a case which looks supported, but where the
+    # dev pointer is broken by loading the mkimage address
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadmk && echo %s' % (expected_text))
+ u_boot_console.run_command('setenv fpga')
+ u_boot_console.run_command('setenv fpgadata')
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('cmd_echo')
+@pytest.mark.buildconfigspec('image_format_legacy')
+@pytest.mark.buildconfigspec('gzip')
+def test_fpga_loadmk_legacy_gz(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy_gz')
+
+ u_boot_console.run_command('imi %x' % (addr))
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadmk %x %x && echo %s' % (dev, addr, expected_text))
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('fit')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_loadmk_fit_external(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_fit_external')
+
+ u_boot_console.run_command('imi %x' % (addr))
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadmk %x %x:fpga && echo %s' % (dev, addr, expected_text))
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('fit')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_loadmk_fit(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_fit')
+
+ u_boot_console.run_command('imi %x' % (addr))
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadmk %x %x:fpga && echo %s' % (dev, addr, expected_text))
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('fit')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_loadmk_fit_variable_fpga(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_fit')
+
+ u_boot_console.run_command('imi %x' % (addr))
+ # FIXME this should fail - broken support in past
+ u_boot_console.run_command('setenv fpga %x' % (dev))
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadmk %x:fpga && echo %s' % (addr, expected_text))
+ u_boot_console.run_command('setenv fpga')
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('fit')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_loadmk_fit_variable_fpgadata(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_fit')
+
+ u_boot_console.run_command('imi %x' % (addr))
+ # FIXME this should fail - broken support in past
+ u_boot_console.run_command('setenv fpgadata %x:fpga' % (addr))
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadmk %x && echo %s' % (dev, expected_text))
+ u_boot_console.run_command('setenv fpgadata')
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_loadmk')
+@pytest.mark.buildconfigspec('fit')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_loadmk_fit_variable(u_boot_console):
+ f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_fit')
+
+ u_boot_console.run_command('imi %x' % (addr))
+
+ u_boot_console.run_command('setenv fpga %x' % (dev))
+ u_boot_console.run_command('setenv fpgadata %x:fpga' % (addr))
+
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadmk && echo %s' % (expected_text))
+ u_boot_console.run_command('setenv fpga')
+ u_boot_console.run_command('setenv fpgadata')
+ assert expected_text in output
+
+###### FPGA LOAD tests ######
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+def test_fpga_loadfs_fail(u_boot_console):
+ dev, f = check_dev(u_boot_console)
+
+ addr = f.get('addr', -1)
+ if addr < 0:
+ pytest.fail('No address specified via env__fpga_under_test')
+
+ bit = f['loadfs']
+ bit_size = f['loadfs_size']
+ block_size = f['loadfs_block_size']
+
+    # fewer params - dev number removed
+ expected = 'fpga: incorrect parameters passed'
+ output = u_boot_console.run_command('fpga loadfs %x %x %x %s' % (addr, bit_size, block_size, bit))
+ #assert expected in output
+ assert expected_usage in output
+
+ # one more param - 0 at the end
+    # This is the longest command, which is why there is no message from cmd/fpga.c
+ output = u_boot_console.run_command('fpga loadfs %x %x %x %x %s 0' % (dev, addr, bit_size, block_size, bit))
+ assert expected_usage in output
+
+    # zero address
+ expected = 'fpga: zero fpga_data address'
+ output = u_boot_console.run_command('fpga loadfs %x %x %x %x %s' % (dev, 0, bit_size, block_size, bit))
+ #assert expected in output
+ assert expected_usage in output
+
+ # bit_size 0
+ expected = 'fpga: zero size'
+ output = u_boot_console.run_command('fpga loadfs %x %x %x %x %s' % (dev, addr, 0, block_size, bit))
+ #assert expected in output
+ assert expected_usage in output
+
+ # block size 0
+    # FIXME this should pass but it is failing too
+ output = u_boot_console.run_command('fpga loadfs %x %x %x %x %s' % (dev, addr, bit_size, 0, bit))
+ assert expected_usage in output
+
+    # non-existent bitstream name
+ expected = 'Unable to read file noname'
+ output = u_boot_console.run_command('fpga loadfs %x %x %x %x mmc 0 noname' % (dev, addr, bit_size, block_size))
+ assert expected in output
+ assert expected_usage in output
+
+ # -1 dev number
+ expected = 'fpga_fsload: Invalid device number -1'
+ output = u_boot_console.run_command('fpga loadfs %d %x %x %x mmc 0 noname' % (-1, addr, bit_size, block_size))
+ assert expected in output
+ assert expected_usage in output
+
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_echo')
+def test_fpga_loadfs(u_boot_console):
+ dev, f = check_dev(u_boot_console)
+
+ addr = f.get('addr', -1)
+ if addr < 0:
+ pytest.fail('No address specified via env__fpga_under_test')
+
+ bit = f['loadfs']
+ bit_size = f['loadfs_size']
+ block_size = f['loadfs_block_size']
+
+ # This should be done better
+ expected_text = 'FPGA loaded successfully'
+ output = u_boot_console.run_command('fpga loadfs %x %x %x %x %s && echo %s' % (dev, addr, bit_size, block_size, bit, expected_text))
+ assert expected_text in output
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_load_secure')
+@pytest.mark.buildconfigspec('cmd_net')
+@pytest.mark.buildconfigspec('cmd_dhcp')
+@pytest.mark.buildconfigspec('net')
+def test_fpga_secure_bit_auth(u_boot_console):
+
+ test_net.test_net_dhcp(u_boot_console)
+ test_net.test_net_setup_static(u_boot_console)
+
+ f = u_boot_console.config.env.get('env__fpga_secure_readable_file', None)
+ if not f:
+ pytest.skip('No TFTP readable file to read')
+
+ addr = f.get('addr', None)
+ if not addr:
+ addr = u_boot_utils.find_ram_base(u_boot_console)
+
+ expected_tftp = 'Bytes transferred = '
+ fn = f['fn']
+ output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn))
+ assert expected_tftp in output
+
+ expected_zynqmpsecure = 'Bitstream successfully loaded'
+ output = u_boot_console.run_command('fpga loads 0 %x $filesize 0 2' % (addr))
+ assert expected_zynqmpsecure in output
+
+
+@pytest.mark.buildconfigspec('cmd_fpga')
+@pytest.mark.buildconfigspec('cmd_fpga_load_secure')
+@pytest.mark.buildconfigspec('cmd_net')
+@pytest.mark.buildconfigspec('cmd_dhcp')
+@pytest.mark.buildconfigspec('net')
+def test_fpga_secure_bit_img_auth_kup(u_boot_console):
+
+ test_net.test_net_dhcp(u_boot_console)
+ test_net.test_net_setup_static(u_boot_console)
+
+ f = u_boot_console.config.env.get('env__fpga_secure_readable_file', None)
+ if not f:
+ pytest.skip('No TFTP readable file to read')
+
+ keyaddr = f.get('keyaddr', None)
+ if not keyaddr:
+        keyaddr = u_boot_utils.find_ram_base(u_boot_console)
+ expected_tftp = 'Bytes transferred = '
+ keyfn = f['keyfn']
+ output = u_boot_console.run_command('tftpboot %x %s' % (keyaddr, keyfn))
+ assert expected_tftp in output
+
+ addr = f.get('addr', None)
+ if not addr:
+ addr = u_boot_utils.find_ram_base(u_boot_console)
+ expected_tftp = 'Bytes transferred = '
+ fn = f['enckupfn']
+ output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn))
+ assert expected_tftp in output
+
+ expected_zynqmpsecure = 'Bitstream successfully loaded'
+ output = u_boot_console.run_command('fpga loads 0 %x $filesize 0 1 %x' % (addr, keyaddr))
+ assert expected_zynqmpsecure in output
diff --git a/test/py/tests/test_fs/conftest.py b/test/py/tests/test_fs/conftest.py
new file mode 100644
index 00000000..1949f916
--- /dev/null
+++ b/test/py/tests/test_fs/conftest.py
@@ -0,0 +1,602 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018, Linaro Limited
+# Author: Takahiro Akashi <takahiro.akashi@linaro.org>
+
+import os
+import os.path
+import pytest
+import re
+from subprocess import call, check_call, check_output, CalledProcessError
+from fstest_defs import *
+
+supported_fs_basic = ['fat16', 'fat32', 'ext4']
+supported_fs_ext = ['fat16', 'fat32']
+supported_fs_mkdir = ['fat16', 'fat32']
+supported_fs_unlink = ['fat16', 'fat32']
+supported_fs_symlink = ['ext4']
+
+#
+# Filesystem test specific setup
+#
+def pytest_addoption(parser):
+ """Enable --fs-type option.
+
+ See pytest_configure() about how it works.
+
+ Args:
+ parser: Pytest command-line parser.
+
+ Returns:
+ Nothing.
+ """
+ parser.addoption('--fs-type', action='append', default=None,
+ help='Targeting Filesystem Types')
+
+def pytest_configure(config):
+ """Restrict a file system(s) to be tested.
+
+ A file system explicitly named with --fs-type option is selected
+ if it belongs to a default supported_fs_xxx list.
+ Multiple options can be specified.
+
+ Args:
+ config: Pytest configuration.
+
+ Returns:
+ Nothing.
+ """
+ global supported_fs_basic
+ global supported_fs_ext
+ global supported_fs_mkdir
+ global supported_fs_unlink
+ global supported_fs_symlink
+
+ def intersect(listA, listB):
+ return [x for x in listA if x in listB]
+
+ supported_fs = config.getoption('fs_type')
+ if supported_fs:
+ print('*** FS TYPE modified: %s' % supported_fs)
+ supported_fs_basic = intersect(supported_fs, supported_fs_basic)
+ supported_fs_ext = intersect(supported_fs, supported_fs_ext)
+ supported_fs_mkdir = intersect(supported_fs, supported_fs_mkdir)
+ supported_fs_unlink = intersect(supported_fs, supported_fs_unlink)
+ supported_fs_symlink = intersect(supported_fs, supported_fs_symlink)
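+
+# For example (hypothetical invocation; the wrapper options may differ):
+#   ./test/py/test.py --bd sandbox -k test_fs --fs-type fat32 --fs-type ext4
+# trims each supported_fs_* list above to its intersection with
+# ['fat32', 'ext4'], so e.g. supported_fs_mkdir becomes ['fat32'].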
+
+def pytest_generate_tests(metafunc):
+ """Parametrize fixtures, fs_obj_xxx
+
+    Each fixture will be parametrized with a corresponding supported_fs_xxx
+ list.
+
+ Args:
+ metafunc: Pytest test function.
+
+ Returns:
+ Nothing.
+ """
+ if 'fs_obj_basic' in metafunc.fixturenames:
+ metafunc.parametrize('fs_obj_basic', supported_fs_basic,
+ indirect=True, scope='module')
+ if 'fs_obj_ext' in metafunc.fixturenames:
+ metafunc.parametrize('fs_obj_ext', supported_fs_ext,
+ indirect=True, scope='module')
+ if 'fs_obj_mkdir' in metafunc.fixturenames:
+ metafunc.parametrize('fs_obj_mkdir', supported_fs_mkdir,
+ indirect=True, scope='module')
+ if 'fs_obj_unlink' in metafunc.fixturenames:
+ metafunc.parametrize('fs_obj_unlink', supported_fs_unlink,
+ indirect=True, scope='module')
+ if 'fs_obj_symlink' in metafunc.fixturenames:
+ metafunc.parametrize('fs_obj_symlink', supported_fs_symlink,
+ indirect=True, scope='module')
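+
+# With the default lists above, each test in e.g. TestFsBasic therefore runs
+# once per entry in supported_fs_basic, i.e. for fat16, fat32 and ext4
+# (unless restricted with --fs-type).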
+
+#
+# Helper functions
+#
+def fstype_to_ubname(fs_type):
+ """Convert a file system type to an U-boot specific string
+
+ A generated string can be used as part of file system related commands
+ or a config name in u-boot. Currently fat16 and fat32 are handled
+ specifically.
+
+ Args:
+ fs_type: File system type.
+
+ Return:
+ A corresponding string for file system type.
+ """
+ if re.match('fat', fs_type):
+ return 'fat'
+ else:
+ return fs_type
+
+def check_ubconfig(config, fs_type):
+ """Check whether a file system is enabled in u-boot configuration.
+
+ This function is assumed to be called in a fixture function so that
+ the whole test cases will be skipped if a given file system is not
+ enabled.
+
+ Args:
+ fs_type: File system type.
+
+ Return:
+ Nothing.
+ """
+ if not config.buildconfig.get('config_cmd_%s' % fs_type, None):
+ pytest.skip('.config feature "CMD_%s" not enabled' % fs_type.upper())
+ if not config.buildconfig.get('config_%s_write' % fs_type, None):
+ pytest.skip('.config feature "%s_WRITE" not enabled'
+ % fs_type.upper())
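+
+# For fs_type 'ext4', for instance, the two checks above correspond to the
+# CONFIG_CMD_EXT4 and CONFIG_EXT4_WRITE options in the U-Boot .config; the fat
+# variants map to CONFIG_CMD_FAT and CONFIG_FAT_WRITE.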
+
+def mk_fs(config, fs_type, size, id):
+ """Create a file system volume.
+
+    Args:
+        config: U-boot configuration.
+        fs_type: File system type.
+        size: Size of file system in bytes.
+        id: Prefix string of volume's file name.
+
+    Return:
+        Volume file name (full path of the image created).
+ """
+ fs_img = '%s.%s.img' % (id, fs_type)
+ fs_img = config.persistent_data_dir + '/' + fs_img
+
+ if fs_type == 'fat16':
+ mkfs_opt = '-F 16'
+ elif fs_type == 'fat32':
+ mkfs_opt = '-F 32'
+ elif fs_type == 'ext4':
+ mkfs_opt = '-O ^metadata_csum'
+ else:
+ mkfs_opt = ''
+
+ if re.match('fat', fs_type):
+ fs_lnxtype = 'vfat'
+ else:
+ fs_lnxtype = fs_type
+
+    # Round up to a whole number of 1 MiB blocks for dd
+    count = (size + 1048576 - 1) // 1048576
+
+ try:
+ check_call('rm -f %s' % fs_img, shell=True)
+ check_call('dd if=/dev/zero of=%s bs=1M count=%d'
+ % (fs_img, count), shell=True)
+ check_call('mkfs.%s %s %s'
+ % (fs_lnxtype, mkfs_opt, fs_img), shell=True)
+ return fs_img
+ except CalledProcessError:
+ call('rm -f %s' % fs_img, shell=True)
+ raise
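+
+# As an example, mk_fs(config, 'fat32', 0x8000000, '128MB') creates a 128 MiB
+# image at <persistent_data_dir>/128MB.fat32.img using 'mkfs.vfat -F 32'.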
+
+# from test/py/conftest.py
+def tool_is_in_path(tool):
+ """Check whether a given command is available on host.
+
+ Args:
+ tool: Command name.
+
+ Return:
+ True if available, False if not.
+ """
+ for path in os.environ['PATH'].split(os.pathsep):
+ fn = os.path.join(path, tool)
+ if os.path.isfile(fn) and os.access(fn, os.X_OK):
+ return True
+ return False
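+
+# Note: on Python 3 the same check could also be written with the standard
+# library, e.g. 'return shutil.which(tool) is not None'.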
+
+fuse_mounted = False
+
+def mount_fs(fs_type, device, mount_point):
+ """Mount a volume.
+
+ Args:
+ fs_type: File system type.
+ device: Volume's file name.
+ mount_point: Mount point.
+
+ Return:
+ Nothing.
+ """
+ global fuse_mounted
+
+ fuse_mounted = False
+ try:
+ if tool_is_in_path('guestmount'):
+ fuse_mounted = True
+ check_call('guestmount -a %s -m /dev/sda %s'
+ % (device, mount_point), shell=True)
+ else:
+ mount_opt = 'loop,rw'
+ if re.match('fat', fs_type):
+ mount_opt += ',umask=0000'
+
+ check_call('sudo mount -o %s %s %s'
+ % (mount_opt, device, mount_point), shell=True)
+
+ # may not be effective for some file systems
+ check_call('sudo chmod a+rw %s' % mount_point, shell=True)
+ except CalledProcessError:
+ raise
+
+def umount_fs(mount_point):
+ """Unmount a volume.
+
+ Args:
+ mount_point: Mount point.
+
+ Return:
+ Nothing.
+ """
+ if fuse_mounted:
+ call('sync')
+ call('guestunmount %s' % mount_point, shell=True)
+ else:
+ call('sudo umount %s' % mount_point, shell=True)
+
+#
+# Fixture for basic fs test
+# derived from test/fs/fs-test.sh
+#
+# NOTE: yield_fixture has been deprecated since pytest 3.0
+@pytest.yield_fixture()
+def fs_obj_basic(request, u_boot_config):
+ """Set up a file system to be used in basic fs test.
+
+ Args:
+ request: Pytest request object.
+ u_boot_config: U-boot configuration.
+
+ Return:
+ A fixture for basic fs test, i.e. a triplet of file system type,
+ volume file name and a list of MD5 hashes.
+ """
+ fs_type = request.param
+ fs_img = ''
+
+ fs_ubtype = fstype_to_ubname(fs_type)
+ check_ubconfig(u_boot_config, fs_ubtype)
+
+ mount_dir = u_boot_config.persistent_data_dir + '/mnt'
+
+ small_file = mount_dir + '/' + SMALL_FILE
+ big_file = mount_dir + '/' + BIG_FILE
+
+ try:
+
+ # 3GiB volume
+ fs_img = mk_fs(u_boot_config, fs_type, 0xc0000000, '3GB')
+
+ # Mount the image so we can populate it.
+ check_call('mkdir -p %s' % mount_dir, shell=True)
+ mount_fs(fs_type, fs_img, mount_dir)
+
+ # Create a subdirectory.
+ check_call('mkdir %s/SUBDIR' % mount_dir, shell=True)
+
+ # Create big file in this image.
+        # Note that we work only on the first 1MB, a couple of MBs around the
+        # 2GB boundary and the last 1MB of the huge 2.5GB file, so just put
+        # random values in those areas only.
+ check_call('dd if=/dev/urandom of=%s bs=1M count=1'
+ % big_file, shell=True)
+ check_call('dd if=/dev/urandom of=%s bs=1M count=2 seek=2047'
+ % big_file, shell=True)
+ check_call('dd if=/dev/urandom of=%s bs=1M count=1 seek=2499'
+ % big_file, shell=True)
+
+ # Create a small file in this image.
+ check_call('dd if=/dev/urandom of=%s bs=1M count=1'
+ % small_file, shell=True)
+
+ # Delete the small file copies which possibly are written as part of a
+ # previous test.
+ # check_call('rm -f "%s.w"' % MB1, shell=True)
+ # check_call('rm -f "%s.w2"' % MB1, shell=True)
+
+ # Generate the md5sums of reads that we will test against small file
+ out = check_output(
+ 'dd if=%s bs=1M skip=0 count=1 2> /dev/null | md5sum'
+ % small_file, shell=True).decode()
+ md5val = [ out.split()[0] ]
+
+ # Generate the md5sums of reads that we will test against big file
+ # One from beginning of file.
+ out = check_output(
+ 'dd if=%s bs=1M skip=0 count=1 2> /dev/null | md5sum'
+ % big_file, shell=True).decode()
+ md5val.append(out.split()[0])
+
+ # One from end of file.
+ out = check_output(
+ 'dd if=%s bs=1M skip=2499 count=1 2> /dev/null | md5sum'
+ % big_file, shell=True).decode()
+ md5val.append(out.split()[0])
+
+ # One from the last 1MB chunk of 2GB
+ out = check_output(
+ 'dd if=%s bs=1M skip=2047 count=1 2> /dev/null | md5sum'
+ % big_file, shell=True).decode()
+ md5val.append(out.split()[0])
+
+ # One from the start 1MB chunk from 2GB
+ out = check_output(
+ 'dd if=%s bs=1M skip=2048 count=1 2> /dev/null | md5sum'
+ % big_file, shell=True).decode()
+ md5val.append(out.split()[0])
+
+ # One 1MB chunk crossing the 2GB boundary
+ out = check_output(
+ 'dd if=%s bs=512K skip=4095 count=2 2> /dev/null | md5sum'
+ % big_file, shell=True).decode()
+ md5val.append(out.split()[0])
+
+ umount_fs(mount_dir)
+ except CalledProcessError:
+ pytest.skip('Setup failed for filesystem: ' + fs_type)
+ return
+ else:
+ yield [fs_ubtype, fs_img, md5val]
+ finally:
+ umount_fs(mount_dir)
+ call('rmdir %s' % mount_dir, shell=True)
+ if fs_img:
+ call('rm -f %s' % fs_img, shell=True)
+
+#
+# Fixture for extended fs test
+#
+# NOTE: yield_fixture has been deprecated since pytest 3.0
+@pytest.yield_fixture()
+def fs_obj_ext(request, u_boot_config):
+ """Set up a file system to be used in extended fs test.
+
+ Args:
+ request: Pytest request object.
+ u_boot_config: U-boot configuration.
+
+ Return:
+ A fixture for extended fs test, i.e. a triplet of file system type,
+ volume file name and a list of MD5 hashes.
+ """
+ fs_type = request.param
+ fs_img = ''
+
+ fs_ubtype = fstype_to_ubname(fs_type)
+ check_ubconfig(u_boot_config, fs_ubtype)
+
+ mount_dir = u_boot_config.persistent_data_dir + '/mnt'
+
+ min_file = mount_dir + '/' + MIN_FILE
+ tmp_file = mount_dir + '/tmpfile'
+
+ try:
+
+ # 128MiB volume
+ fs_img = mk_fs(u_boot_config, fs_type, 0x8000000, '128MB')
+
+ # Mount the image so we can populate it.
+ check_call('mkdir -p %s' % mount_dir, shell=True)
+ mount_fs(fs_type, fs_img, mount_dir)
+
+ # Create a test directory
+ check_call('mkdir %s/dir1' % mount_dir, shell=True)
+
+ # Create a small file and calculate md5
+ check_call('dd if=/dev/urandom of=%s bs=1K count=20'
+ % min_file, shell=True)
+ out = check_output(
+ 'dd if=%s bs=1K 2> /dev/null | md5sum'
+ % min_file, shell=True).decode()
+ md5val = [ out.split()[0] ]
+
+ # Calculate md5sum of Test Case 4
+ check_call('dd if=%s of=%s bs=1K count=20'
+ % (min_file, tmp_file), shell=True)
+ check_call('dd if=%s of=%s bs=1K seek=5 count=20'
+ % (min_file, tmp_file), shell=True)
+ out = check_output('dd if=%s bs=1K 2> /dev/null | md5sum'
+ % tmp_file, shell=True).decode()
+ md5val.append(out.split()[0])
+
+ # Calculate md5sum of Test Case 5
+ check_call('dd if=%s of=%s bs=1K count=20'
+ % (min_file, tmp_file), shell=True)
+ check_call('dd if=%s of=%s bs=1K seek=5 count=5'
+ % (min_file, tmp_file), shell=True)
+ out = check_output('dd if=%s bs=1K 2> /dev/null | md5sum'
+ % tmp_file, shell=True).decode()
+ md5val.append(out.split()[0])
+
+ # Calculate md5sum of Test Case 7
+ check_call('dd if=%s of=%s bs=1K count=20'
+ % (min_file, tmp_file), shell=True)
+ check_call('dd if=%s of=%s bs=1K seek=20 count=20'
+ % (min_file, tmp_file), shell=True)
+ out = check_output('dd if=%s bs=1K 2> /dev/null | md5sum'
+ % tmp_file, shell=True).decode()
+ md5val.append(out.split()[0])
+
+ check_call('rm %s' % tmp_file, shell=True)
+ umount_fs(mount_dir)
+ except CalledProcessError:
+ pytest.skip('Setup failed for filesystem: ' + fs_type)
+ return
+ else:
+ yield [fs_ubtype, fs_img, md5val]
+ finally:
+ umount_fs(mount_dir)
+ call('rmdir %s' % mount_dir, shell=True)
+ if fs_img:
+ call('rm -f %s' % fs_img, shell=True)
+
+#
+# Fixture for mkdir test
+#
+# NOTE: yield_fixture has been deprecated since pytest 3.0
+@pytest.yield_fixture()
+def fs_obj_mkdir(request, u_boot_config):
+ """Set up a file system to be used in mkdir test.
+
+ Args:
+ request: Pytest request object.
+ u_boot_config: U-boot configuration.
+
+ Return:
+ A fixture for mkdir test, i.e. a duplet of file system type and
+ volume file name.
+ """
+ fs_type = request.param
+ fs_img = ''
+
+ fs_ubtype = fstype_to_ubname(fs_type)
+ check_ubconfig(u_boot_config, fs_ubtype)
+
+ try:
+ # 128MiB volume
+ fs_img = mk_fs(u_boot_config, fs_type, 0x8000000, '128MB')
+    except CalledProcessError:
+ pytest.skip('Setup failed for filesystem: ' + fs_type)
+ else:
+ yield [fs_ubtype, fs_img]
+ finally:
+ if fs_img:
+ call('rm -f %s' % fs_img, shell=True)
+
+#
+# Fixture for unlink test
+#
+# NOTE: yield_fixture has been deprecated since pytest 3.0
+@pytest.yield_fixture()
+def fs_obj_unlink(request, u_boot_config):
+ """Set up a file system to be used in unlink test.
+
+ Args:
+ request: Pytest request object.
+ u_boot_config: U-boot configuration.
+
+ Return:
+ A fixture for unlink test, i.e. a duplet of file system type and
+ volume file name.
+ """
+ fs_type = request.param
+ fs_img = ''
+
+ fs_ubtype = fstype_to_ubname(fs_type)
+ check_ubconfig(u_boot_config, fs_ubtype)
+
+ mount_dir = u_boot_config.persistent_data_dir + '/mnt'
+
+ try:
+
+ # 128MiB volume
+ fs_img = mk_fs(u_boot_config, fs_type, 0x8000000, '128MB')
+
+ # Mount the image so we can populate it.
+ check_call('mkdir -p %s' % mount_dir, shell=True)
+ mount_fs(fs_type, fs_img, mount_dir)
+
+ # Test Case 1 & 3
+ check_call('mkdir %s/dir1' % mount_dir, shell=True)
+ check_call('dd if=/dev/urandom of=%s/dir1/file1 bs=1K count=1'
+ % mount_dir, shell=True)
+ check_call('dd if=/dev/urandom of=%s/dir1/file2 bs=1K count=1'
+ % mount_dir, shell=True)
+
+ # Test Case 2
+ check_call('mkdir %s/dir2' % mount_dir, shell=True)
+ for i in range(0, 20):
+ check_call('mkdir %s/dir2/0123456789abcdef%02x'
+ % (mount_dir, i), shell=True)
+
+ # Test Case 4
+ check_call('mkdir %s/dir4' % mount_dir, shell=True)
+
+ # Test Case 5, 6 & 7
+ check_call('mkdir %s/dir5' % mount_dir, shell=True)
+ check_call('dd if=/dev/urandom of=%s/dir5/file1 bs=1K count=1'
+ % mount_dir, shell=True)
+
+ umount_fs(mount_dir)
+ except CalledProcessError:
+ pytest.skip('Setup failed for filesystem: ' + fs_type)
+ return
+ else:
+ yield [fs_ubtype, fs_img]
+ finally:
+ umount_fs(mount_dir)
+ call('rmdir %s' % mount_dir, shell=True)
+ if fs_img:
+ call('rm -f %s' % fs_img, shell=True)
+
+#
+# Fixture for symlink fs test
+#
+# NOTE: yield_fixture has been deprecated since pytest 3.0
+@pytest.yield_fixture()
+def fs_obj_symlink(request, u_boot_config):
+ """Set up a file system to be used in symlink fs test.
+
+ Args:
+ request: Pytest request object.
+ u_boot_config: U-boot configuration.
+
+ Return:
+        A fixture for symlink fs test, i.e. a triplet of file system type,
+ volume file name and a list of MD5 hashes.
+ """
+ fs_type = request.param
+ fs_img = ''
+
+ fs_ubtype = fstype_to_ubname(fs_type)
+ check_ubconfig(u_boot_config, fs_ubtype)
+
+ mount_dir = u_boot_config.persistent_data_dir + '/mnt'
+
+ small_file = mount_dir + '/' + SMALL_FILE
+ medium_file = mount_dir + '/' + MEDIUM_FILE
+
+ try:
+
+        # 1GiB volume
+ fs_img = mk_fs(u_boot_config, fs_type, 0x40000000, '1GB')
+
+ # Mount the image so we can populate it.
+ check_call('mkdir -p %s' % mount_dir, shell=True)
+ mount_fs(fs_type, fs_img, mount_dir)
+
+ # Create a subdirectory.
+ check_call('mkdir %s/SUBDIR' % mount_dir, shell=True)
+
+ # Create a small file in this image.
+ check_call('dd if=/dev/urandom of=%s bs=1M count=1'
+ % small_file, shell=True)
+
+ # Create a medium file in this image.
+ check_call('dd if=/dev/urandom of=%s bs=10M count=1'
+ % medium_file, shell=True)
+
+ # Generate the md5sums of reads that we will test against small file
+ out = check_output(
+ 'dd if=%s bs=1M skip=0 count=1 2> /dev/null | md5sum'
+ % small_file, shell=True).decode()
+ md5val = [out.split()[0]]
+ out = check_output(
+ 'dd if=%s bs=10M skip=0 count=1 2> /dev/null | md5sum'
+ % medium_file, shell=True).decode()
+ md5val.extend([out.split()[0]])
+
+ umount_fs(mount_dir)
+ except CalledProcessError:
+ pytest.skip('Setup failed for filesystem: ' + fs_type)
+ return
+ else:
+ yield [fs_ubtype, fs_img, md5val]
+ finally:
+ umount_fs(mount_dir)
+ call('rmdir %s' % mount_dir, shell=True)
+ if fs_img:
+ call('rm -f %s' % fs_img, shell=True)
diff --git a/test/py/tests/test_fs/fstest_defs.py b/test/py/tests/test_fs/fstest_defs.py
new file mode 100644
index 00000000..35b2bb65
--- /dev/null
+++ b/test/py/tests/test_fs/fstest_defs.py
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+# $MIN_FILE is the name of the 20KB file in the file system image
+MIN_FILE='testfile'
+
+# $SMALL_FILE is the name of the 1MB file in the file system image
+SMALL_FILE='1MB.file'
+
+# $MEDIUM_FILE is the name of the 10MB file in the file system image
+MEDIUM_FILE='10MB.file'
+
+# $BIG_FILE is the name of the 2.5GB file in the file system image
+BIG_FILE='2.5GB.file'
+
+ADDR=0x01000008
+LENGTH=0x00100000
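+
+# ADDR is the RAM address used as the load/md5sum buffer by the tests and
+# LENGTH (1 MiB) is the read length used by the partial-read test cases.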
diff --git a/test/py/tests/test_fs/fstest_helpers.py b/test/py/tests/test_fs/fstest_helpers.py
new file mode 100644
index 00000000..faec2982
--- /dev/null
+++ b/test/py/tests/test_fs/fstest_helpers.py
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2019, Texas Instrument
+# Author: JJ Hiblot <jjhiblot@ti.com>
+#
+
+from subprocess import check_call, CalledProcessError
+
+def assert_fs_integrity(fs_type, fs_img):
+ try:
+ if fs_type == 'ext4':
+ check_call('fsck.ext4 -n -f %s' % fs_img, shell=True)
+ except CalledProcessError:
+ raise
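+
+# Note that only ext4 images are actually checked here; for the fat variants
+# this helper is currently a no-op.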
diff --git a/test/py/tests/test_fs/test_basic.py b/test/py/tests/test_fs/test_basic.py
new file mode 100644
index 00000000..71f3e86f
--- /dev/null
+++ b/test/py/tests/test_fs/test_basic.py
@@ -0,0 +1,292 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018, Linaro Limited
+# Author: Takahiro Akashi <takahiro.akashi@linaro.org>
+#
+# U-Boot File System: Basic Test
+
+"""
+This test verifies basic read/write operations on the file system.
+"""
+
+import pytest
+import re
+from fstest_defs import *
+from fstest_helpers import assert_fs_integrity
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.slow
+class TestFsBasic(object):
+ def test_fs1(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 1 - ls command, listing a root directory and invalid directory
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 1a - ls'):
+ # Test Case 1 - ls
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sls host 0:0' % fs_type])
+ assert(re.search('2621440000 *%s' % BIG_FILE, ''.join(output)))
+ assert(re.search('1048576 *%s' % SMALL_FILE, ''.join(output)))
+
+ with u_boot_console.log.section('Test Case 1b - ls (invalid dir)'):
+ # In addition, test with a nonexistent directory to see if we crash.
+ output = u_boot_console.run_command(
+ '%sls host 0:0 invalid_d' % fs_type)
+ if fs_type == 'ext4':
+ assert('Can not find directory' in output)
+ else:
+ assert('' == output)
+
+ def test_fs2(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 2 - size command for a small file
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 2a - size (small)'):
+ # 1MB is 0x0010 0000
+ # Test Case 2a - size of small file
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%ssize host 0:0 /%s' % (fs_type, SMALL_FILE),
+ 'printenv filesize',
+ 'setenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ with u_boot_console.log.section('Test Case 2b - size (/../<file>)'):
+ # Test Case 2b - size of small file via a path using '..'
+ output = u_boot_console.run_command_list([
+ '%ssize host 0:0 /SUBDIR/../%s' % (fs_type, SMALL_FILE),
+ 'printenv filesize',
+ 'setenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ def test_fs3(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 3 - size command for a large file
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 3 - size (large)'):
+ # 2.5GB (1024*1024*2500) is 0x9C40 0000
+ # Test Case 3 - size of big file
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%ssize host 0:0 /%s' % (fs_type, BIG_FILE),
+ 'printenv filesize',
+ 'setenv filesize'])
+ assert('filesize=9c400000' in ''.join(output))
+
+ def test_fs4(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 4 - load a small file, 1MB
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 4 - load (small)'):
+ # Test Case 4a - Read full 1MB of small file
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE),
+ 'printenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ # Test Case 4b - Read full 1MB of small file
+ output = u_boot_console.run_command_list([
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[0] in ''.join(output))
+
+ def test_fs5(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 5 - load, reading first 1MB of 3GB file
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 5 - load (first 1MB)'):
+ # Test Case 5a - First 1MB of big file
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s %x 0x0' % (fs_type, ADDR, BIG_FILE, LENGTH),
+ 'printenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ # Test Case 5b - First 1MB of big file
+ output = u_boot_console.run_command_list([
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[1] in ''.join(output))
+
+ def test_fs6(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 6 - load, reading last 1MB of 3GB file
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 6 - load (last 1MB)'):
+ # fails for ext as no offset support
+ # Test Case 6a - Last 1MB of big file
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s %x 0x9c300000'
+ % (fs_type, ADDR, BIG_FILE, LENGTH),
+ 'printenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ # Test Case 6b - Last 1MB of big file
+ output = u_boot_console.run_command_list([
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[2] in ''.join(output))
+
+ def test_fs7(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 7 - load, 1MB from the last 1MB in 2GB
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 7 - load (last 1MB in 2GB)'):
+ # fails for ext as no offset support
+ # Test Case 7a - One from the last 1MB chunk of 2GB
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s %x 0x7ff00000'
+ % (fs_type, ADDR, BIG_FILE, LENGTH),
+ 'printenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ # Test Case 7b - One from the last 1MB chunk of 2GB
+ output = u_boot_console.run_command_list([
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[3] in ''.join(output))
+
+ def test_fs8(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 8 - load, reading first 1MB in 2GB
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 8 - load (first 1MB in 2GB)'):
+ # fails for ext as no offset support
+ # Test Case 8a - One from the start 1MB chunk from 2GB
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s %x 0x80000000'
+ % (fs_type, ADDR, BIG_FILE, LENGTH),
+ 'printenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ # Test Case 8b - One from the start 1MB chunk from 2GB
+ output = u_boot_console.run_command_list([
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[4] in ''.join(output))
+
+ def test_fs9(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 9 - load, 1MB crossing 2GB boundary
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 9 - load (crossing 2GB boundary)'):
+ # fails for ext as no offset support
+ # Test Case 9a - One 1MB chunk crossing the 2GB boundary
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s %x 0x7ff80000'
+ % (fs_type, ADDR, BIG_FILE, LENGTH),
+ 'printenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ # Test Case 9b - One 1MB chunk crossing the 2GB boundary
+ output = u_boot_console.run_command_list([
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[5] in ''.join(output))
+
+ def test_fs10(self, u_boot_console, fs_obj_basic):
+ """
+        Test Case 10 - load, reading beyond file end
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 10 - load (beyond file end)'):
+ # Generic failure case
+ # Test Case 10 - 2MB chunk from the last 1MB of big file
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s 0x00200000 0x9c300000'
+ % (fs_type, ADDR, BIG_FILE),
+ 'printenv filesize',
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ def test_fs11(self, u_boot_console, fs_obj_basic):
+ """
+        Test Case 11 - write
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 11 - write'):
+ # Read 1MB from small file
+ # Write it back to test the writes
+ # Test Case 11a - Check that the write succeeded
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE),
+ '%swrite host 0:0 %x /%s.w $filesize'
+ % (fs_type, ADDR, SMALL_FILE)])
+ assert('1048576 bytes written' in ''.join(output))
+
+ # Test Case 11b - Check md5 of written to is same
+ # as the one read from
+ output = u_boot_console.run_command_list([
+ '%sload host 0:0 %x /%s.w' % (fs_type, ADDR, SMALL_FILE),
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[0] in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs12(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 12 - write to "." directory
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 12 - write (".")'):
+ # Next test case checks writing a file whose dirent
+ # is the first in the block, which is always true for "."
+ # The write should fail, but the lookup should work
+ # Test Case 12 - Check directory traversal
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%swrite host 0:0 %x /. 0x10' % (fs_type, ADDR)])
+ assert('Unable to write' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs13(self, u_boot_console, fs_obj_basic):
+ """
+ Test Case 13 - write to a file with "/./<filename>"
+ """
+ fs_type,fs_img,md5val = fs_obj_basic
+ with u_boot_console.log.section('Test Case 13 - write ("./<file>")'):
+ # Read 1MB from small file
+ # Write it via "same directory", i.e. "." dirent
+ # Test Case 13a - Check directory traversal
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE),
+ '%swrite host 0:0 %x /./%s2 $filesize'
+ % (fs_type, ADDR, SMALL_FILE)])
+ assert('1048576 bytes written' in ''.join(output))
+
+ # Test Case 13b - Check that the md5 of the written file matches
+ # that of the file it was read from
+ output = u_boot_console.run_command_list([
+ 'mw.b %x 00 100' % ADDR,
+ '%sload host 0:0 %x /./%s2' % (fs_type, ADDR, SMALL_FILE),
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[0] in ''.join(output))
+
+ # Test Case 13c - Check that the file is also reachable without the
+ # "./" prefix and that its md5 matches the original
+ output = u_boot_console.run_command_list([
+ 'mw.b %x 00 100' % ADDR,
+ '%sload host 0:0 %x /%s2' % (fs_type, ADDR, SMALL_FILE),
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[0] in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
diff --git a/test/py/tests/test_fs/test_ext.py b/test/py/tests/test_fs/test_ext.py
new file mode 100644
index 00000000..6b7fc487
--- /dev/null
+++ b/test/py/tests/test_fs/test_ext.py
@@ -0,0 +1,319 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018, Linaro Limited
+# Author: Takahiro Akashi <takahiro.akashi@linaro.org>
+#
+# U-Boot File System: Extended Test
+
+"""
+This test verifies extended write operations on a file system.
+"""
+
+import pytest
+import re
+from fstest_defs import *
+from fstest_helpers import assert_fs_integrity
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.slow
+class TestFsExt(object):
+ def test_fs_ext1(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 1 - write a file with absolute path
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 1 - write with abs path'):
+ # Test Case 1a - Check if command successfully returned
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE),
+ '%swrite host 0:0 %x /dir1/%s.w1 $filesize'
+ % (fs_type, ADDR, MIN_FILE)])
+ assert('20480 bytes written' in ''.join(output))
+
+ # Test Case 1b - Check md5 of file content
+ output = u_boot_console.run_command_list([
+ 'mw.b %x 00 100' % ADDR,
+ '%sload host 0:0 %x /dir1/%s.w1' % (fs_type, ADDR, MIN_FILE),
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[0] in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs_ext2(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 2 - write to a file with relative path
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 2 - write with rel path'):
+ # Test Case 2a - Check if command successfully returned
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE),
+ '%swrite host 0:0 %x dir1/%s.w2 $filesize'
+ % (fs_type, ADDR, MIN_FILE)])
+ assert('20480 bytes written' in ''.join(output))
+
+ # Test Case 2b - Check md5 of file content
+ output = u_boot_console.run_command_list([
+ 'mw.b %x 00 100' % ADDR,
+ '%sload host 0:0 %x dir1/%s.w2' % (fs_type, ADDR, MIN_FILE),
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[0] in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs_ext3(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 3 - write to a file with invalid path
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 3 - write with invalid path'):
+ # Test Case 3 - Check if command expectedly failed
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE),
+ '%swrite host 0:0 %x /dir1/none/%s.w3 $filesize'
+ % (fs_type, ADDR, MIN_FILE)])
+ assert('Unable to write "/dir1/none/' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs_ext4(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 4 - write at non-zero offset, enlarging file size
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 4 - write at non-zero offset, enlarging file size'):
+ # Test Case 4a - Check if command successfully returned
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE),
+ '%swrite host 0:0 %x /dir1/%s.w4 $filesize'
+ % (fs_type, ADDR, MIN_FILE)])
+ output = u_boot_console.run_command(
+ '%swrite host 0:0 %x /dir1/%s.w4 $filesize 0x1400'
+ % (fs_type, ADDR, MIN_FILE))
+ assert('20480 bytes written' in output)
+
+ # Test Case 4b - Check size of written file
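+ # The file was 0x5000 (20480) bytes and was rewritten at offset
+ # 0x1400, so its size should now be 0x1400 + 0x5000 = 0x6400.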
+ output = u_boot_console.run_command_list([
+ '%ssize host 0:0 /dir1/%s.w4' % (fs_type, MIN_FILE),
+ 'printenv filesize',
+ 'setenv filesize'])
+ assert('filesize=6400' in ''.join(output))
+
+ # Test Case 4c - Check md5 of file content
+ output = u_boot_console.run_command_list([
+ 'mw.b %x 00 100' % ADDR,
+ '%sload host 0:0 %x /dir1/%s.w4' % (fs_type, ADDR, MIN_FILE),
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[1] in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs_ext5(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 5 - write at non-zero offset, shrinking file size
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 5 - write at non-zero offset, shrinking file size'):
+ # Test Case 5a - Check if command successfully returned
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE),
+ '%swrite host 0:0 %x /dir1/%s.w5 $filesize'
+ % (fs_type, ADDR, MIN_FILE)])
+ output = u_boot_console.run_command(
+ '%swrite host 0:0 %x /dir1/%s.w5 0x1400 0x1400'
+ % (fs_type, ADDR, MIN_FILE))
+ assert('5120 bytes written' in output)
+
+ # Test Case 5b - Check size of written file
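+ # 0x1400 bytes were written at offset 0x1400, so the file should
+ # have been truncated to 0x1400 + 0x1400 = 0x2800 bytes.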
+ output = u_boot_console.run_command_list([
+ '%ssize host 0:0 /dir1/%s.w5' % (fs_type, MIN_FILE),
+ 'printenv filesize',
+ 'setenv filesize'])
+ assert('filesize=2800' in ''.join(output))
+
+ # Test Case 5c - Check md5 of file content
+ output = u_boot_console.run_command_list([
+ 'mw.b %x 00 100' % ADDR,
+ '%sload host 0:0 %x /dir1/%s.w5' % (fs_type, ADDR, MIN_FILE),
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[2] in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs_ext6(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 6 - write nothing at the start, truncating to zero
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 6 - write nothing at the start, truncating to zero'):
+ # Test Case 6a - Check if command successfully returned
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE),
+ '%swrite host 0:0 %x /dir1/%s.w6 $filesize'
+ % (fs_type, ADDR, MIN_FILE)])
+ output = u_boot_console.run_command(
+ '%swrite host 0:0 %x /dir1/%s.w6 0 0'
+ % (fs_type, ADDR, MIN_FILE))
+ assert('0 bytes written' in output)
+
+ # Test Case 6b - Check size of written file
+ output = u_boot_console.run_command_list([
+ '%ssize host 0:0 /dir1/%s.w6' % (fs_type, MIN_FILE),
+ 'printenv filesize',
+ 'setenv filesize'])
+ assert('filesize=0' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs_ext7(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 7 - write at the end (append)
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 7 - write at the end (append)'):
+ # Test Case 7a - Check if command successfully returned
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE),
+ '%swrite host 0:0 %x /dir1/%s.w7 $filesize'
+ % (fs_type, ADDR, MIN_FILE)])
+ output = u_boot_console.run_command(
+ '%swrite host 0:0 %x /dir1/%s.w7 $filesize $filesize'
+ % (fs_type, ADDR, MIN_FILE))
+ assert('20480 bytes written' in output)
+
+ # Test Case 7b - Check size of written file
+ output = u_boot_console.run_command_list([
+ '%ssize host 0:0 /dir1/%s.w7' % (fs_type, MIN_FILE),
+ 'printenv filesize',
+ 'setenv filesize'])
+ assert('filesize=a000' in ''.join(output))
+
+ # Test Case 7c - Check md5 of file content
+ output = u_boot_console.run_command_list([
+ 'mw.b %x 00 100' % ADDR,
+ '%sload host 0:0 %x /dir1/%s.w7' % (fs_type, ADDR, MIN_FILE),
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[3] in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs_ext8(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 8 - write at offset beyond the end of file
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 8 - write beyond the end'):
+ # Test Case 8a - Check if command expectedly failed
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE),
+ '%swrite host 0:0 %x /dir1/%s.w8 $filesize'
+ % (fs_type, ADDR, MIN_FILE)])
+ output = u_boot_console.run_command(
+ '%swrite host 0:0 %x /dir1/%s.w8 0x1400 %x'
+ % (fs_type, ADDR, MIN_FILE, 0x100000 + 0x1400))
+ assert('Unable to write "/dir1' in output)
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs_ext9(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 9 - write to a non-existing file at non-zero offset
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 9 - write to non-existing file with non-zero offset'):
+ # Test Case 9a - Check if command expectedly failed
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE),
+ '%swrite host 0:0 %x /dir1/%s.w9 0x1400 0x1400'
+ % (fs_type, ADDR, MIN_FILE)])
+ assert('Unable to write "/dir1' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs_ext10(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 10 - create/delete so many files under the root directory
+ that the number of directory entries exceeds one cluster size
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 10 - create/delete (many)'):
+ # Test Case 10a - Create many files
+ # Please note that the size of a directory entry is 32 bytes,
+ # so one typical cluster may hold 64 (2048/32) entries.
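+ # Creating 66 files is intended to push the directory beyond a
+ # single cluster.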
+ output = u_boot_console.run_command(
+ 'host bind 0 %s' % fs_img)
+
+ for i in range(0, 66):
+ output = u_boot_console.run_command(
+ '%swrite host 0:0 %x /FILE0123456789_%02x 100'
+ % (fs_type, ADDR, i))
+ output = u_boot_console.run_command('%sls host 0:0 /' % fs_type)
+ assert('FILE0123456789_00' in output)
+ assert('FILE0123456789_41' in output)
+
+ # Test Case 10b - Delete many files
+ for i in range(0, 66):
+ output = u_boot_console.run_command(
+ '%srm host 0:0 /FILE0123456789_%02x'
+ % (fs_type, i))
+ output = u_boot_console.run_command('%sls host 0:0 /' % fs_type)
+ assert(not 'FILE0123456789_00' in output)
+ assert(not 'FILE0123456789_41' in output)
+
+ # Test Case 10c - Create many files again
+ # Please note that nos. 64 and 65 are intentionally re-created
+ for i in range(64, 128):
+ output = u_boot_console.run_command(
+ '%swrite host 0:0 %x /FILE0123456789_%02x 100'
+ % (fs_type, ADDR, i))
+ output = u_boot_console.run_command('%sls host 0:0 /' % fs_type)
+ assert('FILE0123456789_40' in output)
+ assert('FILE0123456789_79' in output)
+
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_fs_ext11(self, u_boot_console, fs_obj_ext):
+ """
+ Test Case 11 - create/delete so many files under a sub-directory
+ that the number of directory entries exceeds one cluster size
+ """
+ fs_type,fs_img,md5val = fs_obj_ext
+ with u_boot_console.log.section('Test Case 11 - create/delete (many)'):
+ # Test Case 11a - Create many files
+ # Please note that the size of a directory entry is 32 bytes,
+ # so one typical cluster may hold 64 (2048/32) entries.
+ output = u_boot_console.run_command(
+ 'host bind 0 %s' % fs_img)
+
+ for i in range(0, 66):
+ output = u_boot_console.run_command(
+ '%swrite host 0:0 %x /dir1/FILE0123456789_%02x 100'
+ % (fs_type, ADDR, i))
+ output = u_boot_console.run_command('%sls host 0:0 /dir1' % fs_type)
+ assert('FILE0123456789_00' in output)
+ assert('FILE0123456789_41' in output)
+
+ # Test Case 11b - Delete many files
+ for i in range(0, 66):
+ output = u_boot_console.run_command(
+ '%srm host 0:0 /dir1/FILE0123456789_%02x'
+ % (fs_type, i))
+ output = u_boot_console.run_command('%sls host 0:0 /dir1' % fs_type)
+ assert(not 'FILE0123456789_00' in output)
+ assert(not 'FILE0123456789_41' in output)
+
+ # Test Case 11c - Create many files again
+ # Please note that nos. 64 and 65 are intentionally re-created
+ for i in range(64, 128):
+ output = u_boot_console.run_command(
+ '%swrite host 0:0 %x /dir1/FILE0123456789_%02x 100'
+ % (fs_type, ADDR, i))
+ output = u_boot_console.run_command('%sls host 0:0 /dir1' % fs_type)
+ assert('FILE0123456789_40' in output)
+ assert('FILE0123456789_79' in output)
+
+ assert_fs_integrity(fs_type, fs_img)
diff --git a/test/py/tests/test_fs/test_mkdir.py b/test/py/tests/test_fs/test_mkdir.py
new file mode 100644
index 00000000..fa9561ec
--- /dev/null
+++ b/test/py/tests/test_fs/test_mkdir.py
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018, Linaro Limited
+# Author: Takahiro Akashi <takahiro.akashi@linaro.org>
+#
+# U-Boot File System:mkdir Test
+
+"""
+This test verifies the mkdir operation on a file system.
+"""
+
+import pytest
+from fstest_helpers import assert_fs_integrity
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.slow
+class TestMkdir(object):
+ def test_mkdir1(self, u_boot_console, fs_obj_mkdir):
+ """
+ Test Case 1 - create a directory under a root
+ """
+ fs_type,fs_img = fs_obj_mkdir
+ with u_boot_console.log.section('Test Case 1 - mkdir'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%smkdir host 0:0 dir1' % fs_type,
+ '%sls host 0:0 /' % fs_type])
+ assert('dir1/' in ''.join(output))
+
+ output = u_boot_console.run_command(
+ '%sls host 0:0 dir1' % fs_type)
+ assert('./' in output)
+ assert('../' in output)
+ assert_fs_integrity(fs_type, fs_img)
+
+
+ def test_mkdir2(self, u_boot_console, fs_obj_mkdir):
+ """
+ Test Case 2 - create a directory under a sub-directory
+ """
+ fs_type,fs_img = fs_obj_mkdir
+ with u_boot_console.log.section('Test Case 2 - mkdir (sub-sub directory)'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%smkdir host 0:0 dir1/dir2' % fs_type,
+ '%sls host 0:0 dir1' % fs_type])
+ assert('dir2/' in ''.join(output))
+
+ output = u_boot_console.run_command(
+ '%sls host 0:0 dir1/dir2' % fs_type)
+ assert('./' in output)
+ assert('../' in output)
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_mkdir3(self, u_boot_console, fs_obj_mkdir):
+ """
+ Test Case 3 - trying to create a directory with a non-existing
+ path should fail
+ """
+ fs_type,fs_img = fs_obj_mkdir
+ with u_boot_console.log.section('Test Case 3 - mkdir (non-existing path)'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%smkdir host 0:0 none/dir3' % fs_type])
+ assert('Unable to create a directory' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_mkdir4(self, u_boot_console, fs_obj_mkdir):
+ """
+ Test Case 4 - trying to create "." should fail
+ """
+ fs_type,fs_img = fs_obj_mkdir
+ with u_boot_console.log.section('Test Case 4 - mkdir (".")'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%smkdir host 0:0 .' % fs_type])
+ assert('Unable to create a directory' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_mkdir5(self, u_boot_console, fs_obj_mkdir):
+ """
+ Test Case 5 - trying to create ".." should fail
+ """
+ fs_type,fs_img = fs_obj_mkdir
+ with u_boot_console.log.section('Test Case 5 - mkdir ("..")'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%smkdir host 0:0 ..' % fs_type])
+ assert('Unable to create a directory' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_mkdir6(self, u_boot_console, fs_obj_mkdir):
+ """
+ Test Case 6 - create so many directories that the number of
+ directory entries exceeds a cluster size
+ """
+ fs_type,fs_img = fs_obj_mkdir
+ with u_boot_console.log.section('Test Case 6 - mkdir (create many)'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%smkdir host 0:0 dir6' % fs_type,
+ '%sls host 0:0 /' % fs_type])
+ assert('dir6/' in ''.join(output))
+
+ for i in range(0, 20):
+ output = u_boot_console.run_command(
+ '%smkdir host 0:0 dir6/0123456789abcdef%02x'
+ % (fs_type, i))
+ output = u_boot_console.run_command('%sls host 0:0 dir6' % fs_type)
+ assert('0123456789abcdef00/' in output)
+ assert('0123456789abcdef13/' in output)
+
+ output = u_boot_console.run_command(
+ '%sls host 0:0 dir6/0123456789abcdef13/.' % fs_type)
+ assert('./' in output)
+ assert('../' in output)
+
+ output = u_boot_console.run_command(
+ '%sls host 0:0 dir6/0123456789abcdef13/..' % fs_type)
+ assert('0123456789abcdef00/' in output)
+ assert('0123456789abcdef13/' in output)
+ assert_fs_integrity(fs_type, fs_img)
diff --git a/test/py/tests/test_fs/test_symlink.py b/test/py/tests/test_fs/test_symlink.py
new file mode 100644
index 00000000..9ced101a
--- /dev/null
+++ b/test/py/tests/test_fs/test_symlink.py
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2019, Texas Instrument
+# Author: Jean-Jacques Hiblot <jjhiblot@ti.com>
+#
+# U-Boot File System:symlink Test
+
+"""
+This test verifies symbolic link operations (creating and following links)
+on a file system.
+"""
+
+import pytest
+import re
+from fstest_defs import *
+from fstest_helpers import assert_fs_integrity
+
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.slow
+class TestSymlink(object):
+ def test_symlink1(self, u_boot_console, fs_obj_symlink):
+ """
+ Test Case 1 - create a link and follow it when reading
+ """
+ fs_type, fs_img, md5val = fs_obj_symlink
+ with u_boot_console.log.section('Test Case 1 - create link and read'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ 'setenv filesize',
+ 'ln host 0:0 %s /%s.link ' % (SMALL_FILE, SMALL_FILE),
+ ])
+ assert('' in ''.join(output))
+
+ output = u_boot_console.run_command_list([
+ '%sload host 0:0 %x /%s.link' % (fs_type, ADDR, SMALL_FILE),
+ 'printenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ # Test Case 1b - Read full 1MB of small file via the link
+ output = u_boot_console.run_command_list([
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[0] in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_symlink2(self, u_boot_console, fs_obj_symlink):
+ """
+ Test Case 2 - create chained links
+ """
+ fs_type, fs_img, md5val = fs_obj_symlink
+ with u_boot_console.log.section('Test Case 2 - create chained links'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ 'setenv filesize',
+ 'ln host 0:0 %s /%s.link1 ' % (SMALL_FILE, SMALL_FILE),
+ 'ln host 0:0 /%s.link1 /SUBDIR/%s.link2' % (
+ SMALL_FILE, SMALL_FILE),
+ 'ln host 0:0 SUBDIR/%s.link2 /%s.link3' % (
+ SMALL_FILE, SMALL_FILE),
+ ])
+ assert('' in ''.join(output))
+
+ output = u_boot_console.run_command_list([
+ '%sload host 0:0 %x /%s.link3' % (fs_type, ADDR, SMALL_FILE),
+ 'printenv filesize'])
+ assert('filesize=100000' in ''.join(output))
+
+ # Test Case 2b - Read full 1MB of small file via the chained links
+ output = u_boot_console.run_command_list([
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[0] in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_symlink3(self, u_boot_console, fs_obj_symlink):
+ """
+ Test Case 3 - replace file/link with link
+ """
+ fs_type, fs_img, md5val = fs_obj_symlink
+ with u_boot_console.log.section('Test Case 3 - replace file/link with link'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ 'setenv filesize',
+ 'ln host 0:0 %s /%s ' % (MEDIUM_FILE, SMALL_FILE),
+ 'ln host 0:0 %s /%s.link ' % (MEDIUM_FILE, MEDIUM_FILE),
+ ])
+ assert('' in ''.join(output))
+
+ output = u_boot_console.run_command_list([
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE),
+ 'printenv filesize'])
+ assert('filesize=a00000' in ''.join(output))
+
+ output = u_boot_console.run_command_list([
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[1] in ''.join(output))
+
+ output = u_boot_console.run_command_list([
+ 'ln host 0:0 %s.link /%s ' % (MEDIUM_FILE, SMALL_FILE),
+ '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE),
+ 'printenv filesize'])
+ assert('filesize=a00000' in ''.join(output))
+
+ output = u_boot_console.run_command_list([
+ 'md5sum %x $filesize' % ADDR,
+ 'setenv filesize'])
+ assert(md5val[1] in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_symlink4(self, u_boot_console, fs_obj_symlink):
+ """
+ Test Case 4 - create a broken link
+ """
+ fs_type, fs_img, md5val = fs_obj_symlink
+ with u_boot_console.log.section('Test Case 4 - create a broken link'):
+
+ output = u_boot_console.run_command_list([
+ 'setenv filesize',
+ 'ln host 0:0 nowhere /link ',
+ ])
+ assert('' in ''.join(output))
+
+ output = u_boot_console.run_command(
+ '%sload host 0:0 %x /link' %
+ (fs_type, ADDR))
+ with u_boot_console.disable_check('error_notification'):
+ output = u_boot_console.run_command('printenv filesize')
+ assert('"filesize" not defined' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
diff --git a/test/py/tests/test_fs/test_unlink.py b/test/py/tests/test_fs/test_unlink.py
new file mode 100644
index 00000000..97aafc63
--- /dev/null
+++ b/test/py/tests/test_fs/test_unlink.py
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018, Linaro Limited
+# Author: Takahiro Akashi <takahiro.akashi@linaro.org>
+#
+# U-Boot File System:unlink Test
+
+"""
+This test verifies the unlink operation (deleting a file or a directory)
+on a file system.
+"""
+
+import pytest
+from fstest_helpers import assert_fs_integrity
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.slow
+class TestUnlink(object):
+ def test_unlink1(self, u_boot_console, fs_obj_unlink):
+ """
+ Test Case 1 - delete a file
+ """
+ fs_type,fs_img = fs_obj_unlink
+ with u_boot_console.log.section('Test Case 1 - unlink (file)'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%srm host 0:0 dir1/file1' % fs_type,
+ '%sls host 0:0 dir1/file1' % fs_type])
+ assert('' == ''.join(output))
+
+ output = u_boot_console.run_command(
+ '%sls host 0:0 dir1/' % fs_type)
+ assert(not 'file1' in output)
+ assert('file2' in output)
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_unlink2(self, u_boot_console, fs_obj_unlink):
+ """
+ Test Case 2 - delete many files
+ """
+ fs_type,fs_img = fs_obj_unlink
+ with u_boot_console.log.section('Test Case 2 - unlink (many)'):
+ output = u_boot_console.run_command('host bind 0 %s' % fs_img)
+
+ for i in range(0, 20):
+ output = u_boot_console.run_command_list([
+ '%srm host 0:0 dir2/0123456789abcdef%02x' % (fs_type, i),
+ '%sls host 0:0 dir2/0123456789abcdef%02x' % (fs_type, i)])
+ assert('' == ''.join(output))
+
+ output = u_boot_console.run_command(
+ '%sls host 0:0 dir2' % fs_type)
+ assert('0 file(s), 2 dir(s)' in output)
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_unlink3(self, u_boot_console, fs_obj_unlink):
+ """
+ Test Case 3 - trying to delete a non-existing file should fail
+ """
+ fs_type,fs_img = fs_obj_unlink
+ with u_boot_console.log.section('Test Case 3 - unlink (non-existing)'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%srm host 0:0 dir1/nofile' % fs_type])
+ assert('nofile: doesn\'t exist' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_unlink4(self, u_boot_console, fs_obj_unlink):
+ """
+ Test Case 4 - delete an empty directory
+ """
+ fs_type,fs_img = fs_obj_unlink
+ with u_boot_console.log.section('Test Case 4 - unlink (directory)'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%srm host 0:0 dir4' % fs_type])
+ assert('' == ''.join(output))
+
+ output = u_boot_console.run_command(
+ '%sls host 0:0 /' % fs_type)
+ assert(not 'dir4' in output)
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_unlink5(self, u_boot_console, fs_obj_unlink):
+ """
+ Test Case 5 - trying to delete a non-empty directory
+ should fail
+ """
+ fs_type,fs_img = fs_obj_unlink
+ with u_boot_console.log.section('Test Case 5 - unlink ("non-empty directory")'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%srm host 0:0 dir5' % fs_type])
+ assert('directory is not empty' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_unlink6(self, u_boot_console, fs_obj_unlink):
+ """
+ Test Case 6 - trying to delete "." should fail
+ """
+ fs_type,fs_img = fs_obj_unlink
+ with u_boot_console.log.section('Test Case 6 - unlink (".")'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%srm host 0:0 dir5/.' % fs_type])
+ assert('directory is not empty' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
+
+ def test_unlink7(self, u_boot_console, fs_obj_unlink):
+ """
+ Test Case 7 - trying to delete ".." should fail
+ """
+ fs_type,fs_img = fs_obj_unlink
+ with u_boot_console.log.section('Test Case 7 - unlink ("..")'):
+ output = u_boot_console.run_command_list([
+ 'host bind 0 %s' % fs_img,
+ '%srm host 0:0 dir5/..' % fs_type])
+ assert('directory is not empty' in ''.join(output))
+ assert_fs_integrity(fs_type, fs_img)
diff --git a/test/py/tests/test_gpt.py b/test/py/tests/test_gpt.py
new file mode 100644
index 00000000..229d7eb2
--- /dev/null
+++ b/test/py/tests/test_gpt.py
@@ -0,0 +1,178 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2017 Alison Chaiken
+# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+
+# Test GPT manipulation commands.
+
+import os
+import pytest
+import u_boot_utils
+
+"""
+These tests rely on a 4 MB disk image, which is automatically created by
+the test.
+"""
+
+class GptTestDiskImage(object):
+ """Disk Image used by the GPT tests."""
+
+ def __init__(self, u_boot_console):
+ """Initialize a new GptTestDiskImage object.
+
+ Args:
+ u_boot_console: A U-Boot console.
+
+ Returns:
+ Nothing.
+ """
+
+ filename = 'test_gpt_disk_image.bin'
+
+ persistent = u_boot_console.config.persistent_data_dir + '/' + filename
+ self.path = u_boot_console.config.result_dir + '/' + filename
+
+ with u_boot_utils.persistent_file_helper(u_boot_console.log, persistent):
+ if os.path.exists(persistent):
+ u_boot_console.log.action('Disk image file ' + persistent +
+ ' already exists')
+ else:
+ u_boot_console.log.action('Generating ' + persistent)
+ fd = os.open(persistent, os.O_RDWR | os.O_CREAT)
+ os.ftruncate(fd, 4194304)
+ os.close(fd)
+ cmd = ('sgdisk',
+ '--disk-guid=375a56f7-d6c9-4e81-b5f0-09d41ca89efe',
+ persistent)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+ # part1 offset 1MB size 1MB
+ cmd = ('sgdisk', '--new=1:2048:4095', '--change-name=1:part1',
+ persistent)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+ # part2 offset 2MB size 1.5MB
+ cmd = ('sgdisk', '--new=2:4096:7167', '--change-name=2:part2',
+ persistent)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+ cmd = ('sgdisk', '--load-backup=' + persistent)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+
+ cmd = ('cp', persistent, self.path)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+
+gtdi = None
+@pytest.fixture(scope='function')
+def state_disk_image(u_boot_console):
+ """pytest fixture to provide a GptTestDiskImage object to tests.
+ This is function-scoped because it uses u_boot_console, which is also
+ function-scoped. However, we don't need to actually do any function-scope
+ work, so this simply returns the same object over and over each time."""
+
+ global gtdi
+ if not gtdi:
+ gtdi = GptTestDiskImage(u_boot_console)
+ return gtdi
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('cmd_gpt')
+@pytest.mark.buildconfigspec('cmd_part')
+@pytest.mark.requiredtool('sgdisk')
+def test_gpt_read(state_disk_image, u_boot_console):
+ """Test the gpt read command."""
+
+ u_boot_console.run_command('host bind 0 ' + state_disk_image.path)
+ output = u_boot_console.run_command('gpt read host 0')
+ assert 'Start 1MiB, size 1MiB' in output
+ assert 'Block size 512, name part1' in output
+ assert 'Start 2MiB, size 1MiB' in output
+ assert 'Block size 512, name part2' in output
+ output = u_boot_console.run_command('part list host 0')
+ assert '0x00000800 0x00000fff "part1"' in output
+ assert '0x00001000 0x00001bff "part2"' in output
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('cmd_gpt')
+@pytest.mark.requiredtool('sgdisk')
+def test_gpt_verify(state_disk_image, u_boot_console):
+ """Test the gpt verify command."""
+
+ u_boot_console.run_command('host bind 0 ' + state_disk_image.path)
+ output = u_boot_console.run_command('gpt verify host 0')
+ assert 'Verify GPT: success!' in output
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('cmd_gpt')
+@pytest.mark.requiredtool('sgdisk')
+def test_gpt_guid(state_disk_image, u_boot_console):
+ """Test the gpt guid command."""
+
+ u_boot_console.run_command('host bind 0 ' + state_disk_image.path)
+ output = u_boot_console.run_command('gpt guid host 0')
+ assert '375a56f7-d6c9-4e81-b5f0-09d41ca89efe' in output
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('cmd_gpt')
+@pytest.mark.requiredtool('sgdisk')
+def test_gpt_save_guid(state_disk_image, u_boot_console):
+ """Test the gpt guid command to save GUID into a string."""
+
+ if u_boot_console.config.buildconfig.get('config_cmd_gpt', 'n') != 'y':
+ pytest.skip('gpt command not supported')
+ u_boot_console.run_command('host bind 0 ' + state_disk_image.path)
+ output = u_boot_console.run_command('gpt guid host 0 newguid')
+ output = u_boot_console.run_command('printenv newguid')
+ assert '375a56f7-d6c9-4e81-b5f0-09d41ca89efe' in output
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('cmd_gpt')
+@pytest.mark.buildconfigspec('cmd_gpt_rename')
+@pytest.mark.buildconfigspec('cmd_part')
+@pytest.mark.requiredtool('sgdisk')
+def test_gpt_rename_partition(state_disk_image, u_boot_console):
+ """Test the gpt rename command to write partition names."""
+
+ u_boot_console.run_command('host bind 0 ' + state_disk_image.path)
+ u_boot_console.run_command('gpt rename host 0 1 first')
+ output = u_boot_console.run_command('gpt read host 0')
+ assert 'name first' in output
+ u_boot_console.run_command('gpt rename host 0 2 second')
+ output = u_boot_console.run_command('gpt read host 0')
+ assert 'name second' in output
+ output = u_boot_console.run_command('part list host 0')
+ assert '0x00000800 0x00000fff "first"' in output
+ assert '0x00001000 0x00001bff "second"' in output
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('cmd_gpt')
+@pytest.mark.buildconfigspec('cmd_gpt_rename')
+@pytest.mark.buildconfigspec('cmd_part')
+@pytest.mark.requiredtool('sgdisk')
+def test_gpt_swap_partitions(state_disk_image, u_boot_console):
+ """Test the gpt swap command to exchange two partition names."""
+
+ u_boot_console.run_command('host bind 0 ' + state_disk_image.path)
+ output = u_boot_console.run_command('part list host 0')
+ assert '0x00000800 0x00000fff "first"' in output
+ assert '0x00001000 0x00001bff "second"' in output
+ u_boot_console.run_command('gpt swap host 0 first second')
+ output = u_boot_console.run_command('part list host 0')
+ assert '0x00000800 0x00000fff "second"' in output
+ assert '0x00001000 0x00001bff "first"' in output
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('cmd_gpt')
+@pytest.mark.buildconfigspec('cmd_part')
+@pytest.mark.requiredtool('sgdisk')
+def test_gpt_write(state_disk_image, u_boot_console):
+ """Test the gpt write command."""
+
+ u_boot_console.run_command('host bind 0 ' + state_disk_image.path)
+ output = u_boot_console.run_command('gpt write host 0 "name=all,size=0"')
+ assert 'Writing GPT: success!' in output
+ output = u_boot_console.run_command('part list host 0')
+ assert '0x00000022 0x00001fde "all"' in output
+ output = u_boot_console.run_command('gpt write host 0 "uuid_disk=375a56f7-d6c9-4e81-b5f0-09d41ca89efe;name=first,start=1M,size=1M;name=second,start=0x200000,size=0x180000;"')
+ assert 'Writing GPT: success!' in output
+ output = u_boot_console.run_command('part list host 0')
+ assert '0x00000800 0x00000fff "first"' in output
+ assert '0x00001000 0x00001bff "second"' in output
+ output = u_boot_console.run_command('gpt guid host 0')
+ assert '375a56f7-d6c9-4e81-b5f0-09d41ca89efe' in output
diff --git a/test/py/tests/test_handoff.py b/test/py/tests/test_handoff.py
new file mode 100644
index 00000000..038f0306
--- /dev/null
+++ b/test/py/tests/test_handoff.py
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+
+import pytest
+
+# Magic number to check that SPL handoff is working
+TEST_HANDOFF_MAGIC = 0x14f93c7b
+
+@pytest.mark.boardspec('sandbox_spl')
+@pytest.mark.buildconfigspec('spl')
+def test_handoff(u_boot_console):
+ """Test that of-platdata can be generated and used in sandbox"""
+ cons = u_boot_console
+ response = cons.run_command('sb handoff')
+ assert ('SPL handoff magic %x' % TEST_HANDOFF_MAGIC) in response
diff --git a/test/py/tests/test_help.py b/test/py/tests/test_help.py
new file mode 100644
index 00000000..d50295e5
--- /dev/null
+++ b/test/py/tests/test_help.py
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+
+def test_help(u_boot_console):
+ """Test that the "help" command can be executed."""
+
+ u_boot_console.run_command('help')
diff --git a/test/py/tests/test_hush_if_test.py b/test/py/tests/test_hush_if_test.py
new file mode 100644
index 00000000..bba8d41d
--- /dev/null
+++ b/test/py/tests/test_hush_if_test.py
@@ -0,0 +1,161 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Test operation of the "if" shell command.
+
+import os
+import os.path
+import pytest
+
+pytestmark = pytest.mark.buildconfigspec('hush_parser')
+
+# The list of "if test" conditions to test.
+subtests = (
+ # Base if functionality.
+
+ ('true', True),
+ ('false', False),
+
+ # Basic operators.
+
+ ('test aaa = aaa', True),
+ ('test aaa = bbb', False),
+
+ ('test aaa != bbb', True),
+ ('test aaa != aaa', False),
+
+ ('test aaa < bbb', True),
+ ('test bbb < aaa', False),
+
+ ('test bbb > aaa', True),
+ ('test aaa > bbb', False),
+
+ ('test 123 -eq 123', True),
+ ('test 123 -eq 456', False),
+
+ ('test 123 -ne 456', True),
+ ('test 123 -ne 123', False),
+
+ ('test 123 -lt 456', True),
+ ('test 123 -lt 123', False),
+ ('test 456 -lt 123', False),
+
+ ('test 123 -le 456', True),
+ ('test 123 -le 123', True),
+ ('test 456 -le 123', False),
+
+ ('test 456 -gt 123', True),
+ ('test 123 -gt 123', False),
+ ('test 123 -gt 456', False),
+
+ ('test 456 -ge 123', True),
+ ('test 123 -ge 123', True),
+ ('test 123 -ge 456', False),
+
+ ('test -z ""', True),
+ ('test -z "aaa"', False),
+
+ ('test -n "aaa"', True),
+ ('test -n ""', False),
+
+ # Inversion of simple tests.
+
+ ('test ! aaa = aaa', False),
+ ('test ! aaa = bbb', True),
+ ('test ! ! aaa = aaa', True),
+ ('test ! ! aaa = bbb', False),
+
+ # Binary operators.
+
+ ('test aaa != aaa -o bbb != bbb', False),
+ ('test aaa != aaa -o bbb = bbb', True),
+ ('test aaa = aaa -o bbb != bbb', True),
+ ('test aaa = aaa -o bbb = bbb', True),
+
+ ('test aaa != aaa -a bbb != bbb', False),
+ ('test aaa != aaa -a bbb = bbb', False),
+ ('test aaa = aaa -a bbb != bbb', False),
+ ('test aaa = aaa -a bbb = bbb', True),
+
+ # Inversion within binary operators.
+
+ ('test ! aaa != aaa -o ! bbb != bbb', True),
+ ('test ! aaa != aaa -o ! bbb = bbb', True),
+ ('test ! aaa = aaa -o ! bbb != bbb', True),
+ ('test ! aaa = aaa -o ! bbb = bbb', False),
+
+ ('test ! ! aaa != aaa -o ! ! bbb != bbb', False),
+ ('test ! ! aaa != aaa -o ! ! bbb = bbb', True),
+ ('test ! ! aaa = aaa -o ! ! bbb != bbb', True),
+ ('test ! ! aaa = aaa -o ! ! bbb = bbb', True),
+
+ # -z operator.
+
+ ('test -z "$ut_var_nonexistent"', True),
+ ('test -z "$ut_var_exists"', False),
+)
+
+def exec_hush_if(u_boot_console, expr, result):
+ """Execute a shell "if" command, and validate its result."""
+
+ config = u_boot_console.config.buildconfig
+ maxargs = int(config.get('config_sys_maxargs', '0'))
+ args = len(expr.split(' ')) - 1
+ if args > maxargs:
+ u_boot_console.log.warning('CONFIG_SYS_MAXARGS too low; need ' +
+ str(args))
+ pytest.skip()
+
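+ # For example, the subtest ('test aaa = bbb', False) runs:
+ #   if test aaa = bbb; then echo true; else echo false; fi
+ # and expects the console to print 'false'.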
+ cmd = 'if ' + expr + '; then echo true; else echo false; fi'
+ response = u_boot_console.run_command(cmd)
+ assert response.strip() == str(result).lower()
+
+def test_hush_if_test_setup(u_boot_console):
+ """Set up environment variables used during the "if" tests."""
+
+ u_boot_console.run_command('setenv ut_var_nonexistent')
+ u_boot_console.run_command('setenv ut_var_exists 1')
+
+@pytest.mark.buildconfigspec('cmd_echo')
+@pytest.mark.parametrize('expr,result', subtests)
+def test_hush_if_test(u_boot_console, expr, result):
+ """Test a single "if test" condition."""
+
+ exec_hush_if(u_boot_console, expr, result)
+
+def test_hush_if_test_teardown(u_boot_console):
+ """Clean up environment variables used during the "if" tests."""
+
+ u_boot_console.run_command('setenv ut_var_exists')
+
+# We might test this on real filesystems via UMS, DFU, 'save', etc.
+# Of those, only UMS currently allows file removal though.
+@pytest.mark.buildconfigspec('cmd_echo')
+@pytest.mark.boardspec('sandbox')
+def test_hush_if_test_host_file_exists(u_boot_console):
+ """Test the "if test -e" shell command."""
+
+ test_file = u_boot_console.config.result_dir + \
+ '/creating_this_file_breaks_u_boot_tests'
+
+ try:
+ os.unlink(test_file)
+ except:
+ pass
+ assert not os.path.exists(test_file)
+
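+ # 'test -e hostfs - <path>' makes sandbox U-Boot check the path on the
+ # build host, which is why the file is created/removed with os calls.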
+ expr = 'test -e hostfs - ' + test_file
+ exec_hush_if(u_boot_console, expr, False)
+
+ try:
+ with open(test_file, 'wb'):
+ pass
+ assert os.path.exists(test_file)
+
+ expr = 'test -e hostfs - ' + test_file
+ exec_hush_if(u_boot_console, expr, True)
+ finally:
+ os.unlink(test_file)
+
+ expr = 'test -e hostfs - ' + test_file
+ exec_hush_if(u_boot_console, expr, False)
diff --git a/test/py/tests/test_log.py b/test/py/tests/test_log.py
new file mode 100644
index 00000000..75325fad
--- /dev/null
+++ b/test/py/tests/test_log.py
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016, Google Inc.
+#
+# U-Boot Verified Boot Test
+
+"""
+This tests U-Boot logging. It uses the 'log test' command with various options
+and checks that the output is correct.
+"""
+
+import pytest
+
+LOGL_FIRST, LOGL_WARNING, LOGL_INFO = (0, 4, 6)
+
+@pytest.mark.buildconfigspec('cmd_log')
+def test_log(u_boot_console):
+ """Test that U-Boot logging works correctly."""
+ def check_log_entries(lines, mask, max_level=LOGL_INFO):
+ """Check that the expected log records appear in the output
+
+ Args:
+ lines: iterator containing lines to check
+ mask: bit mask to select which lines to check for:
+ bit 0: standard log line
+ bit 1: _log line
+ max_level: maximum log level to expect in the output
+ """
+ for i in range(max_level):
+ if mask & 1:
+ assert 'log_run() log %d' % i == next(lines)
+ if mask & 3:
+ assert 'func() _log %d' % i == next(lines)
+
+ def run_test(testnum):
+ """Run a particular test number (the 'log test' command)
+
+ Args:
+ testnum: Test number to run
+ Returns:
+ iterator containing the lines output from the command
+ """
+ with cons.log.section('basic'):
+ output = u_boot_console.run_command('log test %d' % testnum)
+ split = output.replace('\r', '').splitlines()
+ lines = iter(split)
+ assert 'test %d' % testnum == next(lines)
+ return lines
+
+ def test0():
+ lines = run_test(0)
+ check_log_entries(lines, 3)
+
+ def test1():
+ lines = run_test(1)
+ check_log_entries(lines, 3)
+
+ def test2():
+ lines = run_test(2)
+
+ def test3():
+ lines = run_test(3)
+ check_log_entries(lines, 2)
+
+ def test4():
+ lines = run_test(4)
+ assert next(lines, None) == None
+
+ def test5():
+ lines = run_test(5)
+ check_log_entries(lines, 2)
+
+ def test6():
+ lines = run_test(6)
+ check_log_entries(lines, 3)
+
+ def test7():
+ lines = run_test(7)
+ check_log_entries(lines, 3, LOGL_WARNING)
+
+ def test8():
+ lines = run_test(8)
+ check_log_entries(lines, 3)
+
+ def test9():
+ lines = run_test(9)
+ check_log_entries(lines, 3)
+
+ def test10():
+ lines = run_test(10)
+ for i in range(7):
+ assert 'log_test() level %d' % i == next(lines)
+
+ # TODO(sjg@chromium.org): Consider structuring this as separate tests
+ cons = u_boot_console
+ test0()
+ test1()
+ test2()
+ test3()
+ test4()
+ test5()
+ test6()
+ test7()
+ test8()
+ test9()
+ test10()
+
+@pytest.mark.buildconfigspec('cmd_log')
+def test_log_format(u_boot_console):
+ """Test the 'log format' and 'log rec' commands"""
+ def run_with_format(fmt, expected_output):
+ """Set up the log format and then write a log record
+
+ Args:
+ fmt: Format to use for 'log format'
+ expected_output: Expected output from the 'log rec' command
+ """
+ output = cons.run_command('log format %s' % fmt)
+ assert output == ''
+ output = cons.run_command('log rec arch notice file.c 123 func msg')
+ assert output == expected_output
+
+ cons = u_boot_console
+ with cons.log.section('format'):
+ run_with_format('all', 'NOTICE.arch,file.c:123-func() msg')
+ output = cons.run_command('log format')
+ assert output == 'Log format: clFLfm'
+
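+ # Judging from the expected strings above and below, the format letters
+ # map to: c=category, l=level, F=file, L=line, f=function, m=message.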
+ run_with_format('fm', 'func() msg')
+ run_with_format('clfm', 'NOTICE.arch,func() msg')
+ run_with_format('FLfm', 'file.c:123-func() msg')
+ run_with_format('lm', 'NOTICE. msg')
+ run_with_format('m', 'msg')
diff --git a/test/py/tests/test_md.py b/test/py/tests/test_md.py
new file mode 100644
index 00000000..83e3c546
--- /dev/null
+++ b/test/py/tests/test_md.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+import pytest
+import u_boot_utils
+
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_md(u_boot_console):
+ """Test that md reads memory as expected, and that memory can be modified
+ using the mw command."""
+
+ ram_base = u_boot_utils.find_ram_base(u_boot_console)
+ addr = '%08x' % ram_base
+ val = 'a5f09876'
+ expected_response = addr + ': ' + val
+ u_boot_console.run_command('mw ' + addr + ' 0 10')
+ response = u_boot_console.run_command('md ' + addr + ' 10')
+ assert(not (expected_response in response))
+ u_boot_console.run_command('mw ' + addr + ' ' + val)
+ response = u_boot_console.run_command('md ' + addr + ' 10')
+ assert(expected_response in response)
+
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_md_repeat(u_boot_console):
+ """Test command repeat (via executing an empty command) operates correctly
+ for "md"; the command must repeat and dump an incrementing address."""
+
+ ram_base = u_boot_utils.find_ram_base(u_boot_console)
+ addr_base = '%08x' % ram_base
+ words = 0x10
+ addr_repeat = '%08x' % (ram_base + (words * 4))
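+ # 'md' shows 4-byte words here, so the repeated command should resume
+ # 0x10 * 4 = 0x40 bytes after the previous dump.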
+ u_boot_console.run_command('md %s %x' % (addr_base, words))
+ response = u_boot_console.run_command('')
+ expected_response = addr_repeat + ': '
+ assert(expected_response in response)
diff --git a/test/py/tests/test_mmc_rd.py b/test/py/tests/test_mmc_rd.py
new file mode 100644
index 00000000..a25aa5f6
--- /dev/null
+++ b/test/py/tests/test_mmc_rd.py
@@ -0,0 +1,286 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+
+# Test U-Boot's "mmc read" command. The test reads data from the eMMC or SD
+# card, validates that no errors occurred, and checks that the expected data was
+# read if the test configuration contains a CRC of the expected data.
+
+import pytest
+import time
+import u_boot_utils
+
+"""
+This test relies on boardenv_* containing configuration values that define
+which MMC devices should be tested. For example:
+
+# Configuration data for test_mmc_dev, test_mmc_rescan, test_mmc_info; defines
+# whole MMC devices that mmc dev/rescan/info commands may operate upon.
+env__mmc_dev_configs = (
+ {
+ 'fixture_id': 'emmc-boot0',
+ 'is_emmc': True,
+ 'devid': 0,
+ 'partid': 1,
+ 'info_device': ???,
+ 'info_speed': ???,
+ 'info_mode': ???,
+ 'info_buswidth': ???,
+ },
+ {
+ 'fixture_id': 'emmc-boot1',
+ 'is_emmc': True,
+ 'devid': 0,
+ 'partid': 2,
+ 'info_device': ???,
+ 'info_speed': ???,
+ 'info_mode': ???,
+ 'info_buswidth': ???,
+ },
+ {
+ 'fixture_id': 'emmc-data',
+ 'is_emmc': True,
+ 'devid': 0,
+ 'partid': 0,
+ 'info_device': ???,
+ 'info_speed': ???,
+ 'info_mode': ???,
+ 'info_buswidth': ???,
+ },
+ {
+ 'fixture_id': 'sd',
+ 'is_emmc': False,
+ 'devid': 1,
+ 'partid': None,
+ 'info_device': ???,
+ 'info_speed': ???,
+ 'info_mode': ???,
+ 'info_buswidth': ???,
+ },
+)
+
+# Configuration data for test_mmc_rd; defines regions of the MMC (entire
+# devices, or ranges of sectors) which can be read:
+env__mmc_rd_configs = (
+ {
+ 'fixture_id': 'emmc-boot0',
+ 'is_emmc': True,
+ 'devid': 0,
+ 'partid': 1,
+ 'sector': 0x10,
+ 'count': 1,
+ },
+ {
+ 'fixture_id': 'emmc-boot1',
+ 'is_emmc': True,
+ 'devid': 0,
+ 'partid': 2,
+ 'sector': 0x10,
+ 'count': 1,
+ },
+ {
+ 'fixture_id': 'emmc-data',
+ 'is_emmc': True,
+ 'devid': 0,
+ 'partid': 0,
+ 'sector': 0x10,
+ 'count': 0x1000,
+ },
+ {
+ 'fixture_id': 'sd-mbr',
+ 'is_emmc': False,
+ 'devid': 1,
+ 'partid': None,
+ 'sector': 0,
+ 'count': 1,
+ 'crc32': '8f6ecf0d',
+ },
+ {
+ 'fixture_id': 'sd-large',
+ 'is_emmc': False,
+ 'devid': 1,
+ 'partid': None,
+ 'sector': 0x10,
+ 'count': 0x1000,
+ },
+)
+"""
+
+def mmc_dev(u_boot_console, is_emmc, devid, partid):
+ """Run the "mmc dev" command.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ is_emmc: Whether the device is eMMC
+ devid: Device ID
+ partid: Partition ID
+
+ Returns:
+ Nothing.
+ """
+
+ # Select MMC device
+ cmd = 'mmc dev %d' % devid
+ if is_emmc:
+ cmd += ' %d' % partid
+ response = u_boot_console.run_command(cmd)
+ assert 'no card present' not in response
+ if is_emmc:
+ partid_response = '(part %d)' % partid
+ else:
+ partid_response = ''
+ good_response = 'mmc%d%s is current device' % (devid, partid_response)
+ assert good_response in response
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+def test_mmc_dev(u_boot_console, env__mmc_dev_config):
+ """Test the "mmc dev" command.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ env__mmc_dev_config: The single MMC configuration on which
+ to run the test. See the file-level comment above for details
+ of the format.
+
+ Returns:
+ Nothing.
+ """
+
+ is_emmc = env__mmc_dev_config['is_emmc']
+ devid = env__mmc_dev_config['devid']
+ partid = env__mmc_dev_config.get('partid', 0)
+
+ # Select MMC device
+ mmc_dev(u_boot_console, is_emmc, devid, partid)
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+def test_mmc_rescan(u_boot_console, env__mmc_dev_config):
+ """Test the "mmc rescan" command.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ env__mmc_dev_config: The single MMC configuration on which
+ to run the test. See the file-level comment above for details
+ of the format.
+
+ Returns:
+ Nothing.
+ """
+
+ is_emmc = env__mmc_dev_config['is_emmc']
+ devid = env__mmc_dev_config['devid']
+ partid = env__mmc_dev_config.get('partid', 0)
+
+ # Select MMC device
+ mmc_dev(u_boot_console, is_emmc, devid, partid)
+
+ # Rescan MMC device
+ cmd = 'mmc rescan'
+ response = u_boot_console.run_command(cmd)
+ assert 'no card present' not in response
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+def test_mmc_info(u_boot_console, env__mmc_dev_config):
+ """Test the "mmc info" command.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ env__mmc_dev_config: The single MMC configuration on which
+ to run the test. See the file-level comment above for details
+ of the format.
+
+ Returns:
+ Nothing.
+ """
+
+ is_emmc = env__mmc_dev_config['is_emmc']
+ devid = env__mmc_dev_config['devid']
+ partid = env__mmc_dev_config.get('partid', 0)
+ info_device = env__mmc_dev_config['info_device']
+ info_speed = env__mmc_dev_config['info_speed']
+ info_mode = env__mmc_dev_config['info_mode']
+ info_buswidth = env__mmc_dev_config['info_buswidth']
+
+ # Select MMC device
+ mmc_dev(u_boot_console, is_emmc, devid, partid)
+
+ # Read MMC device information
+ cmd = 'mmc info'
+ response = u_boot_console.run_command(cmd)
+ good_response = "Device: %s" % info_device
+ assert good_response in response
+ good_response = "Bus Speed: %s" % info_speed
+ assert good_response in response
+ good_response = "Mode : %s" % info_mode
+ assert good_response in response
+ good_response = "Bus Width: %s" % info_buswidth
+ assert good_response in response
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+def test_mmc_rd(u_boot_console, env__mmc_rd_config):
+ """Test the "mmc read" command.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ env__mmc_rd_config: The single MMC configuration on which
+ to run the test. See the file-level comment above for details
+ of the format.
+
+ Returns:
+ Nothing.
+ """
+
+ is_emmc = env__mmc_rd_config['is_emmc']
+ devid = env__mmc_rd_config['devid']
+ partid = env__mmc_rd_config.get('partid', 0)
+ sector = env__mmc_rd_config.get('sector', 0)
+ count_sectors = env__mmc_rd_config.get('count', 1)
+ expected_crc32 = env__mmc_rd_config.get('crc32', None)
+ read_duration_max = env__mmc_rd_config.get('read_duration_max', 0)
+
+ count_bytes = count_sectors * 512
+ bcfg = u_boot_console.config.buildconfig
+ has_cmd_memory = bcfg.get('config_cmd_memory', 'n') == 'y'
+ has_cmd_crc32 = bcfg.get('config_cmd_crc32', 'n') == 'y'
+ ram_base = u_boot_utils.find_ram_base(u_boot_console)
+ addr = '0x%08x' % ram_base
+
+ # Select MMC device
+ mmc_dev(u_boot_console, is_emmc, devid, partid)
+
+ # Clear target RAM
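+ # Zeroing the buffer first ensures that a later CRC32 match cannot
+ # come from stale data already present in RAM.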
+ if expected_crc32:
+ if has_cmd_memory and has_cmd_crc32:
+ cmd = 'mw.b %s 0 0x%x' % (addr, count_bytes)
+ u_boot_console.run_command(cmd)
+
+ cmd = 'crc32 %s 0x%x' % (addr, count_bytes)
+ response = u_boot_console.run_command(cmd)
+ assert expected_crc32 not in response
+ else:
+ u_boot_console.log.warning(
+ 'CONFIG_CMD_MEMORY or CONFIG_CMD_CRC32 != y: Skipping RAM clear')
+
+ # Read data
+ cmd = 'mmc read %s %x %x' % (addr, sector, count_sectors)
+ tstart = time.time()
+ response = u_boot_console.run_command(cmd)
+ tend = time.time()
+ good_response = 'MMC read: dev # %d, block # %d, count %d ... %d blocks read: OK' % (
+ devid, sector, count_sectors, count_sectors)
+ assert good_response in response
+
+ # Check target RAM
+ if expected_crc32:
+ if has_cmd_crc32:
+ cmd = 'crc32 %s 0x%x' % (addr, count_bytes)
+ response = u_boot_console.run_command(cmd)
+ assert expected_crc32 in response
+ else:
+ u_boot_console.log.warning('CONFIG_CMD_CRC32 != y: Skipping check')
+
+ # Check if the command did not take too long
+ if read_duration_max:
+ elapsed = tend - tstart
+ u_boot_console.log.info('Reading %d bytes took %f seconds' %
+ (count_bytes, elapsed))
+ assert elapsed <= (read_duration_max - 0.01)
diff --git a/test/py/tests/test_mmc_wr.py b/test/py/tests/test_mmc_wr.py
new file mode 100644
index 00000000..05e5c1ee
--- /dev/null
+++ b/test/py/tests/test_mmc_wr.py
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2019, Texas Instrument
+# Author: Jean-Jacques Hiblot <jjhiblot@ti.com>
+
+# Test U-Boot's "mmc write" command. The test generates random data, writes it
+# to the eMMC or SD card, then reads it back and performs a comparison.
+
+import pytest
+import u_boot_utils
+
+"""
+This test relies on boardenv_* containing configuration values that define
+which MMC devices should be tested. For example:
+
+env__mmc_wr_configs = (
+ {
+ "fixture_id": "emmc-boot0",
+ "is_emmc": True,
+ "devid": 1,
+ "partid": 1,
+ "sector": 0x10,
+ "count": 100,
+ "test_iterations": 50,
+ },
+ {
+ "fixture_id": "emmc-boot1",
+ "is_emmc": True,
+ "devid": 1,
+ "partid": 2,
+ "sector": 0x10,
+ "count": 100,
+ "test_iterations": 50,
+ },
+)
+
+"""
+
+@pytest.mark.buildconfigspec('cmd_mmc')
+@pytest.mark.buildconfigspec('cmd_memory')
+@pytest.mark.buildconfigspec('cmd_random')
+def test_mmc_wr(u_boot_console, env__mmc_wr_config):
+ """Test the "mmc write" command.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ env__mmc_wr_config: The single MMC configuration on which
+ to run the test. See the file-level comment above for details
+ of the format.
+
+ Returns:
+ Nothing.
+ """
+
+ is_emmc = env__mmc_wr_config['is_emmc']
+ devid = env__mmc_wr_config['devid']
+ partid = env__mmc_wr_config.get('partid', 0)
+ sector = env__mmc_wr_config.get('sector', 0)
+ count_sectors = env__mmc_wr_config.get('count', 1)
+ test_iterations = env__mmc_wr_config.get('test_iterations', 1)
+
+
+ count_bytes = count_sectors * 512
+ bcfg = u_boot_console.config.buildconfig
+ ram_base = u_boot_utils.find_ram_base(u_boot_console)
+ src_addr = '0x%08x' % ram_base
+ dst_addr = '0x%08x' % (ram_base + count_bytes)
+
+
+ for i in range(test_iterations):
+ # Generate random data
+ cmd = 'random %s %x' % (src_addr, count_bytes)
+ response = u_boot_console.run_command(cmd)
+ good_response = '%d bytes filled with random data' % (count_bytes)
+ assert good_response in response
+
+ # Select MMC device
+ cmd = 'mmc dev %d' % devid
+ if is_emmc:
+ cmd += ' %d' % partid
+ response = u_boot_console.run_command(cmd)
+ assert 'no card present' not in response
+ if is_emmc:
+ partid_response = "(part %d)" % partid
+ else:
+ partid_response = ""
+ good_response = 'mmc%d%s is current device' % (devid, partid_response)
+ assert good_response in response
+
+ # Write data
+ cmd = 'mmc write %s %x %x' % (src_addr, sector, count_sectors)
+ response = u_boot_console.run_command(cmd)
+ good_response = 'MMC write: dev # %d, block # %d, count %d ... %d blocks written: OK' % (devid, sector, count_sectors, count_sectors)
+ assert good_response in response
+
+ # Read data
+ cmd = 'mmc read %s %x %x' % (dst_addr, sector, count_sectors)
+ response = u_boot_console.run_command(cmd)
+ good_response = 'MMC read: dev # %d, block # %d, count %d ... %d blocks read: OK' % (devid, sector, count_sectors, count_sectors)
+ assert good_response in response
+
+ # Compare src and dst data
+ cmd = 'cmp.b %s %s %x' % (src_addr, dst_addr, count_bytes)
+ response = u_boot_console.run_command(cmd)
+ good_response = 'Total of %d byte(s) were the same' % (count_bytes)
+ assert good_response in response
diff --git a/test/py/tests/test_net.py b/test/py/tests/test_net.py
new file mode 100644
index 00000000..9ca6743a
--- /dev/null
+++ b/test/py/tests/test_net.py
@@ -0,0 +1,208 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+
+# Test various network-related functionality, such as the dhcp, ping, and
+# tftpboot commands.
+
+import pytest
+import u_boot_utils
+
+"""
+Note: This test relies on boardenv_* containing configuration values that
+define the network environment available for testing. Without them, these
+tests will be skipped automatically.
+
+For example:
+
+# Boolean indicating whether the Ethernet device is attached to USB, and hence
+# USB enumeration needs to be performed prior to network tests.
+# This variable may be omitted if its value is False.
+env__net_uses_usb = False
+
+# Boolean indicating whether the Ethernet device is attached to PCI, and hence
+# PCI enumeration needs to be performed prior to network tests.
+# This variable may be omitted if its value is False.
+env__net_uses_pci = True
+
+# True if a DHCP server is attached to the network, and should be tested.
+# If DHCP testing is not possible or desired, this variable may be omitted or
+# set to False.
+env__net_dhcp_server = True
+
+# A list of environment variables that should be set in order to configure a
+# static IP. If solely relying on DHCP, this variable may be omitted or set to
+# an empty list.
+env__net_static_env_vars = [
+ ('ipaddr', '10.0.0.100'),
+ ('netmask', '255.255.255.0'),
+ ('serverip', '10.0.0.1'),
+]
+
+# Details regarding a file that may be read from a TFTP server. This variable
+# may be omitted or set to None if TFTP testing is not possible or desired.
+env__net_tftp_readable_file = {
+ 'fn': 'ubtest-readable.bin',
+ 'addr': 0x10000000,
+ 'size': 5058624,
+ 'crc32': 'c2244b26',
+}
+
+# Details regarding a file that may be read from a NFS server. This variable
+# may be omitted or set to None if NFS testing is not possible or desired.
+env__net_nfs_readable_file = {
+ 'fn': 'ubtest-readable.bin',
+ 'addr': 0x10000000,
+ 'size': 5058624,
+ 'crc32': 'c2244b26',
+}
+"""
+
+net_set_up = False
+
+def test_net_pre_commands(u_boot_console):
+ """Execute any commands required to enable network hardware.
+
+ These commands are provided by the boardenv_* file; see the comment at the
+ beginning of this file.
+ """
+
+ init_usb = u_boot_console.config.env.get('env__net_uses_usb', False)
+ if init_usb:
+ u_boot_console.run_command('usb start')
+
+ init_pci = u_boot_console.config.env.get('env__net_uses_pci', False)
+ if init_pci:
+ u_boot_console.run_command('pci enum')
+
+@pytest.mark.buildconfigspec('cmd_dhcp')
+def test_net_dhcp(u_boot_console):
+ """Test the dhcp command.
+
+ The boardenv_* file may be used to enable/disable this test; see the
+ comment at the beginning of this file.
+ """
+
+ test_dhcp = u_boot_console.config.env.get('env__net_dhcp_server', False)
+ if not test_dhcp:
+ pytest.skip('No DHCP server available')
+
+ u_boot_console.run_command('setenv autoload no')
+ output = u_boot_console.run_command('dhcp')
+ assert 'DHCP client bound to address ' in output
+
+ global net_set_up
+ net_set_up = True
+
+@pytest.mark.buildconfigspec('net')
+def test_net_setup_static(u_boot_console):
+ """Set up a static IP configuration.
+
+ The configuration is provided by the boardenv_* file; see the comment at
+ the beginning of this file.
+ """
+
+ env_vars = u_boot_console.config.env.get('env__net_static_env_vars', None)
+ if not env_vars:
+ pytest.skip('No static network configuration is defined')
+
+ for (var, val) in env_vars:
+ u_boot_console.run_command('setenv %s %s' % (var, val))
+
+ global net_set_up
+ net_set_up = True
+
+@pytest.mark.buildconfigspec('cmd_ping')
+def test_net_ping(u_boot_console):
+ """Test the ping command.
+
+ The $serverip (as set up by either test_net_dhcp or test_net_setup_static)
+ is pinged. The test validates that the host is alive, as reported by the
+ ping command's output.
+ """
+
+ if not net_set_up:
+ pytest.skip('Network not initialized')
+
+ output = u_boot_console.run_command('ping $serverip')
+ assert 'is alive' in output
+
+@pytest.mark.buildconfigspec('cmd_net')
+def test_net_tftpboot(u_boot_console):
+ """Test the tftpboot command.
+
+ A file is downloaded from the TFTP server, its size and optionally its
+ CRC32 are validated.
+
+ The details of the file to download are provided by the boardenv_* file;
+ see the comment at the beginning of this file.
+ """
+
+ if not net_set_up:
+ pytest.skip('Network not initialized')
+
+ f = u_boot_console.config.env.get('env__net_tftp_readable_file', None)
+ if not f:
+ pytest.skip('No TFTP readable file to read')
+
+ addr = f.get('addr', None)
+
+ fn = f['fn']
+ if not addr:
+ output = u_boot_console.run_command('tftpboot %s' % (fn))
+ else:
+ output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn))
+ expected_text = 'Bytes transferred = '
+ sz = f.get('size', None)
+ if sz:
+ expected_text += '%d' % sz
+ assert expected_text in output
+
+ expected_crc = f.get('crc32', None)
+ if not expected_crc:
+ return
+
+ if u_boot_console.config.buildconfig.get('config_cmd_crc32', 'n') != 'y':
+ return
+
+ output = u_boot_console.run_command('crc32 $fileaddr $filesize')
+ assert expected_crc in output
+
+@pytest.mark.buildconfigspec('cmd_nfs')
+def test_net_nfs(u_boot_console):
+ """Test the nfs command.
+
+ A file is downloaded from the NFS server, its size and optionally its
+ CRC32 are validated.
+
+ The details of the file to download are provided by the boardenv_* file;
+ see the comment at the beginning of this file.
+ """
+
+ if not net_set_up:
+ pytest.skip('Network not initialized')
+
+ f = u_boot_console.config.env.get('env__net_nfs_readable_file', None)
+ if not f:
+ pytest.skip('No NFS readable file to read')
+
+ addr = f.get('addr', None)
+ if not addr:
+ addr = u_boot_utils.find_ram_base(u_boot_console)
+
+ fn = f['fn']
+ output = u_boot_console.run_command('nfs %x %s' % (addr, fn))
+ expected_text = 'Bytes transferred = '
+ sz = f.get('size', None)
+ if sz:
+ expected_text += '%d' % sz
+ assert expected_text in output
+
+ expected_crc = f.get('crc32', None)
+ if not expected_crc:
+ return
+
+ if u_boot_console.config.buildconfig.get('config_cmd_crc32', 'n') != 'y':
+ return
+
+ output = u_boot_console.run_command('crc32 %x $filesize' % addr)
+ assert expected_crc in output
diff --git a/test/py/tests/test_ofplatdata.py b/test/py/tests/test_ofplatdata.py
new file mode 100644
index 00000000..263334b0
--- /dev/null
+++ b/test/py/tests/test_ofplatdata.py
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+
+import pytest
+import u_boot_utils as util
+
+OF_PLATDATA_OUTPUT = '''
+of-platdata probe:
+bool 1
+byte 05
+bytearray 06 00 00
+int 1
+intarray 2 3 4 0
+longbytearray 09 0a 0b 0c 0d 0e 0f 10 11
+string message
+stringarray "multi-word" "message" ""
+of-platdata probe:
+bool 0
+byte 08
+bytearray 01 23 34
+int 3
+intarray 5 0 0 0
+longbytearray 09 00 00 00 00 00 00 00 00
+string message2
+stringarray "another" "multi-word" "message"
+of-platdata probe:
+bool 0
+byte 00
+bytearray 00 00 00
+int 0
+intarray 0 0 0 0
+longbytearray 00 00 00 00 00 00 00 00 00
+string <NULL>
+stringarray "one" "" ""
+of-platdata probe:
+bool 0
+byte 00
+bytearray 00 00 00
+int 0
+intarray 0 0 0 0
+longbytearray 00 00 00 00 00 00 00 00 00
+string <NULL>
+stringarray "spl" "" ""
+'''
+
+@pytest.mark.buildconfigspec('spl_of_platdata')
+def test_ofplatdata(u_boot_console):
+ """Test that of-platdata can be generated and used in sandbox"""
+ cons = u_boot_console
+ cons.restart_uboot_with_flags(['--show_of_platdata'])
+ output = cons.get_spawn_output().replace('\r', '')
+ assert OF_PLATDATA_OUTPUT in output
+
+@pytest.mark.buildconfigspec('spl_of_platdata')
+def test_spl_devicetree(u_boot_console):
+ """Test content of spl device-tree"""
+ cons = u_boot_console
+ dtb = cons.config.build_dir + '/spl/u-boot-spl.dtb'
+ fdtgrep = cons.config.build_dir + '/tools/fdtgrep'
+ output = util.run_and_log(cons, [fdtgrep, '-l', dtb])
+
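+    # The u-boot,dm-* marker properties should have been stripped from the SPL
+    # device tree by the build; only the expected spl-test nodes should remain.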
+ assert "u-boot,dm-pre-reloc" not in output
+ assert "u-boot,dm-pre-proper" not in output
+ assert "u-boot,dm-spl" not in output
+ assert "u-boot,dm-tpl" not in output
+
+ assert "spl-test4" in output
+ assert "spl-test5" not in output
+ assert "spl-test6" not in output
+ assert "spl-test7" in output
diff --git a/test/py/tests/test_pinmux.py b/test/py/tests/test_pinmux.py
new file mode 100644
index 00000000..25394f1f
--- /dev/null
+++ b/test/py/tests/test_pinmux.py
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0
+
+import pytest
+import u_boot_utils
+
+@pytest.mark.buildconfigspec('cmd_pinmux')
+def test_pinmux_usage_1(u_boot_console):
+ """Test that 'pinmux' command without parameters displays
+ pinmux usage."""
+ output = u_boot_console.run_command('pinmux')
+ assert 'Usage:' in output
+
+@pytest.mark.buildconfigspec('cmd_pinmux')
+def test_pinmux_usage_2(u_boot_console):
+ """Test that 'pinmux status' executed without previous "pinmux dev"
+ command displays pinmux usage."""
+ output = u_boot_console.run_command('pinmux status')
+ assert 'Usage:' in output
+
+@pytest.mark.buildconfigspec('cmd_pinmux')
+@pytest.mark.boardspec('sandbox')
+def test_pinmux_status_all(u_boot_console):
+ """Test that 'pinmux status -a' displays pin's muxing."""
+ output = u_boot_console.run_command('pinmux status -a')
+ assert ('SCL : I2C SCL' in output)
+ assert ('SDA : I2C SDA' in output)
+ assert ('TX : Uart TX' in output)
+ assert ('RX : Uart RX' in output)
+ assert ('W1 : 1-wire gpio' in output)
+
+@pytest.mark.buildconfigspec('cmd_pinmux')
+@pytest.mark.boardspec('sandbox')
+def test_pinmux_list(u_boot_console):
+ """Test that 'pinmux list' returns the pin-controller list."""
+ output = u_boot_console.run_command('pinmux list')
+ assert 'sandbox_pinctrl' in output
+
+@pytest.mark.buildconfigspec('cmd_pinmux')
+def test_pinmux_dev_bad(u_boot_console):
+ """Test that 'pinmux dev' returns an error when trying to select a
+ wrong pin controller."""
+ pincontroller = 'bad_pin_controller_name'
+ output = u_boot_console.run_command('pinmux dev ' + pincontroller)
+ expected_output = 'Can\'t get the pin-controller: ' + pincontroller + '!'
+ assert (expected_output in output)
+
+@pytest.mark.buildconfigspec('cmd_pinmux')
+@pytest.mark.boardspec('sandbox')
+def test_pinmux_dev(u_boot_console):
+ """Test that 'pinmux dev' select the wanted pin controller."""
+ pincontroller = 'pinctrl'
+ output = u_boot_console.run_command('pinmux dev ' + pincontroller)
+ expected_output = 'dev: ' + pincontroller
+ assert (expected_output in output)
+
+@pytest.mark.buildconfigspec('cmd_pinmux')
+@pytest.mark.boardspec('sandbox')
+def test_pinmux_status(u_boot_console):
+ """Test that 'pinmux status' displays selected pincontroller's pin
+ muxing descriptions."""
+ output = u_boot_console.run_command('pinmux status')
+ assert ('SCL : I2C SCL' in output)
+ assert ('SDA : I2C SDA' in output)
+ assert ('TX : Uart TX' in output)
+ assert ('RX : Uart RX' in output)
+ assert ('W1 : 1-wire gpio' in output)
diff --git a/test/py/tests/test_sandbox_exit.py b/test/py/tests/test_sandbox_exit.py
new file mode 100644
index 00000000..a301f4b5
--- /dev/null
+++ b/test/py/tests/test_sandbox_exit.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+import pytest
+import signal
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('sysreset')
+def test_reset(u_boot_console):
+ """Test that the "reset" command exits sandbox process."""
+
+ u_boot_console.run_command('reset', wait_for_prompt=False)
+ assert(u_boot_console.validate_exited())
+
+@pytest.mark.boardspec('sandbox')
+def test_ctrl_c(u_boot_console):
+ """Test that sending SIGINT to sandbox causes it to exit."""
+
+ u_boot_console.kill(signal.SIGINT)
+ assert(u_boot_console.validate_exited())
diff --git a/test/py/tests/test_sf.py b/test/py/tests/test_sf.py
new file mode 100644
index 00000000..adf8b7dc
--- /dev/null
+++ b/test/py/tests/test_sf.py
@@ -0,0 +1,217 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016, Xilinx Inc. Michal Simek
+# Copyright (c) 2017, Xiphos Systems Corp. All rights reserved.
+
+import re
+import pytest
+import random
+import u_boot_utils
+
+"""
+Note: This test relies on boardenv_* containing configuration values to define
+which SPI Flash areas are available for testing. Without this, these tests
+will be automatically skipped.
+For example:
+
+# A list of sections of Flash memory to be tested.
+env__sf_configs = (
+ {
+ # Where in SPI Flash should the test operate.
+ 'offset': 0x00000000,
+ # This value is optional.
+ # If present, specifies the [[bus:]cs] argument used in `sf probe`
+ # If missing, defaults to 0.
+ 'id': '0:1',
+ # This value is optional.
+ # If set as a number, specifies the speed of the SPI Flash.
+ # If set as an array of 2, specifies a range for a random speed.
+ # If missing, defaults to 0.
+ 'speed': 1000000,
+ # This value is optional.
+ # If present, specifies the size to use for read/write operations.
+ # If missing, the SPI Flash page size is used as a default (based on
+ # the `sf probe` output).
+ 'len': 0x10000,
+ # This value is optional.
+        # If present, specifies whether the test may write to the Flash area
+        # at 'offset'.
+ # If missing, defaults to False.
+ 'writeable': False,
+ # This value is optional.
+ # If present, specifies the expected CRC32 value of the flash area.
+        # If missing, the CRC32 check is skipped.
+ 'crc32': 0xCAFECAFE,
+ },
+)
+"""
+
+def sf_prepare(u_boot_console, env__sf_config):
+ """Check global state of the SPI Flash before running any test.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ env__sf_config: The single SPI Flash device configuration on which to
+ run the tests.
+
+ Returns:
+ sf_params: a dictionary of SPI Flash parameters.
+ """
+
+ sf_params = {}
+ sf_params['ram_base'] = u_boot_utils.find_ram_base(u_boot_console)
+
+ probe_id = env__sf_config.get('id', 0)
+ speed = env__sf_config.get('speed', 0)
+ if isinstance(speed, int):
+ sf_params['speed'] = speed
+ else:
+ assert len(speed) == 2, "If speed is a list, it must have 2 entries"
+ sf_params['speed'] = random.randint(speed[0], speed[1])
+
+ cmd = 'sf probe %d %d' % (probe_id, sf_params['speed'])
+
+ output = u_boot_console.run_command(cmd)
+ assert 'SF: Detected' in output, 'No Flash device available'
+
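+    # Parse the flash geometry (page, erase and total size) from the
+    # 'sf probe' output.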
+ m = re.search('page size (.+?) Bytes', output)
+ assert m, 'SPI Flash page size not recognized'
+ sf_params['page_size'] = int(m.group(1))
+
+ m = re.search('erase size (.+?) KiB', output)
+ assert m, 'SPI Flash erase size not recognized'
+ sf_params['erase_size'] = int(m.group(1))
+ sf_params['erase_size'] *= 1024
+
+ m = re.search('total (.+?) MiB', output)
+ assert m, 'SPI Flash total size not recognized'
+ sf_params['total_size'] = int(m.group(1))
+ sf_params['total_size'] *= 1024 * 1024
+
+ assert 'offset' in env__sf_config, \
+ '\'offset\' is required for this test.'
+ sf_params['len'] = env__sf_config.get('len', sf_params['erase_size'])
+
+ assert not env__sf_config['offset'] % sf_params['erase_size'], \
+ 'offset not multiple of erase size.'
+ assert not sf_params['len'] % sf_params['erase_size'], \
+ 'erase length not multiple of erase size.'
+
+ assert not (env__sf_config.get('writeable', False) and
+ 'crc32' in env__sf_config), \
+ 'Cannot check crc32 on writeable sections'
+
+ return sf_params
+
+def sf_read(u_boot_console, env__sf_config, sf_params):
+ """Helper function used to read and compute the CRC32 value of a section of
+ SPI Flash memory.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ env__sf_config: The single SPI Flash device configuration on which to
+ run the tests.
+ sf_params: SPI Flash parameters.
+
+ Returns:
+ CRC32 value of SPI Flash section
+ """
+
+ addr = sf_params['ram_base']
+ offset = env__sf_config['offset']
+ count = sf_params['len']
+ pattern = random.randint(0, 0xFF)
+ crc_expected = env__sf_config.get('crc32', None)
+
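+    # Fill RAM with a random pattern first, so that the CRC comparison below
+    # can detect a read which leaves RAM untouched.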
+ cmd = 'mw.b %08x %02x %x' % (addr, pattern, count)
+ u_boot_console.run_command(cmd)
+ crc_pattern = u_boot_utils.crc32(u_boot_console, addr, count)
+ if crc_expected:
+ assert crc_pattern != crc_expected
+
+ cmd = 'sf read %08x %08x %x' % (addr, offset, count)
+ response = u_boot_console.run_command(cmd)
+ assert 'Read: OK' in response, 'Read operation failed'
+ crc_readback = u_boot_utils.crc32(u_boot_console, addr, count)
+ assert crc_pattern != crc_readback, 'sf read did not update RAM content.'
+ if crc_expected:
+ assert crc_readback == crc_expected
+
+ return crc_readback
+
+def sf_update(u_boot_console, env__sf_config, sf_params):
+ """Helper function used to update a section of SPI Flash memory.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ env__sf_config: The single SPI Flash device configuration on which to
+ run the tests.
+
+ Returns:
+ CRC32 value of SPI Flash section
+ """
+
+ addr = sf_params['ram_base']
+ offset = env__sf_config['offset']
+ count = sf_params['len']
+ pattern = int(random.random() * 0xFF)
+
+ cmd = 'mw.b %08x %02x %x' % (addr, pattern, count)
+ u_boot_console.run_command(cmd)
+ crc_pattern = u_boot_utils.crc32(u_boot_console, addr, count)
+
+ cmd = 'sf update %08x %08x %x' % (addr, offset, count)
+ u_boot_console.run_command(cmd)
+ crc_readback = sf_read(u_boot_console, env__sf_config, sf_params)
+
+ assert crc_readback == crc_pattern
+
+@pytest.mark.buildconfigspec('cmd_sf')
+@pytest.mark.buildconfigspec('cmd_crc32')
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_sf_read(u_boot_console, env__sf_config):
+ sf_params = sf_prepare(u_boot_console, env__sf_config)
+ sf_read(u_boot_console, env__sf_config, sf_params)
+
+@pytest.mark.buildconfigspec('cmd_sf')
+@pytest.mark.buildconfigspec('cmd_crc32')
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_sf_read_twice(u_boot_console, env__sf_config):
+ sf_params = sf_prepare(u_boot_console, env__sf_config)
+
+ crc1 = sf_read(u_boot_console, env__sf_config, sf_params)
+ sf_params['ram_base'] += 0x100
+ crc2 = sf_read(u_boot_console, env__sf_config, sf_params)
+
+    assert crc1 == crc2, 'CRC32 of two successive read operations do not match'
+
+@pytest.mark.buildconfigspec('cmd_sf')
+@pytest.mark.buildconfigspec('cmd_crc32')
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_sf_erase(u_boot_console, env__sf_config):
+ if not env__sf_config.get('writeable', False):
+ pytest.skip('Flash config is tagged as not writeable')
+
+ sf_params = sf_prepare(u_boot_console, env__sf_config)
+ addr = sf_params['ram_base']
+ offset = env__sf_config['offset']
+ count = sf_params['len']
+
+ cmd = 'sf erase %08x %x' % (offset, count)
+ output = u_boot_console.run_command(cmd)
+ assert 'Erased: OK' in output, 'Erase operation failed'
+
+ cmd = 'mw.b %08x ff %x' % (addr, count)
+ u_boot_console.run_command(cmd)
+ crc_ffs = u_boot_utils.crc32(u_boot_console, addr, count)
+
+ crc_read = sf_read(u_boot_console, env__sf_config, sf_params)
+ assert crc_ffs == crc_read, 'Unexpected CRC32 after erase operation.'
+
+@pytest.mark.buildconfigspec('cmd_sf')
+@pytest.mark.buildconfigspec('cmd_crc32')
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_sf_update(u_boot_console, env__sf_config):
+ if not env__sf_config.get('writeable', False):
+ pytest.skip('Flash config is tagged as not writeable')
+
+ sf_params = sf_prepare(u_boot_console, env__sf_config)
+ sf_update(u_boot_console, env__sf_config, sf_params)
diff --git a/test/py/tests/test_shell_basics.py b/test/py/tests/test_shell_basics.py
new file mode 100644
index 00000000..f54f7b74
--- /dev/null
+++ b/test/py/tests/test_shell_basics.py
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Test basic shell functionality, such as commands separated by semicolons.
+
+import pytest
+
+pytestmark = pytest.mark.buildconfigspec('cmd_echo')
+
+def test_shell_execute(u_boot_console):
+ """Test any shell command."""
+
+ response = u_boot_console.run_command('echo hello')
+ assert response.strip() == 'hello'
+
+def test_shell_semicolon_two(u_boot_console):
+ """Test two shell commands separate by a semi-colon."""
+
+ cmd = 'echo hello; echo world'
+ response = u_boot_console.run_command(cmd)
+ # This validation method ignores the exact whitespace between the strings
+ assert response.index('hello') < response.index('world')
+
+def test_shell_semicolon_three(u_boot_console):
+ """Test three shell commands separate by a semi-colon, with variable
+ expansion dependencies between them."""
+
+ cmd = 'setenv list 1; setenv list ${list}2; setenv list ${list}3; ' + \
+ 'echo ${list}'
+ response = u_boot_console.run_command(cmd)
+ assert response.strip() == '123'
+ u_boot_console.run_command('setenv list')
+
+def test_shell_run(u_boot_console):
+ """Test the "run" shell command."""
+
+ u_boot_console.run_command('setenv foo "setenv monty 1; setenv python 2"')
+ u_boot_console.run_command('run foo')
+ response = u_boot_console.run_command('echo $monty')
+ assert response.strip() == '1'
+ response = u_boot_console.run_command('echo $python')
+ assert response.strip() == '2'
+ u_boot_console.run_command('setenv foo')
+ u_boot_console.run_command('setenv monty')
+ u_boot_console.run_command('setenv python')
diff --git a/test/py/tests/test_sleep.py b/test/py/tests/test_sleep.py
new file mode 100644
index 00000000..b69edf26
--- /dev/null
+++ b/test/py/tests/test_sleep.py
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+
+import pytest
+import time
+
+"""
+Note: This test doesn't rely on boardenv_* configuration values, but they can
+change its behavior.
+
+# Set env__sleep_accurate to False if sleep timing is not accurate on your platform
+env__sleep_accurate = False
+
+"""
+
+def test_sleep(u_boot_console):
+ """Test the sleep command, and validate that it sleeps for approximately
+ the correct amount of time."""
+
+    sleep_accurate = u_boot_console.config.env.get('env__sleep_accurate', True)
+    if not sleep_accurate:
+        pytest.skip('sleep is not accurate')
+
+ if u_boot_console.config.buildconfig.get('config_cmd_misc', 'n') != 'y':
+ pytest.skip('sleep command not supported')
+ # 3s isn't too long, but is enough to cross a few second boundaries.
+ sleep_time = 3
+ tstart = time.time()
+ u_boot_console.run_command('sleep %d' % sleep_time)
+ tend = time.time()
+ elapsed = tend - tstart
+ assert elapsed >= (sleep_time - 0.01)
+ if not u_boot_console.config.gdbserver:
+ # 0.25s margin is hopefully enough to account for any system overhead.
+ assert elapsed < (sleep_time + 0.25)
diff --git a/test/py/tests/test_tpm2.py b/test/py/tests/test_tpm2.py
new file mode 100644
index 00000000..70f906da
--- /dev/null
+++ b/test/py/tests/test_tpm2.py
@@ -0,0 +1,233 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018, Bootlin
+# Author: Miquel Raynal <miquel.raynal@bootlin.com>
+
+import os.path
+import pytest
+import u_boot_utils
+import re
+import time
+
+"""
+Test the TPMv2.x related commands. You must have a working hardware setup in
+order to do these tests.
+
+Notes:
+* These tests exercise the password mechanism; the TPM chip must not have any
+password set when they start.
+* Commands like pcr_setauthpolicy and pcr_resetauthpolicy are not covered here
+because most TPMs do not implement them and return an error, which would make
+the tests fail.
+"""
+
+updates = 0
+
+def force_init(u_boot_console, force=False):
+ """When a test fails, U-Boot is reset. Because TPM stack must be initialized
+ after each reboot, we must ensure these lines are always executed before
+ trying any command or they will fail with no reason. Executing 'tpm init'
+ twice will spawn an error used to detect that the TPM was not reset and no
+ initialization code should be run.
+ """
+ output = u_boot_console.run_command('tpm2 init')
+    if force or 'Error' not in output:
+ u_boot_console.run_command('echo --- start of init ---')
+ u_boot_console.run_command('tpm2 startup TPM2_SU_CLEAR')
+ u_boot_console.run_command('tpm2 self_test full')
+ u_boot_console.run_command('tpm2 clear TPM2_RH_LOCKOUT')
+ output = u_boot_console.run_command('echo $?')
+ if not output.endswith('0'):
+ u_boot_console.run_command('tpm2 clear TPM2_RH_PLATFORM')
+ u_boot_console.run_command('echo --- end of init ---')
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_init(u_boot_console):
+ """Init the software stack to use TPMv2 commands."""
+
+ u_boot_console.run_command('tpm2 init')
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_startup(u_boot_console):
+ """Execute a TPM2_Startup command.
+
+ Initiate the TPM internal state machine.
+ """
+
+ u_boot_console.run_command('tpm2 startup TPM2_SU_CLEAR')
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_self_test_full(u_boot_console):
+ """Execute a TPM2_SelfTest (full) command.
+
+ Ask the TPM to perform all self tests to also enable full capabilities.
+ """
+
+ u_boot_console.run_command('tpm2 self_test full')
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_continue_self_test(u_boot_console):
+ """Execute a TPM2_SelfTest (continued) command.
+
+ Ask the TPM to finish its self tests (alternative to the full test) in order
+ to enter a fully operational state.
+ """
+
+ u_boot_console.run_command('tpm2 self_test continue')
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_clear(u_boot_console):
+ """Execute a TPM2_Clear command.
+
+    Ask the TPM to entirely reset its internal state (including internal
+ configuration, passwords, counters and DAM parameters). This is half of the
+ TAKE_OWNERSHIP command from TPMv1.
+
+ Use the LOCKOUT hierarchy for this. The LOCKOUT/PLATFORM hierarchies must
+ not have a password set, otherwise this test will fail. ENDORSEMENT and
+ PLATFORM hierarchies are also available.
+ """
+
+ u_boot_console.run_command('tpm2 clear TPM2_RH_LOCKOUT')
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+
+ u_boot_console.run_command('tpm2 clear TPM2_RH_PLATFORM')
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_change_auth(u_boot_console):
+ """Execute a TPM2_HierarchyChangeAuth command.
+
+    Ask the TPM to change the owner, i.e. set a new password: 'unicorn'.
+
+ Use the LOCKOUT hierarchy for this. ENDORSEMENT and PLATFORM hierarchies are
+ also available.
+ """
+
+ force_init(u_boot_console)
+
+ u_boot_console.run_command('tpm2 change_auth TPM2_RH_LOCKOUT unicorn')
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+
+ u_boot_console.run_command('tpm2 clear TPM2_RH_LOCKOUT unicorn')
+ output = u_boot_console.run_command('echo $?')
+ u_boot_console.run_command('tpm2 clear TPM2_RH_PLATFORM')
+ assert output.endswith('0')
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_get_capability(u_boot_console):
+ """Execute a TPM_GetCapability command.
+
+    Display one capability. In our test case, display the default DAM lockout
+    counter, which should be 0 since the clear operation:
+    - TPM_CAP_TPM_PROPERTIES = 0x6
+    - TPM_PT_LOCKOUT_COUNTER (1st parameter) = PT_VAR (0x200) + 14 = 0x20e
+
+    There are no expected default values because they depend on the chip used.
+    We can still save them in order to check that they have changed later.
+ """
+
+ force_init(u_boot_console)
+ ram = u_boot_utils.find_ram_base(u_boot_console)
+
+ read_cap = u_boot_console.run_command('tpm2 get_capability 0x6 0x20e 0x200 1') #0x%x 1' % ram)
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+ assert 'Property 0x0000020e: 0x00000000' in read_cap
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_dam_parameters(u_boot_console):
+ """Execute a TPM2_DictionaryAttackParameters command.
+
+ Change Dictionary Attack Mitigation (DAM) parameters. Ask the TPM to change:
+ - Max number of failed authentication before lockout: 3
+ - Time before the failure counter is automatically decremented: 10 sec
+ - Time after a lockout failure before it can be attempted again: 0 sec
+
+ For an unknown reason, the DAM parameters must be changed before changing
+ the authentication, otherwise the lockout will be engaged after the first
+ failed authentication attempt.
+ """
+
+ force_init(u_boot_console)
+ ram = u_boot_utils.find_ram_base(u_boot_console)
+
+ # Set the DAM parameters to known values
+ u_boot_console.run_command('tpm2 dam_parameters 3 10 0')
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+
+ # Check the values have been saved
+ read_cap = u_boot_console.run_command('tpm2 get_capability 0x6 0x20f 0x%x 3' % ram)
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+ assert 'Property 0x0000020f: 0x00000003' in read_cap
+ assert 'Property 0x00000210: 0x0000000a' in read_cap
+ assert 'Property 0x00000211: 0x00000000' in read_cap
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_pcr_read(u_boot_console):
+ """Execute a TPM2_PCR_Read command.
+
+ Perform a PCR read of the 0th PCR. Must be zero.
+ """
+
+ force_init(u_boot_console)
+ ram = u_boot_utils.find_ram_base(u_boot_console)
+
+ read_pcr = u_boot_console.run_command('tpm2 pcr_read 0 0x%x' % ram)
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+
+ # Save the number of PCR updates
+    match = re.findall(r'\d+ known updates', read_pcr)[0]
+    global updates
+    updates = int(re.findall(r'\d+', match)[0])
+
+ # Check the output value
+ assert 'PCR #0 content' in read_pcr
+ assert '00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00' in read_pcr
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_pcr_extend(u_boot_console):
+ """Execute a TPM2_PCR_Extend command.
+
+ Perform a PCR extension with a known hash in memory (zeroed since the board
+ must have been rebooted).
+
+    No authentication mechanism is used here, so this does not yet protect
+    against packet replay.
+ """
+
+ force_init(u_boot_console)
+ ram = u_boot_utils.find_ram_base(u_boot_console)
+
+ u_boot_console.run_command('tpm2 pcr_extend 0 0x%x' % ram)
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
+
+ read_pcr = u_boot_console.run_command('tpm2 pcr_read 0 0x%x' % ram)
+ output = u_boot_console.run_command('echo $?')
+ assert output.endswith('0')
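+    # The extend data is 32 zero bytes read from RAM, so the new PCR value is
+    # expected to be SHA-256 of 64 zero bytes (the previous all-zero PCR value
+    # concatenated with the zeroed digest).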
+ assert 'f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b' in read_pcr
+ assert '43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 4b' in read_pcr
+
+    match = re.findall(r'\d+ known updates', read_pcr)[0]
+    new_updates = int(re.findall(r'\d+', match)[0])
+ assert (updates + 1) == new_updates
+
+@pytest.mark.buildconfigspec('cmd_tpm_v2')
+def test_tpm2_cleanup(u_boot_console):
+ """Ensure the TPM is cleared from password or test related configuration."""
+
+ force_init(u_boot_console, True)
diff --git a/test/py/tests/test_ums.py b/test/py/tests/test_ums.py
new file mode 100644
index 00000000..749b1606
--- /dev/null
+++ b/test/py/tests/test_ums.py
@@ -0,0 +1,236 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Test U-Boot's "ums" command. The test starts UMS in U-Boot, waits for USB
+# device enumeration on the host, reads a small block of data from the UMS
+# block device, optionally mounts a partition and performs filesystem-based
+# read/write tests, and finally aborts the "ums" command in U-Boot.
+
+import os
+import os.path
+import pytest
+import re
+import time
+import u_boot_utils
+
+"""
+Note: This test relies on:
+
+a) boardenv_* to contain configuration values to define which USB ports are
+available for testing. Without this, this test will be automatically skipped.
+For example:
+
+# Leave this list empty if you have no block_devs below with writable
+# partitions defined.
+env__mount_points = (
+ '/mnt/ubtest-mnt-p2371-2180-na',
+)
+
+env__usb_dev_ports = (
+ {
+ 'fixture_id': 'micro_b',
+ 'tgt_usb_ctlr': '0',
+ 'host_ums_dev_node': '/dev/disk/by-path/pci-0000:00:14.0-usb-0:13:1.0-scsi-0:0:0:0',
+ },
+)
+
+env__block_devs = (
+ # eMMC; always present
+ {
+ 'fixture_id': 'emmc',
+ 'type': 'mmc',
+ 'id': '0',
+ # The following two properties are optional.
+ # If present, the partition will be mounted and a file written-to and
+ # read-from it. If missing, only a simple block read test will be
+ # performed.
+ 'writable_fs_partition': 1,
+ 'writable_fs_subdir': 'tmp/',
+ },
+ # SD card; present since I plugged one in
+ {
+ 'fixture_id': 'sd',
+ 'type': 'mmc',
+ 'id': '1'
+ },
+)
+
+b) udev rules to set permissions on devices nodes, so that sudo is not
+required. For example:
+
+ACTION=="add", SUBSYSTEM=="block", SUBSYSTEMS=="usb", KERNELS=="3-13", MODE:="666"
+
+(You may wish to change the group ID instead of setting the permissions wide
+open. All that matters is that the user ID running the test can access the
+device.)
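+
+For example, granting access via a hypothetical 'disk' group instead of
+world-writable permissions:
+
+ACTION=="add", SUBSYSTEM=="block", SUBSYSTEMS=="usb", KERNELS=="3-13", GROUP:="disk", MODE:="660"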
+
+c) /etc/fstab entries to allow the block device to be mounted without requiring
+root permissions. For example:
+
+/dev/disk/by-path/pci-0000:00:14.0-usb-0:13:1.0-scsi-0:0:0:0-part1 /mnt/ubtest-mnt-p2371-2180-na ext4 noauto,user,nosuid,nodev
+
+This entry is only needed if any block_devs above contain a
+writable_fs_partition value.
+"""
+
+@pytest.mark.buildconfigspec('cmd_usb_mass_storage')
+def test_ums(u_boot_console, env__usb_dev_port, env__block_devs):
+ """Test the "ums" command; the host system must be able to enumerate a UMS
+ device when "ums" is running, block and optionally file I/O are tested,
+ and this device must disappear when "ums" is aborted.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ env__usb_dev_port: The single USB device-mode port specification on
+ which to run the test. See the file-level comment above for
+ details of the format.
+ env__block_devs: The list of block devices that the target U-Boot
+ device has attached. See the file-level comment above for details
+ of the format.
+
+ Returns:
+ Nothing.
+ """
+
+ have_writable_fs_partition = 'writable_fs_partition' in env__block_devs[0]
+ if not have_writable_fs_partition:
+        # If 'writable_fs_partition' is missing, we'll skip all parts of the
+        # testing which mount filesystems.
+ u_boot_console.log.warning(
+ 'boardenv missing "writable_fs_partition"; ' +
+ 'UMS testing will be limited.')
+
+ tgt_usb_ctlr = env__usb_dev_port['tgt_usb_ctlr']
+ host_ums_dev_node = env__usb_dev_port['host_ums_dev_node']
+
+ # We're interested in testing USB device mode on each port, not the cross-
+ # product of that with each device. So, just pick the first entry in the
+ # device list here. We'll test each block device somewhere else.
+ tgt_dev_type = env__block_devs[0]['type']
+ tgt_dev_id = env__block_devs[0]['id']
+ if have_writable_fs_partition:
+ mount_point = u_boot_console.config.env['env__mount_points'][0]
+ mount_subdir = env__block_devs[0]['writable_fs_subdir']
+ part_num = env__block_devs[0]['writable_fs_partition']
+ host_ums_part_node = '%s-part%d' % (host_ums_dev_node, part_num)
+ else:
+ host_ums_part_node = host_ums_dev_node
+
+ test_f = u_boot_utils.PersistentRandomFile(u_boot_console, 'ums.bin',
+                                             1024 * 1024)
+ if have_writable_fs_partition:
+ mounted_test_fn = mount_point + '/' + mount_subdir + test_f.fn
+
+ def start_ums():
+ """Start U-Boot's ums shell command.
+
+ This also waits for the host-side USB enumeration process to complete.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ u_boot_console.log.action(
+ 'Starting long-running U-Boot ums shell command')
+ cmd = 'ums %s %s %s' % (tgt_usb_ctlr, tgt_dev_type, tgt_dev_id)
+ u_boot_console.run_command(cmd, wait_for_prompt=False)
+ u_boot_console.wait_for(re.compile('UMS: LUN.*[\r\n]'))
+ fh = u_boot_utils.wait_until_open_succeeds(host_ums_part_node)
+ u_boot_console.log.action('Reading raw data from UMS device')
+ fh.read(4096)
+ fh.close()
+
+ def mount():
+ """Mount the block device that U-Boot exports.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ u_boot_console.log.action('Mounting exported UMS device')
+ cmd = ('/bin/mount', host_ums_part_node)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+
+ def umount(ignore_errors):
+ """Unmount the block device that U-Boot exports.
+
+ Args:
+ ignore_errors: Ignore any errors. This is useful if an error has
+ already been detected, and the code is performing best-effort
+ cleanup. In this case, we do not want to mask the original
+ error by "honoring" any new errors.
+
+ Returns:
+ Nothing.
+ """
+
+ u_boot_console.log.action('Unmounting UMS device')
+ cmd = ('/bin/umount', host_ums_part_node)
+ u_boot_utils.run_and_log(u_boot_console, cmd, ignore_errors)
+
+ def stop_ums(ignore_errors):
+ """Stop U-Boot's ums shell command from executing.
+
+ This also waits for the host-side USB de-enumeration process to
+ complete.
+
+ Args:
+ ignore_errors: Ignore any errors. This is useful if an error has
+ already been detected, and the code is performing best-effort
+ cleanup. In this case, we do not want to mask the original
+ error by "honoring" any new errors.
+
+ Returns:
+ Nothing.
+ """
+
+ u_boot_console.log.action(
+ 'Stopping long-running U-Boot ums shell command')
+ u_boot_console.ctrlc()
+ u_boot_utils.wait_until_file_open_fails(host_ums_part_node,
+ ignore_errors)
+
+ ignore_cleanup_errors = True
+ try:
+ start_ums()
+ if not have_writable_fs_partition:
+ # Skip filesystem-based testing if not configured
+ return
+ try:
+ mount()
+ u_boot_console.log.action('Writing test file via UMS')
+ cmd = ('rm', '-f', mounted_test_fn)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+ if os.path.exists(mounted_test_fn):
+ raise Exception('Could not rm target UMS test file')
+ cmd = ('cp', test_f.abs_fn, mounted_test_fn)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+ ignore_cleanup_errors = False
+ finally:
+ umount(ignore_errors=ignore_cleanup_errors)
+ finally:
+ stop_ums(ignore_errors=ignore_cleanup_errors)
+
+ ignore_cleanup_errors = True
+ try:
+ start_ums()
+ try:
+ mount()
+ u_boot_console.log.action('Reading test file back via UMS')
+ read_back_hash = u_boot_utils.md5sum_file(mounted_test_fn)
+ cmd = ('rm', '-f', mounted_test_fn)
+ u_boot_utils.run_and_log(u_boot_console, cmd)
+ ignore_cleanup_errors = False
+ finally:
+ umount(ignore_errors=ignore_cleanup_errors)
+ finally:
+ stop_ums(ignore_errors=ignore_cleanup_errors)
+
+ written_hash = test_f.content_hash
+ assert(written_hash == read_back_hash)
diff --git a/test/py/tests/test_unknown_cmd.py b/test/py/tests/test_unknown_cmd.py
new file mode 100644
index 00000000..8fc284a9
--- /dev/null
+++ b/test/py/tests/test_unknown_cmd.py
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+
+def test_unknown_command(u_boot_console):
+ """Test that executing an unknown command causes U-Boot to print an
+ error."""
+
+ # The "unknown command" error is actively expected here,
+ # so error detection for it is disabled.
+ with u_boot_console.disable_check('unknown_command'):
+ response = u_boot_console.run_command('non_existent_cmd')
+ assert('Unknown command \'non_existent_cmd\' - try \'help\'' in response)
diff --git a/test/py/tests/test_ut.py b/test/py/tests/test_ut.py
new file mode 100644
index 00000000..6c7b8dd2
--- /dev/null
+++ b/test/py/tests/test_ut.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+
+import os.path
+import pytest
+
+@pytest.mark.buildconfigspec('ut_dm')
+def test_ut_dm_init(u_boot_console):
+ """Initialize data for ut dm tests."""
+
+ fn = u_boot_console.config.source_dir + '/testflash.bin'
+ if not os.path.exists(fn):
+ data = b'this is a test'
+ data += b'\x00' * ((4 * 1024 * 1024) - len(data))
+ with open(fn, 'wb') as fh:
+ fh.write(data)
+
+ fn = u_boot_console.config.source_dir + '/spi.bin'
+ if not os.path.exists(fn):
+ data = b'\x00' * (2 * 1024 * 1024)
+ with open(fn, 'wb') as fh:
+ fh.write(data)
+
+def test_ut(u_boot_console, ut_subtest):
+ """Execute a "ut" subtest."""
+
+ output = u_boot_console.run_command('ut ' + ut_subtest)
+ assert output.endswith('Failures: 0')
diff --git a/test/py/tests/test_vboot.py b/test/py/tests/test_vboot.py
new file mode 100644
index 00000000..9c41ee56
--- /dev/null
+++ b/test/py/tests/test_vboot.py
@@ -0,0 +1,293 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016, Google Inc.
+#
+# U-Boot Verified Boot Test
+
+"""
+This tests verified boot in the following ways:
+
+For image verification:
+- Create FIT (unsigned) with mkimage
+- Check that verification shows that no keys are verified
+- Sign image
+- Check that verification shows that a key is now verified
+
+For configuration verification:
+- Corrupt signature and check for failure
+- Create FIT (with unsigned configuration) with mkimage
+- Check that image verification works
+- Sign the FIT and mark the key as 'required' for verification
+- Check that image verification works
+- Corrupt the signature
+- Check that image verification no longer works
+
+Tests run with both SHA1 and SHA256 hashing.
+"""
+
+import pytest
+import sys
+import struct
+import u_boot_utils as util
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('fit_signature')
+@pytest.mark.requiredtool('dtc')
+@pytest.mark.requiredtool('fdtget')
+@pytest.mark.requiredtool('fdtput')
+@pytest.mark.requiredtool('openssl')
+def test_vboot(u_boot_console):
+ """Test verified boot signing with mkimage and verification with 'bootm'.
+
+ This works using sandbox only as it needs to update the device tree used
+ by U-Boot to hold public keys from the signing process.
+
+ The SHA1 and SHA256 tests are combined into a single test since the
+ key-generation process is quite slow and we want to avoid doing it twice.
+ """
+ def dtc(dts):
+ """Run the device tree compiler to compile a .dts file
+
+ The output file will be the same as the input file but with a .dtb
+ extension.
+
+ Args:
+ dts: Device tree file to compile.
+ """
+ dtb = dts.replace('.dts', '.dtb')
+ util.run_and_log(cons, 'dtc %s %s%s -O dtb '
+ '-o %s%s' % (dtc_args, datadir, dts, tmpdir, dtb))
+
+ def run_bootm(sha_algo, test_type, expect_string, boots):
+ """Run a 'bootm' command U-Boot.
+
+ This always starts a fresh U-Boot instance since the device tree may
+ contain a new public key.
+
+ Args:
+ test_type: A string identifying the test type.
+ expect_string: A string which is expected in the output.
+ sha_algo: Either 'sha1' or 'sha256', to select the algorithm to
+ use.
+            boots: A boolean that is True if Linux is expected to boot and
+                False if the boot is expected to fail.
+ """
+ cons.restart_uboot()
+ with cons.log.section('Verified boot %s %s' % (sha_algo, test_type)):
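+            # Load the FIT from the host filesystem into sandbox memory at
+            # address 0x100, point the working FDT at it and try to boot it.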
+ output = cons.run_command_list(
+ ['host load hostfs - 100 %stest.fit' % tmpdir,
+ 'fdt addr 100',
+ 'bootm 100'])
+ assert(expect_string in ''.join(output))
+ if boots:
+ assert('sandbox: continuing, as we cannot run' in ''.join(output))
+ else:
+ assert('sandbox: continuing, as we cannot run' not in ''.join(output))
+
+ def make_fit(its):
+ """Make a new FIT from the .its source file.
+
+ This runs 'mkimage -f' to create a new FIT.
+
+ Args:
+ its: Filename containing .its source.
+ """
+ util.run_and_log(cons, [mkimage, '-D', dtc_args, '-f',
+ '%s%s' % (datadir, its), fit])
+
+ def sign_fit(sha_algo):
+ """Sign the FIT
+
+ Signs the FIT and writes the signature into it. It also writes the
+ public key into the dtb.
+
+ Args:
+ sha_algo: Either 'sha1' or 'sha256', to select the algorithm to
+ use.
+ """
+ cons.log.action('%s: Sign images' % sha_algo)
+ util.run_and_log(cons, [mkimage, '-F', '-k', tmpdir, '-K', dtb,
+ '-r', fit])
+
+ def sign_fit_norequire(sha_algo):
+ """Sign the FIT
+
+ Signs the FIT and writes the signature into it. It also writes the
+ public key into the dtb.
+
+ Args:
+ sha_algo: Either 'sha1' or 'sha256', to select the algorithm to
+ use.
+ """
+ cons.log.action('%s: Sign images' % sha_algo)
+ util.run_and_log(cons, [mkimage, '-F', '-k', tmpdir, '-K', dtb,
+ fit])
+
+ def replace_fit_totalsize(size):
+ """Replace FIT header's totalsize with something greater.
+
+ The totalsize must be less than or equal to FIT_SIGNATURE_MAX_SIZE.
+ If the size is greater, the signature verification should return false.
+
+ Args:
+ size: The new totalsize of the header
+
+ Returns:
+ prev_size: The previous totalsize read from the header
+ """
+ total_size = 0
+ with open(fit, 'r+b') as handle:
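+            # The FDT header stores totalsize as a big-endian 32-bit word at
+            # byte offset 4.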
+ handle.seek(4)
+ total_size = handle.read(4)
+ handle.seek(4)
+ handle.write(struct.pack(">I", size))
+ return struct.unpack(">I", total_size)[0]
+
+ def test_with_algo(sha_algo, padding):
+ """Test verified boot with the given hash algorithm.
+
+ This is the main part of the test code. The same procedure is followed
+ for both hashing algorithms.
+
+ Args:
+ sha_algo: Either 'sha1' or 'sha256', to select the algorithm to
+                use.
+            padding: Either '' or '-pss', to select which .its file (and hence
+                which signature padding scheme) to use.
+        """
+ # Compile our device tree files for kernel and U-Boot. These are
+ # regenerated here since mkimage will modify them (by adding a
+ # public key) below.
+ dtc('sandbox-kernel.dts')
+ dtc('sandbox-u-boot.dts')
+
+ # Build the FIT, but don't sign anything yet
+ cons.log.action('%s: Test FIT with signed images' % sha_algo)
+ make_fit('sign-images-%s%s.its' % (sha_algo , padding))
+ run_bootm(sha_algo, 'unsigned images', 'dev-', True)
+
+ # Sign images with our dev keys
+ sign_fit(sha_algo)
+ run_bootm(sha_algo, 'signed images', 'dev+', True)
+
+ # Create a fresh .dtb without the public keys
+ dtc('sandbox-u-boot.dts')
+
+ cons.log.action('%s: Test FIT with signed configuration' % sha_algo)
+ make_fit('sign-configs-%s%s.its' % (sha_algo , padding))
+ run_bootm(sha_algo, 'unsigned config', '%s+ OK' % sha_algo, True)
+
+ # Sign images with our dev keys
+ sign_fit(sha_algo)
+ run_bootm(sha_algo, 'signed config', 'dev+', True)
+
+ cons.log.action('%s: Check signed config on the host' % sha_algo)
+
+ util.run_and_log(cons, [fit_check_sign, '-f', fit, '-k', tmpdir,
+ '-k', dtb])
+
+ # Replace header bytes
+ bcfg = u_boot_console.config.buildconfig
+ max_size = int(bcfg.get('config_fit_signature_max_size', 0x10000000), 0)
+ existing_size = replace_fit_totalsize(max_size + 1)
+ run_bootm(sha_algo, 'Signed config with bad hash', 'Bad Data Hash', False)
+ cons.log.action('%s: Check overflowed FIT header totalsize' % sha_algo)
+
+ # Replace with existing header bytes
+ replace_fit_totalsize(existing_size)
+ run_bootm(sha_algo, 'signed config', 'dev+', True)
+ cons.log.action('%s: Check default FIT header totalsize' % sha_algo)
+
+ # Increment the first byte of the signature, which should cause failure
+ sig = util.run_and_log(cons, 'fdtget -t bx %s %s value' %
+ (fit, sig_node))
+ byte_list = sig.split()
+ byte = int(byte_list[0], 16)
+ byte_list[0] = '%x' % (byte + 1)
+ sig = ' '.join(byte_list)
+ util.run_and_log(cons, 'fdtput -t bx %s %s value %s' %
+ (fit, sig_node, sig))
+
+ run_bootm(sha_algo, 'Signed config with bad hash', 'Bad Data Hash', False)
+
+ cons.log.action('%s: Check bad config on the host' % sha_algo)
+ util.run_and_log_expect_exception(cons, [fit_check_sign, '-f', fit,
+ '-k', dtb], 1, 'Failed to verify required signature')
+
+ def test_required_key(sha_algo, padding):
+ """Test verified boot with the given hash algorithm.
+
+ This function test if u-boot reject an image when a required
+ key isn't used to sign a FIT.
+
+ Args:
+ sha_algo: Either 'sha1' or 'sha256', to select the algorithm to
+                use.
+            padding: Either '' or '-pss', to select which .its file (and hence
+                which signature padding scheme) to use.
+        """
+ # Compile our device tree files for kernel and U-Boot. These are
+ # regenerated here since mkimage will modify them (by adding a
+ # public key) below.
+ dtc('sandbox-kernel.dts')
+ dtc('sandbox-u-boot.dts')
+
+        # Build the FIT with the prod key (marked as required), then rebuild it
+        # with the dev key (not required). The dtb now contains both the prod
+        # and dev public keys, with the prod key marked as required. Booting
+        # the FIT that is signed only with the dev key should therefore be
+        # rejected by U-Boot.
+ cons.log.action('%s: Test FIT with configs images' % sha_algo)
+ make_fit('sign-configs-%s%s-prod.its' % (sha_algo , padding))
+ sign_fit(sha_algo)
+ make_fit('sign-configs-%s%s.its' % (sha_algo , padding))
+ sign_fit(sha_algo)
+
+ run_bootm(sha_algo, 'signed configs', '', False)
+
+ cons = u_boot_console
+ tmpdir = cons.config.result_dir + '/'
+ tmp = tmpdir + 'vboot.tmp'
+ datadir = cons.config.source_dir + '/test/py/tests/vboot/'
+ fit = '%stest.fit' % tmpdir
+ mkimage = cons.config.build_dir + '/tools/mkimage'
+ fit_check_sign = cons.config.build_dir + '/tools/fit_check_sign'
+ dtc_args = '-I dts -O dtb -i %s' % tmpdir
+ dtb = '%ssandbox-u-boot.dtb' % tmpdir
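+    # Path of the signature node inside the signed FIT; fdtput is used below
+    # to corrupt this signature and check that verification then fails.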
+ sig_node = '/configurations/conf-1/signature'
+
+ # Create an RSA key pair
+ public_exponent = 65537
+ util.run_and_log(cons, 'openssl genpkey -algorithm RSA -out %sdev.key '
+ '-pkeyopt rsa_keygen_bits:2048 '
+ '-pkeyopt rsa_keygen_pubexp:%d' %
+ (tmpdir, public_exponent))
+
+ # Create a certificate containing the public key
+ util.run_and_log(cons, 'openssl req -batch -new -x509 -key %sdev.key -out '
+ '%sdev.crt' % (tmpdir, tmpdir))
+
+ # Create an RSA key pair (prod)
+ public_exponent = 65537
+ util.run_and_log(cons, 'openssl genpkey -algorithm RSA -out %sprod.key '
+ '-pkeyopt rsa_keygen_bits:2048 '
+ '-pkeyopt rsa_keygen_pubexp:%d' %
+ (tmpdir, public_exponent))
+
+ # Create a certificate containing the public key (prod)
+ util.run_and_log(cons, 'openssl req -batch -new -x509 -key %sprod.key -out '
+ '%sprod.crt' % (tmpdir, tmpdir))
+
+    # Create a dummy kernel image filled with zeroes
+ with open('%stest-kernel.bin' % tmpdir, 'w') as fd:
+ fd.write(5000 * chr(0))
+
+ try:
+ # We need to use our own device tree file. Remember to restore it
+ # afterwards.
+ old_dtb = cons.config.dtb
+ cons.config.dtb = dtb
+ test_with_algo('sha1','')
+ test_with_algo('sha1','-pss')
+ test_with_algo('sha256','')
+ test_with_algo('sha256','-pss')
+ test_required_key('sha256','-pss')
+ finally:
+ # Go back to the original U-Boot with the correct dtb.
+ cons.config.dtb = old_dtb
+ cons.restart_uboot()
diff --git a/test/py/tests/vboot/sandbox-kernel.dts b/test/py/tests/vboot/sandbox-kernel.dts
new file mode 100644
index 00000000..a1e853c9
--- /dev/null
+++ b/test/py/tests/vboot/sandbox-kernel.dts
@@ -0,0 +1,7 @@
+/dts-v1/;
+
+/ {
+ model = "Sandbox Verified Boot Test";
+ compatible = "sandbox";
+
+};
diff --git a/test/py/tests/vboot/sandbox-u-boot.dts b/test/py/tests/vboot/sandbox-u-boot.dts
new file mode 100644
index 00000000..63f8f401
--- /dev/null
+++ b/test/py/tests/vboot/sandbox-u-boot.dts
@@ -0,0 +1,10 @@
+/dts-v1/;
+
+/ {
+ model = "Sandbox Verified Boot Test";
+ compatible = "sandbox";
+
+ reset@0 {
+ compatible = "sandbox,reset";
+ };
+};
diff --git a/test/py/tests/vboot/sign-configs-sha1-pss.its b/test/py/tests/vboot/sign-configs-sha1-pss.its
new file mode 100644
index 00000000..72a5637e
--- /dev/null
+++ b/test/py/tests/vboot/sign-configs-sha1-pss.its
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/ {
+ description = "Chrome OS kernel image with one or more FDT blobs";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ data = /incbin/("test-kernel.bin");
+ type = "kernel_noload";
+ arch = "sandbox";
+ os = "linux";
+ compression = "none";
+ load = <0x4>;
+ entry = <0x8>;
+ kernel-version = <1>;
+ hash-1 {
+ algo = "sha1";
+ };
+ };
+ fdt-1 {
+ description = "snow";
+ data = /incbin/("sandbox-kernel.dtb");
+ type = "flat_dt";
+ arch = "sandbox";
+ compression = "none";
+ fdt-version = <1>;
+ hash-1 {
+ algo = "sha1";
+ };
+ };
+ };
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ kernel = "kernel";
+ fdt = "fdt-1";
+ signature {
+ algo = "sha1,rsa2048";
+ padding = "pss";
+ key-name-hint = "dev";
+ sign-images = "fdt", "kernel";
+ };
+ };
+ };
+};
diff --git a/test/py/tests/vboot/sign-configs-sha1.its b/test/py/tests/vboot/sign-configs-sha1.its
new file mode 100644
index 00000000..d8bc1fa0
--- /dev/null
+++ b/test/py/tests/vboot/sign-configs-sha1.its
@@ -0,0 +1,45 @@
+/dts-v1/;
+
+/ {
+ description = "Chrome OS kernel image with one or more FDT blobs";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ data = /incbin/("test-kernel.bin");
+ type = "kernel_noload";
+ arch = "sandbox";
+ os = "linux";
+ compression = "none";
+ load = <0x4>;
+ entry = <0x8>;
+ kernel-version = <1>;
+ hash-1 {
+ algo = "sha1";
+ };
+ };
+ fdt-1 {
+ description = "snow";
+ data = /incbin/("sandbox-kernel.dtb");
+ type = "flat_dt";
+ arch = "sandbox";
+ compression = "none";
+ fdt-version = <1>;
+ hash-1 {
+ algo = "sha1";
+ };
+ };
+ };
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ kernel = "kernel";
+ fdt = "fdt-1";
+ signature {
+ algo = "sha1,rsa2048";
+ key-name-hint = "dev";
+ sign-images = "fdt", "kernel";
+ };
+ };
+ };
+};
diff --git a/test/py/tests/vboot/sign-configs-sha256-pss-prod.its b/test/py/tests/vboot/sign-configs-sha256-pss-prod.its
new file mode 100644
index 00000000..aac732e3
--- /dev/null
+++ b/test/py/tests/vboot/sign-configs-sha256-pss-prod.its
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/ {
+ description = "Chrome OS kernel image with one or more FDT blobs";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ data = /incbin/("test-kernel.bin");
+ type = "kernel_noload";
+ arch = "sandbox";
+ os = "linux";
+ compression = "none";
+ load = <0x4>;
+ entry = <0x8>;
+ kernel-version = <1>;
+ hash-1 {
+ algo = "sha256";
+ };
+ };
+ fdt-1 {
+ description = "snow";
+ data = /incbin/("sandbox-kernel.dtb");
+ type = "flat_dt";
+ arch = "sandbox";
+ compression = "none";
+ fdt-version = <1>;
+ hash-1 {
+ algo = "sha256";
+ };
+ };
+ };
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ kernel = "kernel";
+ fdt = "fdt-1";
+ signature {
+ algo = "sha256,rsa2048";
+ padding = "pss";
+ key-name-hint = "prod";
+ sign-images = "fdt", "kernel";
+ };
+ };
+ };
+};
diff --git a/test/py/tests/vboot/sign-configs-sha256-pss.its b/test/py/tests/vboot/sign-configs-sha256-pss.its
new file mode 100644
index 00000000..7bdcc7e2
--- /dev/null
+++ b/test/py/tests/vboot/sign-configs-sha256-pss.its
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/ {
+ description = "Chrome OS kernel image with one or more FDT blobs";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ data = /incbin/("test-kernel.bin");
+ type = "kernel_noload";
+ arch = "sandbox";
+ os = "linux";
+ compression = "none";
+ load = <0x4>;
+ entry = <0x8>;
+ kernel-version = <1>;
+ hash-1 {
+ algo = "sha256";
+ };
+ };
+ fdt-1 {
+ description = "snow";
+ data = /incbin/("sandbox-kernel.dtb");
+ type = "flat_dt";
+ arch = "sandbox";
+ compression = "none";
+ fdt-version = <1>;
+ hash-1 {
+ algo = "sha256";
+ };
+ };
+ };
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ kernel = "kernel";
+ fdt = "fdt-1";
+ signature {
+ algo = "sha256,rsa2048";
+ padding = "pss";
+ key-name-hint = "dev";
+ sign-images = "fdt", "kernel";
+ };
+ };
+ };
+};
diff --git a/test/py/tests/vboot/sign-configs-sha256.its b/test/py/tests/vboot/sign-configs-sha256.its
new file mode 100644
index 00000000..f5591aad
--- /dev/null
+++ b/test/py/tests/vboot/sign-configs-sha256.its
@@ -0,0 +1,45 @@
+/dts-v1/;
+
+/ {
+ description = "Chrome OS kernel image with one or more FDT blobs";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ data = /incbin/("test-kernel.bin");
+ type = "kernel_noload";
+ arch = "sandbox";
+ os = "linux";
+ compression = "none";
+ load = <0x4>;
+ entry = <0x8>;
+ kernel-version = <1>;
+ hash-1 {
+ algo = "sha256";
+ };
+ };
+ fdt-1 {
+ description = "snow";
+ data = /incbin/("sandbox-kernel.dtb");
+ type = "flat_dt";
+ arch = "sandbox";
+ compression = "none";
+ fdt-version = <1>;
+ hash-1 {
+ algo = "sha256";
+ };
+ };
+ };
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ kernel = "kernel";
+ fdt = "fdt-1";
+ signature {
+ algo = "sha256,rsa2048";
+ key-name-hint = "dev";
+ sign-images = "fdt", "kernel";
+ };
+ };
+ };
+};
diff --git a/test/py/tests/vboot/sign-images-sha1-pss.its b/test/py/tests/vboot/sign-images-sha1-pss.its
new file mode 100644
index 00000000..ded7ae4f
--- /dev/null
+++ b/test/py/tests/vboot/sign-images-sha1-pss.its
@@ -0,0 +1,44 @@
+/dts-v1/;
+
+/ {
+ description = "Chrome OS kernel image with one or more FDT blobs";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ data = /incbin/("test-kernel.bin");
+ type = "kernel_noload";
+ arch = "sandbox";
+ os = "linux";
+ compression = "none";
+ load = <0x4>;
+ entry = <0x8>;
+ kernel-version = <1>;
+ signature {
+ algo = "sha1,rsa2048";
+ padding = "pss";
+ key-name-hint = "dev";
+ };
+ };
+ fdt-1 {
+ description = "snow";
+ data = /incbin/("sandbox-kernel.dtb");
+ type = "flat_dt";
+ arch = "sandbox";
+ compression = "none";
+ fdt-version = <1>;
+ signature {
+ algo = "sha1,rsa2048";
+ padding = "pss";
+ key-name-hint = "dev";
+ };
+ };
+ };
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ kernel = "kernel";
+ fdt = "fdt-1";
+ };
+ };
+};
diff --git a/test/py/tests/vboot/sign-images-sha1.its b/test/py/tests/vboot/sign-images-sha1.its
new file mode 100644
index 00000000..18c759e9
--- /dev/null
+++ b/test/py/tests/vboot/sign-images-sha1.its
@@ -0,0 +1,42 @@
+/dts-v1/;
+
+/ {
+ description = "Chrome OS kernel image with one or more FDT blobs";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ data = /incbin/("test-kernel.bin");
+ type = "kernel_noload";
+ arch = "sandbox";
+ os = "linux";
+ compression = "none";
+ load = <0x4>;
+ entry = <0x8>;
+ kernel-version = <1>;
+ signature {
+ algo = "sha1,rsa2048";
+ key-name-hint = "dev";
+ };
+ };
+ fdt-1 {
+ description = "snow";
+ data = /incbin/("sandbox-kernel.dtb");
+ type = "flat_dt";
+ arch = "sandbox";
+ compression = "none";
+ fdt-version = <1>;
+ signature {
+ algo = "sha1,rsa2048";
+ key-name-hint = "dev";
+ };
+ };
+ };
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ kernel = "kernel";
+ fdt = "fdt-1";
+ };
+ };
+};
diff --git a/test/py/tests/vboot/sign-images-sha256-pss.its b/test/py/tests/vboot/sign-images-sha256-pss.its
new file mode 100644
index 00000000..34850cc6
--- /dev/null
+++ b/test/py/tests/vboot/sign-images-sha256-pss.its
@@ -0,0 +1,44 @@
+/dts-v1/;
+
+/ {
+ description = "Chrome OS kernel image with one or more FDT blobs";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ data = /incbin/("test-kernel.bin");
+ type = "kernel_noload";
+ arch = "sandbox";
+ os = "linux";
+ compression = "none";
+ load = <0x4>;
+ entry = <0x8>;
+ kernel-version = <1>;
+ signature {
+ algo = "sha256,rsa2048";
+ padding = "pss";
+ key-name-hint = "dev";
+ };
+ };
+ fdt-1 {
+ description = "snow";
+ data = /incbin/("sandbox-kernel.dtb");
+ type = "flat_dt";
+ arch = "sandbox";
+ compression = "none";
+ fdt-version = <1>;
+ signature {
+ algo = "sha256,rsa2048";
+ padding = "pss";
+ key-name-hint = "dev";
+ };
+ };
+ };
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ kernel = "kernel";
+ fdt = "fdt-1";
+ };
+ };
+};
diff --git a/test/py/tests/vboot/sign-images-sha256.its b/test/py/tests/vboot/sign-images-sha256.its
new file mode 100644
index 00000000..bb0f8ee8
--- /dev/null
+++ b/test/py/tests/vboot/sign-images-sha256.its
@@ -0,0 +1,42 @@
+/dts-v1/;
+
+/ {
+ description = "Chrome OS kernel image with one or more FDT blobs";
+ #address-cells = <1>;
+
+ images {
+ kernel {
+ data = /incbin/("test-kernel.bin");
+ type = "kernel_noload";
+ arch = "sandbox";
+ os = "linux";
+ compression = "none";
+ load = <0x4>;
+ entry = <0x8>;
+ kernel-version = <1>;
+ signature {
+ algo = "sha256,rsa2048";
+ key-name-hint = "dev";
+ };
+ };
+ fdt-1 {
+ description = "snow";
+ data = /incbin/("sandbox-kernel.dtb");
+ type = "flat_dt";
+ arch = "sandbox";
+ compression = "none";
+ fdt-version = <1>;
+ signature {
+ algo = "sha256,rsa2048";
+ key-name-hint = "dev";
+ };
+ };
+ };
+ configurations {
+ default = "conf-1";
+ conf-1 {
+ kernel = "kernel";
+ fdt = "fdt-1";
+ };
+ };
+};
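The .its files above differ only in the digest algorithm, the use of PSS padding, and whether the signature nodes sit on the images or on the configurations; the vboot test builds and signs a FIT from each of them in turn. Below is a minimal sketch of how a test could drive that signing step through the suite's run_and_log() helper; the paths, file names and helper name are illustrative rather than copied from the actual test.

import u_boot_utils as util

def sign_fit(u_boot_console, tmpdir, its, fit):
    """Build and sign a FIT image from one of the .its descriptions."""
    mkimage = u_boot_console.config.build_dir + '/tools/mkimage'
    # -f: the source .its, -k: directory holding the 'dev' key pair,
    # -K: control dtb that receives the public key, -r: mark it required.
    util.run_and_log(u_boot_console, [
        mkimage, '-D', '-I dts -O dtb -p 2000',
        '-f', '%s/%s' % (tmpdir, its),
        '-K', '%s/sandbox-u-boot.dtb' % tmpdir,
        '-k', tmpdir, '-r',
        '%s/%s' % (tmpdir, fit)])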
diff --git a/test/py/u_boot_console_base.py b/test/py/u_boot_console_base.py
new file mode 100644
index 00000000..326b2ac5
--- /dev/null
+++ b/test/py/u_boot_console_base.py
@@ -0,0 +1,468 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Common logic to interact with U-Boot via the console. This class provides
+# the interface that tests use to execute U-Boot shell commands and wait for
+# their results. Sub-classes exist to perform board-type-specific setup
+# operations, such as spawning a sub-process for Sandbox, or attaching to the
+# serial console of real hardware.
+
+import multiplexed_log
+import os
+import pytest
+import re
+import sys
+import u_boot_spawn
+
+# Regexes for text we expect U-Boot to send to the console.
+pattern_u_boot_spl_signon = re.compile('(U-Boot SPL \\d{4}\\.\\d{2}[^\r\n]*\\))')
+pattern_u_boot_main_signon = re.compile('(U-Boot \\d{4}\\.\\d{2}[^\r\n]*\\))')
+pattern_stop_autoboot_prompt = re.compile('Hit any key to stop autoboot: ')
+pattern_unknown_command = re.compile('Unknown command \'.*\' - try \'help\'')
+pattern_error_notification = re.compile('## Error: ')
+pattern_error_please_reset = re.compile('### ERROR ### Please RESET the board ###')
+
+PAT_ID = 0
+PAT_RE = 1
+
+bad_pattern_defs = (
+ ('spl_signon', pattern_u_boot_spl_signon),
+ ('main_signon', pattern_u_boot_main_signon),
+ ('stop_autoboot_prompt', pattern_stop_autoboot_prompt),
+ ('unknown_command', pattern_unknown_command),
+ ('error_notification', pattern_error_notification),
+ ('error_please_reset', pattern_error_please_reset),
+)
+
+class ConsoleDisableCheck(object):
+ """Context manager (for Python's with statement) that temporarily disables
+ the specified console output error check. This is useful when deliberately
+ executing a command that is known to trigger one of the error checks, in
+ order to test that the error condition is actually raised. This class is
+ used internally by ConsoleBase::disable_check(); it is not intended for
+ direct usage."""
+
+ def __init__(self, console, check_type):
+ self.console = console
+ self.check_type = check_type
+
+ def __enter__(self):
+ self.console.disable_check_count[self.check_type] += 1
+ self.console.eval_bad_patterns()
+
+ def __exit__(self, extype, value, traceback):
+ self.console.disable_check_count[self.check_type] -= 1
+ self.console.eval_bad_patterns()
+
+class ConsoleSetupTimeout(object):
+ """Context manager (for Python's with statement) that temporarily sets up
+ a timeout for a specific command. This is useful when the execution time is
+ greater than the default of 30s."""
+
+ def __init__(self, console, timeout):
+ self.p = console.p
+ self.orig_timeout = self.p.timeout
+ self.p.timeout = timeout
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, extype, value, traceback):
+ self.p.timeout = self.orig_timeout
+
+class ConsoleBase(object):
+ """The interface through which test functions interact with the U-Boot
+ console. This primarily involves executing shell commands, capturing their
+ results, and checking for common error conditions. Some common utilities
+ are also provided."""
+
+ def __init__(self, log, config, max_fifo_fill):
+ """Initialize a U-Boot console connection.
+
+ Can only usefully be called by sub-classes.
+
+ Args:
+ log: A multiplexed_log.Logfile object, to which the U-Boot output
+ will be logged.
+ config: A configuration data structure, as built by conftest.py.
+ max_fifo_fill: The maximum number of characters to send to U-Boot
+ command-line before waiting for U-Boot to echo the characters
+ back. For UART-based HW without HW flow control, this value
+ should be set less than the UART RX FIFO size to avoid
+ overflow, assuming that U-Boot can't keep up with full-rate
+ traffic at the baud rate.
+
+ Returns:
+ Nothing.
+ """
+
+ self.log = log
+ self.config = config
+ self.max_fifo_fill = max_fifo_fill
+
+ self.logstream = self.log.get_stream('console', sys.stdout)
+
+ # Array slice removes leading/trailing quotes
+ self.prompt = self.config.buildconfig['config_sys_prompt'][1:-1]
+ self.prompt_compiled = re.compile('^' + re.escape(self.prompt), re.MULTILINE)
+ self.p = None
+ self.disable_check_count = {pat[PAT_ID]: 0 for pat in bad_pattern_defs}
+ self.eval_bad_patterns()
+
+ self.at_prompt = False
+ self.at_prompt_logevt = None
+
+ def eval_bad_patterns(self):
+ self.bad_patterns = [pat[PAT_RE] for pat in bad_pattern_defs \
+ if self.disable_check_count[pat[PAT_ID]] == 0]
+ self.bad_pattern_ids = [pat[PAT_ID] for pat in bad_pattern_defs \
+ if self.disable_check_count[pat[PAT_ID]] == 0]
+
+ def close(self):
+ """Terminate the connection to the U-Boot console.
+
+ This function is only useful once all interaction with U-Boot is
+ complete. Once this function is called, data cannot be sent to or
+ received from U-Boot.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ if self.p:
+ self.p.close()
+ self.logstream.close()
+
+ def run_command(self, cmd, wait_for_echo=True, send_nl=True,
+ wait_for_prompt=True):
+ """Execute a command via the U-Boot console.
+
+ The command is always sent to U-Boot.
+
+ U-Boot echoes any command back to its output, and this function
+ typically waits for that to occur. The wait can be disabled by setting
+ wait_for_echo=False, which is useful e.g. when sending CTRL-C to
+ interrupt a long-running command such as "ums".
+
+ Command execution is typically triggered by sending a newline
+ character. This can be disabled by setting send_nl=False, which is
+ also useful when sending CTRL-C.
+
+ This function typically waits for the command to finish executing, and
+ returns the console output that it generated. This can be disabled by
+ setting wait_for_prompt=False, which is useful when invoking a long-
+ running command such as "ums".
+
+ Args:
+ cmd: The command to send.
+ wait_for_echo: Boolean indicating whether to wait for U-Boot to
+ echo the command text back to its output.
+ send_nl: Boolean indicating whether to send a newline character
+ after the command string.
+ wait_for_prompt: Boolean indicating whether to wait for the
+ command prompt to be sent by U-Boot. This typically occurs
+ immediately after the command has been executed.
+
+ Returns:
+ If wait_for_prompt == False:
+ Nothing.
+ Else:
+ The output from U-Boot during command execution. In other
+ words, the text U-Boot emitted between echoing the command
+ string and emitting the subsequent command prompt.
+ """
+
+ if self.at_prompt and \
+ self.at_prompt_logevt != self.logstream.logfile.cur_evt:
+ self.logstream.write(self.prompt, implicit=True)
+
+ try:
+ self.at_prompt = False
+ if send_nl:
+ cmd += '\n'
+ while cmd:
+ # Limit max outstanding data, so UART FIFOs don't overflow
+ chunk = cmd[:self.max_fifo_fill]
+ cmd = cmd[self.max_fifo_fill:]
+ self.p.send(chunk)
+ if not wait_for_echo:
+ continue
+ chunk = re.escape(chunk)
+ chunk = chunk.replace('\\\n', '[\r\n]')
+ m = self.p.expect([chunk] + self.bad_patterns)
+ if m != 0:
+ self.at_prompt = False
+ raise Exception('Bad pattern found on console: ' +
+ self.bad_pattern_ids[m - 1])
+ if not wait_for_prompt:
+ return
+ m = self.p.expect([self.prompt_compiled] + self.bad_patterns)
+ if m != 0:
+ self.at_prompt = False
+ raise Exception('Bad pattern found on console: ' +
+ self.bad_pattern_ids[m - 1])
+ self.at_prompt = True
+ self.at_prompt_logevt = self.logstream.logfile.cur_evt
+ # Only strip \r\n; space/TAB might be significant if testing
+ # indentation.
+ return self.p.before.strip('\r\n')
+ except Exception as ex:
+ self.log.error(str(ex))
+ self.cleanup_spawn()
+ raise
+ finally:
+ self.log.timestamp()
+
+ def run_command_list(self, cmds):
+ """Run a list of commands.
+
+ This is a helper function to call run_command() with default arguments
+ for each command in a list.
+
+ Args:
+ cmds: List of commands (each a string).
+ Returns:
+ A list of output strings from each command, one element for each
+ command.
+ """
+ output = []
+ for cmd in cmds:
+ output.append(self.run_command(cmd))
+ return output
+
+ def ctrlc(self):
+ """Send a CTRL-C character to U-Boot.
+
+ This is useful in order to stop execution of long-running synchronous
+ commands such as "ums".
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ self.log.action('Sending Ctrl-C')
+ self.run_command(chr(3), wait_for_echo=False, send_nl=False)
+
+ def wait_for(self, text):
+ """Wait for a pattern to be emitted by U-Boot.
+
+ This is useful when a long-running command such as "dfu" is executing,
+ and it periodically emits some text that should show up at a specific
+ location in the log file.
+
+ Args:
+ text: The text to wait for; either a string (containing raw text,
+ not a regular expression) or an re object.
+
+ Returns:
+ Nothing.
+ """
+
+ if type(text) == type(''):
+ text = re.escape(text)
+ m = self.p.expect([text] + self.bad_patterns)
+ if m != 0:
+ raise Exception('Bad pattern found on console: ' +
+ self.bad_pattern_ids[m - 1])
+
+ def drain_console(self):
+ """Read from and log the U-Boot console for a short time.
+
+ U-Boot's console output is only logged when the test code actively
+ waits for U-Boot to emit specific data. There are cases where tests
+ can fail without doing this. For example, if a test asks U-Boot to
+ enable USB device mode, then polls until a host-side device node
+ exists. In such a case, it is useful to log U-Boot's console output
+ in case U-Boot printed clues as to why the host-side event did not
+ occur. This function will do that.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ # If we are already not connected to U-Boot, there's nothing to drain.
+ # This should only happen when a previous call to run_command() or
+ # wait_for() failed (and hence the output has already been logged), or
+ # the system is shutting down.
+ if not self.p:
+ return
+
+ orig_timeout = self.p.timeout
+ try:
+ # Drain the log for a relatively short time.
+ self.p.timeout = 1000
+ # Wait for something U-Boot will likely never send. This will
+ # cause the console output to be read and logged.
+ self.p.expect(['This should never match U-Boot output'])
+ except:
+ # We expect a timeout, since U-Boot won't print what we waited
+ # for. Squash it when it happens.
+ #
+ # Squash any other exception too. This function is only used to
+ # drain (and log) the U-Boot console output after a failed test.
+ # The U-Boot process will be restarted, or target board reset, once
+ # this function returns. So, we don't care about detecting any
+ # additional errors, so they're squashed so that the rest of the
+ # post-test-failure cleanup code can continue operation, and
+ # correctly terminate any log sections, etc.
+ pass
+ finally:
+ self.p.timeout = orig_timeout
+
+ def ensure_spawned(self):
+ """Ensure a connection to a correctly running U-Boot instance.
+
+ This may require spawning a new Sandbox process or resetting target
+ hardware, as defined by the implementation sub-class.
+
+ This is an internal function and should not be called directly.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ if self.p:
+ return
+ try:
+ self.log.start_section('Starting U-Boot')
+ self.at_prompt = False
+ self.p = self.get_spawn()
+ # Real targets can take a long time to scroll large amounts of
+ # text if LCD is enabled. This value may need tweaking in the
+ # future, possibly per-test to be optimal. This works for 'help'
+ # on board 'seaboard'.
+ if not self.config.gdbserver:
+ self.p.timeout = 30000
+ self.p.logfile_read = self.logstream
+ bcfg = self.config.buildconfig
+ config_spl = bcfg.get('config_spl', 'n') == 'y'
+ config_spl_serial_support = bcfg.get('config_spl_serial_support',
+ 'n') == 'y'
+ env_spl_skipped = self.config.env.get('env__spl_skipped',
+ False)
+ if config_spl and config_spl_serial_support and not env_spl_skipped:
+ m = self.p.expect([pattern_u_boot_spl_signon] +
+ self.bad_patterns)
+ if m != 0:
+ raise Exception('Bad pattern found on SPL console: ' +
+ self.bad_pattern_ids[m - 1])
+ m = self.p.expect([pattern_u_boot_main_signon] + self.bad_patterns)
+ if m != 0:
+ raise Exception('Bad pattern found on console: ' +
+ self.bad_pattern_ids[m - 1])
+ self.u_boot_version_string = self.p.after
+ while True:
+ m = self.p.expect([self.prompt_compiled,
+ pattern_stop_autoboot_prompt] + self.bad_patterns)
+ if m == 0:
+ break
+ if m == 1:
+ self.p.send(' ')
+ continue
+ raise Exception('Bad pattern found on console: ' +
+ self.bad_pattern_ids[m - 2])
+ self.at_prompt = True
+ self.at_prompt_logevt = self.logstream.logfile.cur_evt
+ except Exception as ex:
+ self.log.error(str(ex))
+ self.cleanup_spawn()
+ raise
+ finally:
+ self.log.timestamp()
+ self.log.end_section('Starting U-Boot')
+
+ def cleanup_spawn(self):
+ """Shut down all interaction with the U-Boot instance.
+
+ This is used when an error is detected prior to re-establishing a
+ connection with a fresh U-Boot instance.
+
+ This is an internal function and should not be called directly.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ try:
+ if self.p:
+ self.p.close()
+ except:
+ pass
+ self.p = None
+
+ def restart_uboot(self):
+ """Shut down and restart U-Boot."""
+ self.cleanup_spawn()
+ self.ensure_spawned()
+
+ def get_spawn_output(self):
+ """Return the start-up output from U-Boot
+
+ Returns:
+ The output produced by ensure_spawned(), as a string.
+ """
+ if self.p:
+ return self.p.get_expect_output()
+ return None
+
+ def validate_version_string_in_text(self, text):
+ """Assert that a command's output includes the U-Boot signon message.
+
+ This is primarily useful for validating the "version" command without
+ duplicating the signon text regex in a test function.
+
+ Args:
+ text: The command output text to check.
+
+ Returns:
+ Nothing. An exception is raised if the validation fails.
+ """
+
+ assert(self.u_boot_version_string in text)
+
+ def disable_check(self, check_type):
+ """Temporarily disable an error check of U-Boot's output.
+
+ Create a new context manager (for use with the "with" statement) which
+ temporarily disables a particular console output error check.
+
+ Args:
+ check_type: The type of error-check to disable. Valid values may
+ be found in self.disable_check_count above.
+
+ Returns:
+ A context manager object.
+ """
+
+ return ConsoleDisableCheck(self, check_type)
+
+ def temporary_timeout(self, timeout):
+ """Temporarily set up different timeout for commands.
+
+ Create a new context manager (for use with the "with" statement) which
+ temporarily changes the command timeout.
+
+ Args:
+ timeout: Time in milliseconds.
+
+ Returns:
+ A context manager object.
+ """
+
+ return ConsoleSetupTimeout(self, timeout)
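Tests never instantiate ConsoleBase themselves; they receive a ready-made console object from the u_boot_console fixture and combine run_command() with the disable_check() and temporary_timeout() context managers above. A hypothetical sketch of those three patterns follows; the commands shown are only examples and assume the relevant U-Boot commands are enabled.

def test_console_helpers(u_boot_console):
    # Normal case: wait for the echo and the next prompt, return the output.
    response = u_boot_console.run_command('echo hello')
    assert response == 'hello'

    # Deliberately provoke the 'unknown command' error message, with the
    # matching console error check disabled for the duration of the block.
    with u_boot_console.disable_check('unknown_command'):
        response = u_boot_console.run_command('definitely_not_a_command')
    assert 'Unknown command' in response

    # Allow a slow command more than the default 30000 ms to complete.
    with u_boot_console.temporary_timeout(120000):
        u_boot_console.run_command('sleep 60')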
diff --git a/test/py/u_boot_console_exec_attach.py b/test/py/u_boot_console_exec_attach.py
new file mode 100644
index 00000000..27834b55
--- /dev/null
+++ b/test/py/u_boot_console_exec_attach.py
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Logic to interact with U-Boot running on real hardware, typically via a
+# physical serial port.
+
+import sys
+from u_boot_spawn import Spawn
+from u_boot_console_base import ConsoleBase
+
+class ConsoleExecAttach(ConsoleBase):
+ """Represents a physical connection to a U-Boot console, typically via a
+ serial port. This implementation executes a sub-process to attach to the
+ console, expecting that the stdin/out of the sub-process will be forwarded
+ to/from the physical hardware. This approach isolates the test infra-
+ structure from the user-/installation-specific details of how to
+ communicate with, and the identity of, serial ports etc."""
+
+ def __init__(self, log, config):
+ """Initialize a U-Boot console connection.
+
+ Args:
+ log: A multiplexed_log.Logfile instance.
+ config: A "configuration" object as defined in conftest.py.
+
+ Returns:
+ Nothing.
+ """
+
+ # The max_fifo_fill value might need tweaking per-board/-SoC?
+ # 1 would be safe anywhere, but is very slow (a pexpect issue?).
+ # 16 is a common FIFO size.
+ # HW flow control would mean this could be infinite.
+ super(ConsoleExecAttach, self).__init__(log, config, max_fifo_fill=16)
+
+ with self.log.section('flash'):
+ self.log.action('Flashing U-Boot')
+ cmd = ['u-boot-test-flash', config.board_type, config.board_identity]
+ runner = self.log.get_runner(cmd[0], sys.stdout)
+ runner.run(cmd)
+ runner.close()
+ self.log.status_pass('OK')
+
+ def get_spawn(self):
+ """Connect to a fresh U-Boot instance.
+
+ The target board is reset, so that U-Boot begins running from scratch.
+
+ Args:
+ None.
+
+ Returns:
+ A u_boot_spawn.Spawn object that is attached to U-Boot.
+ """
+
+ args = [self.config.board_type, self.config.board_identity]
+ s = Spawn(['u-boot-test-console'] + args)
+
+ try:
+ self.log.action('Resetting board')
+ cmd = ['u-boot-test-reset'] + args
+ runner = self.log.get_runner(cmd[0], sys.stdout)
+ runner.run(cmd)
+ runner.close()
+ except:
+ s.close()
+ raise
+
+ return s
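The u-boot-test-flash, u-boot-test-console and u-boot-test-reset executables are not part of the test suite; they are installation-specific hooks found via $PATH, each invoked with the board type and board identity as arguments. The following is a purely hypothetical reset hook, shown only to illustrate that calling convention; the relay-control tool named here does not exist in this tree.

#!/usr/bin/env python3
# Hypothetical u-boot-test-reset hook: argv[1] is the board type,
# argv[2] the board identity; how the board is actually power-cycled
# is entirely site-specific.
import subprocess
import sys

board_type, board_identity = sys.argv[1], sys.argv[2]
subprocess.check_call(['power-cycle-relay', '--port', board_identity])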
diff --git a/test/py/u_boot_console_sandbox.py b/test/py/u_boot_console_sandbox.py
new file mode 100644
index 00000000..836f5a9e
--- /dev/null
+++ b/test/py/u_boot_console_sandbox.py
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Logic to interact with the sandbox port of U-Boot, running as a sub-process.
+
+import time
+from u_boot_spawn import Spawn
+from u_boot_console_base import ConsoleBase
+
+class ConsoleSandbox(ConsoleBase):
+ """Represents a connection to a sandbox U-Boot console, executed as a sub-
+ process."""
+
+ def __init__(self, log, config):
+ """Initialize a U-Boot console connection.
+
+ Args:
+ log: A multiplexed_log.Logfile instance.
+ config: A "configuration" object as defined in conftest.py.
+
+ Returns:
+ Nothing.
+ """
+
+ super(ConsoleSandbox, self).__init__(log, config, max_fifo_fill=1024)
+ self.sandbox_flags = []
+
+ def get_spawn(self):
+ """Connect to a fresh U-Boot instance.
+
+ A new sandbox process is created, so that U-Boot begins running from
+ scratch.
+
+ Args:
+ None.
+
+ Returns:
+ A u_boot_spawn.Spawn object that is attached to U-Boot.
+ """
+
+ bcfg = self.config.buildconfig
+ config_spl = bcfg.get('config_spl', 'n') == 'y'
+ fname = '/spl/u-boot-spl' if config_spl else '/u-boot'
+ print(fname)
+ cmd = []
+ if self.config.gdbserver:
+ cmd += ['gdbserver', self.config.gdbserver]
+ cmd += [
+ self.config.build_dir + fname,
+ '-v',
+ '-d',
+ self.config.dtb
+ ]
+ cmd += self.sandbox_flags
+ return Spawn(cmd, cwd=self.config.source_dir)
+
+ def restart_uboot_with_flags(self, flags):
+ """Run U-Boot with the given command-line flags
+
+ Args:
+ flags: List of flags to pass, each a string
+
+ Returns:
+ A u_boot_spawn.Spawn object that is attached to U-Boot.
+ """
+
+ try:
+ self.sandbox_flags = flags
+ return self.restart_uboot()
+ finally:
+ self.sandbox_flags = []
+
+ def kill(self, sig):
+ """Send a specific Unix signal to the sandbox process.
+
+ Args:
+ sig: The Unix signal to send to the process.
+
+ Returns:
+ Nothing.
+ """
+
+ self.log.action('kill %d' % sig)
+ self.p.kill(sig)
+
+ def validate_exited(self):
+ """Determine whether the sandbox process has exited.
+
+ If required, this function waits a reasonable time for the process to
+ exit.
+
+ Args:
+ None.
+
+ Returns:
+ Boolean indicating whether the process has exited.
+ """
+
+ p = self.p
+ self.p = None
+ for i in range(100):
+ ret = not p.isalive()
+ if ret:
+ break
+ time.sleep(0.1)
+ p.close()
+ return ret
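Sandbox-only tests use these helpers rather than spawning U-Boot themselves: restart_uboot_with_flags() reruns sandbox with extra command-line options for the duration of one test, while kill() and validate_exited() exercise the exit paths. A short sketch is shown below; the extra flag is only a placeholder, since sandbox already receives -v and -d by default.

import signal
import pytest

@pytest.mark.boardspec('sandbox')
def test_sandbox_restart_and_exit(u_boot_console):
    # Restart sandbox with an additional flag; the flag list is cleared
    # again once restart_uboot_with_flags() returns.
    u_boot_console.restart_uboot_with_flags(['-v'])

    # Ask the sandbox process to terminate and confirm it really exited.
    u_boot_console.kill(signal.SIGINT)
    assert u_boot_console.validate_exited()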
diff --git a/test/py/u_boot_spawn.py b/test/py/u_boot_spawn.py
new file mode 100644
index 00000000..6991b78c
--- /dev/null
+++ b/test/py/u_boot_spawn.py
@@ -0,0 +1,209 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+
+# Logic to spawn a sub-process and interact with its stdio.
+
+import os
+import re
+import pty
+import signal
+import select
+import time
+
+class Timeout(Exception):
+ """An exception sub-class that indicates that a timeout occurred."""
+ pass
+
+class Spawn(object):
+ """Represents the stdio of a freshly created sub-process. Commands may be
+ sent to the process, and responses waited for.
+
+ Members:
+ output: accumulated output from expect()
+ """
+
+ def __init__(self, args, cwd=None):
+ """Spawn (fork/exec) the sub-process.
+
+ Args:
+ args: array of process arguments. argv[0] is the command to
+ execute.
+ cwd: the directory to run the process in, or None for no change.
+
+ Returns:
+ Nothing.
+ """
+
+ self.waited = False
+ self.buf = ''
+ self.output = ''
+ self.logfile_read = None
+ self.before = ''
+ self.after = ''
+ self.timeout = None
+ # http://stackoverflow.com/questions/7857352/python-regex-to-match-vt100-escape-sequences
+ self.re_vt100 = re.compile(r'(\x1b\[|\x9b)[^@-_]*[@-_]|\x1b[@-_]', re.I)
+
+ (self.pid, self.fd) = pty.fork()
+ if self.pid == 0:
+ try:
+ # For some reason, SIGHUP is set to SIG_IGN at this point when
+ # run under "go" (www.go.cd). Perhaps this happens under any
+ # background (non-interactive) system?
+ signal.signal(signal.SIGHUP, signal.SIG_DFL)
+ if cwd:
+ os.chdir(cwd)
+ os.execvp(args[0], args)
+ except:
+ print('CHILD EXCEPTION:')
+ import traceback
+ traceback.print_exc()
+ finally:
+ os._exit(255)
+
+ try:
+ self.poll = select.poll()
+ self.poll.register(self.fd, select.POLLIN | select.POLLPRI | select.POLLERR | select.POLLHUP | select.POLLNVAL)
+ except:
+ self.close()
+ raise
+
+ def kill(self, sig):
+ """Send unix signal "sig" to the child process.
+
+ Args:
+ sig: The signal number to send.
+
+ Returns:
+ Nothing.
+ """
+
+ os.kill(self.pid, sig)
+
+ def isalive(self):
+ """Determine whether the child process is still running.
+
+ Args:
+ None.
+
+ Returns:
+ Boolean indicating whether process is alive.
+ """
+
+ if self.waited:
+ return False
+
+ w = os.waitpid(self.pid, os.WNOHANG)
+ if w[0] == 0:
+ return True
+
+ self.waited = True
+ return False
+
+ def send(self, data):
+ """Send data to the sub-process's stdin.
+
+ Args:
+ data: The data to send to the process.
+
+ Returns:
+ Nothing.
+ """
+
+ os.write(self.fd, data.encode(errors='replace'))
+
+ def expect(self, patterns):
+ """Wait for the sub-process to emit specific data.
+
+ This function waits for the process to emit one pattern from the
+ supplied list of patterns, or for a timeout to occur.
+
+ Args:
+ patterns: A list of strings or regex objects that we expect to
+ see in the sub-process' stdout.
+
+ Returns:
+ The index within the patterns array of the pattern the process
+ emitted.
+
+ Notable exceptions:
+ Timeout, if the process did not emit any of the patterns within
+ the expected time.
+ """
+
+ for pi in range(len(patterns)):
+ if type(patterns[pi]) == type(''):
+ patterns[pi] = re.compile(patterns[pi])
+
+ tstart_s = time.time()
+ try:
+ while True:
+ earliest_m = None
+ earliest_pi = None
+ for pi in range(len(patterns)):
+ pattern = patterns[pi]
+ m = pattern.search(self.buf)
+ if not m:
+ continue
+ if earliest_m and m.start() >= earliest_m.start():
+ continue
+ earliest_m = m
+ earliest_pi = pi
+ if earliest_m:
+ pos = earliest_m.start()
+ posafter = earliest_m.end()
+ self.before = self.buf[:pos]
+ self.after = self.buf[pos:posafter]
+ self.output += self.buf[:posafter]
+ self.buf = self.buf[posafter:]
+ return earliest_pi
+ tnow_s = time.time()
+ if self.timeout:
+ tdelta_ms = (tnow_s - tstart_s) * 1000
+ poll_maxwait = self.timeout - tdelta_ms
+ if tdelta_ms > self.timeout:
+ raise Timeout()
+ else:
+ poll_maxwait = None
+ events = self.poll.poll(poll_maxwait)
+ if not events:
+ raise Timeout()
+ c = os.read(self.fd, 1024).decode(errors='replace')
+ if not c:
+ raise EOFError()
+ if self.logfile_read:
+ self.logfile_read.write(c)
+ self.buf += c
+ # count=0 is supposed to be the default, which indicates
+ # unlimited substitutions, but in practice the version of
+ # Python in Ubuntu 14.04 appears to default to count=2!
+ self.buf = self.re_vt100.sub('', self.buf, count=1000000)
+ finally:
+ if self.logfile_read:
+ self.logfile_read.flush()
+
+ def close(self):
+ """Close the stdio connection to the sub-process.
+
+ This also waits a reasonable time for the sub-process to stop running.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ """
+
+ os.close(self.fd)
+ for i in range(100):
+ if not self.isalive():
+ break
+ time.sleep(0.1)
+
+ def get_expect_output(self):
+ """Return the output read by expect()
+
+ Returns:
+ The output processed by expect(), as a string.
+ """
+ return self.output
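Spawn is a small pexpect-like class of its own: timeouts are in milliseconds, expect() takes a list of strings or compiled regexes and returns the index of whichever matched first, and the text around the match lands in before/after. A standalone sketch using an ordinary shell as the sub-process:

from u_boot_spawn import Spawn

# Run a trivial shell just to demonstrate the send()/expect() pattern.
s = Spawn(['sh', '-c', 'echo ready; read line; echo got-$line'])
s.timeout = 5000                   # milliseconds, not seconds
assert s.expect(['ready']) == 0    # index into the supplied pattern list
s.send('hello\n')
s.expect(['got-hello'])
print('accumulated output:', repr(s.get_expect_output()))
s.close()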
diff --git a/test/py/u_boot_utils.py b/test/py/u_boot_utils.py
new file mode 100644
index 00000000..bf2a0fc0
--- /dev/null
+++ b/test/py/u_boot_utils.py
@@ -0,0 +1,340 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+
+# Utility code shared across multiple tests.
+
+import hashlib
+import inspect
+import os
+import os.path
+import pytest
+import sys
+import time
+import re
+
+def md5sum_data(data):
+ """Calculate the MD5 hash of some data.
+
+ Args:
+ data: The data to hash.
+
+ Returns:
+ The hash of the data, as a binary string.
+ """
+
+ h = hashlib.md5()
+ h.update(data)
+ return h.digest()
+
+def md5sum_file(fn, max_length=None):
+ """Calculate the MD5 hash of the contents of a file.
+
+ Args:
+ fn: The filename of the file to hash.
+ max_length: The number of bytes to hash. If the file has more
+ bytes than this, they will be ignored. If None or omitted, the
+ entire file will be hashed.
+
+ Returns:
+ The hash of the file content, as a binary string.
+ """
+
+ with open(fn, 'rb') as fh:
+ if max_length:
+ params = [max_length]
+ else:
+ params = []
+ data = fh.read(*params)
+ return md5sum_data(data)
+
+class PersistentRandomFile(object):
+ """Generate and store information about a persistent file containing
+ random data."""
+
+ def __init__(self, u_boot_console, fn, size):
+ """Create or process the persistent file.
+
+ If the file does not exist, it is generated.
+
+ If the file does exist, its content is hashed for later comparison.
+
+ These files are always located in the "persistent data directory" of
+ the current test run.
+
+ Args:
+ u_boot_console: A console connection to U-Boot.
+ fn: The filename (without path) to create.
+ size: The desired size of the file in bytes.
+
+ Returns:
+ Nothing.
+ """
+
+ self.fn = fn
+
+ self.abs_fn = u_boot_console.config.persistent_data_dir + '/' + fn
+
+ if os.path.exists(self.abs_fn):
+ u_boot_console.log.action('Persistent data file ' + self.abs_fn +
+ ' already exists')
+ self.content_hash = md5sum_file(self.abs_fn)
+ else:
+ u_boot_console.log.action('Generating ' + self.abs_fn +
+ ' (random, persistent, %d bytes)' % size)
+ data = os.urandom(size)
+ with open(self.abs_fn, 'wb') as fh:
+ fh.write(data)
+ self.content_hash = md5sum_data(data)
+
+def attempt_to_open_file(fn):
+ """Attempt to open a file, without throwing exceptions.
+
+ Any errors (exceptions) that occur during the attempt to open the file
+ are ignored. This is useful in order to test whether a file (in
+ particular, a device node) exists and can be successfully opened, in order
+ to poll for e.g. USB enumeration completion.
+
+ Args:
+ fn: The filename to attempt to open.
+
+ Returns:
+ An open file handle to the file, or None if the file could not be
+ opened.
+ """
+
+ try:
+ return open(fn, 'rb')
+ except:
+ return None
+
+def wait_until_open_succeeds(fn):
+ """Poll until a file can be opened, or a timeout occurs.
+
+ Continually attempt to open a file, and return when this succeeds, or
+ raise an exception after a timeout.
+
+ Args:
+ fn: The filename to attempt to open.
+
+ Returns:
+ An open file handle to the file.
+ """
+
+ for i in range(100):
+ fh = attempt_to_open_file(fn)
+ if fh:
+ return fh
+ time.sleep(0.1)
+ raise Exception('File could not be opened')
+
+def wait_until_file_open_fails(fn, ignore_errors):
+ """Poll until a file cannot be opened, or a timeout occurs.
+
+ Continually attempt to open a file, and return when this fails, or
+ raise an exception after a timeout.
+
+ Args:
+ fn: The filename to attempt to open.
+ ignore_errors: Indicate whether to ignore timeout errors. If True, the
+ function will simply return if a timeout occurs, otherwise an
+ exception will be raised.
+
+ Returns:
+ Nothing.
+ """
+
+ for i in range(100):
+ fh = attempt_to_open_file(fn)
+ if not fh:
+ return
+ fh.close()
+ time.sleep(0.1)
+ if ignore_errors:
+ return
+ raise Exception('File can still be opened')
+
+def run_and_log(u_boot_console, cmd, ignore_errors=False):
+ """Run a command and log its output.
+
+ Args:
+ u_boot_console: A console connection to U-Boot.
+ cmd: The command to run, as an array of argv[], or a string.
+ If a string, note that it is split up so that quoted spaces
+ will not be preserved. E.g. "fred and" becomes ['"fred', 'and"']
+ ignore_errors: Indicate whether to ignore errors. If True, the function
+ will simply return if the command cannot be executed or exits with
+ an error code, otherwise an exception will be raised if such
+ problems occur.
+
+ Returns:
+ The output as a string.
+ """
+ if isinstance(cmd, str):
+ cmd = cmd.split()
+ runner = u_boot_console.log.get_runner(cmd[0], sys.stdout)
+ output = runner.run(cmd, ignore_errors=ignore_errors)
+ runner.close()
+ return output
+
+def run_and_log_expect_exception(u_boot_console, cmd, retcode, msg):
+ """Run a command that is expected to fail.
+
+ This runs a command and checks that it fails with the expected return code
+ and error message. If not, an exception is raised.
+
+ Args:
+ u_boot_console: A console connection to U-Boot.
+ cmd: The command to run, as an array of argv[].
+ retcode: Expected non-zero return code from the command.
+ msg: String that should be contained within the command's output.
+ """
+ try:
+ runner = u_boot_console.log.get_runner(cmd[0], sys.stdout)
+ runner.run(cmd)
+ except Exception as e:
+ assert(retcode == runner.exit_status)
+ assert(msg in runner.output)
+ else:
+ raise Exception("Expected an exception with retcode %d message '%s',"
+ "but it was not raised" % (retcode, msg))
+ finally:
+ runner.close()
+
+ram_base = None
+def find_ram_base(u_boot_console):
+ """Find the running U-Boot's RAM location.
+
+ Probe the running U-Boot to determine the address of the first bank
+ of RAM. This is useful for tests that test reading/writing RAM, or
+ load/save files that aren't associated with some standard address
+ typically represented in an environment variable such as
+ ${kernel_addr_r}. The value is cached so that it only needs to be
+ actively read once.
+
+ Args:
+ u_boot_console: A console connection to U-Boot.
+
+ Returns:
+ The address of U-Boot's first RAM bank, as an integer.
+ """
+
+ global ram_base
+ if u_boot_console.config.buildconfig.get('config_cmd_bdi', 'n') != 'y':
+ pytest.skip('bdinfo command not supported')
+ if ram_base == -1:
+ pytest.skip('Previously failed to find RAM bank start')
+ if ram_base is not None:
+ return ram_base
+
+ with u_boot_console.log.section('find_ram_base'):
+ response = u_boot_console.run_command('bdinfo')
+ for l in response.split('\n'):
+ if '-> start' in l or 'memstart =' in l:
+ ram_base = int(l.split('=')[1].strip(), 16)
+ break
+ if ram_base is None:
+ ram_base = -1
+ raise Exception('Failed to find RAM bank start in `bdinfo`')
+
+ # We don't want ram_base to be zero as some functions test if the given
+ # address is NULL (0). Let's add 2MiB then (size of an ARM LPAE/v8 section).
+
+ if ram_base == 0:
+ ram_base += 1024 * 1024 * 2
+
+ return ram_base
+
+class PersistentFileHelperCtxMgr(object):
+ """A context manager for Python's "with" statement, which ensures that any
+ generated file is deleted (and hence regenerated) if its mtime is older
+ than the mtime of the Python module which generated it, and that the file
+ gets an mtime newer than that of the generating module after it has been
+ regenerated. Objects of this type should be created by the factory function
+ persistent_file_helper rather than directly."""
+
+ def __init__(self, log, filename):
+ """Initialize a new object.
+
+ Args:
+ log: The Logfile object to log to.
+ filename: The filename of the generated file.
+
+ Returns:
+ Nothing.
+ """
+
+ self.log = log
+ self.filename = filename
+
+ def __enter__(self):
+ frame = inspect.stack()[1]
+ module = inspect.getmodule(frame[0])
+ self.module_filename = module.__file__
+ self.module_timestamp = os.path.getmtime(self.module_filename)
+
+ if os.path.exists(self.filename):
+ filename_timestamp = os.path.getmtime(self.filename)
+ if filename_timestamp < self.module_timestamp:
+ self.log.action('Removing stale generated file ' +
+ self.filename)
+ os.unlink(self.filename)
+
+ def __exit__(self, extype, value, traceback):
+ if extype:
+ try:
+ os.unlink(self.filename)
+ except:
+ pass
+ return
+ logged = False
+ for i in range(20):
+ filename_timestamp = os.path.getmtime(self.filename)
+ if filename_timestamp > self.module_timestamp:
+ break
+ if not logged:
+ self.log.action(
+ 'Waiting for generated file timestamp to increase')
+ logged = True
+ os.utime(self.filename)
+ time.sleep(0.1)
+
+def persistent_file_helper(u_boot_log, filename):
+ """Manage the timestamps and regeneration of a persistent generated
+ file. This function creates a context manager for Python's "with"
+ statement.
+
+ Usage:
+ with persistent_file_helper(u_boot_console.log, filename):
+ code to generate the file, if it's missing.
+
+ Args:
+ u_boot_log: u_boot_console.log.
+ filename: The filename of the generated file.
+
+ Returns:
+ A context manager object.
+ """
+
+ return PersistentFileHelperCtxMgr(u_boot_log, filename)
+
+def crc32(u_boot_console, address, count):
+ """Helper function used to compute the CRC32 value of a section of RAM.
+
+ Args:
+ u_boot_console: A U-Boot console connection.
+ address: Address where data starts.
+ count: Amount of data to use for the calculation, in bytes.
+
+ Returns:
+ The CRC32 value, as an eight-character hexadecimal string.
+ """
+
+ bcfg = u_boot_console.config.buildconfig
+ has_cmd_crc32 = bcfg.get('config_cmd_crc32', 'n') == 'y'
+ assert has_cmd_crc32, 'Cannot compute crc32 without CONFIG_CMD_CRC32.'
+ output = u_boot_console.run_command('crc32 %08x %x' % (address, count))
+
+ m = re.search('==> ([0-9a-fA-F]{8})$', output)
+ assert m, 'CRC32 operation failed.'
+
+ return m.group(1)
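A typical RAM test ties these helpers together: find_ram_base() supplies a scratch address that is guaranteed not to be NULL, and crc32() checks that data written there reads back consistently. The sketch below assumes CONFIG_CMD_MEMORY so that 'mw' is available; the fill pattern is arbitrary.

import u_boot_utils

def test_ram_scratch_crc(u_boot_console):
    addr = u_boot_utils.find_ram_base(u_boot_console)
    # Fill 16 words (64 bytes) with a known pattern, then checksum the
    # region twice; matching CRC32 values show the data is stable.
    u_boot_console.run_command('mw.l %08x deadbeef 10' % addr)
    crc1 = u_boot_utils.crc32(u_boot_console, addr, 64)
    crc2 = u_boot_utils.crc32(u_boot_console, addr, 64)
    assert crc1 == crc2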