author      Alexander Kanavin <alex.kanavin@gmail.com>    2023-11-29 14:24:24 +0100
committer   Khem Raj <raj.khem@gmail.com>                 2023-11-29 07:35:36 -0800
commit      a4e9bec26db2a53e8bfc114e406f153f60ad6340
tree        ca286458fbc1460e3b7a453fd57e6f38fc9b8b7f /meta-oe/recipes-benchmark
parent      3991d85383b466a4948283cfd0167213a1dd2311
download    meta-openembedded-contrib-a4e9bec26db2a53e8bfc114e406f153f60ad6340.tar.gz
fio: revert "fio: upgrade 3.32 -> 2022"
This was a bogus update (from AUH I believe) and should be reverted.

Signed-off-by: Alexander Kanavin <alex@linutronix.de>
Signed-off-by: Khem Raj <raj.khem@gmail.com>
Diffstat (limited to 'meta-oe/recipes-benchmark')
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0001-Fio-3.31.patch | 23
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0002-lib-rand-Enhance-__fill_random_buf-using-the-multi-r.patch | 136
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0003-lib-rand-get-rid-of-unused-MAX_SEED_BUCKETS.patch | 31
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0004-ioengines-merge-filecreate-filestat-filedelete-engin.patch | 800
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0005-engines-http-Add-storage-class-option-for-s3.patch | 87
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0006-engines-http-Add-s3-crypto-options-for-s3.patch | 246
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0007-doc-Add-usage-and-example-about-s3-storage-class-and.patch | 155
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0008-README-link-to-GitHub-releases-for-Windows.patch | 33
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0009-engines-xnvme-fix-segfault-issue-with-xnvme-ioengine.patch | 54
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0010-doc-update-fio-doc-for-xnvme-engine.patch | 168
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0011-test-add-latency-test-using-posixaio-ioengine.patch | 55
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0012-test-fix-hash-for-t0016.patch | 33
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0013-doc-get-rid-of-trailing-whitespace.patch | 82
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0014-doc-clarify-that-I-O-errors-may-go-unnoticed-without.patch | 54
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0015-Revert-Minor-style-fixups.patch | 41
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0016-Revert-Fix-multithread-issues-when-operating-on-a-si.patch | 141
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0017-Add-wait-for-handling-SIGBREAK.patch | 59
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0018-engines-io_uring-pass-back-correct-error-value-when-.patch | 58
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0019-Enable-CPU-affinity-support-on-Android.patch | 64
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0020-io_uring-Replace-pthread_self-with-s-tid.patch | 41
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0021-engines-io_uring-delete-debug-code.patch | 37
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0022-t-io_uring-prep-for-including-engines-nvme.h-in-t-io.patch | 72
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0023-t-io_uring-add-support-for-async-passthru.patch | 379
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0024-t-io_uring-fix-64-bit-cast-on-32-bit-archs.patch | 37
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0025-test-add-basic-test-for-io_uring-ioengine.patch | 91
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0026-t-io_uring-remove-duplicate-definition-of-gettid.patch | 59
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0027-test-add-some-tests-for-seq-and-rand-offsets.patch | 157
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0028-test-use-Ubuntu-22.04-for-64-bit-tests.patch | 72
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0029-test-get-32-bit-Ubuntu-22.04-build-working.patch | 79
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0030-test-add-tests-for-lfsr-and-norandommap.patch | 143
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0031-backend-revert-bad-memory-leak-fix.patch | 39
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio/0032-Fio-3.32.patch | 23
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio_2022.bb | 77
-rw-r--r--  meta-oe/recipes-benchmark/fio/fio_3.32.bb | 44
34 files changed, 44 insertions, 3626 deletions
diff --git a/meta-oe/recipes-benchmark/fio/fio/0001-Fio-3.31.patch b/meta-oe/recipes-benchmark/fio/fio/0001-Fio-3.31.patch
deleted file mode 100644
index 9b8b7d02e3..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0001-Fio-3.31.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-From 6cafe8445fd1e04e5f7d67bbc73029a538d1b253 Mon Sep 17 00:00:00 2001
-From: Jens Axboe <axboe@kernel.dk>
-Date: Tue, 9 Aug 2022 14:41:25 -0600
-Subject: [PATCH] Fio 3.31
-
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- FIO-VERSION-GEN | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/FIO-VERSION-GEN b/FIO-VERSION-GEN
-index fa64f50f..72630dd0 100755
---- a/FIO-VERSION-GEN
-+++ b/FIO-VERSION-GEN
-@@ -1,7 +1,7 @@
- #!/bin/sh
-
- GVF=FIO-VERSION-FILE
--DEF_VER=fio-3.30
-+DEF_VER=fio-3.31
-
- LF='
- '
diff --git a/meta-oe/recipes-benchmark/fio/fio/0002-lib-rand-Enhance-__fill_random_buf-using-the-multi-r.patch b/meta-oe/recipes-benchmark/fio/fio/0002-lib-rand-Enhance-__fill_random_buf-using-the-multi-r.patch
deleted file mode 100644
index 16506566c2..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0002-lib-rand-Enhance-__fill_random_buf-using-the-multi-r.patch
+++ /dev/null
@@ -1,136 +0,0 @@
-From 40ba7a05de6a08cfd382b116f76dbeaa7237df45 Mon Sep 17 00:00:00 2001
-From: Sungup Moon <sungup.moon@samsung.com>
-Date: Mon, 8 Aug 2022 17:21:46 +0900
-Subject: [PATCH] lib/rand: Enhance __fill_random_buf using the multi random
- seed
-
-The __fill_random_buf fills a buffer using the random 8byte integer to
-write. But, this mechanism is depend on the CPU performance and could
-not reach the max performance on the PCIe Gen5 devices. I have tested
-128KB single worker sequential write on PCIe Gen5 NVMe, but it cannot
-reach write throughput 6.0GB/s.
-
-So, I have reviewed the __fill_random_buf and focused the multiplier
-dependency to generate the random number. So, I have changed
-__fill_random_buf using the multiple-random-seed to reduce the
-dependencies in the small data filling loop.
-
-I'll attach detail analysis result in the PR of this branch.
-
-Signed-off-by: Sungup Moon <sungup.moon@samsung.com>
----
- configure | 17 +++++++++++++++++
- lib/rand.c | 37 ++++++++++++++++++++++++++++++++++++-
- 2 files changed, 53 insertions(+), 1 deletion(-)
-
-diff --git a/configure b/configure
-index 36450df8..a2b9bd4c 100755
---- a/configure
-+++ b/configure
-@@ -116,6 +116,10 @@ has() {
- type "$1" >/dev/null 2>&1
- }
-
-+num() {
-+ echo "$1" | grep -P -q "^[0-9]+$"
-+}
-+
- check_define() {
- cat > $TMPC <<EOF
- #if !defined($1)
-@@ -174,6 +178,7 @@ libnfs=""
- xnvme=""
- libzbc=""
- dfs=""
-+seed_buckets=""
- dynamic_engines="no"
- prefix=/usr/local
-
-@@ -255,6 +260,8 @@ for opt do
- ;;
- --enable-asan) asan="yes"
- ;;
-+ --seed-buckets=*) seed_buckets="$optarg"
-+ ;;
- --help)
- show_help="yes"
- ;;
-@@ -302,6 +309,7 @@ if test "$show_help" = "yes" ; then
- echo "--dynamic-libengines Lib-based ioengines as dynamic libraries"
- echo "--disable-dfs Disable DAOS File System support even if found"
- echo "--enable-asan Enable address sanitizer"
-+ echo "--seed-buckets= Number of seed buckets for the refill-buffer"
- exit $exit_val
- fi
-
-@@ -3273,6 +3281,15 @@ if test "$disable_tcmalloc" != "yes"; then
- fi
- fi
- print_config "TCMalloc support" "$tcmalloc"
-+if ! num "$seed_buckets"; then
-+ seed_buckets=4
-+elif test "$seed_buckets" -lt 2; then
-+ seed_buckets=2
-+elif test "$seed_buckets" -gt 16; then
-+ seed_buckets=16
-+fi
-+echo "#define CONFIG_SEED_BUCKETS $seed_buckets" >> $config_host_h
-+print_config "seed_buckets" "$seed_buckets"
-
- echo "LIBS+=$LIBS" >> $config_host_mak
- echo "GFIO_LIBS+=$GFIO_LIBS" >> $config_host_mak
-diff --git a/lib/rand.c b/lib/rand.c
-index 1e669116..1ce4a849 100644
---- a/lib/rand.c
-+++ b/lib/rand.c
-@@ -95,7 +95,7 @@ void init_rand_seed(struct frand_state *state, uint64_t seed, bool use64)
- __init_rand64(&state->state64, seed);
- }
-
--void __fill_random_buf(void *buf, unsigned int len, uint64_t seed)
-+void __fill_random_buf_small(void *buf, unsigned int len, uint64_t seed)
- {
- uint64_t *b = buf;
- uint64_t *e = b + len / sizeof(*b);
-@@ -110,6 +110,41 @@ void __fill_random_buf(void *buf, unsigned int len, uint64_t seed)
- __builtin_memcpy(e, &seed, rest);
- }
-
-+void __fill_random_buf(void *buf, unsigned int len, uint64_t seed)
-+{
-+#define MAX_SEED_BUCKETS 16
-+ static uint64_t prime[MAX_SEED_BUCKETS] = {1, 2, 3, 5,
-+ 7, 11, 13, 17,
-+ 19, 23, 29, 31,
-+ 37, 41, 43, 47};
-+
-+ uint64_t *b, *e, s[CONFIG_SEED_BUCKETS];
-+ unsigned int rest;
-+ int p;
-+
-+ /*
-+ * Calculate the max index which is multiples of the seed buckets.
-+ */
-+ rest = (len / sizeof(*b) / CONFIG_SEED_BUCKETS) * CONFIG_SEED_BUCKETS;
-+
-+ b = buf;
-+ e = b + rest;
-+
-+ rest = len - (rest * sizeof(*b));
-+
-+ for (p = 0; p < CONFIG_SEED_BUCKETS; p++)
-+ s[p] = seed * prime[p];
-+
-+ for (; b != e; b += CONFIG_SEED_BUCKETS) {
-+ for (p = 0; p < CONFIG_SEED_BUCKETS; ++p) {
-+ b[p] = s[p];
-+ s[p] = __hash_u64(s[p]);
-+ }
-+ }
-+
-+ __fill_random_buf_small(b, rest, s[0]);
-+}
-+
- uint64_t fill_random_buf(struct frand_state *fs, void *buf,
- unsigned int len)
- {
diff --git a/meta-oe/recipes-benchmark/fio/fio/0003-lib-rand-get-rid-of-unused-MAX_SEED_BUCKETS.patch b/meta-oe/recipes-benchmark/fio/fio/0003-lib-rand-get-rid-of-unused-MAX_SEED_BUCKETS.patch
deleted file mode 100644
index fc4af0a519..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0003-lib-rand-get-rid-of-unused-MAX_SEED_BUCKETS.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From f4dd3f2ad435a75862ad3f34a661b169f72c7885 Mon Sep 17 00:00:00 2001
-From: Jens Axboe <axboe@kernel.dk>
-Date: Wed, 10 Aug 2022 09:51:49 -0600
-Subject: [PATCH] lib/rand: get rid of unused MAX_SEED_BUCKETS
-
-It's only used to size the array, we don't need it.
-
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- lib/rand.c | 8 ++------
- 1 file changed, 2 insertions(+), 6 deletions(-)
-
-diff --git a/lib/rand.c b/lib/rand.c
-index 1ce4a849..0e787a62 100644
---- a/lib/rand.c
-+++ b/lib/rand.c
-@@ -112,12 +112,8 @@ void __fill_random_buf_small(void *buf, unsigned int len, uint64_t seed)
-
- void __fill_random_buf(void *buf, unsigned int len, uint64_t seed)
- {
--#define MAX_SEED_BUCKETS 16
-- static uint64_t prime[MAX_SEED_BUCKETS] = {1, 2, 3, 5,
-- 7, 11, 13, 17,
-- 19, 23, 29, 31,
-- 37, 41, 43, 47};
--
-+ static uint64_t prime[] = {1, 2, 3, 5, 7, 11, 13, 17,
-+ 19, 23, 29, 31, 37, 41, 43, 47};
- uint64_t *b, *e, s[CONFIG_SEED_BUCKETS];
- unsigned int rest;
- int p;
diff --git a/meta-oe/recipes-benchmark/fio/fio/0004-ioengines-merge-filecreate-filestat-filedelete-engin.patch b/meta-oe/recipes-benchmark/fio/fio/0004-ioengines-merge-filecreate-filestat-filedelete-engin.patch
deleted file mode 100644
index 1b59ad304c..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0004-ioengines-merge-filecreate-filestat-filedelete-engin.patch
+++ /dev/null
@@ -1,800 +0,0 @@
-From 1cfbaff9806f17c2afbabe79c1c87b96eba7f35a Mon Sep 17 00:00:00 2001
-From: "Friendy.Su@sony.com" <Friendy.Su@sony.com>
-Date: Mon, 8 Aug 2022 08:35:50 +0000
-Subject: [PATCH] ioengines: merge filecreate, filestat, filedelete engines to
- fileoperations.c
-
-file operation engines have similar structure, implement them
-in one file.
-
-Signed-off-by: friendy-su <friendy.su@sony.com>
----
- Makefile | 2 +-
- engines/filecreate.c | 118 ---------------
- engines/filedelete.c | 115 --------------
- engines/fileoperations.c | 318 +++++++++++++++++++++++++++++++++++++++
- engines/filestat.c | 190 -----------------------
- 5 files changed, 319 insertions(+), 424 deletions(-)
- delete mode 100644 engines/filecreate.c
- delete mode 100644 engines/filedelete.c
- create mode 100644 engines/fileoperations.c
- delete mode 100644 engines/filestat.c
-
-diff --git a/Makefile b/Makefile
-index 188a74d7..634d2c93 100644
---- a/Makefile
-+++ b/Makefile
-@@ -56,7 +56,7 @@ SOURCE := $(sort $(patsubst $(SRCDIR)/%,%,$(wildcard $(SRCDIR)/crc/*.c)) \
- pshared.c options.c \
- smalloc.c filehash.c profile.c debug.c engines/cpu.c \
- engines/mmap.c engines/sync.c engines/null.c engines/net.c \
-- engines/ftruncate.c engines/filecreate.c engines/filestat.c engines/filedelete.c \
-+ engines/ftruncate.c engines/fileoperations.c \
- engines/exec.c \
- server.c client.c iolog.c backend.c libfio.c flow.c cconv.c \
- gettime-thread.c helpers.c json.c idletime.c td_error.c \
-diff --git a/engines/filecreate.c b/engines/filecreate.c
-deleted file mode 100644
-index 7884752d..00000000
---- a/engines/filecreate.c
-+++ /dev/null
-@@ -1,118 +0,0 @@
--/*
-- * filecreate engine
-- *
-- * IO engine that doesn't do any IO, just creates files and tracks the latency
-- * of the file creation.
-- */
--#include <stdio.h>
--#include <fcntl.h>
--#include <errno.h>
--
--#include "../fio.h"
--
--struct fc_data {
-- enum fio_ddir stat_ddir;
--};
--
--static int open_file(struct thread_data *td, struct fio_file *f)
--{
-- struct timespec start;
-- int do_lat = !td->o.disable_lat;
--
-- dprint(FD_FILE, "fd open %s\n", f->file_name);
--
-- if (f->filetype != FIO_TYPE_FILE) {
-- log_err("fio: only files are supported\n");
-- return 1;
-- }
-- if (!strcmp(f->file_name, "-")) {
-- log_err("fio: can't read/write to stdin/out\n");
-- return 1;
-- }
--
-- if (do_lat)
-- fio_gettime(&start, NULL);
--
-- f->fd = open(f->file_name, O_CREAT|O_RDWR, 0600);
--
-- if (f->fd == -1) {
-- char buf[FIO_VERROR_SIZE];
-- int e = errno;
--
-- snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
-- td_verror(td, e, buf);
-- return 1;
-- }
--
-- if (do_lat) {
-- struct fc_data *data = td->io_ops_data;
-- uint64_t nsec;
--
-- nsec = ntime_since_now(&start);
-- add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
-- }
--
-- return 0;
--}
--
--static enum fio_q_status queue_io(struct thread_data *td,
-- struct io_u fio_unused *io_u)
--{
-- return FIO_Q_COMPLETED;
--}
--
--/*
-- * Ensure that we at least have a block size worth of IO to do for each
-- * file. If the job file has td->o.size < nr_files * block_size, then
-- * fio won't do anything.
-- */
--static int get_file_size(struct thread_data *td, struct fio_file *f)
--{
-- f->real_file_size = td_min_bs(td);
-- return 0;
--}
--
--static int init(struct thread_data *td)
--{
-- struct fc_data *data;
--
-- data = calloc(1, sizeof(*data));
--
-- if (td_read(td))
-- data->stat_ddir = DDIR_READ;
-- else if (td_write(td))
-- data->stat_ddir = DDIR_WRITE;
--
-- td->io_ops_data = data;
-- return 0;
--}
--
--static void cleanup(struct thread_data *td)
--{
-- struct fc_data *data = td->io_ops_data;
--
-- free(data);
--}
--
--static struct ioengine_ops ioengine = {
-- .name = "filecreate",
-- .version = FIO_IOOPS_VERSION,
-- .init = init,
-- .cleanup = cleanup,
-- .queue = queue_io,
-- .get_file_size = get_file_size,
-- .open_file = open_file,
-- .close_file = generic_close_file,
-- .flags = FIO_DISKLESSIO | FIO_SYNCIO | FIO_FAKEIO |
-- FIO_NOSTATS | FIO_NOFILEHASH,
--};
--
--static void fio_init fio_filecreate_register(void)
--{
-- register_ioengine(&ioengine);
--}
--
--static void fio_exit fio_filecreate_unregister(void)
--{
-- unregister_ioengine(&ioengine);
--}
-diff --git a/engines/filedelete.c b/engines/filedelete.c
-deleted file mode 100644
-index df388ac9..00000000
---- a/engines/filedelete.c
-+++ /dev/null
-@@ -1,115 +0,0 @@
--/*
-- * file delete engine
-- *
-- * IO engine that doesn't do any IO, just delete files and track the latency
-- * of the file deletion.
-- */
--#include <stdio.h>
--#include <fcntl.h>
--#include <errno.h>
--#include <sys/types.h>
--#include <unistd.h>
--#include "../fio.h"
--
--struct fc_data {
-- enum fio_ddir stat_ddir;
--};
--
--static int delete_file(struct thread_data *td, struct fio_file *f)
--{
-- struct timespec start;
-- int do_lat = !td->o.disable_lat;
-- int ret;
--
-- dprint(FD_FILE, "fd delete %s\n", f->file_name);
--
-- if (f->filetype != FIO_TYPE_FILE) {
-- log_err("fio: only files are supported\n");
-- return 1;
-- }
-- if (!strcmp(f->file_name, "-")) {
-- log_err("fio: can't read/write to stdin/out\n");
-- return 1;
-- }
--
-- if (do_lat)
-- fio_gettime(&start, NULL);
--
-- ret = unlink(f->file_name);
--
-- if (ret == -1) {
-- char buf[FIO_VERROR_SIZE];
-- int e = errno;
--
-- snprintf(buf, sizeof(buf), "delete(%s)", f->file_name);
-- td_verror(td, e, buf);
-- return 1;
-- }
--
-- if (do_lat) {
-- struct fc_data *data = td->io_ops_data;
-- uint64_t nsec;
--
-- nsec = ntime_since_now(&start);
-- add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
-- }
--
-- return 0;
--}
--
--
--static enum fio_q_status queue_io(struct thread_data *td, struct io_u fio_unused *io_u)
--{
-- return FIO_Q_COMPLETED;
--}
--
--static int init(struct thread_data *td)
--{
-- struct fc_data *data;
--
-- data = calloc(1, sizeof(*data));
--
-- if (td_read(td))
-- data->stat_ddir = DDIR_READ;
-- else if (td_write(td))
-- data->stat_ddir = DDIR_WRITE;
--
-- td->io_ops_data = data;
-- return 0;
--}
--
--static int delete_invalidate(struct thread_data *td, struct fio_file *f)
--{
-- /* do nothing because file not opened */
-- return 0;
--}
--
--static void cleanup(struct thread_data *td)
--{
-- struct fc_data *data = td->io_ops_data;
--
-- free(data);
--}
--
--static struct ioengine_ops ioengine = {
-- .name = "filedelete",
-- .version = FIO_IOOPS_VERSION,
-- .init = init,
-- .invalidate = delete_invalidate,
-- .cleanup = cleanup,
-- .queue = queue_io,
-- .get_file_size = generic_get_file_size,
-- .open_file = delete_file,
-- .flags = FIO_SYNCIO | FIO_FAKEIO |
-- FIO_NOSTATS | FIO_NOFILEHASH,
--};
--
--static void fio_init fio_filedelete_register(void)
--{
-- register_ioengine(&ioengine);
--}
--
--static void fio_exit fio_filedelete_unregister(void)
--{
-- unregister_ioengine(&ioengine);
--}
-diff --git a/engines/fileoperations.c b/engines/fileoperations.c
-new file mode 100644
-index 00000000..1db60da1
---- /dev/null
-+++ b/engines/fileoperations.c
-@@ -0,0 +1,318 @@
-+/*
-+ * fileoperations engine
-+ *
-+ * IO engine that doesn't do any IO, just operates files and tracks the latency
-+ * of the file operation.
-+ */
-+#include <stdio.h>
-+#include <stdlib.h>
-+#include <fcntl.h>
-+#include <errno.h>
-+#include <sys/types.h>
-+#include <sys/stat.h>
-+#include <unistd.h>
-+#include "../fio.h"
-+#include "../optgroup.h"
-+#include "../oslib/statx.h"
-+
-+
-+struct fc_data {
-+ enum fio_ddir stat_ddir;
-+};
-+
-+struct filestat_options {
-+ void *pad;
-+ unsigned int stat_type;
-+};
-+
-+enum {
-+ FIO_FILESTAT_STAT = 1,
-+ FIO_FILESTAT_LSTAT = 2,
-+ FIO_FILESTAT_STATX = 3,
-+};
-+
-+static struct fio_option options[] = {
-+ {
-+ .name = "stat_type",
-+ .lname = "stat_type",
-+ .type = FIO_OPT_STR,
-+ .off1 = offsetof(struct filestat_options, stat_type),
-+ .help = "Specify stat system call type to measure lookup/getattr performance",
-+ .def = "stat",
-+ .posval = {
-+ { .ival = "stat",
-+ .oval = FIO_FILESTAT_STAT,
-+ .help = "Use stat(2)",
-+ },
-+ { .ival = "lstat",
-+ .oval = FIO_FILESTAT_LSTAT,
-+ .help = "Use lstat(2)",
-+ },
-+ { .ival = "statx",
-+ .oval = FIO_FILESTAT_STATX,
-+ .help = "Use statx(2) if exists",
-+ },
-+ },
-+ .category = FIO_OPT_C_ENGINE,
-+ .group = FIO_OPT_G_FILESTAT,
-+ },
-+ {
-+ .name = NULL,
-+ },
-+};
-+
-+
-+static int open_file(struct thread_data *td, struct fio_file *f)
-+{
-+ struct timespec start;
-+ int do_lat = !td->o.disable_lat;
-+
-+ dprint(FD_FILE, "fd open %s\n", f->file_name);
-+
-+ if (f->filetype != FIO_TYPE_FILE) {
-+ log_err("fio: only files are supported\n");
-+ return 1;
-+ }
-+ if (!strcmp(f->file_name, "-")) {
-+ log_err("fio: can't read/write to stdin/out\n");
-+ return 1;
-+ }
-+
-+ if (do_lat)
-+ fio_gettime(&start, NULL);
-+
-+ f->fd = open(f->file_name, O_CREAT|O_RDWR, 0600);
-+
-+ if (f->fd == -1) {
-+ char buf[FIO_VERROR_SIZE];
-+ int e = errno;
-+
-+ snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
-+ td_verror(td, e, buf);
-+ return 1;
-+ }
-+
-+ if (do_lat) {
-+ struct fc_data *data = td->io_ops_data;
-+ uint64_t nsec;
-+
-+ nsec = ntime_since_now(&start);
-+ add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
-+ }
-+
-+ return 0;
-+}
-+
-+static int stat_file(struct thread_data *td, struct fio_file *f)
-+{
-+ struct filestat_options *o = td->eo;
-+ struct timespec start;
-+ int do_lat = !td->o.disable_lat;
-+ struct stat statbuf;
-+#ifndef WIN32
-+ struct statx statxbuf;
-+ char *abspath;
-+#endif
-+ int ret;
-+
-+ dprint(FD_FILE, "fd stat %s\n", f->file_name);
-+
-+ if (f->filetype != FIO_TYPE_FILE) {
-+ log_err("fio: only files are supported\n");
-+ return 1;
-+ }
-+ if (!strcmp(f->file_name, "-")) {
-+ log_err("fio: can't read/write to stdin/out\n");
-+ return 1;
-+ }
-+
-+ if (do_lat)
-+ fio_gettime(&start, NULL);
-+
-+ switch (o->stat_type) {
-+ case FIO_FILESTAT_STAT:
-+ ret = stat(f->file_name, &statbuf);
-+ break;
-+ case FIO_FILESTAT_LSTAT:
-+ ret = lstat(f->file_name, &statbuf);
-+ break;
-+ case FIO_FILESTAT_STATX:
-+#ifndef WIN32
-+ abspath = realpath(f->file_name, NULL);
-+ if (abspath) {
-+ ret = statx(-1, abspath, 0, STATX_ALL, &statxbuf);
-+ free(abspath);
-+ } else
-+ ret = -1;
-+#else
-+ ret = -1;
-+#endif
-+ break;
-+ default:
-+ ret = -1;
-+ break;
-+ }
-+
-+ if (ret == -1) {
-+ char buf[FIO_VERROR_SIZE];
-+ int e = errno;
-+
-+ snprintf(buf, sizeof(buf), "stat(%s) type=%u", f->file_name,
-+ o->stat_type);
-+ td_verror(td, e, buf);
-+ return 1;
-+ }
-+
-+ if (do_lat) {
-+ struct fc_data *data = td->io_ops_data;
-+ uint64_t nsec;
-+
-+ nsec = ntime_since_now(&start);
-+ add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
-+ }
-+
-+ return 0;
-+}
-+
-+
-+static int delete_file(struct thread_data *td, struct fio_file *f)
-+{
-+ struct timespec start;
-+ int do_lat = !td->o.disable_lat;
-+ int ret;
-+
-+ dprint(FD_FILE, "fd delete %s\n", f->file_name);
-+
-+ if (f->filetype != FIO_TYPE_FILE) {
-+ log_err("fio: only files are supported\n");
-+ return 1;
-+ }
-+ if (!strcmp(f->file_name, "-")) {
-+ log_err("fio: can't read/write to stdin/out\n");
-+ return 1;
-+ }
-+
-+ if (do_lat)
-+ fio_gettime(&start, NULL);
-+
-+ ret = unlink(f->file_name);
-+
-+ if (ret == -1) {
-+ char buf[FIO_VERROR_SIZE];
-+ int e = errno;
-+
-+ snprintf(buf, sizeof(buf), "delete(%s)", f->file_name);
-+ td_verror(td, e, buf);
-+ return 1;
-+ }
-+
-+ if (do_lat) {
-+ struct fc_data *data = td->io_ops_data;
-+ uint64_t nsec;
-+
-+ nsec = ntime_since_now(&start);
-+ add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
-+ }
-+
-+ return 0;
-+}
-+
-+static int invalidate_do_nothing(struct thread_data *td, struct fio_file *f)
-+{
-+ /* do nothing because file not opened */
-+ return 0;
-+}
-+
-+static enum fio_q_status queue_io(struct thread_data *td, struct io_u *io_u)
-+{
-+ return FIO_Q_COMPLETED;
-+}
-+
-+/*
-+ * Ensure that we at least have a block size worth of IO to do for each
-+ * file. If the job file has td->o.size < nr_files * block_size, then
-+ * fio won't do anything.
-+ */
-+static int get_file_size(struct thread_data *td, struct fio_file *f)
-+{
-+ f->real_file_size = td_min_bs(td);
-+ return 0;
-+}
-+
-+static int init(struct thread_data *td)
-+{
-+ struct fc_data *data;
-+
-+ data = calloc(1, sizeof(*data));
-+
-+ if (td_read(td))
-+ data->stat_ddir = DDIR_READ;
-+ else if (td_write(td))
-+ data->stat_ddir = DDIR_WRITE;
-+
-+ td->io_ops_data = data;
-+ return 0;
-+}
-+
-+static void cleanup(struct thread_data *td)
-+{
-+ struct fc_data *data = td->io_ops_data;
-+
-+ free(data);
-+}
-+
-+static struct ioengine_ops ioengine_filecreate = {
-+ .name = "filecreate",
-+ .version = FIO_IOOPS_VERSION,
-+ .init = init,
-+ .cleanup = cleanup,
-+ .queue = queue_io,
-+ .get_file_size = get_file_size,
-+ .open_file = open_file,
-+ .close_file = generic_close_file,
-+ .flags = FIO_DISKLESSIO | FIO_SYNCIO | FIO_FAKEIO |
-+ FIO_NOSTATS | FIO_NOFILEHASH,
-+};
-+
-+static struct ioengine_ops ioengine_filestat = {
-+ .name = "filestat",
-+ .version = FIO_IOOPS_VERSION,
-+ .init = init,
-+ .cleanup = cleanup,
-+ .queue = queue_io,
-+ .invalidate = invalidate_do_nothing,
-+ .get_file_size = generic_get_file_size,
-+ .open_file = stat_file,
-+ .flags = FIO_SYNCIO | FIO_FAKEIO |
-+ FIO_NOSTATS | FIO_NOFILEHASH,
-+ .options = options,
-+ .option_struct_size = sizeof(struct filestat_options),
-+};
-+
-+static struct ioengine_ops ioengine_filedelete = {
-+ .name = "filedelete",
-+ .version = FIO_IOOPS_VERSION,
-+ .init = init,
-+ .invalidate = invalidate_do_nothing,
-+ .cleanup = cleanup,
-+ .queue = queue_io,
-+ .get_file_size = generic_get_file_size,
-+ .open_file = delete_file,
-+ .flags = FIO_SYNCIO | FIO_FAKEIO |
-+ FIO_NOSTATS | FIO_NOFILEHASH,
-+};
-+
-+
-+static void fio_init fio_fileoperations_register(void)
-+{
-+ register_ioengine(&ioengine_filecreate);
-+ register_ioengine(&ioengine_filestat);
-+ register_ioengine(&ioengine_filedelete);
-+}
-+
-+static void fio_exit fio_fileoperations_unregister(void)
-+{
-+ unregister_ioengine(&ioengine_filecreate);
-+ unregister_ioengine(&ioengine_filestat);
-+ unregister_ioengine(&ioengine_filedelete);
-+}
-diff --git a/engines/filestat.c b/engines/filestat.c
-deleted file mode 100644
-index e587eb54..00000000
---- a/engines/filestat.c
-+++ /dev/null
-@@ -1,190 +0,0 @@
--/*
-- * filestat engine
-- *
-- * IO engine that doesn't do any IO, just stat files and tracks the latency
-- * of the file stat.
-- */
--#include <stdio.h>
--#include <stdlib.h>
--#include <fcntl.h>
--#include <errno.h>
--#include <sys/types.h>
--#include <sys/stat.h>
--#include <unistd.h>
--#include "../fio.h"
--#include "../optgroup.h"
--#include "../oslib/statx.h"
--
--struct fc_data {
-- enum fio_ddir stat_ddir;
--};
--
--struct filestat_options {
-- void *pad;
-- unsigned int stat_type;
--};
--
--enum {
-- FIO_FILESTAT_STAT = 1,
-- FIO_FILESTAT_LSTAT = 2,
-- FIO_FILESTAT_STATX = 3,
--};
--
--static struct fio_option options[] = {
-- {
-- .name = "stat_type",
-- .lname = "stat_type",
-- .type = FIO_OPT_STR,
-- .off1 = offsetof(struct filestat_options, stat_type),
-- .help = "Specify stat system call type to measure lookup/getattr performance",
-- .def = "stat",
-- .posval = {
-- { .ival = "stat",
-- .oval = FIO_FILESTAT_STAT,
-- .help = "Use stat(2)",
-- },
-- { .ival = "lstat",
-- .oval = FIO_FILESTAT_LSTAT,
-- .help = "Use lstat(2)",
-- },
-- { .ival = "statx",
-- .oval = FIO_FILESTAT_STATX,
-- .help = "Use statx(2) if exists",
-- },
-- },
-- .category = FIO_OPT_C_ENGINE,
-- .group = FIO_OPT_G_FILESTAT,
-- },
-- {
-- .name = NULL,
-- },
--};
--
--static int stat_file(struct thread_data *td, struct fio_file *f)
--{
-- struct filestat_options *o = td->eo;
-- struct timespec start;
-- int do_lat = !td->o.disable_lat;
-- struct stat statbuf;
--#ifndef WIN32
-- struct statx statxbuf;
-- char *abspath;
--#endif
-- int ret;
--
-- dprint(FD_FILE, "fd stat %s\n", f->file_name);
--
-- if (f->filetype != FIO_TYPE_FILE) {
-- log_err("fio: only files are supported\n");
-- return 1;
-- }
-- if (!strcmp(f->file_name, "-")) {
-- log_err("fio: can't read/write to stdin/out\n");
-- return 1;
-- }
--
-- if (do_lat)
-- fio_gettime(&start, NULL);
--
-- switch (o->stat_type){
-- case FIO_FILESTAT_STAT:
-- ret = stat(f->file_name, &statbuf);
-- break;
-- case FIO_FILESTAT_LSTAT:
-- ret = lstat(f->file_name, &statbuf);
-- break;
-- case FIO_FILESTAT_STATX:
--#ifndef WIN32
-- abspath = realpath(f->file_name, NULL);
-- if (abspath) {
-- ret = statx(-1, abspath, 0, STATX_ALL, &statxbuf);
-- free(abspath);
-- } else
-- ret = -1;
--#else
-- ret = -1;
--#endif
-- break;
-- default:
-- ret = -1;
-- break;
-- }
--
-- if (ret == -1) {
-- char buf[FIO_VERROR_SIZE];
-- int e = errno;
--
-- snprintf(buf, sizeof(buf), "stat(%s) type=%u", f->file_name,
-- o->stat_type);
-- td_verror(td, e, buf);
-- return 1;
-- }
--
-- if (do_lat) {
-- struct fc_data *data = td->io_ops_data;
-- uint64_t nsec;
--
-- nsec = ntime_since_now(&start);
-- add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
-- }
--
-- return 0;
--}
--
--static enum fio_q_status queue_io(struct thread_data *td, struct io_u fio_unused *io_u)
--{
-- return FIO_Q_COMPLETED;
--}
--
--static int init(struct thread_data *td)
--{
-- struct fc_data *data;
--
-- data = calloc(1, sizeof(*data));
--
-- if (td_read(td))
-- data->stat_ddir = DDIR_READ;
-- else if (td_write(td))
-- data->stat_ddir = DDIR_WRITE;
--
-- td->io_ops_data = data;
-- return 0;
--}
--
--static void cleanup(struct thread_data *td)
--{
-- struct fc_data *data = td->io_ops_data;
--
-- free(data);
--}
--
--static int stat_invalidate(struct thread_data *td, struct fio_file *f)
--{
-- /* do nothing because file not opened */
-- return 0;
--}
--
--static struct ioengine_ops ioengine = {
-- .name = "filestat",
-- .version = FIO_IOOPS_VERSION,
-- .init = init,
-- .cleanup = cleanup,
-- .queue = queue_io,
-- .invalidate = stat_invalidate,
-- .get_file_size = generic_get_file_size,
-- .open_file = stat_file,
-- .flags = FIO_SYNCIO | FIO_FAKEIO |
-- FIO_NOSTATS | FIO_NOFILEHASH,
-- .options = options,
-- .option_struct_size = sizeof(struct filestat_options),
--};
--
--static void fio_init fio_filestat_register(void)
--{
-- register_ioengine(&ioengine);
--}
--
--static void fio_exit fio_filestat_unregister(void)
--{
-- unregister_ioengine(&ioengine);
--}
diff --git a/meta-oe/recipes-benchmark/fio/fio/0005-engines-http-Add-storage-class-option-for-s3.patch b/meta-oe/recipes-benchmark/fio/fio/0005-engines-http-Add-storage-class-option-for-s3.patch
deleted file mode 100644
index 587df1adae..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0005-engines-http-Add-storage-class-option-for-s3.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-From 5b0b5247f0770a89084964274bb951f5a4393299 Mon Sep 17 00:00:00 2001
-From: "Feng, Hualong" <hualong.feng@intel.com>
-Date: Wed, 20 Jul 2022 12:01:35 +0800
-Subject: [PATCH] engines/http: Add storage class option for s3
-
-Amazon S3 offers a range of storage classes that you can choose from
-based on the data access, resiliency, and cost requirements of your
-workloads. (https://aws.amazon.com/s3/storage-classes/)
-
-For example, we have **STANDARD** storage class to test normal
-workload, and have **COLD** storage class to test the workload
-with gzip compression. It is convenient to select which
-storage class to access for different kinds data.
-
-Signed-off-by: Feng, Hualong <hualong.feng@intel.com>
----
- engines/http.c | 25 +++++++++++++++++++------
- 1 file changed, 19 insertions(+), 6 deletions(-)
-
-diff --git a/engines/http.c b/engines/http.c
-index 1de9e66c..dbcde287 100644
---- a/engines/http.c
-+++ b/engines/http.c
-@@ -57,6 +57,7 @@ struct http_options {
- char *s3_key;
- char *s3_keyid;
- char *s3_region;
-+ char *s3_storage_class;
- char *swift_auth_token;
- int verbose;
- unsigned int mode;
-@@ -161,6 +162,16 @@ static struct fio_option options[] = {
- .category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_HTTP,
- },
-+ {
-+ .name = "http_s3_storage_class",
-+ .lname = "S3 Storage class",
-+ .type = FIO_OPT_STR_STORE,
-+ .help = "S3 Storage Class",
-+ .off1 = offsetof(struct http_options, s3_storage_class),
-+ .def = "STANDARD",
-+ .category = FIO_OPT_C_ENGINE,
-+ .group = FIO_OPT_G_HTTP,
-+ },
- {
- .name = "http_mode",
- .lname = "Request mode to use",
-@@ -335,8 +346,8 @@ static void _add_aws_auth_header(CURL *curl, struct curl_slist *slist, struct ht
- char date_iso[32];
- char method[8];
- char dkey[128];
-- char creq[512];
-- char sts[256];
-+ char creq[4096];
-+ char sts[512];
- char s[512];
- char *uri_encoded = NULL;
- char *dsha = NULL;
-@@ -373,11 +384,12 @@ static void _add_aws_auth_header(CURL *curl, struct curl_slist *slist, struct ht
- "host:%s\n"
- "x-amz-content-sha256:%s\n"
- "x-amz-date:%s\n"
-+ "x-amz-storage-class:%s\n"
- "\n"
-- "host;x-amz-content-sha256;x-amz-date\n"
-+ "host;x-amz-content-sha256;x-amz-date;x-amz-storage-class\n"
- "%s"
- , method
-- , uri_encoded, o->host, dsha, date_iso, dsha);
-+ , uri_encoded, o->host, dsha, date_iso, o->s3_storage_class, dsha);
-
- csha = _gen_hex_sha256(creq, strlen(creq));
- snprintf(sts, sizeof(sts), "AWS4-HMAC-SHA256\n%s\n%s/%s/%s/%s\n%s",
-@@ -400,9 +412,10 @@ static void _add_aws_auth_header(CURL *curl, struct curl_slist *slist, struct ht
-
- snprintf(s, sizeof(s), "x-amz-date: %s", date_iso);
- slist = curl_slist_append(slist, s);
--
-+ snprintf(s, sizeof(s), "x-amz-storage-class: %s", o->s3_storage_class);
-+ slist = curl_slist_append(slist, s);
- snprintf(s, sizeof(s), "Authorization: AWS4-HMAC-SHA256 Credential=%s/%s/%s/s3/aws4_request,"
-- "SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=%s",
-+ "SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-storage-class,Signature=%s",
- o->s3_keyid, date_short, o->s3_region, signature);
- slist = curl_slist_append(slist, s);
-
diff --git a/meta-oe/recipes-benchmark/fio/fio/0006-engines-http-Add-s3-crypto-options-for-s3.patch b/meta-oe/recipes-benchmark/fio/fio/0006-engines-http-Add-s3-crypto-options-for-s3.patch
deleted file mode 100644
index d8222c4d1d..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0006-engines-http-Add-s3-crypto-options-for-s3.patch
+++ /dev/null
@@ -1,246 +0,0 @@
-From d196fda02eb73958c2acd367de650858c6203420 Mon Sep 17 00:00:00 2001
-From: "Feng, Hualong" <hualong.feng@intel.com>
-Date: Wed, 20 Jul 2022 09:41:35 +0800
-Subject: [PATCH] engines/http: Add s3 crypto options for s3
-
-Server-side encryption is about protecting data at rest.
-(https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html)
-
-When we want to test server-side encryption, we need to specify
-server-side encryption with customer-provided encryption keys (SSE-C).
-The two option **http_s3_sse_customer_key** and
-**http_s3_sse_customer_algorithm** is for server-side encryption.
-
-Signed-off-by: Feng, Hualong <hualong.feng@intel.com>
----
- engines/http.c | 163 +++++++++++++++++++++++++++++++++++++++++++------
- 1 file changed, 146 insertions(+), 17 deletions(-)
-
-diff --git a/engines/http.c b/engines/http.c
-index dbcde287..56dc7d1b 100644
---- a/engines/http.c
-+++ b/engines/http.c
-@@ -57,6 +57,8 @@ struct http_options {
- char *s3_key;
- char *s3_keyid;
- char *s3_region;
-+ char *s3_sse_customer_key;
-+ char *s3_sse_customer_algorithm;
- char *s3_storage_class;
- char *swift_auth_token;
- int verbose;
-@@ -162,6 +164,26 @@ static struct fio_option options[] = {
- .category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_HTTP,
- },
-+ {
-+ .name = "http_s3_sse_customer_key",
-+ .lname = "SSE Customer Key",
-+ .type = FIO_OPT_STR_STORE,
-+ .help = "S3 SSE Customer Key",
-+ .off1 = offsetof(struct http_options, s3_sse_customer_key),
-+ .def = "",
-+ .category = FIO_OPT_C_ENGINE,
-+ .group = FIO_OPT_G_HTTP,
-+ },
-+ {
-+ .name = "http_s3_sse_customer_algorithm",
-+ .lname = "SSE Customer Algorithm",
-+ .type = FIO_OPT_STR_STORE,
-+ .help = "S3 SSE Customer Algorithm",
-+ .off1 = offsetof(struct http_options, s3_sse_customer_algorithm),
-+ .def = "AES256",
-+ .category = FIO_OPT_C_ENGINE,
-+ .group = FIO_OPT_G_HTTP,
-+ },
- {
- .name = "http_s3_storage_class",
- .lname = "S3 Storage class",
-@@ -277,6 +299,54 @@ static char *_gen_hex_md5(const char *p, size_t len)
- return _conv_hex(hash, MD5_DIGEST_LENGTH);
- }
-
-+static char *_conv_base64_encode(const unsigned char *p, size_t len)
-+{
-+ char *r, *ret;
-+ int i;
-+ static const char sEncodingTable[] = {
-+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
-+ 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
-+ 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
-+ 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
-+ 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
-+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
-+ 'w', 'x', 'y', 'z', '0', '1', '2', '3',
-+ '4', '5', '6', '7', '8', '9', '+', '/'
-+ };
-+
-+ size_t out_len = 4 * ((len + 2) / 3);
-+ ret = r = malloc(out_len + 1);
-+
-+ for (i = 0; i < len - 2; i += 3) {
-+ *r++ = sEncodingTable[(p[i] >> 2) & 0x3F];
-+ *r++ = sEncodingTable[((p[i] & 0x3) << 4) | ((int) (p[i + 1] & 0xF0) >> 4)];
-+ *r++ = sEncodingTable[((p[i + 1] & 0xF) << 2) | ((int) (p[i + 2] & 0xC0) >> 6)];
-+ *r++ = sEncodingTable[p[i + 2] & 0x3F];
-+ }
-+
-+ if (i < len) {
-+ *r++ = sEncodingTable[(p[i] >> 2) & 0x3F];
-+ if (i == (len - 1)) {
-+ *r++ = sEncodingTable[((p[i] & 0x3) << 4)];
-+ *r++ = '=';
-+ } else {
-+ *r++ = sEncodingTable[((p[i] & 0x3) << 4) | ((int) (p[i + 1] & 0xF0) >> 4)];
-+ *r++ = sEncodingTable[((p[i + 1] & 0xF) << 2)];
-+ }
-+ *r++ = '=';
-+ }
-+
-+ ret[out_len]=0;
-+ return ret;
-+}
-+
-+static char *_gen_base64_md5(const unsigned char *p, size_t len)
-+{
-+ unsigned char hash[MD5_DIGEST_LENGTH];
-+ MD5((unsigned char*)p, len, hash);
-+ return _conv_base64_encode(hash, MD5_DIGEST_LENGTH);
-+}
-+
- static void _hmac(unsigned char *md, void *key, int key_len, char *data) {
- #ifndef CONFIG_HAVE_OPAQUE_HMAC_CTX
- HMAC_CTX _ctx;
-@@ -356,6 +426,9 @@ static void _add_aws_auth_header(CURL *curl, struct curl_slist *slist, struct ht
- const char *service = "s3";
- const char *aws = "aws4_request";
- unsigned char md[SHA256_DIGEST_LENGTH];
-+ unsigned char sse_key[33] = {0};
-+ char *sse_key_base64 = NULL;
-+ char *sse_key_md5_base64 = NULL;
-
- time_t t = time(NULL);
- struct tm *gtm = gmtime(&t);
-@@ -364,6 +437,9 @@ static void _add_aws_auth_header(CURL *curl, struct curl_slist *slist, struct ht
- strftime (date_iso, sizeof(date_iso), "%Y%m%dT%H%M%SZ", gtm);
- uri_encoded = _aws_uriencode(uri);
-
-+ if (o->s3_sse_customer_key != NULL)
-+ strncpy((char*)sse_key, o->s3_sse_customer_key, sizeof(sse_key) - 1);
-+
- if (op == DDIR_WRITE) {
- dsha = _gen_hex_sha256(buf, len);
- sprintf(method, "PUT");
-@@ -377,23 +453,50 @@ static void _add_aws_auth_header(CURL *curl, struct curl_slist *slist, struct ht
- }
-
- /* Create the canonical request first */
-- snprintf(creq, sizeof(creq),
-- "%s\n"
-- "%s\n"
-- "\n"
-- "host:%s\n"
-- "x-amz-content-sha256:%s\n"
-- "x-amz-date:%s\n"
-- "x-amz-storage-class:%s\n"
-- "\n"
-- "host;x-amz-content-sha256;x-amz-date;x-amz-storage-class\n"
-- "%s"
-- , method
-- , uri_encoded, o->host, dsha, date_iso, o->s3_storage_class, dsha);
-+ if (sse_key[0] != '\0') {
-+ sse_key_base64 = _conv_base64_encode(sse_key, sizeof(sse_key) - 1);
-+ sse_key_md5_base64 = _gen_base64_md5(sse_key, sizeof(sse_key) - 1);
-+ snprintf(creq, sizeof(creq),
-+ "%s\n"
-+ "%s\n"
-+ "\n"
-+ "host:%s\n"
-+ "x-amz-content-sha256:%s\n"
-+ "x-amz-date:%s\n"
-+ "x-amz-server-side-encryption-customer-algorithm:%s\n"
-+ "x-amz-server-side-encryption-customer-key:%s\n"
-+ "x-amz-server-side-encryption-customer-key-md5:%s\n"
-+ "x-amz-storage-class:%s\n"
-+ "\n"
-+ "host;x-amz-content-sha256;x-amz-date;"
-+ "x-amz-server-side-encryption-customer-algorithm;"
-+ "x-amz-server-side-encryption-customer-key;"
-+ "x-amz-server-side-encryption-customer-key-md5;"
-+ "x-amz-storage-class\n"
-+ "%s"
-+ , method
-+ , uri_encoded, o->host, dsha, date_iso
-+ , o->s3_sse_customer_algorithm, sse_key_base64
-+ , sse_key_md5_base64, o->s3_storage_class, dsha);
-+ } else {
-+ snprintf(creq, sizeof(creq),
-+ "%s\n"
-+ "%s\n"
-+ "\n"
-+ "host:%s\n"
-+ "x-amz-content-sha256:%s\n"
-+ "x-amz-date:%s\n"
-+ "x-amz-storage-class:%s\n"
-+ "\n"
-+ "host;x-amz-content-sha256;x-amz-date;x-amz-storage-class\n"
-+ "%s"
-+ , method
-+ , uri_encoded, o->host, dsha, date_iso, o->s3_storage_class, dsha);
-+ }
-
- csha = _gen_hex_sha256(creq, strlen(creq));
- snprintf(sts, sizeof(sts), "AWS4-HMAC-SHA256\n%s\n%s/%s/%s/%s\n%s",
-- date_iso, date_short, o->s3_region, service, aws, csha);
-+ date_iso, date_short, o->s3_region, service, aws, csha);
-
- snprintf((char *)dkey, sizeof(dkey), "AWS4%s", o->s3_key);
- _hmac(md, dkey, strlen(dkey), date_short);
-@@ -412,11 +515,33 @@ static void _add_aws_auth_header(CURL *curl, struct curl_slist *slist, struct ht
-
- snprintf(s, sizeof(s), "x-amz-date: %s", date_iso);
- slist = curl_slist_append(slist, s);
-+
-+ if (sse_key[0] != '\0') {
-+ snprintf(s, sizeof(s), "x-amz-server-side-encryption-customer-algorithm: %s", o->s3_sse_customer_algorithm);
-+ slist = curl_slist_append(slist, s);
-+ snprintf(s, sizeof(s), "x-amz-server-side-encryption-customer-key: %s", sse_key_base64);
-+ slist = curl_slist_append(slist, s);
-+ snprintf(s, sizeof(s), "x-amz-server-side-encryption-customer-key-md5: %s", sse_key_md5_base64);
-+ slist = curl_slist_append(slist, s);
-+ }
-+
- snprintf(s, sizeof(s), "x-amz-storage-class: %s", o->s3_storage_class);
- slist = curl_slist_append(slist, s);
-- snprintf(s, sizeof(s), "Authorization: AWS4-HMAC-SHA256 Credential=%s/%s/%s/s3/aws4_request,"
-- "SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-storage-class,Signature=%s",
-- o->s3_keyid, date_short, o->s3_region, signature);
-+
-+ if (sse_key[0] != '\0') {
-+ snprintf(s, sizeof(s), "Authorization: AWS4-HMAC-SHA256 Credential=%s/%s/%s/s3/aws4_request,"
-+ "SignedHeaders=host;x-amz-content-sha256;"
-+ "x-amz-date;x-amz-server-side-encryption-customer-algorithm;"
-+ "x-amz-server-side-encryption-customer-key;"
-+ "x-amz-server-side-encryption-customer-key-md5;"
-+ "x-amz-storage-class,"
-+ "Signature=%s",
-+ o->s3_keyid, date_short, o->s3_region, signature);
-+ } else {
-+ snprintf(s, sizeof(s), "Authorization: AWS4-HMAC-SHA256 Credential=%s/%s/%s/s3/aws4_request,"
-+ "SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-storage-class,Signature=%s",
-+ o->s3_keyid, date_short, o->s3_region, signature);
-+ }
- slist = curl_slist_append(slist, s);
-
- curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
-@@ -425,6 +550,10 @@ static void _add_aws_auth_header(CURL *curl, struct curl_slist *slist, struct ht
- free(csha);
- free(dsha);
- free(signature);
-+ if (sse_key_base64 != NULL) {
-+ free(sse_key_base64);
-+ free(sse_key_md5_base64);
-+ }
- }
-
- static void _add_swift_header(CURL *curl, struct curl_slist *slist, struct http_options *o,
diff --git a/meta-oe/recipes-benchmark/fio/fio/0007-doc-Add-usage-and-example-about-s3-storage-class-and.patch b/meta-oe/recipes-benchmark/fio/fio/0007-doc-Add-usage-and-example-about-s3-storage-class-and.patch
deleted file mode 100644
index d5815935d7..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0007-doc-Add-usage-and-example-about-s3-storage-class-and.patch
+++ /dev/null
@@ -1,155 +0,0 @@
-From 0cad4b6957818937519604b466a2da5b0c395cfe Mon Sep 17 00:00:00 2001
-From: "Feng, Hualong" <hualong.feng@intel.com>
-Date: Thu, 28 Jul 2022 01:47:48 +0000
-Subject: [PATCH] doc: Add usage and example about s3 storage class and crypto
-
-There add option usage about s3 storage class
-`http_s3_storage_class` and
-s3 SSE server side encryption
-`http_s3_sse_customer_key` and
-`http_s3_sse_customer_algorithm`
-
-And add example file in example folder.
-
-Signed-off-by: Feng, Hualong <hualong.feng@intel.com>
----
- HOWTO.rst | 14 +++++++++++
- examples/http-s3-crypto.fio | 38 ++++++++++++++++++++++++++++++
- examples/http-s3-storage-class.fio | 37 +++++++++++++++++++++++++++++
- fio.1 | 9 +++++++
- 4 files changed, 98 insertions(+)
- create mode 100644 examples/http-s3-crypto.fio
- create mode 100644 examples/http-s3-storage-class.fio
-
-diff --git a/HOWTO.rst b/HOWTO.rst
-index 104cce2d..05fc117f 100644
---- a/HOWTO.rst
-+++ b/HOWTO.rst
-@@ -2692,6 +2692,20 @@ with the caveat that when used on the command line, they must come after the
-
- The S3 key/access id.
-
-+.. option:: http_s3_sse_customer_key=str : [http]
-+
-+ The encryption customer key in SSE server side.
-+
-+.. option:: http_s3_sse_customer_algorithm=str : [http]
-+
-+ The encryption customer algorithm in SSE server side.
-+ Default is **AES256**
-+
-+.. option:: http_s3_storage_class=str : [http]
-+
-+ Which storage class to access. User-customizable settings.
-+ Default is **STANDARD**
-+
- .. option:: http_swift_auth_token=str : [http]
-
- The Swift auth token. See the example configuration file on how
-diff --git a/examples/http-s3-crypto.fio b/examples/http-s3-crypto.fio
-new file mode 100644
-index 00000000..2403746e
---- /dev/null
-+++ b/examples/http-s3-crypto.fio
-@@ -0,0 +1,38 @@
-+# Example test for the HTTP engine's S3 support against Amazon AWS.
-+# Obviously, you have to adjust the S3 credentials; for this example,
-+# they're passed in via the environment.
-+# And you can set the SSE Customer Key and Algorithm to test Server
-+# Side Encryption.
-+#
-+
-+[global]
-+ioengine=http
-+name=test
-+direct=1
-+filename=/larsmb-fio-test/object
-+http_verbose=0
-+https=on
-+http_mode=s3
-+http_s3_key=${S3_KEY}
-+http_s3_keyid=${S3_ID}
-+http_host=s3.eu-central-1.amazonaws.com
-+http_s3_region=eu-central-1
-+http_s3_sse_customer_key=${SSE_KEY}
-+http_s3_sse_customer_algorithm=AES256
-+group_reporting
-+
-+# With verify, this both writes and reads the object
-+[create]
-+rw=write
-+bs=4k
-+size=64k
-+io_size=4k
-+verify=sha256
-+
-+[trim]
-+stonewall
-+rw=trim
-+bs=4k
-+size=64k
-+io_size=4k
-+
-diff --git a/examples/http-s3-storage-class.fio b/examples/http-s3-storage-class.fio
-new file mode 100644
-index 00000000..9ee23837
---- /dev/null
-+++ b/examples/http-s3-storage-class.fio
-@@ -0,0 +1,37 @@
-+# Example test for the HTTP engine's S3 support against Amazon AWS.
-+# Obviously, you have to adjust the S3 credentials; for this example,
-+# they're passed in via the environment.
-+# And here add storage class parameter, you can set normal test for
-+# STANDARD and compression test for another storage class.
-+#
-+
-+[global]
-+ioengine=http
-+name=test
-+direct=1
-+filename=/larsmb-fio-test/object
-+http_verbose=0
-+https=on
-+http_mode=s3
-+http_s3_key=${S3_KEY}
-+http_s3_keyid=${S3_ID}
-+http_host=s3.eu-central-1.amazonaws.com
-+http_s3_region=eu-central-1
-+http_s3_storage_class=${STORAGE_CLASS}
-+group_reporting
-+
-+# With verify, this both writes and reads the object
-+[create]
-+rw=write
-+bs=4k
-+size=64k
-+io_size=4k
-+verify=sha256
-+
-+[trim]
-+stonewall
-+rw=trim
-+bs=4k
-+size=64k
-+io_size=4k
-+
-diff --git a/fio.1 b/fio.1
-index ce9bf3ef..6630525f 100644
---- a/fio.1
-+++ b/fio.1
-@@ -2308,6 +2308,15 @@ The S3 secret key.
- .BI (http)http_s3_keyid \fR=\fPstr
- The S3 key/access id.
- .TP
-+.BI (http)http_s3_sse_customer_key \fR=\fPstr
-+The encryption customer key in SSE server side.
-+.TP
-+.BI (http)http_s3_sse_customer_algorithm \fR=\fPstr
-+The encryption customer algorithm in SSE server side. Default is \fBAES256\fR
-+.TP
-+.BI (http)http_s3_storage_class \fR=\fPstr
-+Which storage class to access. User-customizable settings. Default is \fBSTANDARD\fR
-+.TP
- .BI (http)http_swift_auth_token \fR=\fPstr
- The Swift auth token. See the example configuration file on how to
- retrieve this.
diff --git a/meta-oe/recipes-benchmark/fio/fio/0008-README-link-to-GitHub-releases-for-Windows.patch b/meta-oe/recipes-benchmark/fio/fio/0008-README-link-to-GitHub-releases-for-Windows.patch
deleted file mode 100644
index 3f7ba4ccb4..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0008-README-link-to-GitHub-releases-for-Windows.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 6809d81b2a9b854697c65e0d69455a39d4497a6b Mon Sep 17 00:00:00 2001
-From: Vincent Fu <vincent.fu@samsung.com>
-Date: Mon, 15 Aug 2022 10:37:57 -0400
-Subject: [PATCH] README: link to GitHub releases for Windows
-
-Note that Windows installers are now available as releases on GitHub.
-
-Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
----
- README.rst | 10 ++++++----
- 1 file changed, 6 insertions(+), 4 deletions(-)
-
-diff --git a/README.rst b/README.rst
-index 67420903..79582dea 100644
---- a/README.rst
-+++ b/README.rst
-@@ -123,10 +123,12 @@ Solaris:
- ``pkgutil -i fio``.
-
- Windows:
-- Rebecca Cran <rebecca@bsdio.com> has fio packages for Windows at
-- https://bsdio.com/fio/ . The latest builds for Windows can also
-- be grabbed from https://ci.appveyor.com/project/axboe/fio by clicking
-- the latest x86 or x64 build, then selecting the ARTIFACTS tab.
-+ Beginning with fio 3.31 Windows installers are available on GitHub at
-+ https://github.com/axboe/fio/releases. Rebecca Cran
-+ <rebecca@bsdio.com> has fio packages for Windows at
-+ https://bsdio.com/fio/ . The latest builds for Windows can also be
-+ grabbed from https://ci.appveyor.com/project/axboe/fio by clicking the
-+ latest x86 or x64 build and then selecting the Artifacts tab.
-
- BSDs:
- Packages for BSDs may be available from their binary package repositories.
diff --git a/meta-oe/recipes-benchmark/fio/fio/0009-engines-xnvme-fix-segfault-issue-with-xnvme-ioengine.patch b/meta-oe/recipes-benchmark/fio/fio/0009-engines-xnvme-fix-segfault-issue-with-xnvme-ioengine.patch
deleted file mode 100644
index 6e322b8f91..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0009-engines-xnvme-fix-segfault-issue-with-xnvme-ioengine.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From d5aac3401e180f3d4ff3db04ebb4e3165b975987 Mon Sep 17 00:00:00 2001
-From: Ankit Kumar <ankit.kumar@samsung.com>
-Date: Tue, 16 Aug 2022 11:08:20 +0530
-Subject: [PATCH] engines/xnvme: fix segfault issue with xnvme ioengine
-
-fix segfault when xnvme ioengine is called without thread=1.
-The segfault happens because td->io_ops_data is accessed at
-two locations xnvme_fioe_cleanup and xnvme_fioe_iomem_free,
-during the error handling call.
-
-Signed-off-by: Ankit Kumar <ankit.kumar@samsung.com>
-Link: https://lore.kernel.org/r/20220816053821.440-2-ankit.kumar@samsung.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- engines/xnvme.c | 17 ++++++++++++++---
- 1 file changed, 14 insertions(+), 3 deletions(-)
-
-diff --git a/engines/xnvme.c b/engines/xnvme.c
-index c11b33a8..d8647481 100644
---- a/engines/xnvme.c
-+++ b/engines/xnvme.c
-@@ -205,9 +205,14 @@ static void _dev_close(struct thread_data *td, struct xnvme_fioe_fwrap *fwrap)
-
- static void xnvme_fioe_cleanup(struct thread_data *td)
- {
-- struct xnvme_fioe_data *xd = td->io_ops_data;
-+ struct xnvme_fioe_data *xd = NULL;
- int err;
-
-+ if (!td->io_ops_data)
-+ return;
-+
-+ xd = td->io_ops_data;
-+
- err = pthread_mutex_lock(&g_serialize);
- if (err)
- log_err("ioeng->cleanup(): pthread_mutex_lock(), err(%d)\n", err);
-@@ -367,8 +372,14 @@ static int xnvme_fioe_iomem_alloc(struct thread_data *td, size_t total_mem)
- /* NOTE: using the first device for buffer-allocators) */
- static void xnvme_fioe_iomem_free(struct thread_data *td)
- {
-- struct xnvme_fioe_data *xd = td->io_ops_data;
-- struct xnvme_fioe_fwrap *fwrap = &xd->files[0];
-+ struct xnvme_fioe_data *xd = NULL;
-+ struct xnvme_fioe_fwrap *fwrap = NULL;
-+
-+ if (!td->io_ops_data)
-+ return;
-+
-+ xd = td->io_ops_data;
-+ fwrap = &xd->files[0];
-
- if (!fwrap->dev) {
- log_err("ioeng->iomem_free(): failed no dev-handle\n");
diff --git a/meta-oe/recipes-benchmark/fio/fio/0010-doc-update-fio-doc-for-xnvme-engine.patch b/meta-oe/recipes-benchmark/fio/fio/0010-doc-update-fio-doc-for-xnvme-engine.patch
deleted file mode 100644
index 6c85cfc204..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0010-doc-update-fio-doc-for-xnvme-engine.patch
+++ /dev/null
@@ -1,168 +0,0 @@
-From 8e318fd65ba5c0f6ce82860984bc8d69a7843f97 Mon Sep 17 00:00:00 2001
-From: Ankit Kumar <ankit.kumar@samsung.com>
-Date: Tue, 16 Aug 2022 11:08:21 +0530
-Subject: [PATCH] doc: update fio doc for xnvme engine
-
-- Elaborate about the various sync, async and admin
- interfaces.
-- add missing io_uring_cmd async backend entry.
-- xnvme ioengine doesn't support file stat.
-
-Signed-off-by: Ankit Kumar <ankit.kumar@samsung.com>
-Link: https://lore.kernel.org/r/20220816053821.440-3-ankit.kumar@samsung.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- HOWTO.rst | 37 ++++++++++++++++++++++++++-----------
- fio.1 | 34 +++++++++++++++++++++-------------
- 2 files changed, 47 insertions(+), 24 deletions(-)
-
-diff --git a/HOWTO.rst b/HOWTO.rst
-index 05fc117f..b2750b56 100644
---- a/HOWTO.rst
-+++ b/HOWTO.rst
-@@ -2780,41 +2780,56 @@ with the caveat that when used on the command line, they must come after the
- Select the xnvme async command interface. This can take these values.
-
- **emu**
-- This is default and used to emulate asynchronous I/O.
-+ This is default and use to emulate asynchronous I/O by using a
-+ single thread to create a queue pair on top of a synchronous
-+ I/O interface using the NVMe driver IOCTL.
- **thrpool**
-- Use thread pool for Asynchronous I/O.
-+ Emulate an asynchronous I/O interface with a pool of userspace
-+ threads on top of a synchronous I/O interface using the NVMe
-+ driver IOCTL. By default four threads are used.
- **io_uring**
-- Use Linux io_uring/liburing for Asynchronous I/O.
-+ Linux native asynchronous I/O interface which supports both
-+ direct and buffered I/O.
-+ **io_uring_cmd**
-+ Fast Linux native asynchronous I/O interface for NVMe pass
-+ through commands. This only works with NVMe character device
-+ (/dev/ngXnY).
- **libaio**
- Use Linux aio for Asynchronous I/O.
- **posix**
-- Use POSIX aio for Asynchronous I/O.
-+ Use the posix asynchronous I/O interface to perform one or
-+ more I/O operations asynchronously.
- **nil**
-- Use nil-io; For introspective perf. evaluation
-+ Do not transfer any data; just pretend to. This is mainly used
-+ for introspective performance evaluation.
-
- .. option:: xnvme_sync=str : [xnvme]
-
- Select the xnvme synchronous command interface. This can take these values.
-
- **nvme**
-- This is default and uses Linux NVMe Driver ioctl() for synchronous I/O.
-+ This is default and uses Linux NVMe Driver ioctl() for
-+ synchronous I/O.
- **psync**
-- Use pread()/write() for synchronous I/O.
-+ This supports regular as well as vectored pread() and pwrite()
-+ commands.
-+ **block**
-+ This is the same as psync except that it also supports zone
-+ management commands using Linux block layer IOCTLs.
-
- .. option:: xnvme_admin=str : [xnvme]
-
- Select the xnvme admin command interface. This can take these values.
-
- **nvme**
-- This is default and uses linux NVMe Driver ioctl() for admin commands.
-+ This is default and uses linux NVMe Driver ioctl() for admin
-+ commands.
- **block**
- Use Linux Block Layer ioctl() and sysfs for admin commands.
-- **file_as_ns**
-- Use file-stat to construct NVMe idfy responses.
-
- .. option:: xnvme_dev_nsid=int : [xnvme]
-
-- xnvme namespace identifier, for userspace NVMe driver.
-+ xnvme namespace identifier for userspace NVMe driver, such as SPDK.
-
- .. option:: xnvme_iovec=int : [xnvme]
-
-diff --git a/fio.1 b/fio.1
-index 6630525f..f3f3dc5d 100644
---- a/fio.1
-+++ b/fio.1
-@@ -2530,22 +2530,29 @@ Select the xnvme async command interface. This can take these values.
- .RS
- .TP
- .B emu
--This is default and used to emulate asynchronous I/O
-+This is default and use to emulate asynchronous I/O by using a single thread to
-+create a queue pair on top of a synchronous I/O interface using the NVMe driver
-+IOCTL.
- .TP
- .BI thrpool
--Use thread pool for Asynchronous I/O
-+Emulate an asynchronous I/O interface with a pool of userspace threads on top
-+of a synchronous I/O interface using the NVMe driver IOCTL. By default four
-+threads are used.
- .TP
- .BI io_uring
--Use Linux io_uring/liburing for Asynchronous I/O
-+Linux native asynchronous I/O interface which supports both direct and buffered
-+I/O.
- .TP
- .BI libaio
- Use Linux aio for Asynchronous I/O
- .TP
- .BI posix
--Use POSIX aio for Asynchronous I/O
-+Use the posix asynchronous I/O interface to perform one or more I/O operations
-+asynchronously.
- .TP
- .BI nil
--Use nil-io; For introspective perf. evaluation
-+Do not transfer any data; just pretend to. This is mainly used for
-+introspective performance evaluation.
- .RE
- .RE
- .TP
-@@ -2555,10 +2562,14 @@ Select the xnvme synchronous command interface. This can take these values.
- .RS
- .TP
- .B nvme
--This is default and uses Linux NVMe Driver ioctl() for synchronous I/O
-+This is default and uses Linux NVMe Driver ioctl() for synchronous I/O.
- .TP
- .BI psync
--Use pread()/write() for synchronous I/O
-+This supports regular as well as vectored pread() and pwrite() commands.
-+.TP
-+.BI block
-+This is the same as psync except that it also supports zone management
-+commands using Linux block layer IOCTLs.
- .RE
- .RE
- .TP
-@@ -2568,18 +2579,15 @@ Select the xnvme admin command interface. This can take these values.
- .RS
- .TP
- .B nvme
--This is default and uses Linux NVMe Driver ioctl() for admin commands
-+This is default and uses Linux NVMe Driver ioctl() for admin commands.
- .TP
- .BI block
--Use Linux Block Layer ioctl() and sysfs for admin commands
--.TP
--.BI file_as_ns
--Use file-stat as to construct NVMe idfy responses
-+Use Linux Block Layer ioctl() and sysfs for admin commands.
- .RE
- .RE
- .TP
- .BI (xnvme)xnvme_dev_nsid\fR=\fPint
--xnvme namespace identifier, for userspace NVMe driver.
-+xnvme namespace identifier for userspace NVMe driver such as SPDK.
- .TP
- .BI (xnvme)xnvme_iovec
- If this option is set, xnvme will use vectored read/write commands.
diff --git a/meta-oe/recipes-benchmark/fio/fio/0011-test-add-latency-test-using-posixaio-ioengine.patch b/meta-oe/recipes-benchmark/fio/fio/0011-test-add-latency-test-using-posixaio-ioengine.patch
deleted file mode 100644
index d86ac11abb..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0011-test-add-latency-test-using-posixaio-ioengine.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From fa64b199318318af7fe598a5b9ec62b981a55e2d Mon Sep 17 00:00:00 2001
-From: Vincent Fu <vincent.fu@samsung.com>
-Date: Mon, 15 Aug 2022 11:34:43 -0400
-Subject: [PATCH] test: add latency test using posixaio ioengine
-
-Make sure that mean(slat) + mean(clat) = mean(total lat).
-
-Tests 15 and 16 use the libaio and null ioengines, respectively. Both of
-those ioengines have commit hooks. Add this new test using the posixaio
-ioengine which does not have a commit hook so that we can better cover
-the possible ways that latency is calculated.
-
-Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
----
- t/jobs/t0017.fio | 9 +++++++++
- t/run-fio-tests.py | 10 ++++++++++
- 2 files changed, 19 insertions(+)
- create mode 100644 t/jobs/t0017.fio
-
-diff --git a/t/jobs/t0017.fio b/t/jobs/t0017.fio
-new file mode 100644
-index 00000000..14486d98
---- /dev/null
-+++ b/t/jobs/t0017.fio
-@@ -0,0 +1,9 @@
-+# Expected result: mean(slat) + mean(clat) = mean(lat)
-+# Buggy result: equality does not hold
-+# This is similar to t0015 and t0016 except that it uses posixaio, which is
-+# available on more platforms and does not have a commit hook
-+
-+[test]
-+ioengine=posixaio
-+size=1M
-+iodepth=16
-diff --git a/t/run-fio-tests.py b/t/run-fio-tests.py
-index d77f20e0..2bd02a2a 100755
---- a/t/run-fio-tests.py
-+++ b/t/run-fio-tests.py
-@@ -857,6 +857,16 @@ TEST_LIST = [
- 'output_format': 'json',
- 'requirements': [],
- },
-+ {
-+ 'test_id': 17,
-+ 'test_class': FioJobTest_t0015,
-+ 'job': 't0017.fio',
-+ 'success': SUCCESS_DEFAULT,
-+ 'pre_job': None,
-+ 'pre_success': None,
-+ 'output_format': 'json',
-+ 'requirements': [Requirements.not_windows],
-+ },
- {
- 'test_id': 1000,
- 'test_class': FioExeTest,
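For reference, the identity that t0015-t0017 check is just linearity of the mean; writing per-I/O submission latency as s_i and completion latency as c_i, and assuming fio's definition lat_i = s_i + c_i (which is what these tests exercise):

    \[
    \overline{\mathrm{lat}} = \frac{1}{n}\sum_{i=1}^{n}(s_i + c_i)
                            = \frac{1}{n}\sum_{i=1}^{n}s_i + \frac{1}{n}\sum_{i=1}^{n}c_i
                            = \overline{\mathrm{slat}} + \overline{\mathrm{clat}}
    \]

So the equality must hold (up to rounding) regardless of whether the ioengine has a commit hook; the three tests only swap the engine to cover the different latency accounting paths.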
diff --git a/meta-oe/recipes-benchmark/fio/fio/0012-test-fix-hash-for-t0016.patch b/meta-oe/recipes-benchmark/fio/fio/0012-test-fix-hash-for-t0016.patch
deleted file mode 100644
index 8f10f415cc..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0012-test-fix-hash-for-t0016.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 1e68459d85f56f805c70236ad47a65a65f426867 Mon Sep 17 00:00:00 2001
-From: Vincent Fu <vincent.fu@samsung.com>
-Date: Mon, 15 Aug 2022 11:40:58 -0400
-Subject: [PATCH] test: fix hash for t0016
-
-I used the wrong hash for t0016 in the original commit. Fix it to refer
-to the hash that fixed the issue in this tree.
-
-Fixes: de31fe9a ("testing: add test for slat + clat = tlat")
-Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
----
- t/jobs/{t0016-259ebc00.fio => t0016-d54ae22.fio} | 0
- t/run-fio-tests.py | 2 +-
- 2 files changed, 1 insertion(+), 1 deletion(-)
- rename t/jobs/{t0016-259ebc00.fio => t0016-d54ae22.fio} (100%)
-
-diff --git a/t/jobs/t0016-259ebc00.fio b/t/jobs/t0016-d54ae22.fio
-similarity index 100%
-rename from t/jobs/t0016-259ebc00.fio
-rename to t/jobs/t0016-d54ae22.fio
-diff --git a/t/run-fio-tests.py b/t/run-fio-tests.py
-index 2bd02a2a..504b7cdb 100755
---- a/t/run-fio-tests.py
-+++ b/t/run-fio-tests.py
-@@ -850,7 +850,7 @@ TEST_LIST = [
- {
- 'test_id': 16,
- 'test_class': FioJobTest_t0015,
-- 'job': 't0016-259ebc00.fio',
-+ 'job': 't0016-d54ae22.fio',
- 'success': SUCCESS_DEFAULT,
- 'pre_job': None,
- 'pre_success': None,
diff --git a/meta-oe/recipes-benchmark/fio/fio/0013-doc-get-rid-of-trailing-whitespace.patch b/meta-oe/recipes-benchmark/fio/fio/0013-doc-get-rid-of-trailing-whitespace.patch
deleted file mode 100644
index 5f72f47b07..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0013-doc-get-rid-of-trailing-whitespace.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From ff16b7c0c855d5242d92d7f902247525ff1f889a Mon Sep 17 00:00:00 2001
-From: Konstantin Kharlamov <Hi-Angel@yandex.ru>
-Date: Tue, 16 Aug 2022 19:14:13 +0300
-Subject: [PATCH] doc: get rid of trailing whitespace
-
-Signed-off-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
----
- HOWTO.rst | 4 ++--
- fio.1 | 10 +++++-----
- 2 files changed, 7 insertions(+), 7 deletions(-)
-
-diff --git a/HOWTO.rst b/HOWTO.rst
-index b2750b56..c94238ed 100644
---- a/HOWTO.rst
-+++ b/HOWTO.rst
-@@ -1301,7 +1301,7 @@ I/O type
- effectively caps the file size at `real_size - offset`. Can be combined with
- :option:`size` to constrain the start and end range of the I/O workload.
- A percentage can be specified by a number between 1 and 100 followed by '%',
-- for example, ``offset=20%`` to specify 20%. In ZBD mode, value can be set as
-+ for example, ``offset=20%`` to specify 20%. In ZBD mode, value can be set as
- number of zones using 'z'.
-
- .. option:: offset_align=int
-@@ -1877,7 +1877,7 @@ I/O size
- If this option is not specified, fio will use the full size of the given
- files or devices. If the files do not exist, size must be given. It is also
- possible to give size as a percentage between 1 and 100. If ``size=20%`` is
-- given, fio will use 20% of the full size of the given files or devices.
-+ given, fio will use 20% of the full size of the given files or devices.
- In ZBD mode, value can also be set as number of zones using 'z'.
- Can be combined with :option:`offset` to constrain the start and end range
- that I/O will be done within.
-diff --git a/fio.1 b/fio.1
-index f3f3dc5d..d40b4247 100644
---- a/fio.1
-+++ b/fio.1
-@@ -292,7 +292,7 @@ For Zone Block Device Mode:
- .RS
- .P
- .PD 0
--z means Zone
-+z means Zone
- .P
- .PD
- .RE
-@@ -1083,7 +1083,7 @@ provided. Data before the given offset will not be touched. This
- effectively caps the file size at `real_size \- offset'. Can be combined with
- \fBsize\fR to constrain the start and end range of the I/O workload.
- A percentage can be specified by a number between 1 and 100 followed by '%',
--for example, `offset=20%' to specify 20%. In ZBD mode, value can be set as
-+for example, `offset=20%' to specify 20%. In ZBD mode, value can be set as
- number of zones using 'z'.
- .TP
- .BI offset_align \fR=\fPint
-@@ -1099,7 +1099,7 @@ specified). This option is useful if there are several jobs which are
- intended to operate on a file in parallel disjoint segments, with even
- spacing between the starting points. Percentages can be used for this option.
- If a percentage is given, the generated offset will be aligned to the minimum
--\fBblocksize\fR or to the value of \fBoffset_align\fR if provided.In ZBD mode, value
-+\fBblocksize\fR or to the value of \fBoffset_align\fR if provided.In ZBD mode, value
- can be set as number of zones using 'z'.
- .TP
- .BI number_ios \fR=\fPint
-@@ -1678,7 +1678,7 @@ If this option is not specified, fio will use the full size of the given
- files or devices. If the files do not exist, size must be given. It is also
- possible to give size as a percentage between 1 and 100. If `size=20%' is
- given, fio will use 20% of the full size of the given files or devices. In ZBD mode,
--size can be given in units of number of zones using 'z'. Can be combined with \fBoffset\fR to
-+size can be given in units of number of zones using 'z'. Can be combined with \fBoffset\fR to
- constrain the start and end range that I/O will be done within.
- .TP
- .BI io_size \fR=\fPint[%|z] "\fR,\fB io_limit" \fR=\fPint[%|z]
-@@ -1697,7 +1697,7 @@ also be set as number of zones using 'z'.
- .BI filesize \fR=\fPirange(int)
- Individual file sizes. May be a range, in which case fio will select sizes
- for files at random within the given range. If not given, each created file
--is the same size. This option overrides \fBsize\fR in terms of file size,
-+is the same size. This option overrides \fBsize\fR in terms of file size,
- i.e. \fBsize\fR becomes merely the default for \fBio_size\fR (and
- has no effect it all if \fBio_size\fR is set explicitly).
- .TP
diff --git a/meta-oe/recipes-benchmark/fio/fio/0014-doc-clarify-that-I-O-errors-may-go-unnoticed-without.patch b/meta-oe/recipes-benchmark/fio/fio/0014-doc-clarify-that-I-O-errors-may-go-unnoticed-without.patch
deleted file mode 100644
index 3196afa2b1..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0014-doc-clarify-that-I-O-errors-may-go-unnoticed-without.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From 331023be2b20d177d533e5fa18f5d8834570613f Mon Sep 17 00:00:00 2001
-From: Konstantin Kharlamov <Hi-Angel@yandex.ru>
-Date: Tue, 16 Aug 2022 19:10:38 +0300
-Subject: [PATCH] doc: clarify that I/O errors may go unnoticed without
- direct=1
-
-Fixes: https://github.com/axboe/fio/issues/1443
-
-Reported-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
-Signed-off-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
----
- HOWTO.rst | 7 +++++++
- fio.1 | 10 ++++++++++
- 2 files changed, 17 insertions(+)
-
-diff --git a/HOWTO.rst b/HOWTO.rst
-index c94238ed..08be687c 100644
---- a/HOWTO.rst
-+++ b/HOWTO.rst
-@@ -3927,6 +3927,13 @@ Error handling
- appended, the total error count and the first error. The error field given
- in the stats is the first error that was hit during the run.
-
-+ Note: a write error from the device may go unnoticed by fio when using
-+ buffered IO, as the write() (or similar) system call merely dirties the
-+ kernel pages, unless :option:`sync` or :option:`direct` is used. Device IO
-+ errors occur when the dirty data is actually written out to disk. If fully
-+ sync writes aren't desirable, :option:`fsync` or :option:`fdatasync` can be
-+ used as well. This is specific to writes, as reads are always synchronous.
-+
- The allowed values are:
-
- **none**
-diff --git a/fio.1 b/fio.1
-index d40b4247..27454b0b 100644
---- a/fio.1
-+++ b/fio.1
-@@ -3606,6 +3606,16 @@ EILSEQ) until the runtime is exceeded or the I/O size specified is
- completed. If this option is used, there are two more stats that are
- appended, the total error count and the first error. The error field given
- in the stats is the first error that was hit during the run.
-+.RS
-+.P
-+Note: a write error from the device may go unnoticed by fio when using buffered
-+IO, as the write() (or similar) system call merely dirties the kernel pages,
-+unless `sync' or `direct' is used. Device IO errors occur when the dirty data is
-+actually written out to disk. If fully sync writes aren't desirable, `fsync' or
-+`fdatasync' can be used as well. This is specific to writes, as reads are always
-+synchronous.
-+.RS
-+.P
- The allowed values are:
- .RS
- .RS
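As an aside, the behaviour the added note describes is easy to reproduce with plain POSIX calls; a minimal, illustrative C sketch (file path and buffer size are arbitrary) showing why the error check belongs on fsync() rather than on the buffered write():

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[4096];
        int fd = open("/tmp/writeback-demo", O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        memset(buf, 0xaa, sizeof(buf));

        /* With buffered I/O this only dirties page cache, so it will normally
         * succeed even if the device is about to report an error. */
        if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
            perror("write");

        /* A device error (e.g. EIO) surfaces here, at writeback time. */
        if (fsync(fd) < 0)
            perror("fsync");

        close(fd);
        return 0;
    }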
diff --git a/meta-oe/recipes-benchmark/fio/fio/0015-Revert-Minor-style-fixups.patch b/meta-oe/recipes-benchmark/fio/fio/0015-Revert-Minor-style-fixups.patch
deleted file mode 100644
index fd962eaf2d..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0015-Revert-Minor-style-fixups.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From 48ceba9c1870c9312d7214503371e0b781abba27 Mon Sep 17 00:00:00 2001
-From: Vincent Fu <vincent.fu@samsung.com>
-Date: Tue, 23 Aug 2022 12:38:20 -0400
-Subject: [PATCH] Revert "Minor style fixups"
-
-This reverts commit 48f8268e88629d408ffd09b1601ad13366bd4ce1.
-
-Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
----
- backend.c | 2 +-
- filesetup.c | 3 ++-
- 2 files changed, 3 insertions(+), 2 deletions(-)
-
-diff --git a/backend.c b/backend.c
-index 5159b60d..3a99850d 100644
---- a/backend.c
-+++ b/backend.c
-@@ -2321,7 +2321,7 @@ static void run_threads(struct sk_out *sk_out)
- * when setup_files() does not run into issues
- * later.
- */
-- if (!i && td->o.nr_files == 1) {
-+ if (!i && td->o.nr_files==1) {
- if (setup_shared_file(td)) {
- exit_value++;
- if (td->error)
-diff --git a/filesetup.c b/filesetup.c
-index 3e2ccf9b..144a0572 100644
---- a/filesetup.c
-+++ b/filesetup.c
-@@ -1109,8 +1109,9 @@ int setup_shared_file(struct thread_data *td)
- dprint(FD_FILE, "fio: extending shared file\n");
- f->real_file_size = file_size;
- err = extend_file(td, f);
-- if (!err)
-+ if (!err) {
- err = __file_invalidate_cache(td, f, 0, f->real_file_size);
-+ }
- get_file_sizes(td);
- dprint(FD_FILE, "shared setup new real_file_size=%llu\n",
- (unsigned long long)f->real_file_size);
diff --git a/meta-oe/recipes-benchmark/fio/fio/0016-Revert-Fix-multithread-issues-when-operating-on-a-si.patch b/meta-oe/recipes-benchmark/fio/fio/0016-Revert-Fix-multithread-issues-when-operating-on-a-si.patch
deleted file mode 100644
index 31d8345378..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0016-Revert-Fix-multithread-issues-when-operating-on-a-si.patch
+++ /dev/null
@@ -1,141 +0,0 @@
-From 1bc0dec3f54e67fa4767d0096ab377e900d5146f Mon Sep 17 00:00:00 2001
-From: Vincent Fu <vincent.fu@samsung.com>
-Date: Tue, 23 Aug 2022 12:38:51 -0400
-Subject: [PATCH] Revert "Fix multithread issues when operating on a single
- shared file"
-
-This reverts commit acbda87c34c743ff2d9e125d9539bcfbbf49eb75.
-
-This commit introduced a lot of unintended consequences for
-create_serialize=0. The aim of the commit can be accomplished with a
-combination of filesize and io_size.
-
-Fixes: https://github.com/axboe/fio/issues/1442
-Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
----
- backend.c | 19 +------------------
- file.h | 1 -
- filesetup.c | 46 ++--------------------------------------------
- 3 files changed, 3 insertions(+), 63 deletions(-)
-
-diff --git a/backend.c b/backend.c
-index 3a99850d..e5bb4e25 100644
---- a/backend.c
-+++ b/backend.c
-@@ -2314,25 +2314,8 @@ static void run_threads(struct sk_out *sk_out)
- for_each_td(td, i) {
- print_status_init(td->thread_number - 1);
-
-- if (!td->o.create_serialize) {
-- /*
-- * When operating on a single rile in parallel,
-- * perform single-threaded early setup so that
-- * when setup_files() does not run into issues
-- * later.
-- */
-- if (!i && td->o.nr_files==1) {
-- if (setup_shared_file(td)) {
-- exit_value++;
-- if (td->error)
-- log_err("fio: pid=%d, err=%d/%s\n",
-- (int) td->pid, td->error, td->verror);
-- td_set_runstate(td, TD_REAPED);
-- todo--;
-- }
-- }
-+ if (!td->o.create_serialize)
- continue;
-- }
-
- if (fio_verify_load_state(td))
- goto reap;
-diff --git a/file.h b/file.h
-index e646cf22..da1b8947 100644
---- a/file.h
-+++ b/file.h
-@@ -201,7 +201,6 @@ struct thread_data;
- extern void close_files(struct thread_data *);
- extern void close_and_free_files(struct thread_data *);
- extern uint64_t get_start_offset(struct thread_data *, struct fio_file *);
--extern int __must_check setup_shared_file(struct thread_data *);
- extern int __must_check setup_files(struct thread_data *);
- extern int __must_check file_invalidate_cache(struct thread_data *, struct fio_file *);
- #ifdef __cplusplus
-diff --git a/filesetup.c b/filesetup.c
-index 144a0572..1d3cc5ad 100644
---- a/filesetup.c
-+++ b/filesetup.c
-@@ -143,7 +143,7 @@ static int extend_file(struct thread_data *td, struct fio_file *f)
- if (unlink_file || new_layout) {
- int ret;
-
-- dprint(FD_FILE, "layout %d unlink %d %s\n", new_layout, unlink_file, f->file_name);
-+ dprint(FD_FILE, "layout unlink %s\n", f->file_name);
-
- ret = td_io_unlink_file(td, f);
- if (ret != 0 && ret != ENOENT) {
-@@ -198,9 +198,6 @@ static int extend_file(struct thread_data *td, struct fio_file *f)
- }
- }
-
--
-- dprint(FD_FILE, "fill file %s, size %llu\n", f->file_name, (unsigned long long) f->real_file_size);
--
- left = f->real_file_size;
- bs = td->o.max_bs[DDIR_WRITE];
- if (bs > left)
-@@ -1081,45 +1078,6 @@ static bool create_work_dirs(struct thread_data *td, const char *fname)
- return true;
- }
-
--int setup_shared_file(struct thread_data *td)
--{
-- struct fio_file *f;
-- uint64_t file_size;
-- int err = 0;
--
-- if (td->o.nr_files > 1) {
-- log_err("fio: shared file setup called for multiple files\n");
-- return -1;
-- }
--
-- get_file_sizes(td);
--
-- f = td->files[0];
--
-- if (f == NULL) {
-- log_err("fio: NULL shared file\n");
-- return -1;
-- }
--
-- file_size = thread_number * td->o.size;
-- dprint(FD_FILE, "shared setup %s real_file_size=%llu, desired=%llu\n",
-- f->file_name, (unsigned long long)f->real_file_size, (unsigned long long)file_size);
--
-- if (f->real_file_size < file_size) {
-- dprint(FD_FILE, "fio: extending shared file\n");
-- f->real_file_size = file_size;
-- err = extend_file(td, f);
-- if (!err) {
-- err = __file_invalidate_cache(td, f, 0, f->real_file_size);
-- }
-- get_file_sizes(td);
-- dprint(FD_FILE, "shared setup new real_file_size=%llu\n",
-- (unsigned long long)f->real_file_size);
-- }
--
-- return err;
--}
--
- /*
- * Open the files and setup files sizes, creating files if necessary.
- */
-@@ -1134,7 +1092,7 @@ int setup_files(struct thread_data *td)
- const unsigned long long bs = td_min_bs(td);
- uint64_t fs = 0;
-
-- dprint(FD_FILE, "setup files (thread_number=%d, subjob_number=%d)\n", td->thread_number, td->subjob_number);
-+ dprint(FD_FILE, "setup files\n");
-
- old_state = td_bump_runstate(td, TD_SETTING_UP);
-
diff --git a/meta-oe/recipes-benchmark/fio/fio/0017-Add-wait-for-handling-SIGBREAK.patch b/meta-oe/recipes-benchmark/fio/fio/0017-Add-wait-for-handling-SIGBREAK.patch
deleted file mode 100644
index e8cb337583..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0017-Add-wait-for-handling-SIGBREAK.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-From 98beea79a30f1541e646efae911dfce10ae18f5c Mon Sep 17 00:00:00 2001
-From: Brandon Paupore <brandon.paupore@wdc.com>
-Date: Fri, 5 Aug 2022 12:57:27 -0500
-Subject: [PATCH] Add wait for handling SIGBREAK
-
-When closing a command prompt window or terminating it using something
-like the taskkill command, each child process (such as a running FIO
-workload) is sent a SIGBREAK signal. Once those child processes have
-responded to that signal, Windows terminates them if they're still
-executing.
-
-This change has the main thread wait for the others to exit when handling
-a SIGBREAK signal, such that each job will still have time to wrap-up
-and give stats before the entire program terminates.
-
-Signed-off-by: Brandon Paupore <brandon.paupore@wdc.com>
----
- backend.c | 21 ++++++++++++++++++++-
- 1 file changed, 20 insertions(+), 1 deletion(-)
-
-diff --git a/backend.c b/backend.c
-index e5bb4e25..375a23e4 100644
---- a/backend.c
-+++ b/backend.c
-@@ -90,6 +90,25 @@ static void sig_int(int sig)
- }
- }
-
-+#ifdef WIN32
-+static void sig_break(int sig)
-+{
-+ struct thread_data *td;
-+ int i;
-+
-+ sig_int(sig);
-+
-+ /**
-+ * Windows terminates all job processes on SIGBREAK after the handler
-+ * returns, so give them time to wrap-up and give stats
-+ */
-+ for_each_td(td, i) {
-+ while (td->runstate < TD_EXITED)
-+ sleep(1);
-+ }
-+}
-+#endif
-+
- void sig_show_status(int sig)
- {
- show_running_run_stats();
-@@ -112,7 +131,7 @@ static void set_sig_handlers(void)
- /* Windows uses SIGBREAK as a quit signal from other applications */
- #ifdef WIN32
- memset(&act, 0, sizeof(act));
-- act.sa_handler = sig_int;
-+ act.sa_handler = sig_break;
- act.sa_flags = SA_RESTART;
- sigaction(SIGBREAK, &act, NULL);
- #endif
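Outside the patch context, the same wrap-up pattern can be sketched with portable POSIX pieces. The sketch below is illustrative only (the worker count, the worker_done flags and the use of SIGINT instead of the Windows-only SIGBREAK are assumptions), but it shows the idea of a handler that blocks until every worker has flagged completion, the same idea as sig_break() waiting for runstate >= TD_EXITED above:

    #include <pthread.h>
    #include <signal.h>
    #include <string.h>
    #include <unistd.h>

    #define NR_WORKERS 4

    static volatile sig_atomic_t stop;
    static volatile sig_atomic_t worker_done[NR_WORKERS];

    static void *worker(void *arg)
    {
        int id = *(int *)arg;

        while (!stop)
            usleep(1000);            /* stand-in for the I/O loop */
        /* ...a real job would flush its stats here... */
        worker_done[id] = 1;
        return NULL;
    }

    static void sig_quit(int sig)
    {
        (void)sig;
        stop = 1;
        /* Block in the handler until every worker has wrapped up. */
        for (int i = 0; i < NR_WORKERS; i++)
            while (!worker_done[i])
                sleep(1);
    }

    int main(void)
    {
        pthread_t tid[NR_WORKERS];
        int ids[NR_WORKERS];
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_quit;
        act.sa_flags = SA_RESTART;
        sigaction(SIGINT, &act, NULL);   /* SIGBREAK would go here on Windows */

        for (int i = 0; i < NR_WORKERS; i++) {
            ids[i] = i;
            pthread_create(&tid[i], NULL, worker, &ids[i]);
        }
        for (int i = 0; i < NR_WORKERS; i++)
            pthread_join(tid[i], NULL);
        return 0;
    }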
diff --git a/meta-oe/recipes-benchmark/fio/fio/0018-engines-io_uring-pass-back-correct-error-value-when-.patch b/meta-oe/recipes-benchmark/fio/fio/0018-engines-io_uring-pass-back-correct-error-value-when-.patch
deleted file mode 100644
index 8aea3abac9..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0018-engines-io_uring-pass-back-correct-error-value-when-.patch
+++ /dev/null
@@ -1,58 +0,0 @@
-From 1e6918419f4a2e5dbd77dd2da82598f1af63f533 Mon Sep 17 00:00:00 2001
-From: Jens Axboe <axboe@kernel.dk>
-Date: Wed, 24 Aug 2022 12:01:39 -0600
-Subject: [PATCH] engines/io_uring: pass back correct error value when
- interrupted
-
-Running with an io_uring engine and using a USR1 signal to show
-current status will end up terminating the job with:
-
-fio: pid=91726, err=-4/file:ioengines.c:320, func=get_events, error=Unknown error -4
-sfx: (groupid=0, jobs=1): err=-4 (file:ioengines.c:320, func=get_events, error=Unknown error -4): pid=91726: Wed Aug 24 11:59:51 2022
-
-Ensure that the return value is set correctly based on the errno.
-
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- engines/io_uring.c | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
-diff --git a/engines/io_uring.c b/engines/io_uring.c
-index cffc7371..89d64b06 100644
---- a/engines/io_uring.c
-+++ b/engines/io_uring.c
-@@ -445,12 +445,18 @@ static struct io_u *fio_ioring_event(struct thread_data *td, int event)
- struct io_uring_cqe *cqe;
- struct io_u *io_u;
- unsigned index;
-+ static int eio;
-
- index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
-
- cqe = &ld->cq_ring.cqes[index];
- io_u = (struct io_u *) (uintptr_t) cqe->user_data;
-
-+ if (eio++ == 5) {
-+ printf("mark EIO\n");
-+ cqe->res = -EIO;
-+ }
-+
- if (cqe->res != io_u->xfer_buflen) {
- if (cqe->res > io_u->xfer_buflen)
- io_u->error = -cqe->res;
-@@ -532,6 +538,7 @@ static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
- if (r < 0) {
- if (errno == EAGAIN || errno == EINTR)
- continue;
-+ r = -errno;
- td_verror(td, errno, "io_uring_enter");
- break;
- }
-@@ -665,6 +672,7 @@ static int fio_ioring_commit(struct thread_data *td)
- usleep(1);
- continue;
- }
-+ ret = -errno;
- td_verror(td, errno, "io_uring_enter submit");
- break;
- }
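The odd "Unknown error -4" in the commit message falls out of how strerror() treats a negated errno (-EINTR is -4 on Linux); a tiny standalone C illustration of why the sign matters when turning errno into a message:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        int r = -EINTR;                 /* the -4 from the quoted job output */

        /* Passing the negative value straight through is what produced
         * "Unknown error -4" in the log... */
        printf("before: err=%d (%s)\n", r, strerror(r));

        /* ...whereas negating it back gives the intended message,
         * "Interrupted system call". */
        printf("after:  err=%d (%s)\n", r, strerror(-r));
        return 0;
    }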
diff --git a/meta-oe/recipes-benchmark/fio/fio/0019-Enable-CPU-affinity-support-on-Android.patch b/meta-oe/recipes-benchmark/fio/fio/0019-Enable-CPU-affinity-support-on-Android.patch
deleted file mode 100644
index f47741612f..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0019-Enable-CPU-affinity-support-on-Android.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-From 02fd5b722bdfef2e7ce8d4aeb1bc65308d37003f Mon Sep 17 00:00:00 2001
-From: Bart Van Assche <bvanassche@acm.org>
-Date: Wed, 24 Aug 2022 13:42:29 -0700
-Subject: [PATCH] Enable CPU affinity support on Android
-
-This patch enables the --cpumask=, --cpus_allowed= and
---cpus_allowed_policy= fio options.
-
-Signed-off-by: Bart Van Assche <bvanassche@acm.org>
----
- os/os-android.h | 26 ++++++++++++++++++++++++++
- 1 file changed, 26 insertions(+)
-
-diff --git a/os/os-android.h b/os/os-android.h
-index 2f73d249..34534239 100644
---- a/os/os-android.h
-+++ b/os/os-android.h
-@@ -24,6 +24,7 @@
- #define __has_builtin(x) 0 // Compatibility with non-clang compilers.
- #endif
-
-+#define FIO_HAVE_CPU_AFFINITY
- #define FIO_HAVE_DISK_UTIL
- #define FIO_HAVE_IOSCHED_SWITCH
- #define FIO_HAVE_IOPRIO
-@@ -44,6 +45,13 @@
-
- #define OS_MAP_ANON MAP_ANONYMOUS
-
-+typedef cpu_set_t os_cpu_mask_t;
-+
-+#define fio_setaffinity(pid, cpumask) \
-+ sched_setaffinity((pid), sizeof(cpumask), &(cpumask))
-+#define fio_getaffinity(pid, ptr) \
-+ sched_getaffinity((pid), sizeof(cpu_set_t), (ptr))
-+
- #ifndef POSIX_MADV_DONTNEED
- #define posix_madvise madvise
- #define POSIX_MADV_DONTNEED MADV_DONTNEED
-@@ -64,6 +72,24 @@
- pthread_getaffinity_np(pthread_self(), sizeof(mask), &(mask))
- #endif
-
-+#define fio_cpu_clear(mask, cpu) CPU_CLR((cpu), (mask))
-+#define fio_cpu_set(mask, cpu) CPU_SET((cpu), (mask))
-+#define fio_cpu_isset(mask, cpu) (CPU_ISSET((cpu), (mask)) != 0)
-+#define fio_cpu_count(mask) CPU_COUNT((mask))
-+
-+static inline int fio_cpuset_init(os_cpu_mask_t *mask)
-+{
-+ CPU_ZERO(mask);
-+ return 0;
-+}
-+
-+static inline int fio_cpuset_exit(os_cpu_mask_t *mask)
-+{
-+ return 0;
-+}
-+
-+#define FIO_MAX_CPUS CPU_SETSIZE
-+
- #ifndef CONFIG_NO_SHM
- /*
- * Bionic doesn't support SysV shared memory, so implement it using ashmem
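The macros the patch wires up map onto the standard Linux/bionic affinity API; a minimal sketch of pinning the calling thread to CPU 0 and reading the mask back (assumes _GNU_SOURCE and sufficient privileges, and that CPU 0 is online):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);                       /* allow CPU 0 only */
        if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
            perror("sched_setaffinity");
            return 1;
        }

        CPU_ZERO(&mask);
        if (sched_getaffinity(0, sizeof(mask), &mask) != 0) {
            perror("sched_getaffinity");
            return 1;
        }
        printf("running on a mask of %d CPU(s)\n", CPU_COUNT(&mask));
        return 0;
    }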
diff --git a/meta-oe/recipes-benchmark/fio/fio/0020-io_uring-Replace-pthread_self-with-s-tid.patch b/meta-oe/recipes-benchmark/fio/fio/0020-io_uring-Replace-pthread_self-with-s-tid.patch
deleted file mode 100644
index 24952f19f5..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0020-io_uring-Replace-pthread_self-with-s-tid.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From 72d7f2139454528b9ebfb2f988a35f9a739680d0 Mon Sep 17 00:00:00 2001
-From: Khem Raj <raj.khem@gmail.com>
-Date: Wed, 24 Aug 2022 18:08:53 -0700
-Subject: [PATCH] io_uring: Replace pthread_self with s->tid
-
-__init_rand64 takes a 64-bit value and srand48 takes an unsigned 32-bit value.
-pthread_t is an opaque type and some libcs (e.g. musl) do not define it as a
-plain old data type, so passing it directly ends up with errors such as:
-
-| t/io_uring.c:809:32: error: incompatible pointer to integer conversion passing 'pthread_t' (aka 'struct __pthread *') to parameter of type 'uint64_t' (aka 'unsigned long') [-Wint-conver
-sion]
-| __init_rand64(&s->rand_state, pthread_self());
-| ^~~~~~~~~~~~~~
-
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
- t/io_uring.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
-diff --git a/t/io_uring.c b/t/io_uring.c
-index 35bf1956..f34a3554 100644
---- a/t/io_uring.c
-+++ b/t/io_uring.c
-@@ -799,15 +799,14 @@ static int submitter_init(struct submitter *s)
- int i, nr_batch, err;
- static int init_printed;
- char buf[80];
--
- s->tid = gettid();
- printf("submitter=%d, tid=%d, file=%s, node=%d\n", s->index, s->tid,
- s->filename, s->numa_node);
-
- set_affinity(s);
-
-- __init_rand64(&s->rand_state, pthread_self());
-- srand48(pthread_self());
-+ __init_rand64(&s->rand_state, s->tid);
-+ srand48(s->tid);
-
- for (i = 0; i < MAX_FDS; i++)
- s->files[i].fileno = i;
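The portable pattern the patch switches to can be shown in isolation: the kernel TID is a plain pid_t and therefore a safe numeric seed, unlike the opaque pthread_t. A small illustrative C sketch, using the raw syscall so it also works on libcs that do not ship a gettid() wrapper:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        /* The kernel thread id is an ordinary pid_t, unlike pthread_t. */
        pid_t tid = (pid_t)syscall(SYS_gettid);

        srand48(tid);               /* well-defined: srand48() takes a long */
        printf("tid=%d, first draw=%ld\n", (int)tid, lrand48());
        return 0;
    }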
diff --git a/meta-oe/recipes-benchmark/fio/fio/0021-engines-io_uring-delete-debug-code.patch b/meta-oe/recipes-benchmark/fio/fio/0021-engines-io_uring-delete-debug-code.patch
deleted file mode 100644
index b4f3d0d27f..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0021-engines-io_uring-delete-debug-code.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 85f8181d42050f8a8c9ddf6d30f621054f0e6890 Mon Sep 17 00:00:00 2001
-From: Jens Axboe <axboe@kernel.dk>
-Date: Thu, 25 Aug 2022 11:19:34 -0600
-Subject: [PATCH] engines/io_uring: delete debug code
-
-This was inadvertently introduced by a previous commit; get rid
-of it.
-
-Fixes: 1816895b788e ("engines/io_uring: pass back correct error value when interrupted")
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- engines/io_uring.c | 6 ------
- 1 file changed, 6 deletions(-)
-
-diff --git a/engines/io_uring.c b/engines/io_uring.c
-index 89d64b06..94376efa 100644
---- a/engines/io_uring.c
-+++ b/engines/io_uring.c
-@@ -445,18 +445,12 @@ static struct io_u *fio_ioring_event(struct thread_data *td, int event)
- struct io_uring_cqe *cqe;
- struct io_u *io_u;
- unsigned index;
-- static int eio;
-
- index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
-
- cqe = &ld->cq_ring.cqes[index];
- io_u = (struct io_u *) (uintptr_t) cqe->user_data;
-
-- if (eio++ == 5) {
-- printf("mark EIO\n");
-- cqe->res = -EIO;
-- }
--
- if (cqe->res != io_u->xfer_buflen) {
- if (cqe->res > io_u->xfer_buflen)
- io_u->error = -cqe->res;
diff --git a/meta-oe/recipes-benchmark/fio/fio/0022-t-io_uring-prep-for-including-engines-nvme.h-in-t-io.patch b/meta-oe/recipes-benchmark/fio/fio/0022-t-io_uring-prep-for-including-engines-nvme.h-in-t-io.patch
deleted file mode 100644
index f32c8867df..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0022-t-io_uring-prep-for-including-engines-nvme.h-in-t-io.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From a10cec0440a7574ffb76ff52fbc33a250f067d6a Mon Sep 17 00:00:00 2001
-From: Anuj Gupta <anuj20.g@samsung.com>
-Date: Fri, 26 Aug 2022 17:03:05 +0530
-Subject: [PATCH] t/io_uring: prep for including engines/nvme.h in t/io_uring
-
-Change page_size and cal_clat_percentiles name to something different
-as these are indirectly picked from engines/nvme.h (fio.h and stat.h)
-
-Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
-Link: https://lore.kernel.org/r/20220826113306.4139-2-anuj20.g@samsung.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- t/io_uring.c | 18 +++++++++---------
- 1 file changed, 9 insertions(+), 9 deletions(-)
-
-diff --git a/t/io_uring.c b/t/io_uring.c
-index f34a3554..6e4737e4 100644
---- a/t/io_uring.c
-+++ b/t/io_uring.c
-@@ -117,7 +117,7 @@ static struct submitter *submitter;
- static volatile int finish;
- static int stats_running;
- static unsigned long max_iops;
--static long page_size;
-+static long t_io_uring_page_size;
-
- static int depth = DEPTH;
- static int batch_submit = BATCH_SUBMIT;
-@@ -195,9 +195,9 @@ static unsigned long plat_idx_to_val(unsigned int idx)
- return cycles_to_nsec(base + ((k + 0.5) * (1 << error_bits)));
- }
-
--unsigned int calc_clat_percentiles(unsigned long *io_u_plat, unsigned long nr,
-- unsigned long **output,
-- unsigned long *maxv, unsigned long *minv)
-+unsigned int calculate_clat_percentiles(unsigned long *io_u_plat,
-+ unsigned long nr, unsigned long **output,
-+ unsigned long *maxv, unsigned long *minv)
- {
- unsigned long sum = 0;
- unsigned int len = plist_len, i, j = 0;
-@@ -251,7 +251,7 @@ static void show_clat_percentiles(unsigned long *io_u_plat, unsigned long nr,
- bool is_last;
- char fmt[32];
-
-- len = calc_clat_percentiles(io_u_plat, nr, &ovals, &maxv, &minv);
-+ len = calculate_clat_percentiles(io_u_plat, nr, &ovals, &maxv, &minv);
- if (!len || !ovals)
- goto out;
-
-@@ -786,7 +786,7 @@ static void *allocate_mem(struct submitter *s, int size)
- return numa_alloc_onnode(size, s->numa_node);
- #endif
-
-- if (posix_memalign(&buf, page_size, bs)) {
-+ if (posix_memalign(&buf, t_io_uring_page_size, bs)) {
- printf("failed alloc\n");
- return NULL;
- }
-@@ -1542,9 +1542,9 @@ int main(int argc, char *argv[])
-
- arm_sig_int();
-
-- page_size = sysconf(_SC_PAGESIZE);
-- if (page_size < 0)
-- page_size = 4096;
-+ t_io_uring_page_size = sysconf(_SC_PAGESIZE);
-+ if (t_io_uring_page_size < 0)
-+ t_io_uring_page_size = 4096;
-
- for (j = 0; j < nthreads; j++) {
- s = get_submitter(j);
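For context, the renamed t_io_uring_page_size only feeds a standard page-aligned allocation; a self-contained sketch of that pattern with the same 4096-byte fallback (the 64 KiB buffer size is arbitrary):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        void *buf = NULL;

        if (page_size < 0)
            page_size = 4096;        /* same fallback t/io_uring uses */

        /* O_DIRECT-style I/O buffers generally want page alignment. */
        if (posix_memalign(&buf, (size_t)page_size, 64 * 1024)) {
            fprintf(stderr, "failed alloc\n");
            return 1;
        }
        printf("allocated 64 KiB aligned to %ld bytes at %p\n", page_size, buf);
        free(buf);
        return 0;
    }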
diff --git a/meta-oe/recipes-benchmark/fio/fio/0023-t-io_uring-add-support-for-async-passthru.patch b/meta-oe/recipes-benchmark/fio/fio/0023-t-io_uring-add-support-for-async-passthru.patch
deleted file mode 100644
index 8ef7d13e98..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0023-t-io_uring-add-support-for-async-passthru.patch
+++ /dev/null
@@ -1,379 +0,0 @@
-From fa4a1345ca7cd60ae0b96da286f45621a3f45a33 Mon Sep 17 00:00:00 2001
-From: Anuj Gupta <anuj20.g@samsung.com>
-Date: Fri, 26 Aug 2022 17:03:06 +0530
-Subject: [PATCH] t/io_uring: add support for async-passthru
-
-This patch adds support for async-passthru in t/io_uring. The user needs to
-specify the -u1 option on the command line.
-
-Example command line:
-t/io_uring -b512 -d128 -c32 -s32 -p0 -F1 -B0 -O0 -n1 -u1 /dev/ng0n1
-
-Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
-Link: https://lore.kernel.org/r/20220826113306.4139-3-anuj20.g@samsung.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- t/io_uring.c | 238 +++++++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 230 insertions(+), 8 deletions(-)
-
-diff --git a/t/io_uring.c b/t/io_uring.c
-index 6e4737e4..0a90f85c 100644
---- a/t/io_uring.c
-+++ b/t/io_uring.c
-@@ -35,6 +35,7 @@
- #include "../lib/rand.h"
- #include "../minmax.h"
- #include "../os/linux/io_uring.h"
-+#include "../engines/nvme.h"
-
- struct io_sq_ring {
- unsigned *head;
-@@ -67,6 +68,8 @@ struct file {
- unsigned long max_size;
- unsigned long cur_off;
- unsigned pending_ios;
-+ unsigned int nsid; /* nsid field required for nvme-passthrough */
-+ unsigned int lba_shift; /* lba_shift field required for nvme-passthrough */
- int real_fd;
- int fixed_fd;
- int fileno;
-@@ -139,6 +142,7 @@ static int random_io = 1; /* random or sequential IO */
- static int register_ring = 1; /* register ring */
- static int use_sync = 0; /* use preadv2 */
- static int numa_placement = 0; /* set to node of device */
-+static int pt = 0; /* passthrough I/O or not */
-
- static unsigned long tsc_rate;
-
-@@ -161,6 +165,54 @@ struct io_uring_map_buffers {
- };
- #endif
-
-+static int nvme_identify(int fd, __u32 nsid, enum nvme_identify_cns cns,
-+ enum nvme_csi csi, void *data)
-+{
-+ struct nvme_passthru_cmd cmd = {
-+ .opcode = nvme_admin_identify,
-+ .nsid = nsid,
-+ .addr = (__u64)(uintptr_t)data,
-+ .data_len = NVME_IDENTIFY_DATA_SIZE,
-+ .cdw10 = cns,
-+ .cdw11 = csi << NVME_IDENTIFY_CSI_SHIFT,
-+ .timeout_ms = NVME_DEFAULT_IOCTL_TIMEOUT,
-+ };
-+
-+ return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
-+}
-+
-+static int nvme_get_info(int fd, __u32 *nsid, __u32 *lba_sz, __u64 *nlba)
-+{
-+ struct nvme_id_ns ns;
-+ int namespace_id;
-+ int err;
-+
-+ namespace_id = ioctl(fd, NVME_IOCTL_ID);
-+ if (namespace_id < 0) {
-+ fprintf(stderr, "error failed to fetch namespace-id\n");
-+ close(fd);
-+ return -errno;
-+ }
-+
-+ /*
-+ * Identify namespace to get namespace-id, namespace size in LBA's
-+ * and LBA data size.
-+ */
-+ err = nvme_identify(fd, namespace_id, NVME_IDENTIFY_CNS_NS,
-+ NVME_CSI_NVM, &ns);
-+ if (err) {
-+ fprintf(stderr, "error failed to fetch identify namespace\n");
-+ close(fd);
-+ return err;
-+ }
-+
-+ *nsid = namespace_id;
-+ *lba_sz = 1 << ns.lbaf[(ns.flbas & 0x0f)].ds;
-+ *nlba = ns.nsze;
-+
-+ return 0;
-+}
-+
- static unsigned long cycles_to_nsec(unsigned long cycles)
- {
- uint64_t val;
-@@ -520,6 +572,65 @@ static void init_io(struct submitter *s, unsigned index)
- sqe->user_data |= ((uint64_t)s->clock_index << 32);
- }
-
-+static void init_io_pt(struct submitter *s, unsigned index)
-+{
-+ struct io_uring_sqe *sqe = &s->sqes[index << 1];
-+ unsigned long offset;
-+ struct file *f;
-+ struct nvme_uring_cmd *cmd;
-+ unsigned long long slba;
-+ unsigned long long nlb;
-+ long r;
-+
-+ if (s->nr_files == 1) {
-+ f = &s->files[0];
-+ } else {
-+ f = &s->files[s->cur_file];
-+ if (f->pending_ios >= file_depth(s)) {
-+ s->cur_file++;
-+ if (s->cur_file == s->nr_files)
-+ s->cur_file = 0;
-+ f = &s->files[s->cur_file];
-+ }
-+ }
-+ f->pending_ios++;
-+
-+ if (random_io) {
-+ r = __rand64(&s->rand_state);
-+ offset = (r % (f->max_blocks - 1)) * bs;
-+ } else {
-+ offset = f->cur_off;
-+ f->cur_off += bs;
-+ if (f->cur_off + bs > f->max_size)
-+ f->cur_off = 0;
-+ }
-+
-+ if (register_files) {
-+ sqe->fd = f->fixed_fd;
-+ sqe->flags = IOSQE_FIXED_FILE;
-+ } else {
-+ sqe->fd = f->real_fd;
-+ sqe->flags = 0;
-+ }
-+ sqe->opcode = IORING_OP_URING_CMD;
-+ sqe->user_data = (unsigned long) f->fileno;
-+ if (stats)
-+ sqe->user_data |= ((unsigned long)s->clock_index << 32);
-+ sqe->cmd_op = NVME_URING_CMD_IO;
-+ slba = offset >> f->lba_shift;
-+ nlb = (bs >> f->lba_shift) - 1;
-+ cmd = (struct nvme_uring_cmd *)&sqe->cmd;
-+ /* cdw10 and cdw11 represent starting slba*/
-+ cmd->cdw10 = slba & 0xffffffff;
-+ cmd->cdw11 = slba >> 32;
-+ /* cdw12 represent number of lba to be read*/
-+ cmd->cdw12 = nlb;
-+ cmd->addr = (unsigned long) s->iovecs[index].iov_base;
-+ cmd->data_len = bs;
-+ cmd->nsid = f->nsid;
-+ cmd->opcode = 2;
-+}
-+
- static int prep_more_ios_uring(struct submitter *s, int max_ios)
- {
- struct io_sq_ring *ring = &s->sq_ring;
-@@ -532,7 +643,10 @@ static int prep_more_ios_uring(struct submitter *s, int max_ios)
- break;
-
- index = tail & sq_ring_mask;
-- init_io(s, index);
-+ if (pt)
-+ init_io_pt(s, index);
-+ else
-+ init_io(s, index);
- ring->array[index] = index;
- prepped++;
- tail = next_tail;
-@@ -549,7 +663,29 @@ static int get_file_size(struct file *f)
-
- if (fstat(f->real_fd, &st) < 0)
- return -1;
-- if (S_ISBLK(st.st_mode)) {
-+ if (pt) {
-+ __u64 nlba;
-+ __u32 lbs;
-+ int ret;
-+
-+ if (!S_ISCHR(st.st_mode)) {
-+ fprintf(stderr, "passthrough works with only nvme-ns "
-+ "generic devices (/dev/ngXnY)\n");
-+ return -1;
-+ }
-+ ret = nvme_get_info(f->real_fd, &f->nsid, &lbs, &nlba);
-+ if (ret)
-+ return -1;
-+ if ((bs % lbs) != 0) {
-+ printf("error: bs:%d should be a multiple logical_block_size:%d\n",
-+ bs, lbs);
-+ return -1;
-+ }
-+ f->max_blocks = nlba / bs;
-+ f->max_size = nlba;
-+ f->lba_shift = ilog2(lbs);
-+ return 0;
-+ } else if (S_ISBLK(st.st_mode)) {
- unsigned long long bytes;
-
- if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
-@@ -620,6 +756,60 @@ static int reap_events_uring(struct submitter *s)
- return reaped;
- }
-
-+static int reap_events_uring_pt(struct submitter *s)
-+{
-+ struct io_cq_ring *ring = &s->cq_ring;
-+ struct io_uring_cqe *cqe;
-+ unsigned head, reaped = 0;
-+ int last_idx = -1, stat_nr = 0;
-+ unsigned index;
-+ int fileno;
-+
-+ head = *ring->head;
-+ do {
-+ struct file *f;
-+
-+ read_barrier();
-+ if (head == atomic_load_acquire(ring->tail))
-+ break;
-+ index = head & cq_ring_mask;
-+ cqe = &ring->cqes[index << 1];
-+ fileno = cqe->user_data & 0xffffffff;
-+ f = &s->files[fileno];
-+ f->pending_ios--;
-+
-+ if (cqe->res != 0) {
-+ printf("io: unexpected ret=%d\n", cqe->res);
-+ if (polled && cqe->res == -EINVAL)
-+ printf("passthrough doesn't support polled IO\n");
-+ return -1;
-+ }
-+ if (stats) {
-+ int clock_index = cqe->user_data >> 32;
-+
-+ if (last_idx != clock_index) {
-+ if (last_idx != -1) {
-+ add_stat(s, last_idx, stat_nr);
-+ stat_nr = 0;
-+ }
-+ last_idx = clock_index;
-+ }
-+ stat_nr++;
-+ }
-+ reaped++;
-+ head++;
-+ } while (1);
-+
-+ if (stat_nr)
-+ add_stat(s, last_idx, stat_nr);
-+
-+ if (reaped) {
-+ s->inflight -= reaped;
-+ atomic_store_release(ring->head, head);
-+ }
-+ return reaped;
-+}
-+
- static void set_affinity(struct submitter *s)
- {
- #ifdef CONFIG_LIBNUMA
-@@ -697,6 +887,7 @@ static int setup_ring(struct submitter *s)
- struct io_uring_params p;
- int ret, fd;
- void *ptr;
-+ size_t len;
-
- memset(&p, 0, sizeof(p));
-
-@@ -709,6 +900,10 @@ static int setup_ring(struct submitter *s)
- p.sq_thread_cpu = sq_thread_cpu;
- }
- }
-+ if (pt) {
-+ p.flags |= IORING_SETUP_SQE128;
-+ p.flags |= IORING_SETUP_CQE32;
-+ }
-
- fd = io_uring_setup(depth, &p);
- if (fd < 0) {
-@@ -761,11 +956,22 @@ static int setup_ring(struct submitter *s)
- sring->array = ptr + p.sq_off.array;
- sq_ring_mask = *sring->ring_mask;
-
-- s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
-+ if (p.flags & IORING_SETUP_SQE128)
-+ len = 2 * p.sq_entries * sizeof(struct io_uring_sqe);
-+ else
-+ len = p.sq_entries * sizeof(struct io_uring_sqe);
-+ s->sqes = mmap(0, len,
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
- IORING_OFF_SQES);
-
-- ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
-+ if (p.flags & IORING_SETUP_CQE32) {
-+ len = p.cq_off.cqes +
-+ 2 * p.cq_entries * sizeof(struct io_uring_cqe);
-+ } else {
-+ len = p.cq_off.cqes +
-+ p.cq_entries * sizeof(struct io_uring_cqe);
-+ }
-+ ptr = mmap(0, len,
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
- IORING_OFF_CQ_RING);
- cring->head = ptr + p.cq_off.head;
-@@ -855,7 +1061,16 @@ static int submitter_init(struct submitter *s)
- s->plat = NULL;
- nr_batch = 0;
- }
-+ /* perform the expensive command initialization part for passthrough here
-+ * rather than in the fast path
-+ */
-+ if (pt) {
-+ for (i = 0; i < roundup_pow2(depth); i++) {
-+ struct io_uring_sqe *sqe = &s->sqes[i << 1];
-
-+ memset(&sqe->cmd, 0, sizeof(struct nvme_uring_cmd));
-+ }
-+ }
- return nr_batch;
- }
-
-@@ -1111,7 +1326,10 @@ submit:
- do {
- int r;
-
-- r = reap_events_uring(s);
-+ if (pt)
-+ r = reap_events_uring_pt(s);
-+ else
-+ r = reap_events_uring(s);
- if (r == -1) {
- s->finish = 1;
- break;
-@@ -1305,11 +1523,12 @@ static void usage(char *argv, int status)
- " -a <bool> : Use legacy aio, default %d\n"
- " -S <bool> : Use sync IO (preadv2), default %d\n"
- " -X <bool> : Use registered ring %d\n"
-- " -P <bool> : Automatically place on device home node %d\n",
-+ " -P <bool> : Automatically place on device home node %d\n"
-+ " -u <bool> : Use nvme-passthrough I/O, default %d\n",
- argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled,
- fixedbufs, dma_map, register_files, nthreads, !buffered, do_nop,
- stats, runtime == 0 ? "unlimited" : runtime_str, random_io, aio,
-- use_sync, register_ring, numa_placement);
-+ use_sync, register_ring, numa_placement, pt);
- exit(status);
- }
-
-@@ -1368,7 +1587,7 @@ int main(int argc, char *argv[])
- if (!do_nop && argc < 2)
- usage(argv[0], 1);
-
-- while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:N:O:t:T:a:r:D:R:X:S:P:h?")) != -1) {
-+ while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:N:O:t:T:a:r:D:R:X:S:P:u:h?")) != -1) {
- switch (opt) {
- case 'a':
- aio = !!atoi(optarg);
-@@ -1449,6 +1668,9 @@ int main(int argc, char *argv[])
- case 'P':
- numa_placement = !!atoi(optarg);
- break;
-+ case 'u':
-+ pt = !!atoi(optarg);
-+ break;
- case 'h':
- case '?':
- default:
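The least obvious part of init_io_pt() above is turning a byte offset and block size into the NVMe command dwords; a standalone C sketch of just that arithmetic (the offset, block size and lba_shift values are made up; lba_shift = 9 corresponds to 512-byte LBAs):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int lba_shift = 9;                 /* 512-byte logical blocks */
        unsigned long long offset = 1024 * 1024;    /* byte offset of the I/O  */
        unsigned int bs = 4096;                     /* I/O size in bytes       */

        unsigned long long slba = offset >> lba_shift;   /* starting LBA       */
        unsigned long long nlb = (bs >> lba_shift) - 1;  /* 0-based LBA count   */

        /* cdw10/cdw11 carry the low/high halves of slba, cdw12 carries nlb. */
        uint32_t cdw10 = (uint32_t)(slba & 0xffffffff);
        uint32_t cdw11 = (uint32_t)(slba >> 32);
        uint32_t cdw12 = (uint32_t)nlb;

        printf("slba=%llu -> cdw10=0x%x cdw11=0x%x, nlb=%u (cdw12)\n",
               slba, (unsigned)cdw10, (unsigned)cdw11, (unsigned)cdw12);
        return 0;
    }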
diff --git a/meta-oe/recipes-benchmark/fio/fio/0024-t-io_uring-fix-64-bit-cast-on-32-bit-archs.patch b/meta-oe/recipes-benchmark/fio/fio/0024-t-io_uring-fix-64-bit-cast-on-32-bit-archs.patch
deleted file mode 100644
index ba687e0418..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0024-t-io_uring-fix-64-bit-cast-on-32-bit-archs.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 286bed8bc95fbc7d8a1d00b1861037bc215948ee Mon Sep 17 00:00:00 2001
-From: Jens Axboe <axboe@kernel.dk>
-Date: Fri, 26 Aug 2022 07:52:54 -0600
-Subject: [PATCH] t/io_uring: fix 64-bit cast on 32-bit archs
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-gcc complains that:
-
-t/io_uring.c: In function ‘init_io_pt’:
-t/io_uring.c:618:52: error: left shift count >= width of type [-Werror=shift-count-overflow]
- 618 | sqe->user_data |= ((unsigned long)s->clock_index << 32);
- | ^~
-
-we're shifting more than the size of the type. Cast to a 64-bit value
-so that it'll work on 32-bit as well.
-
-Fixes: 7d04588a7663 ("t/io_uring: add support for async-passthru")
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- t/io_uring.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/t/io_uring.c b/t/io_uring.c
-index 0a90f85c..b90bcf78 100644
---- a/t/io_uring.c
-+++ b/t/io_uring.c
-@@ -615,7 +615,7 @@ static void init_io_pt(struct submitter *s, unsigned index)
- sqe->opcode = IORING_OP_URING_CMD;
- sqe->user_data = (unsigned long) f->fileno;
- if (stats)
-- sqe->user_data |= ((unsigned long)s->clock_index << 32);
-+ sqe->user_data |= ((__u64) s->clock_index << 32ULL);
- sqe->cmd_op = NVME_URING_CMD_IO;
- slba = offset >> f->lba_shift;
- nlb = (bs >> f->lba_shift) - 1;
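A compact illustration of the warning being fixed: on a 32-bit target unsigned long is 32 bits wide, so shifting it by 32 is undefined and gcc rejects it under -Werror; widening to a 64-bit type first is the portable form (the clock_index value below is arbitrary):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int clock_index = 3;
        uint64_t user_data = 0x12345678u;

        /* Buggy on 32-bit archs: (unsigned long)clock_index << 32 shifts a
         * 32-bit value by its full width (-Wshift-count-overflow). */

        /* Correct: widen to 64 bits before shifting. */
        user_data |= (uint64_t)clock_index << 32;

        printf("user_data=0x%" PRIx64 "\n", user_data);
        return 0;
    }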
diff --git a/meta-oe/recipes-benchmark/fio/fio/0025-test-add-basic-test-for-io_uring-ioengine.patch b/meta-oe/recipes-benchmark/fio/fio/0025-test-add-basic-test-for-io_uring-ioengine.patch
deleted file mode 100644
index 7472eea628..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0025-test-add-basic-test-for-io_uring-ioengine.patch
+++ /dev/null
@@ -1,91 +0,0 @@
-From c3df3532a507e6d7c66339dee6eb022feab68f59 Mon Sep 17 00:00:00 2001
-From: Vincent Fu <vincent.fu@samsung.com>
-Date: Thu, 25 Aug 2022 12:08:33 -0700
-Subject: [PATCH] test: add basic test for io_uring ioengine
-
-We should have a quick smoke test for the io_uring ioengine to
-automatically detect breakage.
-
-Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
----
- t/jobs/t0018.fio | 9 +++++++++
- t/run-fio-tests.py | 22 ++++++++++++++++++++++
- 2 files changed, 31 insertions(+)
- create mode 100644 t/jobs/t0018.fio
-
-diff --git a/t/jobs/t0018.fio b/t/jobs/t0018.fio
-new file mode 100644
-index 00000000..e2298b1f
---- /dev/null
-+++ b/t/jobs/t0018.fio
-@@ -0,0 +1,9 @@
-+# Expected result: job completes without error
-+# Buggy result: job fails
-+
-+[test]
-+ioengine=io_uring
-+filesize=256K
-+time_based
-+runtime=3s
-+rw=randrw
-diff --git a/t/run-fio-tests.py b/t/run-fio-tests.py
-index 504b7cdb..1e5e9f24 100755
---- a/t/run-fio-tests.py
-+++ b/t/run-fio-tests.py
-@@ -582,6 +582,7 @@ class Requirements(object):
-
- _linux = False
- _libaio = False
-+ _io_uring = False
- _zbd = False
- _root = False
- _zoned_nullb = False
-@@ -605,6 +606,12 @@ class Requirements(object):
- Requirements._zbd = "CONFIG_HAS_BLKZONED" in contents
- Requirements._libaio = "CONFIG_LIBAIO" in contents
-
-+ contents, success = FioJobTest.get_file("/proc/kallsyms")
-+ if not success:
-+ print("Unable to open '/proc/kallsyms' to probe for io_uring support")
-+ else:
-+ Requirements._io_uring = "io_uring_setup" in contents
-+
- Requirements._root = (os.geteuid() == 0)
- if Requirements._zbd and Requirements._root:
- try:
-@@ -627,6 +634,7 @@ class Requirements(object):
-
- req_list = [Requirements.linux,
- Requirements.libaio,
-+ Requirements.io_uring,
- Requirements.zbd,
- Requirements.root,
- Requirements.zoned_nullb,
-@@ -648,6 +656,11 @@ class Requirements(object):
- """Is libaio available?"""
- return Requirements._libaio, "libaio required"
-
-+ @classmethod
-+ def io_uring(cls):
-+ """Is io_uring available?"""
-+ return Requirements._io_uring, "io_uring required"
-+
- @classmethod
- def zbd(cls):
- """Is ZBD support available?"""
-@@ -867,6 +880,15 @@ TEST_LIST = [
- 'output_format': 'json',
- 'requirements': [Requirements.not_windows],
- },
-+ {
-+ 'test_id': 18,
-+ 'test_class': FioJobTest,
-+ 'job': 't0018.fio',
-+ 'success': SUCCESS_DEFAULT,
-+ 'pre_job': None,
-+ 'pre_success': None,
-+ 'requirements': [Requirements.linux, Requirements.io_uring],
-+ },
- {
- 'test_id': 1000,
- 'test_class': FioExeTest,
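The Requirements probe added above simply looks for the io_uring_setup symbol in /proc/kallsyms; the same check written as a tiny C program, for contexts where the Python harness is not available (path and symbol name follow the test script):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[512];
        FILE *fp = fopen("/proc/kallsyms", "r");
        int found = 0;

        if (!fp) {
            perror("fopen /proc/kallsyms");
            return 2;
        }
        while (fgets(line, sizeof(line), fp)) {
            if (strstr(line, "io_uring_setup")) {
                found = 1;
                break;
            }
        }
        fclose(fp);
        printf("io_uring %s\n", found ? "available" : "not detected");
        return found ? 0 : 1;
    }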
diff --git a/meta-oe/recipes-benchmark/fio/fio/0026-t-io_uring-remove-duplicate-definition-of-gettid.patch b/meta-oe/recipes-benchmark/fio/fio/0026-t-io_uring-remove-duplicate-definition-of-gettid.patch
deleted file mode 100644
index c75ec36d18..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0026-t-io_uring-remove-duplicate-definition-of-gettid.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-From 575bea1c9b642a11ac5b7162aea6a9f905c60318 Mon Sep 17 00:00:00 2001
-From: Jens Axboe <axboe@kernel.dk>
-Date: Fri, 26 Aug 2022 14:14:44 -0600
-Subject: [PATCH] t/io_uring: remove duplicate definition of gettid()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-With a recent change, we now include os.h through nvme.h, and this
-can cause a duplicate gettid() definition:
-
-t/io_uring.c:499:12: error: redefinition of ‘gettid’
- static int gettid(void)
- ^~~~~~
-In file included from t/../engines/../os/os.h:39,
- from t/../engines/../thread_options.h:5,
- from t/../engines/../fio.h:18,
- from t/../engines/nvme.h:10,
- from t/io_uring.c:38:
-t/../engines/../os/os-linux.h:147:19: note: previous definition of
-‘gettid’ was here
- static inline int gettid(void)
- ^~~~~~
-
-Include os.h directly to make it clear that we use it, and remove the
-gettid() definition from io_uring.c.
-
-Reported-by: Yi Zhang <yi.zhang@redhat.com>
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- t/io_uring.c | 8 +-------
- 1 file changed, 1 insertion(+), 7 deletions(-)
-
-diff --git a/t/io_uring.c b/t/io_uring.c
-index b90bcf78..e8e41796 100644
---- a/t/io_uring.c
-+++ b/t/io_uring.c
-@@ -30,6 +30,7 @@
- #include <sched.h>
-
- #include "../arch/arch.h"
-+#include "../os/os.h"
- #include "../lib/types.h"
- #include "../lib/roundup.h"
- #include "../lib/rand.h"
-@@ -495,13 +496,6 @@ static int io_uring_enter(struct submitter *s, unsigned int to_submit,
- #endif
- }
-
--#ifndef CONFIG_HAVE_GETTID
--static int gettid(void)
--{
-- return syscall(__NR_gettid);
--}
--#endif
--
- static unsigned file_depth(struct submitter *s)
- {
- return (depth + s->nr_files - 1) / s->nr_files;
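The convention that avoids this kind of clash is to guard local fallbacks behind a configure-time symbol; a trimmed, illustrative sketch (CONFIG_HAVE_GETTID is fio's configure define, my_gettid is a made-up helper name):

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Provide a fallback only when configure did not detect a libc gettid(). */
    #ifndef CONFIG_HAVE_GETTID
    static pid_t my_gettid(void)
    {
        return (pid_t)syscall(SYS_gettid);
    }
    #define gettid() my_gettid()
    #endif

    int main(void)
    {
        printf("tid=%d\n", (int)gettid());
        return 0;
    }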
diff --git a/meta-oe/recipes-benchmark/fio/fio/0027-test-add-some-tests-for-seq-and-rand-offsets.patch b/meta-oe/recipes-benchmark/fio/fio/0027-test-add-some-tests-for-seq-and-rand-offsets.patch
deleted file mode 100644
index 2abd449b8b..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0027-test-add-some-tests-for-seq-and-rand-offsets.patch
+++ /dev/null
@@ -1,157 +0,0 @@
-From 1eef6cddda678b0d1a120970bc4cc961c285c81e Mon Sep 17 00:00:00 2001
-From: Vincent Fu <vincent.fu@samsung.com>
-Date: Mon, 29 Aug 2022 11:30:30 -0400
-Subject: [PATCH] test: add some tests for seq and rand offsets
-
-t/jobs/t0019.fio is a seq read test
-t/jobs/t0020.fio is a rand read test
-
-We don't have any automated tests which make sure that sequential access
-patterns are actually sequential and that random access patterns are not
-sequential. Add these two tests to help detect the possibility that
-these features could break.
-
-Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
----
- t/jobs/t0019.fio | 10 ++++++
- t/jobs/t0020.fio | 11 ++++++
- t/run-fio-tests.py | 84 ++++++++++++++++++++++++++++++++++++++++++++++
- 3 files changed, 105 insertions(+)
- create mode 100644 t/jobs/t0019.fio
- create mode 100644 t/jobs/t0020.fio
-
-diff --git a/t/jobs/t0019.fio b/t/jobs/t0019.fio
-new file mode 100644
-index 00000000..b60d27d2
---- /dev/null
-+++ b/t/jobs/t0019.fio
-@@ -0,0 +1,10 @@
-+# Expected result: offsets are accessed sequentially and all offsets are read
-+# Buggy result: offsets are not accessed sequentially and one or more offsets are missed
-+# run with --debug=io or logging to see which offsets are accessed
-+
-+[test]
-+ioengine=null
-+filesize=1M
-+write_bw_log=test
-+per_job_logs=0
-+log_offset=1
-diff --git a/t/jobs/t0020.fio b/t/jobs/t0020.fio
-new file mode 100644
-index 00000000..1c1c5166
---- /dev/null
-+++ b/t/jobs/t0020.fio
-@@ -0,0 +1,11 @@
-+# Expected result: offsets are not accessed sequentially and all offsets are touched
-+# Buggy result: offsets are accessed sequentially and one or more offsets are missed
-+# run with --debug=io or logging to see which offsets are read
-+
-+[test]
-+ioengine=null
-+filesize=1M
-+rw=randread
-+write_bw_log=test
-+per_job_logs=0
-+log_offset=1
-diff --git a/t/run-fio-tests.py b/t/run-fio-tests.py
-index 1e5e9f24..78f43521 100755
---- a/t/run-fio-tests.py
-+++ b/t/run-fio-tests.py
-@@ -548,6 +548,72 @@ class FioJobTest_t0015(FioJobTest):
- self.passed = False
-
-
-+class FioJobTest_t0019(FioJobTest):
-+ """Test consists of fio test job t0019
-+ Confirm that all offsets were touched sequentially"""
-+
-+ def check_result(self):
-+ super(FioJobTest_t0019, self).check_result()
-+
-+ bw_log_filename = os.path.join(self.test_dir, "test_bw.log")
-+ file_data, success = self.get_file(bw_log_filename)
-+ log_lines = file_data.split('\n')
-+
-+ prev = -4096
-+ for line in log_lines:
-+ if len(line.strip()) == 0:
-+ continue
-+ cur = int(line.split(',')[4])
-+ if cur - prev != 4096:
-+ self.passed = False
-+ self.failure_reason = "offsets {0}, {1} not sequential".format(prev, cur)
-+ return
-+ prev = cur
-+
-+ if cur/4096 != 255:
-+ self.passed = False
-+ self.failure_reason = "unexpected last offset {0}".format(cur)
-+
-+
-+class FioJobTest_t0020(FioJobTest):
-+ """Test consists of fio test job t0020
-+ Confirm that almost all offsets were touched non-sequentially"""
-+
-+ def check_result(self):
-+ super(FioJobTest_t0020, self).check_result()
-+
-+ bw_log_filename = os.path.join(self.test_dir, "test_bw.log")
-+ file_data, success = self.get_file(bw_log_filename)
-+ log_lines = file_data.split('\n')
-+
-+ seq_count = 0
-+ offsets = set()
-+
-+ prev = int(log_lines[0].split(',')[4])
-+ for line in log_lines[1:]:
-+ offsets.add(prev/4096)
-+ if len(line.strip()) == 0:
-+ continue
-+ cur = int(line.split(',')[4])
-+ if cur - prev == 4096:
-+ seq_count += 1
-+ prev = cur
-+
-+ # 10 is an arbitrary threshold
-+ if seq_count > 10:
-+ self.passed = False
-+ self.failure_reason = "too many ({0}) consecutive offsets".format(seq_count)
-+
-+ if len(offsets) != 256:
-+ self.passed = False
-+ self.failure_reason += " number of offsets is {0} instead of 256".format(len(offsets))
-+
-+ for i in range(256):
-+ if not i in offsets:
-+ self.passed = False
-+ self.failure_reason += " missing offset {0}".format(i*4096)
-+
-+
- class FioJobTest_iops_rate(FioJobTest):
- """Test consists of fio test job t0009
- Confirm that job0 iops == 1000
-@@ -889,6 +955,24 @@ TEST_LIST = [
- 'pre_success': None,
- 'requirements': [Requirements.linux, Requirements.io_uring],
- },
-+ {
-+ 'test_id': 19,
-+ 'test_class': FioJobTest_t0019,
-+ 'job': 't0019.fio',
-+ 'success': SUCCESS_DEFAULT,
-+ 'pre_job': None,
-+ 'pre_success': None,
-+ 'requirements': [],
-+ },
-+ {
-+ 'test_id': 20,
-+ 'test_class': FioJobTest_t0020,
-+ 'job': 't0020.fio',
-+ 'success': SUCCESS_DEFAULT,
-+ 'pre_job': None,
-+ 'pre_success': None,
-+ 'requirements': [],
-+ },
- {
- 'test_id': 1000,
- 'test_class': FioExeTest,
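For reference, the sequential check in FioJobTest_t0019 reduces to walking the fifth comma-separated field of test_bw.log (the offset, since the job sets log_offset=1) and verifying 4096-byte steps; an equivalent standalone C loop (log file name and block size follow the job file):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char line[256];
        long prev = -4096, cur = -4096;
        FILE *fp = fopen("test_bw.log", "r");

        if (!fp) {
            perror("fopen test_bw.log");
            return 2;
        }
        while (fgets(line, sizeof(line), fp)) {
            char *field = line;

            /* with log_offset=1 the offset is the 5th comma-separated column */
            for (int i = 0; i < 4 && field; i++) {
                field = strchr(field, ',');
                if (field)
                    field++;
            }
            if (!field)
                continue;                    /* blank or malformed line */

            cur = strtol(field, NULL, 10);
            if (cur - prev != 4096) {
                printf("offsets %ld and %ld are not sequential\n", prev, cur);
                fclose(fp);
                return 1;
            }
            prev = cur;
        }
        fclose(fp);
        printf("all offsets sequential, last offset=%ld\n", cur);
        return 0;
    }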
diff --git a/meta-oe/recipes-benchmark/fio/fio/0028-test-use-Ubuntu-22.04-for-64-bit-tests.patch b/meta-oe/recipes-benchmark/fio/fio/0028-test-use-Ubuntu-22.04-for-64-bit-tests.patch
deleted file mode 100644
index b471d9c325..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0028-test-use-Ubuntu-22.04-for-64-bit-tests.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From abfe30b1fe8118a4ff935bd7cb03243329eba4b8 Mon Sep 17 00:00:00 2001
-From: Vincent Fu <vincent.fu@samsung.com>
-Date: Mon, 29 Aug 2022 14:24:16 -0400
-Subject: [PATCH] test: use Ubuntu 22.04 for 64-bit tests
-
-On 22.04 there was a conflict among libunwind-14-dev, libunwind-dev, and
-libunwind8 that was resolved by removing libunwind-14-dev.
-
-The 32-bit Ubuntu setup steps require more attention to get them to work
-on 22.04. Stay on 20.04 for now and figure it out later.
-
-Starting pkgProblemResolver with broken count: 1
-Starting 2 pkgProblemResolver with broken count: 1
-Investigating (0) libunwind-14-dev:amd64 < 1:14.0.0-1ubuntu1 @ii K Ib >
-Broken libunwind-14-dev:amd64 Breaks on libunwind-dev:amd64 < none -> 1.3.2-2build2 @un puN >
- Considering libunwind-dev:amd64 -1 as a solution to libunwind-14-dev:amd64 2
-Done
-Some packages could not be installed. This may mean that you have
-requested an impossible situation or if you are using the unstable
-distribution that some required packages have not yet been created
-or been moved out of Incoming.
-The following information may help to resolve the situation:
-
-The following packages have unmet dependencies:
- libunwind-14-dev : Breaks: libunwind-dev but 1.3.2-2build2 is to be installed
-E: Error, pkgProblemResolver::Resolve generated breaks, this may be caused by held packages.
-
-Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
----
- .github/workflows/ci.yml | 6 +++---
- ci/actions-install.sh | 2 ++
- 2 files changed, 5 insertions(+), 3 deletions(-)
-
-diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
-index 650366b2..85104e5a 100644
---- a/.github/workflows/ci.yml
-+++ b/.github/workflows/ci.yml
-@@ -18,10 +18,10 @@ jobs:
- - android
- include:
- - build: linux-gcc
-- os: ubuntu-20.04
-+ os: ubuntu-22.04
- cc: gcc
- - build: linux-clang
-- os: ubuntu-20.04
-+ os: ubuntu-22.04
- cc: clang
- - build: macos
- os: macos-11
-@@ -29,7 +29,7 @@ jobs:
- os: ubuntu-20.04
- arch: i686
- - build: android
-- os: ubuntu-20.04
-+ os: ubuntu-22.04
- arch: aarch64-linux-android32
-
- env:
-diff --git a/ci/actions-install.sh b/ci/actions-install.sh
-index b5c4198f..7017de2a 100755
---- a/ci/actions-install.sh
-+++ b/ci/actions-install.sh
-@@ -54,6 +54,8 @@ DPKGCFG
- libtcmalloc-minimal4
- nvidia-cuda-dev
- )
-+ echo "Removing libunwind-14-dev because of conflicts with libunwind-dev"
-+ sudo apt remove -y libunwind-14-dev
- ;;
- esac
-
diff --git a/meta-oe/recipes-benchmark/fio/fio/0029-test-get-32-bit-Ubuntu-22.04-build-working.patch b/meta-oe/recipes-benchmark/fio/fio/0029-test-get-32-bit-Ubuntu-22.04-build-working.patch
deleted file mode 100644
index 163ebf3b33..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0029-test-get-32-bit-Ubuntu-22.04-build-working.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-From aa1075ba2ff300e4017bd7813423f63b1fbc325f Mon Sep 17 00:00:00 2001
-From: Vincent Fu <vincent.fu@samsung.com>
-Date: Mon, 29 Aug 2022 15:15:56 -0400
-Subject: [PATCH] test: get 32-bit Ubuntu 22.04 build working
-
-Ubuntu 22.04 no longer has i386 builds for the packages libibverbs and
-librdmacm. So stop trying to install those packages for the 32-bit
-build.
-
-Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
----
- .github/workflows/ci.yml | 2 +-
- ci/actions-install.sh | 11 ++++-------
- 2 files changed, 5 insertions(+), 8 deletions(-)
-
-diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
-index 85104e5a..bdc4db85 100644
---- a/.github/workflows/ci.yml
-+++ b/.github/workflows/ci.yml
-@@ -26,7 +26,7 @@ jobs:
- - build: macos
- os: macos-11
- - build: linux-i686-gcc
-- os: ubuntu-20.04
-+ os: ubuntu-22.04
- arch: i686
- - build: android
- os: ubuntu-22.04
-diff --git a/ci/actions-install.sh b/ci/actions-install.sh
-index 7017de2a..c209a089 100755
---- a/ci/actions-install.sh
-+++ b/ci/actions-install.sh
-@@ -23,26 +23,21 @@ DPKGCFG
- libcunit1-dev
- libcurl4-openssl-dev
- libfl-dev
-- libibverbs-dev
- libnuma-dev
-- librdmacm-dev
- libnfs-dev
- valgrind
- )
- case "${CI_TARGET_ARCH}" in
- "i686")
- sudo dpkg --add-architecture i386
-- opts="--allow-downgrades"
- pkgs=("${pkgs[@]/%/:i386}")
- pkgs+=(
- gcc-multilib
- pkg-config:i386
- zlib1g-dev:i386
-- libpcre2-8-0=10.34-7
- )
- ;;
- "x86_64")
-- opts=""
- pkgs+=(
- libglusterfs-dev
- libgoogle-perftools-dev
-@@ -53,6 +48,8 @@ DPKGCFG
- librbd-dev
- libtcmalloc-minimal4
- nvidia-cuda-dev
-+ libibverbs-dev
-+ librdmacm-dev
- )
- echo "Removing libunwind-14-dev because of conflicts with libunwind-dev"
- sudo apt remove -y libunwind-14-dev
-@@ -68,8 +65,8 @@ DPKGCFG
-
- echo "Updating APT..."
- sudo apt-get -qq update
-- echo "Installing packages..."
-- sudo apt-get install "$opts" -o APT::Immediate-Configure=false --no-install-recommends -qq -y "${pkgs[@]}"
-+ echo "Installing packages... ${pkgs[@]}"
-+ sudo apt-get install -o APT::Immediate-Configure=false --no-install-recommends -qq -y "${pkgs[@]}"
- }
-
- install_linux() {
diff --git a/meta-oe/recipes-benchmark/fio/fio/0030-test-add-tests-for-lfsr-and-norandommap.patch b/meta-oe/recipes-benchmark/fio/fio/0030-test-add-tests-for-lfsr-and-norandommap.patch
deleted file mode 100644
index fa1d49e145..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0030-test-add-tests-for-lfsr-and-norandommap.patch
+++ /dev/null
@@ -1,143 +0,0 @@
-From 8d2b6305a6d0497bc6d78832be256380b1691694 Mon Sep 17 00:00:00 2001
-From: Vincent Fu <vincent.fu@samsung.com>
-Date: Tue, 30 Aug 2022 09:59:55 -0400
-Subject: [PATCH] test: add tests for lfsr and norandommap
-
-t0021 checks whether the lfsr random generator actually touches every
-offset.
-
-t0022 checks whether fio touches offsets more than once when
-norandommap=1.
-
-We should have automated tests for basic functionality to detect
-problems early.
-
-Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
----
- t/jobs/t0021.fio | 15 +++++++++++++
- t/jobs/t0022.fio | 13 +++++++++++
- t/run-fio-tests.py | 55 +++++++++++++++++++++++++++++++++++++++++++++-
- 3 files changed, 82 insertions(+), 1 deletion(-)
- create mode 100644 t/jobs/t0021.fio
- create mode 100644 t/jobs/t0022.fio
-
-diff --git a/t/jobs/t0021.fio b/t/jobs/t0021.fio
-new file mode 100644
-index 00000000..47fbae71
---- /dev/null
-+++ b/t/jobs/t0021.fio
-@@ -0,0 +1,15 @@
-+# make sure the lfsr random generator actually does touch all the offsets
-+#
-+# Expected result: offsets are not accessed sequentially and all offsets are touched
-+# Buggy result: offsets are accessed sequentially and one or more offsets are missed
-+# run with --debug=io or logging to see which offsets are read
-+
-+[test]
-+ioengine=null
-+filesize=1M
-+rw=randread
-+write_bw_log=test
-+per_job_logs=0
-+log_offset=1
-+norandommap=1
-+random_generator=lfsr
-diff --git a/t/jobs/t0022.fio b/t/jobs/t0022.fio
-new file mode 100644
-index 00000000..2324571e
---- /dev/null
-+++ b/t/jobs/t0022.fio
-@@ -0,0 +1,13 @@
-+# make sure that when we enable norandommap we touch some offsets more than once
-+#
-+# Expected result: at least one offset is touched more than once
-+# Buggy result: each offset is touched only once
-+
-+[test]
-+ioengine=null
-+filesize=1M
-+rw=randread
-+write_bw_log=test
-+per_job_logs=0
-+log_offset=1
-+norandommap=1
-diff --git a/t/run-fio-tests.py b/t/run-fio-tests.py
-index 78f43521..47823761 100755
---- a/t/run-fio-tests.py
-+++ b/t/run-fio-tests.py
-@@ -576,7 +576,7 @@ class FioJobTest_t0019(FioJobTest):
-
-
- class FioJobTest_t0020(FioJobTest):
-- """Test consists of fio test job t0020
-+ """Test consists of fio test jobs t0020 and t0021
- Confirm that almost all offsets were touched non-sequentially"""
-
- def check_result(self):
-@@ -614,6 +614,41 @@ class FioJobTest_t0020(FioJobTest):
- self.failure_reason += " missing offset {0}".format(i*4096)
-
-
-+class FioJobTest_t0022(FioJobTest):
-+ """Test consists of fio test job t0022"""
-+
-+ def check_result(self):
-+ super(FioJobTest_t0022, self).check_result()
-+
-+ bw_log_filename = os.path.join(self.test_dir, "test_bw.log")
-+ file_data, success = self.get_file(bw_log_filename)
-+ log_lines = file_data.split('\n')
-+
-+ filesize = 1024*1024
-+ bs = 4096
-+ seq_count = 0
-+ offsets = set()
-+
-+ prev = int(log_lines[0].split(',')[4])
-+ for line in log_lines[1:]:
-+ offsets.add(prev/bs)
-+ if len(line.strip()) == 0:
-+ continue
-+ cur = int(line.split(',')[4])
-+ if cur - prev == bs:
-+ seq_count += 1
-+ prev = cur
-+
-+ # 10 is an arbitrary threshold
-+ if seq_count > 10:
-+ self.passed = False
-+ self.failure_reason = "too many ({0}) consecutive offsets".format(seq_count)
-+
-+ if len(offsets) == filesize/bs:
-+ self.passed = False
-+ self.failure_reason += " no duplicate offsets found with norandommap=1".format(len(offsets))
-+
-+
- class FioJobTest_iops_rate(FioJobTest):
- """Test consists of fio test job t0009
- Confirm that job0 iops == 1000
-@@ -973,6 +1008,24 @@ TEST_LIST = [
- 'pre_success': None,
- 'requirements': [],
- },
-+ {
-+ 'test_id': 21,
-+ 'test_class': FioJobTest_t0020,
-+ 'job': 't0021.fio',
-+ 'success': SUCCESS_DEFAULT,
-+ 'pre_job': None,
-+ 'pre_success': None,
-+ 'requirements': [],
-+ },
-+ {
-+ 'test_id': 22,
-+ 'test_class': FioJobTest_t0022,
-+ 'job': 't0022.fio',
-+ 'success': SUCCESS_DEFAULT,
-+ 'pre_job': None,
-+ 'pre_success': None,
-+ 'requirements': [],
-+ },
- {
- 'test_id': 1000,
- 'test_class': FioExeTest,
diff --git a/meta-oe/recipes-benchmark/fio/fio/0031-backend-revert-bad-memory-leak-fix.patch b/meta-oe/recipes-benchmark/fio/fio/0031-backend-revert-bad-memory-leak-fix.patch
deleted file mode 100644
index 335798cea7..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0031-backend-revert-bad-memory-leak-fix.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From c060732180c981712f9a6fb7108c28a3c301c2c3 Mon Sep 17 00:00:00 2001
-From: Jens Axboe <axboe@kernel.dk>
-Date: Tue, 30 Aug 2022 10:48:18 -0600
-Subject: [PATCH] backend: revert bad memory leak fix
-
-This essentially reverts the commit mentioned in the fixes line, as it
-causes crashes with using a trigger timeout + command.
-
-Fixes: 807473c36e10 ("fixed memory leak detected by ASAN")
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- backend.c | 5 -----
- 1 file changed, 5 deletions(-)
-
-diff --git a/backend.c b/backend.c
-index 375a23e4..fe614f6e 100644
---- a/backend.c
-+++ b/backend.c
-@@ -2451,10 +2451,8 @@ reap:
- strerror(ret));
- } else {
- pid_t pid;
-- struct fio_file **files;
- void *eo;
- dprint(FD_PROCESS, "will fork\n");
-- files = td->files;
- eo = td->eo;
- read_barrier();
- pid = fork();
-@@ -2465,9 +2463,6 @@ reap:
- _exit(ret);
- } else if (i == fio_debug_jobno)
- *fio_debug_jobp = pid;
-- // freeing previously allocated memory for files
-- // this memory freed MUST NOT be shared between processes, only the pointer itself may be shared within TD
-- free(files);
- free(eo);
- free(fd);
- fd = NULL;
diff --git a/meta-oe/recipes-benchmark/fio/fio/0032-Fio-3.32.patch b/meta-oe/recipes-benchmark/fio/fio/0032-Fio-3.32.patch
deleted file mode 100644
index 6aa86d1fcf..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio/0032-Fio-3.32.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-From aaad03c6c8a9ef8cc0507a356a9fa2372e1f611b Mon Sep 17 00:00:00 2001
-From: Jens Axboe <axboe@kernel.dk>
-Date: Tue, 30 Aug 2022 10:51:13 -0600
-Subject: [PATCH] Fio 3.32
-
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
----
- FIO-VERSION-GEN | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/FIO-VERSION-GEN b/FIO-VERSION-GEN
-index 72630dd0..db073818 100755
---- a/FIO-VERSION-GEN
-+++ b/FIO-VERSION-GEN
-@@ -1,7 +1,7 @@
- #!/bin/sh
-
- GVF=FIO-VERSION-FILE
--DEF_VER=fio-3.31
-+DEF_VER=fio-3.32
-
- LF='
- '
diff --git a/meta-oe/recipes-benchmark/fio/fio_2022.bb b/meta-oe/recipes-benchmark/fio/fio_2022.bb
deleted file mode 100644
index 108b2b5c10..0000000000
--- a/meta-oe/recipes-benchmark/fio/fio_2022.bb
+++ /dev/null
@@ -1,77 +0,0 @@
-SUMMARY = "Filesystem and hardware benchmark and stress tool"
-DESCRIPTION = "fio is an I/O tool meant to be used both for benchmark and \
-stress/hardware verification. It has support for a number of I/O engines, \
-I/O priorities (for newer Linux kernels), rate I/O, forked or threaded jobs, \
-and much more. It can work on block devices as well as files. fio accepts \
-job descriptions in a simple-to-understand text format. Several example job \
-files are included. fio displays all sorts of I/O performance information."
-HOMEPAGE = "http://freecode.com/projects/fio"
-SECTION = "console/tests"
-LICENSE = "GPL-2.0-only"
-LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
-
-DEPENDS = "libaio zlib coreutils-native"
-DEPENDS += "${@bb.utils.contains('MACHINE_FEATURES', 'pmem', 'pmdk', '', d)}"
-RDEPENDS:${PN} = "python3-core bash"
-
-PACKAGECONFIG_NUMA = "numa"
-# ARM does not currently support NUMA
-PACKAGECONFIG_NUMA:arm = ""
-PACKAGECONFIG_NUMA:armeb = ""
-
-PACKAGECONFIG ??= "${PACKAGECONFIG_NUMA}"
-PACKAGECONFIG[numa] = ",--disable-numa,numactl"
-
-SRCREV = "6e44f31b9241cdc56d0857fb10ddb2ec40faa541"
-SRC_URI = "git://git.kernel.dk/fio.git;branch=master \
- file://0001-Fio-3.31.patch \
- file://0002-lib-rand-Enhance-__fill_random_buf-using-the-multi-r.patch \
- file://0003-lib-rand-get-rid-of-unused-MAX_SEED_BUCKETS.patch \
- file://0004-ioengines-merge-filecreate-filestat-filedelete-engin.patch \
- file://0005-engines-http-Add-storage-class-option-for-s3.patch \
- file://0006-engines-http-Add-s3-crypto-options-for-s3.patch \
- file://0007-doc-Add-usage-and-example-about-s3-storage-class-and.patch \
- file://0008-README-link-to-GitHub-releases-for-Windows.patch \
- file://0009-engines-xnvme-fix-segfault-issue-with-xnvme-ioengine.patch \
- file://0010-doc-update-fio-doc-for-xnvme-engine.patch \
- file://0011-test-add-latency-test-using-posixaio-ioengine.patch \
- file://0012-test-fix-hash-for-t0016.patch \
- file://0013-doc-get-rid-of-trailing-whitespace.patch \
- file://0014-doc-clarify-that-I-O-errors-may-go-unnoticed-without.patch \
- file://0015-Revert-Minor-style-fixups.patch \
- file://0016-Revert-Fix-multithread-issues-when-operating-on-a-si.patch \
- file://0017-Add-wait-for-handling-SIGBREAK.patch \
- file://0018-engines-io_uring-pass-back-correct-error-value-when-.patch \
- file://0019-Enable-CPU-affinity-support-on-Android.patch \
- file://0020-io_uring-Replace-pthread_self-with-s-tid.patch \
- file://0021-engines-io_uring-delete-debug-code.patch \
- file://0022-t-io_uring-prep-for-including-engines-nvme.h-in-t-io.patch \
- file://0023-t-io_uring-add-support-for-async-passthru.patch \
- file://0024-t-io_uring-fix-64-bit-cast-on-32-bit-archs.patch \
- file://0025-test-add-basic-test-for-io_uring-ioengine.patch \
- file://0026-t-io_uring-remove-duplicate-definition-of-gettid.patch \
- file://0027-test-add-some-tests-for-seq-and-rand-offsets.patch \
- file://0028-test-use-Ubuntu-22.04-for-64-bit-tests.patch \
- file://0029-test-get-32-bit-Ubuntu-22.04-build-working.patch \
- file://0030-test-add-tests-for-lfsr-and-norandommap.patch \
- file://0031-backend-revert-bad-memory-leak-fix.patch \
- file://0032-Fio-3.32.patch \
- "
-
-S = "${WORKDIR}/git"
-
-# avoids build breaks when using no-static-libs.inc
-DISABLE_STATIC = ""
-
-EXTRA_OEMAKE = "CC='${CC}' LDFLAGS='${LDFLAGS}'"
-EXTRA_OECONF = "${@bb.utils.contains('MACHINE_FEATURES', 'x86', '--disable-optimizations', '', d)}"
-
-do_configure() {
- ./configure ${EXTRA_OECONF}
-}
-
-do_install() {
- oe_runmake install DESTDIR=${D} prefix=${prefix} mandir=${mandir}
- install -d ${D}/${docdir}/${PN}
- cp -R --no-dereference --preserve=mode,links -v ${S}/examples ${D}/${docdir}/${PN}/
-}
diff --git a/meta-oe/recipes-benchmark/fio/fio_3.32.bb b/meta-oe/recipes-benchmark/fio/fio_3.32.bb
new file mode 100644
index 0000000000..90e28340c6
--- /dev/null
+++ b/meta-oe/recipes-benchmark/fio/fio_3.32.bb
@@ -0,0 +1,44 @@
+SUMMARY = "Filesystem and hardware benchmark and stress tool"
+DESCRIPTION = "fio is an I/O tool meant to be used both for benchmark and \
+stress/hardware verification. It has support for a number of I/O engines, \
+I/O priorities (for newer Linux kernels), rate I/O, forked or threaded jobs, \
+and much more. It can work on block devices as well as files. fio accepts \
+job descriptions in a simple-to-understand text format. Several example job \
+files are included. fio displays all sorts of I/O performance information."
+HOMEPAGE = "http://freecode.com/projects/fio"
+SECTION = "console/tests"
+LICENSE = "GPL-2.0-only"
+LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
+
+DEPENDS = "libaio zlib coreutils-native"
+DEPENDS += "${@bb.utils.contains('MACHINE_FEATURES', 'pmem', 'pmdk', '', d)}"
+RDEPENDS:${PN} = "python3-core bash"
+
+PACKAGECONFIG_NUMA = "numa"
+# ARM does not currently support NUMA
+PACKAGECONFIG_NUMA:arm = ""
+PACKAGECONFIG_NUMA:armeb = ""
+
+PACKAGECONFIG ??= "${PACKAGECONFIG_NUMA}"
+PACKAGECONFIG[numa] = ",--disable-numa,numactl"
+
+SRCREV = "db7fc8d864dc4fb607a0379333a0db60431bd649"
+SRC_URI = "git://git.kernel.dk/fio.git;branch=master"
+
+S = "${WORKDIR}/git"
+
+# avoids build breaks when using no-static-libs.inc
+DISABLE_STATIC = ""
+
+EXTRA_OEMAKE = "CC='${CC}' LDFLAGS='${LDFLAGS}'"
+EXTRA_OECONF = "${@bb.utils.contains('MACHINE_FEATURES', 'x86', '--disable-optimizations', '', d)}"
+
+do_configure() {
+ ./configure ${EXTRA_OECONF}
+}
+
+do_install() {
+ oe_runmake install DESTDIR=${D} prefix=${prefix} mandir=${mandir}
+ install -d ${D}/${docdir}/${PN}
+ cp -R --no-dereference --preserve=mode,links -v ${S}/examples ${D}/${docdir}/${PN}/
+}
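
For reference, a minimal sketch of exercising the restored fio_3.32.bb recipe from a Yocto build directory. The conf/local.conf location and the choice of target image are assumptions, not part of this change; the numa PACKAGECONFIG simply mirrors the option declared in the recipe above.

# Illustrative only -- run from an initialized build directory.
# Pull fio into the target image via the standard IMAGE_INSTALL mechanism:
echo 'IMAGE_INSTALL:append = " fio"' >> conf/local.conf

# Optionally enable the numa PACKAGECONFIG declared in the recipe
# (assumes a non-ARM target, matching PACKAGECONFIG_NUMA above):
echo 'PACKAGECONFIG:append:pn-fio = " numa"' >> conf/local.conf

# Build only the recipe to confirm it fetches the pinned SRCREV and compiles:
bitbake fio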