* [igt-dev] [PATCH i-g-t 1/4] lib/intel_allocator: Remove RANDOM allocator
2022-12-01 13:03 [igt-dev] [PATCH i-g-t 0/4] Remove random allocator and improve reloc one Zbigniew Kempczyński
@ 2022-12-01 13:03 ` Zbigniew Kempczyński
2022-12-01 13:03 ` [igt-dev] [PATCH i-g-t 2/4] tests/api_intel_allocator: Remove duplicated reuse and reserve subtests Zbigniew Kempczyński
` (3 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Zbigniew Kempczyński @ 2022-12-01 13:03 UTC (permalink / raw)
To: igt-dev
It was added in the first allocator series as a modification of the
previously existing offset provider. As it randomizes offsets, there's
a risk they can overlap, and during exec we can get ENOSPC in softpin
mode. Another thing is that nobody is using it, so it is a good idea
to remove it and prevent incidental failures in case of future use.
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
lib/intel_allocator.c | 3 -
lib/intel_allocator.h | 3 +-
lib/intel_allocator_random.c | 191 -------------------------------
lib/meson.build | 1 -
tests/i915/api_intel_allocator.c | 8 +-
tests/i915/api_intel_bb.c | 12 --
6 files changed, 3 insertions(+), 215 deletions(-)
delete mode 100644 lib/intel_allocator_random.c
diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
index 717d7fc56b..3004f15ae1 100644
--- a/lib/intel_allocator.c
+++ b/lib/intel_allocator.c
@@ -305,9 +305,6 @@ static struct intel_allocator *intel_allocator_create(int fd,
case INTEL_ALLOCATOR_RELOC:
ial = intel_allocator_reloc_create(fd, start, end);
break;
- case INTEL_ALLOCATOR_RANDOM:
- ial = intel_allocator_random_create(fd, start, end);
- break;
case INTEL_ALLOCATOR_SIMPLE:
ial = intel_allocator_simple_create(fd, start, end,
allocator_strategy);
diff --git a/lib/intel_allocator.h b/lib/intel_allocator.h
index c237e8e442..28e1165540 100644
--- a/lib/intel_allocator.h
+++ b/lib/intel_allocator.h
@@ -213,8 +213,7 @@ void intel_allocator_print(uint64_t allocator_handle);
#define ALLOC_INVALID_ADDRESS (-1ull)
#define INTEL_ALLOCATOR_NONE 0
#define INTEL_ALLOCATOR_RELOC 1
-#define INTEL_ALLOCATOR_RANDOM 2
-#define INTEL_ALLOCATOR_SIMPLE 3
+#define INTEL_ALLOCATOR_SIMPLE 2
#define GEN8_GTT_ADDRESS_WIDTH 48
diff --git a/lib/intel_allocator_random.c b/lib/intel_allocator_random.c
deleted file mode 100644
index d22f817670..0000000000
--- a/lib/intel_allocator_random.c
+++ /dev/null
@@ -1,191 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#include <sys/ioctl.h>
-#include <stdlib.h>
-#include "igt.h"
-#include "igt_x86.h"
-#include "igt_rand.h"
-#include "intel_allocator.h"
-
-struct intel_allocator *
-intel_allocator_random_create(int fd, uint64_t start, uint64_t end);
-
-struct intel_allocator_random {
- uint32_t prng;
- uint64_t start;
- uint64_t end;
-
- /* statistics */
- uint64_t allocated_objects;
-};
-
-/* Keep the low 256k clear, for negative deltas */
-#define BIAS (256 << 10)
-#define RETRIES 8
-
-static void intel_allocator_random_get_address_range(struct intel_allocator *ial,
- uint64_t *startp,
- uint64_t *endp)
-{
- struct intel_allocator_random *ialr = ial->priv;
-
- if (startp)
- *startp = ialr->start;
-
- if (endp)
- *endp = ialr->end;
-}
-
-static uint64_t intel_allocator_random_alloc(struct intel_allocator *ial,
- uint32_t handle, uint64_t size,
- uint64_t alignment,
- enum allocator_strategy strategy)
-{
- struct intel_allocator_random *ialr = ial->priv;
- uint64_t offset;
- int cnt = RETRIES;
-
- (void) handle;
- (void) strategy;
-
- /* randomize the address, we try to avoid relocations */
- do {
- offset = hars_petruska_f54_1_random64(&ialr->prng);
- /* maximize the chances of fitting in the last iteration */
- if (cnt == 1)
- offset = 0;
-
- offset %= ialr->end - ialr->start;
- offset += ialr->start;
- offset = ALIGN(offset, alignment);
- } while (offset + size > ialr->end && --cnt);
-
- if (!cnt)
- return ALLOC_INVALID_ADDRESS;
-
- ialr->allocated_objects++;
-
- return offset;
-}
-
-static bool intel_allocator_random_free(struct intel_allocator *ial,
- uint32_t handle)
-{
- struct intel_allocator_random *ialr = ial->priv;
-
- (void) handle;
-
- ialr->allocated_objects--;
-
- return false;
-}
-
-static bool intel_allocator_random_is_allocated(struct intel_allocator *ial,
- uint32_t handle, uint64_t size,
- uint64_t offset)
-{
- (void) ial;
- (void) handle;
- (void) size;
- (void) offset;
-
- return false;
-}
-
-static void intel_allocator_random_destroy(struct intel_allocator *ial)
-{
- igt_assert(ial);
-
- free(ial->priv);
- free(ial);
-}
-
-static bool intel_allocator_random_reserve(struct intel_allocator *ial,
- uint32_t handle,
- uint64_t start, uint64_t end)
-{
- (void) ial;
- (void) handle;
- (void) start;
- (void) end;
-
- return false;
-}
-
-static bool intel_allocator_random_unreserve(struct intel_allocator *ial,
- uint32_t handle,
- uint64_t start, uint64_t end)
-{
- (void) ial;
- (void) handle;
- (void) start;
- (void) end;
-
- return false;
-}
-
-static bool intel_allocator_random_is_reserved(struct intel_allocator *ial,
- uint64_t start, uint64_t end)
-{
- (void) ial;
- (void) start;
- (void) end;
-
- return false;
-}
-
-static void intel_allocator_random_print(struct intel_allocator *ial, bool full)
-{
- struct intel_allocator_random *ialr = ial->priv;
-
- (void) full;
-
- igt_info("<ial: %p, fd: %d> allocated objects: %" PRIx64 "\n",
- ial, ial->fd, ialr->allocated_objects);
-}
-
-static bool intel_allocator_random_is_empty(struct intel_allocator *ial)
-{
- struct intel_allocator_random *ialr = ial->priv;
-
- return !ialr->allocated_objects;
-}
-
-struct intel_allocator *
-intel_allocator_random_create(int fd, uint64_t start, uint64_t end)
-{
- struct intel_allocator *ial;
- struct intel_allocator_random *ialr;
-
- igt_debug("Using random allocator\n");
- ial = calloc(1, sizeof(*ial));
- igt_assert(ial);
-
- ial->fd = fd;
- ial->get_address_range = intel_allocator_random_get_address_range;
- ial->alloc = intel_allocator_random_alloc;
- ial->free = intel_allocator_random_free;
- ial->is_allocated = intel_allocator_random_is_allocated;
- ial->reserve = intel_allocator_random_reserve;
- ial->unreserve = intel_allocator_random_unreserve;
- ial->is_reserved = intel_allocator_random_is_reserved;
- ial->destroy = intel_allocator_random_destroy;
- ial->print = intel_allocator_random_print;
- ial->is_empty = intel_allocator_random_is_empty;
-
- ialr = ial->priv = calloc(1, sizeof(*ialr));
- igt_assert(ial->priv);
- ialr->prng = (uint32_t) to_user_pointer(ial);
-
- start = max_t(uint64_t, start, BIAS);
- igt_assert(start < end);
- ialr->start = start;
- ialr->end = end;
-
- ialr->allocated_objects = 0;
-
- return ial;
-}
diff --git a/lib/meson.build b/lib/meson.build
index cef2d0ff3d..579026f310 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -47,7 +47,6 @@ lib_sources = [
'instdone.c',
'intel_allocator.c',
'intel_allocator_msgchannel.c',
- 'intel_allocator_random.c',
'intel_allocator_reloc.c',
'intel_allocator_simple.c',
'intel_batchbuffer.c',
diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
index a7929e9b13..098b9e6960 100644
--- a/tests/i915/api_intel_allocator.c
+++ b/tests/i915/api_intel_allocator.c
@@ -179,9 +179,6 @@ static void basic_alloc(int fd, int cnt, uint8_t type)
for (i = 0; i < cnt; i++) {
igt_progress("check overlapping: ", i, cnt);
- if (type == INTEL_ALLOCATOR_RANDOM)
- continue;
-
for (j = 0; j < cnt; j++) {
if (j == i)
continue;
@@ -307,8 +304,8 @@ static void parallel_one(int fd, uint8_t type)
/* Check if all objects are allocated */
for (i = 0; i < count; i++) {
- /* Reloc + random allocators don't have state. */
- if (type == INTEL_ALLOCATOR_RELOC || type == INTEL_ALLOCATOR_RANDOM)
+ /* Reloc don't have state. */
+ if (type == INTEL_ALLOCATOR_RELOC)
break;
igt_assert_eq(offsets[i],
@@ -752,7 +749,6 @@ struct allocators {
} als[] = {
{"simple", INTEL_ALLOCATOR_SIMPLE},
{"reloc", INTEL_ALLOCATOR_RELOC},
- {"random", INTEL_ALLOCATOR_RANDOM},
{NULL, 0},
};
diff --git a/tests/i915/api_intel_bb.c b/tests/i915/api_intel_bb.c
index 4c8ca6ab36..980906f4ed 100644
--- a/tests/i915/api_intel_bb.c
+++ b/tests/i915/api_intel_bb.c
@@ -1620,24 +1620,12 @@ igt_main_args("dpibc:", NULL, help_str, opt_handler, NULL)
igt_subtest("object-noreloc-keep-cache-simple")
object_noreloc(bops, KEEP_CACHE, INTEL_ALLOCATOR_SIMPLE);
- igt_subtest("object-noreloc-purge-cache-random")
- object_noreloc(bops, PURGE_CACHE, INTEL_ALLOCATOR_RANDOM);
-
- igt_subtest("object-noreloc-keep-cache-random")
- object_noreloc(bops, KEEP_CACHE, INTEL_ALLOCATOR_RANDOM);
-
igt_subtest("blit-reloc-purge-cache")
blit(bops, RELOC, PURGE_CACHE, INTEL_ALLOCATOR_SIMPLE);
igt_subtest("blit-reloc-keep-cache")
blit(bops, RELOC, KEEP_CACHE, INTEL_ALLOCATOR_SIMPLE);
- igt_subtest("blit-noreloc-keep-cache-random")
- blit(bops, NORELOC, KEEP_CACHE, INTEL_ALLOCATOR_RANDOM);
-
- igt_subtest("blit-noreloc-purge-cache-random")
- blit(bops, NORELOC, PURGE_CACHE, INTEL_ALLOCATOR_RANDOM);
-
igt_subtest("blit-noreloc-keep-cache")
blit(bops, NORELOC, KEEP_CACHE, INTEL_ALLOCATOR_SIMPLE);
--
2.34.1
^ permalink raw reply related [flat|nested] 6+ messages in thread* [igt-dev] [PATCH i-g-t 3/4] lib/intel_allocator_reloc: Introduce stateful allocations in reloc
2022-12-01 13:03 [igt-dev] [PATCH i-g-t 0/4] Remove random allocator and improve reloc one Zbigniew Kempczyński
2022-12-01 13:03 ` [igt-dev] [PATCH i-g-t 1/4] lib/intel_allocator: Remove RANDOM allocator Zbigniew Kempczyński
2022-12-01 13:03 ` [igt-dev] [PATCH i-g-t 2/4] tests/api_intel_allocator: Remove duplicated reuse and reserve subtests Zbigniew Kempczyński
@ 2022-12-01 13:03 ` Zbigniew Kempczyński
2022-12-01 13:03 ` [igt-dev] [PATCH i-g-t 4/4] tests/gem_ctx_shared: Remove necessity of passing offset to function call Zbigniew Kempczyński
2022-12-01 14:42 ` [igt-dev] ✗ Fi.CI.BAT: failure for Remove random allocator and improve reloc one Patchwork
4 siblings, 0 replies; 6+ messages in thread
From: Zbigniew Kempczyński @ 2022-12-01 13:03 UTC (permalink / raw)
To: igt-dev
Till now the reloc allocator was stateless - that means if we called
alloc() twice for the same handle + size, we would get consecutive
offsets (according to size and alignment). This is problematic
in library code, where we get a handle and would like to get an offset
which must be the same as in the caller. It wasn't possible, thus the
library, instead of calling alloc(), had to get the offset from the
caller instead. For a single object this is not a big problem, but
passing more makes the prototype much longer and usage inconvenient.
Introducing stateful allocation tracking in reloc solves this issue
- alloc() can be called many times in dependent code and the same
offset will be returned until free().
Tracking was added to alloc()/free() only - reserve()/unreserve()
are not handled by the reloc allocator at the moment at all.
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
lib/intel_allocator_reloc.c | 115 ++++++++++++++++++++++++++-----
tests/i915/api_intel_allocator.c | 21 ++++--
2 files changed, 111 insertions(+), 25 deletions(-)
diff --git a/lib/intel_allocator_reloc.c b/lib/intel_allocator_reloc.c
index ee3ad43f4a..60cbb88511 100644
--- a/lib/intel_allocator_reloc.c
+++ b/lib/intel_allocator_reloc.c
@@ -9,11 +9,13 @@
#include "igt_x86.h"
#include "igt_rand.h"
#include "intel_allocator.h"
+#include "igt_map.h"
struct intel_allocator *
intel_allocator_reloc_create(int fd, uint64_t start, uint64_t end);
struct intel_allocator_reloc {
+ struct igt_map *objects;
uint32_t prng;
uint64_t start;
uint64_t end;
@@ -23,9 +25,41 @@ struct intel_allocator_reloc {
uint64_t allocated_objects;
};
+struct intel_allocator_record {
+ uint32_t handle;
+ uint64_t offset;
+ uint64_t size;
+};
+
/* Keep the low 256k clear, for negative deltas */
#define BIAS (256 << 10)
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
+
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
+
+static inline uint32_t hash_handles(const void *val)
+{
+ uint32_t hash = *(uint32_t *) val;
+
+ hash = hash * GOLDEN_RATIO_PRIME_32;
+ return hash;
+}
+
+static int equal_handles(const void *a, const void *b)
+{
+ uint32_t *key1 = (uint32_t *) a, *key2 = (uint32_t *) b;
+
+ return *key1 == *key2;
+}
+
+static void map_entry_free_func(struct igt_map_entry *entry)
+{
+ free(entry->data);
+}
+
static void intel_allocator_reloc_get_address_range(struct intel_allocator *ial,
uint64_t *startp,
uint64_t *endp)
@@ -44,25 +78,39 @@ static uint64_t intel_allocator_reloc_alloc(struct intel_allocator *ial,
uint64_t alignment,
enum allocator_strategy strategy)
{
+ struct intel_allocator_record *rec;
struct intel_allocator_reloc *ialr = ial->priv;
uint64_t offset, aligned_offset;
- (void) handle;
(void) strategy;
- aligned_offset = ALIGN(ialr->offset, alignment);
+ rec = igt_map_search(ialr->objects, &handle);
+ if (rec) {
+ offset = rec->offset;
+ igt_assert(rec->size == size);
+ } else {
+ aligned_offset = ALIGN(ialr->offset, alignment);
- /* Check we won't exceed end */
- if (aligned_offset + size > ialr->end)
- aligned_offset = ALIGN(ialr->start, alignment);
+ /* Check we won't exceed end */
+ if (aligned_offset + size > ialr->end)
+ aligned_offset = ALIGN(ialr->start, alignment);
- /* Check that the object fits in the address range */
- if (aligned_offset + size > ialr->end)
- return ALLOC_INVALID_ADDRESS;
+ /* Check that the object fits in the address range */
+ if (aligned_offset + size > ialr->end)
+ return ALLOC_INVALID_ADDRESS;
- offset = aligned_offset;
- ialr->offset = offset + size;
- ialr->allocated_objects++;
+ offset = aligned_offset;
+
+ rec = malloc(sizeof(*rec));
+ rec->handle = handle;
+ rec->offset = offset;
+ rec->size = size;
+
+ igt_map_insert(ialr->objects, &rec->handle, rec);
+
+ ialr->offset = offset + size;
+ ialr->allocated_objects++;
+ }
return offset;
}
@@ -70,30 +118,60 @@ static uint64_t intel_allocator_reloc_alloc(struct intel_allocator *ial,
static bool intel_allocator_reloc_free(struct intel_allocator *ial,
uint32_t handle)
{
+ struct intel_allocator_record *rec = NULL;
struct intel_allocator_reloc *ialr = ial->priv;
+ struct igt_map_entry *entry;
- (void) handle;
+ entry = igt_map_search_entry(ialr->objects, &handle);
+ if (entry) {
+ igt_map_remove_entry(ialr->objects, entry);
+ if (entry->data) {
+ rec = (struct intel_allocator_record *) entry->data;
+ ialr->allocated_objects--;
+ free(rec);
- ialr->allocated_objects--;
+ return true;
+ }
+ }
return false;
}
+static inline bool __same(const struct intel_allocator_record *rec,
+ uint32_t handle, uint64_t size, uint64_t offset)
+{
+ return rec->handle == handle && rec->size == size &&
+ DECANONICAL(rec->offset) == DECANONICAL(offset);
+}
+
static bool intel_allocator_reloc_is_allocated(struct intel_allocator *ial,
uint32_t handle, uint64_t size,
uint64_t offset)
{
- (void) ial;
- (void) handle;
- (void) size;
- (void) offset;
+ struct intel_allocator_record *rec;
+ struct intel_allocator_reloc *ialr;
+ bool same = false;
- return false;
+ igt_assert(ial);
+ ialr = (struct intel_allocator_reloc *) ial->priv;
+ igt_assert(ialr);
+ igt_assert(handle);
+
+ rec = igt_map_search(ialr->objects, &handle);
+ if (rec && __same(rec, handle, size, offset))
+ same = true;
+
+ return same;
}
static void intel_allocator_reloc_destroy(struct intel_allocator *ial)
{
+ struct intel_allocator_reloc *ialr;
+
igt_assert(ial);
+ ialr = (struct intel_allocator_reloc *) ial->priv;
+
+ igt_map_destroy(ialr->objects, map_entry_free_func);
free(ial->priv);
free(ial);
@@ -174,6 +252,7 @@ intel_allocator_reloc_create(int fd, uint64_t start, uint64_t end)
ialr = ial->priv = calloc(1, sizeof(*ialr));
igt_assert(ial->priv);
+ ialr->objects = igt_map_create(hash_handles, equal_handles);
ialr->prng = (uint32_t) to_user_pointer(ial);
start = max_t(uint64_t, start, BIAS);
diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
index b55587e549..87abd90084 100644
--- a/tests/i915/api_intel_allocator.c
+++ b/tests/i915/api_intel_allocator.c
@@ -195,6 +195,7 @@ static void basic_alloc(int fd, int cnt, uint8_t type)
igt_assert_eq(intel_allocator_close(ahnd), true);
}
+#define NUM_OBJS 128
static void reuse(int fd, uint8_t type)
{
struct test_obj obj[128], tmp;
@@ -204,15 +205,15 @@ static void reuse(int fd, uint8_t type)
ahnd = intel_allocator_open(fd, 0, type);
- for (i = 0; i < 128; i++) {
+ for (i = 0; i < NUM_OBJS; i++) {
obj[i].handle = gem_handle_gen();
obj[i].size = OBJ_SIZE;
obj[i].offset = intel_allocator_alloc(ahnd, obj[i].handle,
obj[i].size, align);
}
- /* check simple reuse */
- for (i = 0; i < 128; i++) {
+ /* check reuse */
+ for (i = 0; i < NUM_OBJS; i++) {
prev_offset = obj[i].offset;
obj[i].offset = intel_allocator_alloc(ahnd, obj[i].handle,
obj[i].size, 0);
@@ -225,7 +226,13 @@ static void reuse(int fd, uint8_t type)
/* alloc different buffer to fill freed hole */
tmp.handle = gem_handle_gen();
tmp.offset = intel_allocator_alloc(ahnd, tmp.handle, OBJ_SIZE, align);
- igt_assert(prev_offset == tmp.offset);
+
+ /* Simple will return previously returned offset if fits */
+ if (type == INTEL_ALLOCATOR_SIMPLE)
+ igt_assert(prev_offset == tmp.offset);
+ /* Reloc is moving forward for new allocations */
+ else if (type == INTEL_ALLOCATOR_RELOC)
+ igt_assert(prev_offset != tmp.offset);
obj[i].offset = intel_allocator_alloc(ahnd, obj[i].handle,
obj[i].size, 0);
@@ -785,10 +792,10 @@ igt_main
igt_dynamic("print")
basic_alloc(fd, 1UL << 2, a->type);
- if (a->type == INTEL_ALLOCATOR_SIMPLE) {
- igt_dynamic("reuse")
- reuse(fd, a->type);
+ igt_dynamic("reuse")
+ reuse(fd, a->type);
+ if (a->type == INTEL_ALLOCATOR_SIMPLE) {
igt_dynamic("reserve")
reserve(fd, a->type);
}
--
2.34.1
^ permalink raw reply related [flat|nested] 6+ messages in thread* [igt-dev] [PATCH i-g-t 4/4] tests/gem_ctx_shared: Remove necessity of passing offset to function call
2022-12-01 13:03 [igt-dev] [PATCH i-g-t 0/4] Remove random allocator and improve reloc one Zbigniew Kempczyński
` (2 preceding siblings ...)
2022-12-01 13:03 ` [igt-dev] [PATCH i-g-t 3/4] lib/intel_allocator_reloc: Introduce stateful allocations in reloc Zbigniew Kempczyński
@ 2022-12-01 13:03 ` Zbigniew Kempczyński
2022-12-01 14:42 ` [igt-dev] ✗ Fi.CI.BAT: failure for Remove random allocator and improve reloc one Patchwork
4 siblings, 0 replies; 6+ messages in thread
From: Zbigniew Kempczyński @ 2022-12-01 13:03 UTC (permalink / raw)
To: igt-dev
Previously the reloc allocator wasn't stateful for allocations, thus
alloc() had to be done in a single place. Now this limitation is
removed, so a dependent function can call the allocator's alloc()
function again without worrying about the offset changing.
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_ctx_shared.c | 45 ++++++++++++++++---------------------
1 file changed, 19 insertions(+), 26 deletions(-)
diff --git a/tests/i915/gem_ctx_shared.c b/tests/i915/gem_ctx_shared.c
index d6b56b72ab..3b155ac214 100644
--- a/tests/i915/gem_ctx_shared.c
+++ b/tests/i915/gem_ctx_shared.c
@@ -512,9 +512,9 @@ static void exec_single_timeline(int i915, const intel_ctx_cfg_t *cfg,
}
static void store_dword(int i915, uint64_t ahnd, const intel_ctx_t *ctx,
- unsigned ring, uint32_t target, uint64_t target_offset,
+ unsigned ring, uint32_t target, uint64_t target_size,
uint32_t offset, uint32_t value,
- uint32_t cork, uint64_t cork_offset,
+ uint32_t cork, uint64_t cork_size,
unsigned write_domain)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
@@ -537,9 +537,9 @@ static void store_dword(int i915, uint64_t ahnd, const intel_ctx_t *ctx,
obj[1].handle = target;
obj[2].handle = gem_create(i915, 4096);
if (ahnd) {
- obj[0].offset = cork_offset;
+ obj[0].offset = get_offset(ahnd, cork, cork_size, 0);
obj[0].flags |= EXEC_OBJECT_PINNED;
- obj[1].offset = target_offset;
+ obj[1].offset = get_offset(ahnd, target, target_size, 0);
obj[1].flags |= EXEC_OBJECT_PINNED;
if (write_domain)
obj[1].flags |= EXEC_OBJECT_WRITE;
@@ -768,7 +768,7 @@ static void reorder(int i915, const intel_ctx_cfg_t *cfg,
intel_ctx_cfg_t q_cfg;
const intel_ctx_t *ctx[2];
uint32_t plug;
- uint64_t ahnd = get_reloc_ahnd(i915, 0), scratch_offset, plug_offset;
+ uint64_t ahnd = get_reloc_ahnd(i915, 0);
q_cfg = *cfg;
q_cfg.vm = gem_vm_create(i915);
@@ -781,17 +781,15 @@ static void reorder(int i915, const intel_ctx_cfg_t *cfg,
gem_context_set_priority(i915, ctx[HI]->id, flags & EQUAL ? MIN_PRIO : 0);
scratch = gem_create(i915, 4096);
- scratch_offset = get_offset(ahnd, scratch, 4096, 0);
plug = igt_cork_plug(&cork, i915);
- plug_offset = get_offset(ahnd, plug, 4096, 0);
/* We expect the high priority context to be executed first, and
* so the final result will be value from the low priority context.
*/
- store_dword(i915, ahnd, ctx[LO], ring, scratch, scratch_offset,
- 0, ctx[LO]->id, plug, plug_offset, 0);
- store_dword(i915, ahnd, ctx[HI], ring, scratch, scratch_offset,
- 0, ctx[HI]->id, plug, plug_offset, 0);
+ store_dword(i915, ahnd, ctx[LO], ring, scratch, 4096,
+ 0, ctx[LO]->id, plug, 4096, 0);
+ store_dword(i915, ahnd, ctx[HI], ring, scratch, 4096,
+ 0, ctx[HI]->id, plug, 4096, 0);
unplug_show_queue(i915, &cork, ahnd, &q_cfg, ring);
gem_close(i915, plug);
@@ -825,7 +823,6 @@ static void promotion(int i915, const intel_ctx_cfg_t *cfg, unsigned ring)
const intel_ctx_t *ctx[3];
uint32_t plug;
uint64_t ahnd = get_reloc_ahnd(i915, 0);
- uint64_t result_offset, dep_offset, plug_offset;
q_cfg = *cfg;
q_cfg.vm = gem_vm_create(i915);
@@ -841,30 +838,27 @@ static void promotion(int i915, const intel_ctx_cfg_t *cfg, unsigned ring)
gem_context_set_priority(i915, ctx[NOISE]->id, 0);
result = gem_create(i915, 4096);
- result_offset = get_offset(ahnd, result, 4096, 0);
dep = gem_create(i915, 4096);
- dep_offset = get_offset(ahnd, dep, 4096, 0);
plug = igt_cork_plug(&cork, i915);
- plug_offset = get_offset(ahnd, plug, 4096, 0);
/* Expect that HI promotes LO, so the order will be LO, HI, NOISE.
*
* fifo would be NOISE, LO, HI.
* strict priority would be HI, NOISE, LO
*/
- store_dword(i915, ahnd, ctx[NOISE], ring, result, result_offset,
- 0, ctx[NOISE]->id, plug, plug_offset, 0);
- store_dword(i915, ahnd, ctx[LO], ring, result, result_offset,
- 0, ctx[LO]->id, plug, plug_offset, 0);
+ store_dword(i915, ahnd, ctx[NOISE], ring, result, 4096,
+ 0, ctx[NOISE]->id, plug, 4096, 0);
+ store_dword(i915, ahnd, ctx[LO], ring, result, 4096,
+ 0, ctx[LO]->id, plug, 4096, 0);
/* link LO <-> HI via a dependency on another buffer */
- store_dword(i915, ahnd, ctx[LO], ring, dep, dep_offset,
+ store_dword(i915, ahnd, ctx[LO], ring, dep, 4096,
0, ctx[LO]->id, 0, 0, I915_GEM_DOMAIN_INSTRUCTION);
- store_dword(i915, ahnd, ctx[HI], ring, dep, dep_offset,
+ store_dword(i915, ahnd, ctx[HI], ring, dep, 4096,
0, ctx[HI]->id, 0, 0, 0);
- store_dword(i915, ahnd, ctx[HI], ring, result, result_offset,
+ store_dword(i915, ahnd, ctx[HI], ring, result, 4096,
0, ctx[HI]->id, 0, 0, 0);
unplug_show_queue(i915, &cork, ahnd, &q_cfg, ring);
@@ -908,7 +902,6 @@ static void smoketest(int i915, const intel_ctx_cfg_t *cfg,
uint32_t scratch;
uint32_t *ptr;
uint64_t ahnd = get_reloc_ahnd(i915, 0); /* same vm */
- uint64_t scratch_offset;
q_cfg = *cfg;
q_cfg.vm = gem_vm_create(i915);
@@ -926,7 +919,7 @@ static void smoketest(int i915, const intel_ctx_cfg_t *cfg,
igt_require(nengine);
scratch = gem_create(i915, 4096);
- scratch_offset = get_offset(ahnd, scratch, 4096, 0);
+
igt_fork(child, ncpus) {
unsigned long count = 0;
const intel_ctx_t *ctx;
@@ -943,12 +936,12 @@ static void smoketest(int i915, const intel_ctx_cfg_t *cfg,
engine = engines[hars_petruska_f54_1_random_unsafe_max(nengine)];
store_dword(i915, ahnd, ctx, engine,
- scratch, scratch_offset,
+ scratch, 4096,
8*child + 0, ~child,
0, 0, 0);
for (unsigned int step = 0; step < 8; step++)
store_dword(i915, ahnd, ctx, engine,
- scratch, scratch_offset,
+ scratch, 4096,
8*child + 4, count++,
0, 0, 0);
}
--
2.34.1
^ permalink raw reply related [flat|nested] 6+ messages in thread