Top |
#define | BATCH_SZ |
#define | I915_TILING_4 |
#define | I915_TILING_Yf |
#define | I915_TILING_Ys |
#define | I915_TILING_64 |
enum | i915_compression |
struct | igt_pxp |
struct | intel_bb |
uint32_t | previous_offset |
struct | intel_buf |
uint32_t fast_copy_dword1 (int fd
,unsigned int src_tiling
,unsigned int dst_tiling
,int bpp
);
void igt_blitter_copy (int fd
,uint64_t ahnd
,uint32_t ctx
,const intel_ctx_cfg_t *cfg
,uint32_t src_handle
,uint32_t src_delta
,uint32_t src_stride
,uint32_t src_tiling
,uint32_t src_x
,uint32_t src_y
,uint64_t src_size
,uint32_t width
,uint32_t height
,uint32_t bpp
,uint32_t dst_handle
,uint32_t dst_delta
,uint32_t dst_stride
,uint32_t dst_tiling
,uint32_t dst_x
,uint32_t dst_y
,uint64_t dst_size
);
Wrapper API to call appropriate blitter copy function.
fd |
file descriptor of the i915 driver |
|
ahnd |
handle to an allocator |
|
ctx |
context within which to execute the copy blit |
|
src_handle |
GEM handle of the source buffer |
|
src_delta |
offset into the source GEM bo, in bytes |
|
src_stride |
Stride (in bytes) of the source buffer |
|
src_tiling |
Tiling mode of the source buffer |
|
src_x |
X coordinate of the source region to copy |
|
src_y |
Y coordinate of the source region to copy |
|
src_size |
size of the src bo required for allocator and softpin |
|
width |
Width of the region to copy |
|
height |
Height of the region to copy |
|
bpp |
source and destination bits per pixel |
|
dst_handle |
GEM handle of the destination buffer |
|
dst_delta |
offset into the destination GEM bo, in bytes |
|
dst_stride |
Stride (in bytes) of the destination buffer |
|
dst_tiling |
Tiling mode of the destination buffer |
|
dst_x |
X coordinate of destination |
|
dst_y |
Y coordinate of destination |
|
dst_size |
size of the dst bo required for allocator and softpin |
void igt_blitter_src_copy (int fd
,uint64_t ahnd
,uint32_t ctx
,const intel_ctx_cfg_t *cfg
,uint32_t src_handle
,uint32_t src_delta
,uint32_t src_stride
,uint32_t src_tiling
,uint32_t src_x
,uint32_t src_y
,uint64_t src_size
,uint32_t width
,uint32_t height
,uint32_t bpp
,uint32_t dst_handle
,uint32_t dst_delta
,uint32_t dst_stride
,uint32_t dst_tiling
,uint32_t dst_x
,uint32_t dst_y
,uint64_t dst_size
);
Copy src
into dst
using the XY_SRC blit command.
fd |
file descriptor of the i915 driver |
|
ahnd |
handle to an allocator |
|
ctx |
context within which to execute the copy blit |
|
cfg |
intel_ctx configuration, NULL for default context or legacy mode |
|
src_handle |
GEM handle of the source buffer |
|
src_delta |
offset into the source GEM bo, in bytes |
|
src_stride |
Stride (in bytes) of the source buffer |
|
src_tiling |
Tiling mode of the source buffer |
|
src_x |
X coordinate of the source region to copy |
|
src_y |
Y coordinate of the source region to copy |
|
src_size |
size of the src bo required for allocator and softpin |
|
width |
Width of the region to copy |
|
height |
Height of the region to copy |
|
bpp |
source and destination bits per pixel |
|
dst_handle |
GEM handle of the destination buffer |
|
dst_delta |
offset into the destination GEM bo, in bytes |
|
dst_stride |
Stride (in bytes) of the destination buffer |
|
dst_tiling |
Tiling mode of the destination buffer |
|
dst_x |
X coordinate of destination |
|
dst_y |
Y coordinate of destination |
|
dst_size |
size of the dst bo required for allocator and softpin |
void igt_blitter_fast_copy__raw (int fd
,uint64_t ahnd
,uint32_t ctx
,const intel_ctx_cfg_t *cfg
,uint32_t src_handle
,unsigned int src_delta
,unsigned int src_stride
,unsigned int src_tiling
,unsigned int src_x
,unsigned src_y
,uint64_t src_size
,unsigned int width
,unsigned int height
,int bpp
,uint32_t dst_handle
,unsigned int dst_delta
,unsigned int dst_stride
,unsigned int dst_tiling
,unsigned int dst_x
,unsigned dst_y
,uint64_t dst_size
);
Like igt_blitter_fast_copy()
, but talking to the kernel directly.
fd |
file descriptor of the i915 driver |
|
ahnd |
handle to an allocator |
|
ctx |
context within which to execute the copy blit |
|
cfg |
intel_ctx configuration, NULL for default context or legacy mode |
|
src_handle |
GEM handle of the source buffer |
|
src_delta |
offset into the source GEM bo, in bytes |
|
src_stride |
Stride (in bytes) of the source buffer |
|
src_tiling |
Tiling mode of the source buffer |
|
src_x |
X coordinate of the source region to copy |
|
src_y |
Y coordinate of the source region to copy |
|
src_size |
size of the src bo required for allocator and softpin |
|
width |
Width of the region to copy |
|
height |
Height of the region to copy |
|
bpp |
source and destination bits per pixel |
|
dst_handle |
GEM handle of the destination buffer |
|
dst_delta |
offset into the destination GEM bo, in bytes |
|
dst_stride |
Stride (in bytes) of the destination buffer |
|
dst_tiling |
Tiling mode of the destination buffer |
|
dst_x |
X coordinate of destination |
|
dst_y |
Y coordinate of destination |
|
dst_size |
size of the dst bo required for allocator and softpin |
void (*igt_render_copyfunc_t) (struct intel_bb *ibb
,struct intel_buf *src
,uint32_t src_x
,uint32_t src_y
,uint32_t width
,uint32_t height
,struct intel_buf *dst
,uint32_t dst_x
,uint32_t dst_y
);
This is the type of the per-platform render copy functions. The
platform-specific implementation can be obtained by calling
igt_get_render_copyfunc()
.
A render copy function will emit a batchbuffer to the kernel which executes
the specified blit copy operation using the render engine. ctx
is
optional and can be 0.
ibb |
batchbuffer |
|
ctx |
context to use |
|
src |
intel_buf source object |
|
src_x |
source pixel x-coordinate |
|
src_y |
source pixel y-coordinate |
|
width |
width of the copied rectangle |
|
height |
height of the copied rectangle |
|
dst |
intel_buf destination object |
|
dst_x |
destination pixel x-coordinate |
|
dst_y |
destination pixel y-coordinate |
void (*igt_vebox_copyfunc_t) (struct intel_bb *ibb
,struct intel_buf *src
,unsigned int width
,unsigned int height
,struct intel_buf *dst
);
This is the type of the per-platform vebox copy functions. The
platform-specific implementation can be obtained by calling
igt_get_vebox_copyfunc()
.
A vebox copy function will emit a batchbuffer to the kernel which executes the specified blit copy operation using the vebox engine.
void (*igt_render_clearfunc_t) (struct intel_bb *ibb
,struct intel_buf *dst
,unsigned int dst_x
,unsigned int dst_y
,unsigned int width
,unsigned int height
,const float cc_color[4]
);
void (*igt_fillfunc_t) (int i915
,struct intel_buf *buf
,unsigned x
,unsigned y
,unsigned width
,unsigned height
,uint8_t color
);
This is the type of the per-platform fill functions using media
or gpgpu pipeline. The platform-specific implementation can be obtained
by calling igt_get_media_fillfunc()
or igt_get_gpgpu_fillfunc()
.
A fill function will emit a batchbuffer to the kernel which executes the specified blit fill operation using the media/gpgpu engine.
void (*igt_vme_func_t) (int i915
,uint32_t ctx
,struct intel_buf *src
,unsigned int width
,unsigned int height
,struct intel_buf *dst
);
void (*igt_media_spinfunc_t) (int i915
,struct intel_buf *buf
,uint32_t spins
);
This is the type of the per-platform media spin functions. The
platform-specific implementation can be obtained by calling
igt_get_media_spinfunc()
.
The media spin function emits a batchbuffer for the render engine with the media pipeline selected. The workload consists of a single thread which spins in a tight loop the requested number of times. Each spin increments a counter whose final 32-bit value is written to the destination buffer on completion. This utility provides a simple way to keep the render engine busy for a set time for various tests.
struct intel_bb * intel_bb_create_full (int fd
,uint32_t ctx
,uint32_t vm
,const intel_ctx_cfg_t *cfg
,uint32_t size
,uint64_t start
,uint64_t end
,uint64_t alignment
,uint8_t allocator_type
,enum allocator_strategy strategy
,uint64_t region
);
Creates bb with context passed in ctx
, size in size
and allocator type
in allocator_type
, in memory region passed in region
. Relocations are set
to false because IGT allocator is used in that case. VM range is passed
to allocator (start
and end
) and allocation strategy
(suggestion
to allocator about address allocation preferences).
fd |
drm fd - i915 or xe |
|
ctx |
for i915 context id, for xe engine id |
|
vm |
for xe vm_id, unused for i915 |
|
cfg |
intel_ctx configuration, NULL for default context or legacy mode |
|
size |
size of the batchbuffer |
|
start |
allocator vm start address |
|
end |
allocator vm end address |
|
alignment |
alignment to use for allocator, zero for default |
|
allocator_type |
allocator type, SIMPLE, RELOC, ... |
|
strategy |
allocation strategy |
|
region |
memory region |
struct intel_bb * intel_bb_create_with_allocator (int fd
,uint32_t ctx
,uint32_t vm
,const intel_ctx_cfg_t *cfg
,uint32_t size
,uint8_t allocator_type
);
Creates bb with context passed in ctx
, size in size
and allocator type
in allocator_type
. Relocations are set to false because IGT allocator
is used in that case.
struct intel_bb * intel_bb_create (int fd
,uint32_t size
);
Creates bb with default context.
Pointer to the intel_bb; asserts on failure.
Notes:
intel_bb must not be created in igt_fixture. The reason is that intel_bb "opens" a connection to the allocator, and when the test completes it can leave the allocator in an unknown state (mostly for failed tests). As igt_core was armed to reset the allocator infrastructure, the connection to it inside intel_bb is not valid anymore. Trying to use it leads to catastrophic errors.
struct intel_bb * intel_bb_create_with_context (int fd
,uint32_t ctx
,uint32_t vm
,const intel_ctx_cfg_t *cfg
,uint32_t size
);
Creates bb with context passed in ctx
and cfg
configuration (when
working with a custom engine layout).
struct intel_bb * intel_bb_create_with_context_in_region (int fd
,uint32_t ctx
,uint32_t vm
,const intel_ctx_cfg_t *cfg
,uint32_t size
,uint64_t region
);
Creates bb with context passed in ctx
in memory region passed in region
.
struct intel_bb * intel_bb_create_with_relocs (int fd
,uint32_t size
);
Creates bb which will disable passing addresses. This will lead to relocations when objects are not previously pinned.
struct intel_bb * intel_bb_create_with_relocs_and_context (int fd
,uint32_t ctx
,const intel_ctx_cfg_t *cfg
,uint32_t size
);
Creates bb with default context which will disable passing addresses. This will lead to relocations when objects are not previously pinned.
struct intel_bb * intel_bb_create_no_relocs (int fd
,uint32_t size
);
Creates bb with disabled relocations. This enables passing addresses and requires pinning objects.
void
intel_bb_destroy (struct intel_bb *ibb
);
Frees all relocations / objects allocated during filling the batch.
void
intel_bb_reinit_allocator (void
);
Reinit allocator and get offsets in tracked intel_batchbuffers.
void
intel_bb_track (bool do_tracking
);
Turn on (true) or off (false) tracking for intel_batchbuffers.
void intel_bb_set_debug (struct intel_bb *ibb
,bool debug
);
Sets debug to true / false. Execbuf is then called synchronously and object/reloc arrays are printed after execution.
void intel_bb_set_dump_base64 (struct intel_bb *ibb
,bool dump
);
Do bb dump as base64 string before execbuf call.
void intel_bb_set_pxp (struct intel_bb *ibb
,bool new_state
,uint32_t apptype
,uint32_t appid
);
struct drm_i915_gem_exec_object2 * intel_bb_add_object (struct intel_bb *ibb
,uint32_t handle
,uint64_t size
,uint64_t offset
,uint64_t alignment
,bool write
);
bool intel_bb_remove_object (struct intel_bb *ibb
,uint32_t handle
,uint64_t offset
,uint64_t size
);
struct drm_i915_gem_exec_object2 * intel_bb_add_intel_buf (struct intel_bb *ibb
,struct intel_buf *buf
,bool write
);
struct drm_i915_gem_exec_object2 * intel_bb_add_intel_buf_with_alignment (struct intel_bb *ibb
,struct intel_buf *buf
,uint64_t alignment
,bool write
);
bool intel_bb_remove_intel_buf (struct intel_bb *ibb
,struct intel_buf *buf
);
struct drm_i915_gem_exec_object2 * intel_bb_find_object (struct intel_bb *ibb
,uint32_t handle
);
bool intel_bb_object_set_flag (struct intel_bb *ibb
,uint32_t handle
,uint64_t flag
);
bool intel_bb_object_clear_flag (struct intel_bb *ibb
,uint32_t handle
,uint64_t flag
);
uint64_t intel_bb_emit_reloc (struct intel_bb *ibb
,uint32_t handle
,uint32_t read_domains
,uint32_t write_domain
,uint64_t delta
,uint64_t presumed_offset
);
Function prepares the relocation (execobj if required + reloc) and emits the offset in the bb. For I915_EXEC_NO_RELOC, presumed_offset is a hint that the object is already in a valid place and the relocation step can be skipped in this case.
Note: delta is value added to address, mostly used when some instructions require modify-bit set to apply change. Which delta is valid depends on instruction (see instruction specification).
ibb |
pointer to intel_bb |
|
handle |
object handle which address will be taken to patch the bb |
|
read_domains |
gem domain bits for the relocation |
|
write_domain |
gem domain bit for the relocation |
|
delta |
delta value to add to |
|
presumed_offset |
address of the object in address space. If -1 is passed
then final offset of the object will be randomized (for no-reloc bb) or
0 (for reloc bb, in that case reloc.presumed_offset will be -1). In
case the address is known it should be passed in |
|
write |
whether the handle is a render target |
uint64_t intel_bb_emit_reloc_fenced (struct intel_bb *ibb
,uint32_t handle
,uint32_t read_domains
,uint32_t write_domain
,uint64_t delta
,uint64_t presumed_offset
);
uint64_t intel_bb_offset_reloc (struct intel_bb *ibb
,uint32_t handle
,uint32_t read_domains
,uint32_t write_domain
,uint32_t offset
,uint64_t presumed_offset
);
Function prepares the relocation (execobj if required + reloc). It is used
for editing batchbuffer via modifying structures. It means when we're
preparing batchbuffer it is more descriptive to edit the structure
than emitting dwords. But it requires some fields to point to the
relocation. For that case offset
is passed by the user and it points
to the offset in bb where the relocation will be applied.
ibb |
pointer to intel_bb |
|
handle |
object handle which address will be taken to patch the bb |
|
read_domains |
gem domain bits for the relocation |
|
write_domain |
gem domain bit for the relocation |
|
offset |
offset within bb to be patched |
|
presumed_offset |
address of the object in address space. If -1 is passed
then final offset of the object will be randomized (for no-reloc bb) or
0 (for reloc bb, in that case reloc.presumed_offset will be -1). In
case the address is known it should be passed in |
uint64_t intel_bb_offset_reloc_with_delta (struct intel_bb *ibb
,uint32_t handle
,uint32_t read_domains
,uint32_t write_domain
,uint32_t delta
,uint32_t offset
,uint64_t presumed_offset
);
uint64_t intel_bb_offset_reloc_to_object (struct intel_bb *ibb
,uint32_t handle
,uint32_t to_handle
,uint32_t read_domains
,uint32_t write_domain
,uint32_t delta
,uint32_t offset
,uint64_t presumed_offset
);
void intel_bb_exec (struct intel_bb *ibb
,uint32_t end_offset
,uint64_t flags
,bool sync
);
Do execbuf on context selected during bb creation. Asserts on failure.
uint64_t intel_bb_get_object_offset (struct intel_bb *ibb
,uint32_t handle
);
bool intel_bb_object_offset_to_buf (struct intel_bb *ibb
,struct intel_buf *buf
);
uint32_t intel_bb_copy_data (struct intel_bb *ibb
,const void *data
,unsigned int bytes
,uint32_t align
);
void intel_bb_emit_blt_copy (struct intel_bb *ibb
,struct intel_buf *src
,int src_x1
,int src_y1
,int src_pitch
,struct intel_buf *dst
,int dst_x1
,int dst_y1
,int dst_pitch
,int width
,int height
,int bpp
);
void intel_bb_blt_copy (struct intel_bb *ibb
,struct intel_buf *src
,int src_x1
,int src_y1
,int src_pitch
,struct intel_buf *dst
,int dst_x1
,int dst_y1
,int dst_pitch
,int width
,int height
,int bpp
);
void intel_bb_copy_intel_buf (struct intel_bb *ibb
,struct intel_buf *dst
,struct intel_buf *src
,long int size
);
Emits a copy operation using blitter commands into the supplied batch.
A total of size
bytes from the start of src
is copied
over to dst
. Note that size
must be page-aligned.
void (*igt_huc_copyfunc_t) (int fd
,uint64_t ahnd
,struct drm_i915_gem_exec_object2 *obj
,uint64_t *objsize
);
This is the type of the per-platform huc copy functions.
The huc copy function emits a batchbuffer to the VDBOX engine to invoke the HuC Copy kernel to copy 4K bytes from the source buffer to the destination buffer.
struct intel_bb { struct igt_list_head link; uint64_t allocator_handle; uint64_t allocator_start, allocator_end; uint8_t allocator_type; enum allocator_strategy allocator_strategy; enum intel_driver driver; int fd; unsigned int gen; bool debug; bool dump_base64; bool enforce_relocs; uint32_t devid; uint32_t handle; uint32_t size; uint32_t *batch; uint32_t *ptr; uint64_t alignment; int fence; uint64_t gtt_size; bool supports_48b_address; bool uses_full_ppgtt; bool allows_obj_alignment; struct igt_pxp pxp; uint32_t ctx; uint32_t vm_id; bool xe_bound; uint32_t engine_syncobj; uint32_t engine_id; uint32_t last_engine; /* Context configuration */ intel_ctx_cfg_t *cfg; /* Cache */ void *root; /* Current objects for execbuf */ void *current; /* Objects for current execbuf */ struct drm_i915_gem_exec_object2 **objects; uint32_t num_objects; uint32_t allocated_objects; uint64_t batch_offset; struct drm_i915_gem_relocation_entry *relocs; uint32_t num_relocs; uint32_t allocated_relocs; /* Tracked intel_bufs */ struct igt_list_head intel_bufs; /* * BO recreate in reset path only when refcount == 0 * Currently we don't need to use atomics because intel_bb * is not thread-safe. */ int32_t refcount; /* long running mode */ bool lr_mode; int64_t user_fence_offset; uint64_t user_fence_value; };
struct intel_buf { struct buf_ops *bops; bool is_owner; uint32_t handle; uint64_t size; uint32_t width; uint32_t height; uint32_t tiling; uint32_t bpp, depth; uint32_t compression; uint32_t swizzle_mode; uint32_t yuv_semiplanar_bpp; bool format_is_yuv; bool format_is_yuv_semiplanar; struct { uint32_t offset; uint32_t stride; uint64_t size; } surface[2]; struct { uint32_t offset; uint32_t stride; } ccs[2]; struct { uint32_t offset; bool disable; } cc; struct { uint64_t offset; uint32_t ctx; } addr; uint64_t bo_size; uint64_t region; /* Tracking */ struct intel_bb *ibb; struct igt_list_head link; /* CPU mapping */ uint32_t *ptr; bool cpu_write; /* Content Protection*/ bool is_protected; /* pat_index to use for mapping this buf. Only used in Xe. */ uint8_t pat_index; /* mocs_index to use for operations using this intel_buf, like render_copy */ uint8_t mocs_index; /* For debugging purposes */ char name[INTEL_BUF_NAME_MAXSIZE + 1]; };