#define BATCH_SZ
#define BATCH_RESERVED
struct intel_batchbuffer
#define I915_TILING_4
#define I915_TILING_Yf
#define I915_TILING_Ys
enum i915_compression
struct igt_buf
struct igt_pxp
struct intel_bb
uint32_t previous_offset
struct intel_buf
This library provides some basic support for batchbuffers and using the blitter engine, based upon libdrm. A new batchbuffer is allocated with intel_batchbuffer_alloc() and, for simple blitter commands, submitted with intel_batchbuffer_flush().
It also provides some convenient macros to easily emit commands into batchbuffers. All these macros presume that a pointer to an intel_batchbuffer structure called batch is in scope. The basic macros are BEGIN_BATCH, OUT_BATCH, OUT_RELOC and ADVANCE_BATCH.
Note that this library's header pulls in the i-g-t core library as a dependency.
struct intel_batchbuffer *intel_batchbuffer_alloc (drm_intel_bufmgr *bufmgr, uint32_t devid);

Allocates a new batchbuffer object. devid must be supplied since libdrm doesn't expose it directly.
void intel_batchbuffer_set_context (struct intel_batchbuffer *batch, drm_intel_context *ctx);
void intel_batchbuffer_free (struct intel_batchbuffer *batch);

Releases all resources of the batchbuffer object batch.
void intel_batchbuffer_flush (struct intel_batchbuffer *batch);
Submits the batch for execution on the blitter engine, selecting the right ring depending upon the hardware platform.
void intel_batchbuffer_flush_on_ring (struct intel_batchbuffer *batch, int ring);

Submits the batch for execution on ring.
void intel_batchbuffer_flush_with_context (struct intel_batchbuffer *batch, drm_intel_context *context);
Submits the batch for execution on the render engine with the supplied hardware context.
void intel_batchbuffer_reset (struct intel_batchbuffer *batch);

Resets batch by allocating a new gem buffer object as backing storage.
uint32_t intel_batchbuffer_copy_data (struct intel_batchbuffer *batch, const void *data, unsigned int bytes, uint32_t align);

This transfers the given data into the batchbuffer. Note that bytes must be DWORD-aligned, i.e. a multiple of 32 bits. The caller must confirm that there is enough space in the batch for the data to be copied.
void intel_batchbuffer_emit_reloc (struct intel_batchbuffer *batch, drm_intel_bo *buffer, uint64_t delta, uint32_t read_domains, uint32_t write_domain, int fenced);

Emits both a libdrm relocation entry pointing at buffer and the pre-computed DWORD of buffer's presumed gpu address plus the supplied delta into batch.

Note that fenced is only relevant if buffer is actually tiled.

This is the only way buffers get added to the validate list.
uint32_t intel_batchbuffer_align (struct intel_batchbuffer *batch, uint32_t align);

Aligns the current in-batch offset to the given value.
void *intel_batchbuffer_subdata_alloc (struct intel_batchbuffer *batch, uint32_t size, uint32_t align);

Verifies that size bytes are available within batch (to prevent an overflow), then allocates size bytes within batch.
uint32_t intel_batchbuffer_subdata_offset (struct intel_batchbuffer *batch, void *ptr);

void intel_batchbuffer_emit_dword (struct intel_batchbuffer *batch, uint32_t dword);

void intel_batchbuffer_require_space (struct intel_batchbuffer *batch, unsigned int sz);
#define BEGIN_BATCH(n, r)
Prepares a batch to emit n DWORDS, flushing it if there's not enough space available.
This macro needs a pointer to an intel_batchbuffer structure called batch in scope.
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(batch, d)
Emits d
into a batch.
This macro needs a pointer to an intel_batchbuffer structure called batch in scope.
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta)
Emits a fenced relocation into a batch.
This macro needs a pointer to an intel_batchbuffer structure called batch in scope.
#define OUT_RELOC(buf, read_domains, write_domain, delta)
Emits a normal, unfenced relocation into a batch.
This macro needs a pointer to an intel_batchbuffer structure called batch in scope.
#define ADVANCE_BATCH()
Completes the batch command emission sequence started with BEGIN_BATCH.
This macro needs a pointer to an intel_batchbuffer structure called batch in scope.
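Putting these macros together, a minimal sketch (assuming bufmgr and devid were obtained via libdrm, and that MI_NOOP comes from the i-g-t register headers) looks like this:

struct intel_batchbuffer *batch = intel_batchbuffer_alloc(bufmgr, devid);

/* Reserve 2 DWORDs with no relocations, emit two no-ops and close the
 * emission sequence. */
BEGIN_BATCH(2, 0);
OUT_BATCH(MI_NOOP);
OUT_BATCH(MI_NOOP);
ADVANCE_BATCH();

/* Submit on the blitter and release the batchbuffer object. */
intel_batchbuffer_flush(batch);
intel_batchbuffer_free(batch);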
void intel_blt_copy (struct intel_batchbuffer *batch, drm_intel_bo *src_bo, int src_x1, int src_y1, int src_pitch, drm_intel_bo *dst_bo, int dst_x1, int dst_y1, int dst_pitch, int width, int height, int bpp);
This emits a 2D copy operation using blitter commands into the supplied batch buffer object.
batch | batchbuffer object
src_bo | source libdrm buffer object
src_x1 | source pixel x-coordinate
src_y1 | source pixel y-coordinate
src_pitch | pitch (in bytes) of the source buffer
dst_bo | destination libdrm buffer object
dst_x1 | destination pixel x-coordinate
dst_y1 | destination pixel y-coordinate
dst_pitch | pitch (in bytes) of the destination buffer
width | width of the copied rectangle
height | height of the copied rectangle
bpp | bits per pixel
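As a rough usage sketch, copying a full width x height region between two linear 32bpp libdrm buffer objects (src_bo, dst_bo, width and height are assumed to exist):

/* 32bpp linear copy: pitches are in bytes (width * 4 here),
 * coordinates in pixels. */
intel_blt_copy(batch,
	       src_bo, 0, 0, width * 4,
	       dst_bo, 0, 0, width * 4,
	       width, height, 32);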
void intel_copy_bo (struct intel_batchbuffer *batch, drm_intel_bo *dst_bo, drm_intel_bo *src_bo, long int size);

This emits a copy operation using blitter commands into the supplied batch buffer object. A total of size bytes from the start of src_bo is copied over to dst_bo. Note that size must be page-aligned.
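For example, copying a single page between two page-sized bos might look like this (a sketch; batch and the bos are assumed to exist):

/* size must be page-aligned; 4096 bytes is one page. */
intel_copy_bo(batch, dst_bo, src_bo, 4096);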
unsigned igt_buf_width (const struct igt_buf *buf);

Computes the width in 32-bit pixels of the given buffer.

unsigned igt_buf_height (const struct igt_buf *buf);

Computes the height in 32-bit pixels of the given buffer.

unsigned int igt_buf_intel_ccs_width (unsigned int gen, const struct igt_buf *buf);

Computes the width of the CCS buffer when considered as Intel surface data.

unsigned int igt_buf_intel_ccs_height (unsigned int gen, const struct igt_buf *buf);

Computes the height of the CCS buffer when considered as Intel surface data.
void igt_blitter_src_copy (int fd, uint64_t ahnd, uint32_t ctx, uint32_t src_handle, uint32_t src_delta, uint32_t src_stride, uint32_t src_tiling, uint32_t src_x, uint32_t src_y, uint64_t src_size, uint32_t width, uint32_t height, uint32_t bpp, uint32_t dst_handle, uint32_t dst_delta, uint32_t dst_stride, uint32_t dst_tiling, uint32_t dst_x, uint32_t dst_y, uint64_t dst_size);

Copy src into dst using the XY_SRC blit command.
fd | file descriptor of the i915 driver
ahnd | handle to an allocator
ctx | context within which to execute the copy blit
src_handle | GEM handle of the source buffer
src_delta | offset into the source GEM bo, in bytes
src_stride | stride (in bytes) of the source buffer
src_tiling | tiling mode of the source buffer
src_x | X coordinate of the source region to copy
src_y | Y coordinate of the source region to copy
src_size | size of the src bo required for allocator and softpin
width | width of the region to copy
height | height of the region to copy
bpp | source and destination bits per pixel
dst_handle | GEM handle of the destination buffer
dst_delta | offset into the destination GEM bo, in bytes
dst_stride | stride (in bytes) of the destination buffer
dst_tiling | tiling mode of the destination buffer
dst_x | X coordinate of destination
dst_y | Y coordinate of destination
dst_size | size of the dst bo required for allocator and softpin
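A hedged usage sketch, blitting a 256x256, 32bpp linear region between two GEM handles (fd, ahnd, the handles and the bo sizes are assumed to exist; I915_TILING_NONE comes from the i915 uapi headers):

/* Source and destination are linear, so the stride is width * 4 bytes. */
igt_blitter_src_copy(fd, ahnd, 0 /* default context */,
		     src_handle, 0, 256 * 4, I915_TILING_NONE,
		     0, 0, src_size,
		     256, 256, 32,
		     dst_handle, 0, 256 * 4, I915_TILING_NONE,
		     0, 0, dst_size);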
void igt_blitter_fast_copy (struct intel_batchbuffer *batch, const struct igt_buf *src, unsigned src_delta, unsigned src_x, unsigned src_y, unsigned width, unsigned height, int bpp, const struct igt_buf *dst, unsigned dst_delta, unsigned dst_x, unsigned dst_y);

Copy src into dst using the gen9 fast copy blitter command.

The source and destination surfaces cannot overlap.
batch | batchbuffer object
src | source i-g-t buffer object
src_delta | offset into the source i-g-t bo
src_x | source pixel x-coordinate
src_y | source pixel y-coordinate
width | width of the copied rectangle
height | height of the copied rectangle
dst | destination i-g-t buffer object
dst_delta | offset into the destination i-g-t bo
dst_x | destination pixel x-coordinate
dst_y | destination pixel y-coordinate
void igt_blitter_fast_copy__raw (int fd, uint64_t ahnd, uint32_t ctx, uint32_t src_handle, unsigned int src_delta, unsigned int src_stride, unsigned int src_tiling, unsigned int src_x, unsigned src_y, uint64_t src_size, unsigned int width, unsigned int height, int bpp, uint32_t dst_handle, unsigned int dst_delta, unsigned int dst_stride, unsigned int dst_tiling, unsigned int dst_x, unsigned dst_y, uint64_t dst_size);

Like igt_blitter_fast_copy(), but talking to the kernel directly.
fd | file descriptor of the i915 driver
ahnd | handle to an allocator
ctx | context within which to execute the copy blit
src_handle | GEM handle of the source buffer
src_delta | offset into the source GEM bo, in bytes
src_stride | stride (in bytes) of the source buffer
src_tiling | tiling mode of the source buffer
src_x | X coordinate of the source region to copy
src_y | Y coordinate of the source region to copy
src_size | size of the src bo required for allocator and softpin
width | width of the region to copy
height | height of the region to copy
bpp | source and destination bits per pixel
dst_handle | GEM handle of the destination buffer
dst_delta | offset into the destination GEM bo, in bytes
dst_stride | stride (in bytes) of the destination buffer
dst_tiling | tiling mode of the destination buffer
dst_x | X coordinate of destination
dst_y | Y coordinate of destination
dst_size | size of the dst bo required for allocator and softpin
void (*igt_render_copyfunc_t) (struct intel_bb *ibb, struct intel_buf *src, uint32_t src_x, uint32_t src_y, uint32_t width, uint32_t height, struct intel_buf *dst, uint32_t dst_x, uint32_t dst_y);

This is the type of the per-platform render copy functions. The platform-specific implementation can be obtained by calling igt_get_render_copyfunc().

A render copy function will emit a batchbuffer to the kernel which executes the specified blit copy operation using the render engine. ctx is optional and can be 0.
ibb | batchbuffer
ctx | context to use
src | intel_buf source object
src_x | source pixel x-coordinate
src_y | source pixel y-coordinate
width | width of the copied rectangle
height | height of the copied rectangle
dst | intel_buf destination object
dst_x | destination pixel x-coordinate
dst_y | destination pixel y-coordinate
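A minimal sketch of the lookup-and-call pattern (assuming an open i915 fd, an intel_bb and two intel_bufs; intel_get_drm_devid() and igt_require() come from the i-g-t core):

igt_render_copyfunc_t render_copy =
	igt_get_render_copyfunc(intel_get_drm_devid(i915));

/* Skip the test if there is no implementation for this platform. */
igt_require(render_copy);

/* Copy a 256x256 region from the origin of src to the origin of dst. */
render_copy(ibb, src, 0, 0, 256, 256, dst, 0, 0);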
void (*igt_vebox_copyfunc_t) (struct intel_bb *ibb, struct intel_buf *src, unsigned int width, unsigned int height, struct intel_buf *dst);

This is the type of the per-platform vebox copy functions. The platform-specific implementation can be obtained by calling igt_get_vebox_copyfunc().
A vebox copy function will emit a batchbuffer to the kernel which executes the specified blit copy operation using the vebox engine.
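The lookup-and-call pattern mirrors the render copy case (a sketch, under the same assumptions):

igt_vebox_copyfunc_t vebox_copy =
	igt_get_vebox_copyfunc(intel_get_drm_devid(i915));

igt_require(vebox_copy);
vebox_copy(ibb, src, width, height, dst);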
void (*igt_render_clearfunc_t) (struct intel_bb *ibb, struct intel_buf *dst, unsigned int dst_x, unsigned int dst_y, unsigned int width, unsigned int height, const float cc_color[4]);
void (*igt_fillfunc_t) (int i915, struct intel_buf *buf, unsigned x, unsigned y, unsigned width, unsigned height, uint8_t color);

This is the type of the per-platform fill functions using the media or gpgpu pipeline. The platform-specific implementation can be obtained by calling igt_get_media_fillfunc() or igt_get_gpgpu_fillfunc().
A fill function will emit a batchbuffer to the kernel which executes the specified blit fill operation using the media/gpgpu engine.
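A sketch of using a fill function (the media variant is obtained the same way via igt_get_media_fillfunc(); i915 and buf are assumed to exist):

igt_fillfunc_t fill = igt_get_gpgpu_fillfunc(intel_get_drm_devid(i915));

igt_require(fill);

/* Fill a 64x64 region at the buffer origin with the byte 0xaa. */
fill(i915, buf, 0, 0, 64, 64, 0xaa);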
void (*igt_vme_func_t) (int i915, uint32_t ctx, struct intel_buf *src, unsigned int width, unsigned int height, struct intel_buf *dst);

void (*igt_media_spinfunc_t) (int i915, struct intel_buf *buf, uint32_t spins);

This is the type of the per-platform media spin functions. The platform-specific implementation can be obtained by calling igt_get_media_spinfunc().
The media spin function emits a batchbuffer for the render engine with the media pipeline selected. The workload consists of a single thread which spins in a tight loop the requested number of times. Each spin increments a counter whose final 32-bit value is written to the destination buffer on completion. This utility provides a simple way to keep the render engine busy for a set time for various tests.
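A sketch of the spin pattern (i915 and buf are assumed to exist):

igt_media_spinfunc_t spin =
	igt_get_media_spinfunc(intel_get_drm_devid(i915));

igt_require(spin);

/* Keep the render engine busy for 1000 loop iterations; the final
 * counter value lands in buf on completion. */
spin(i915, buf, 1000);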
struct intel_bb *intel_bb_create_full (int i915, uint32_t ctx, uint32_t size, uint64_t start, uint64_t end, uint8_t allocator_type, enum allocator_strategy strategy);

Creates a bb with the context passed in ctx, the size in size and the allocator type in allocator_type. Relocations are set to false because the IGT allocator is used in that case. The VM range is passed to the allocator (start and end), as is the allocation strategy (a suggestion to the allocator about address allocation preferences).
struct intel_bb *intel_bb_create_with_allocator (int i915, uint32_t ctx, uint32_t size, uint8_t allocator_type);

Creates a bb with the context passed in ctx, the size in size and the allocator type in allocator_type. Relocations are set to false because the IGT allocator is used in that case.
struct intel_bb *intel_bb_create (int i915, uint32_t size);

Creates a bb with the default context.

Returns a pointer to the intel_bb; asserts on failure.

Notes:

An intel_bb must not be created in an igt_fixture. The reason is that an intel_bb "opens" a connection to the allocator, and when the test completes it can leave the allocator in an unknown state (mostly for failed tests). As igt_core is armed to reset the allocator infrastructure, the connection to it held inside the intel_bb is no longer valid; trying to use it leads to catastrophic errors.
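A minimal lifecycle sketch honouring the note above, i.e. creating the bb inside the test body rather than in an igt_fixture (an open i915 fd is assumed, and the intel_bb_out(), intel_bb_ptr_align() and intel_bb_offset() helpers plus MI_BATCH_BUFFER_END are assumed to come from the i-g-t headers):

igt_subtest("noop") {
	struct intel_bb *ibb = intel_bb_create(i915, 4096);

	/* Emit a no-op batch: just the end-of-batch marker, padded
	 * to the required QWORD alignment. */
	intel_bb_out(ibb, MI_BATCH_BUFFER_END);
	intel_bb_ptr_align(ibb, 8);

	intel_bb_exec(ibb, intel_bb_offset(ibb),
		      I915_EXEC_DEFAULT | I915_EXEC_NO_RELOC, true);
	intel_bb_destroy(ibb);
}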
struct intel_bb *intel_bb_create_with_context (int i915, uint32_t ctx, uint32_t size);

Creates a bb with the context passed in ctx.
struct intel_bb *intel_bb_create_with_relocs (int i915, uint32_t size);

Creates a bb with passing addresses disabled. This will lead to relocations when objects are not previously pinned.
struct intel_bb *intel_bb_create_with_relocs_and_context (int i915, uint32_t ctx, uint32_t size);

Creates a bb with the context passed in ctx and with passing addresses disabled. This will lead to relocations when objects are not previously pinned.
struct intel_bb *intel_bb_create_no_relocs (int i915, uint32_t size);

Creates a bb with relocations disabled. This enables passing addresses and requires pinning objects.
void intel_bb_destroy (struct intel_bb *ibb);

Frees all relocations / objects allocated during filling of the batch.
void intel_bb_reinit_allocator (void);

Reinitializes the allocator and gets offsets in tracked intel_batchbuffers.

void intel_bb_track (bool do_tracking);

Turns tracking for intel_batchbuffers on (true) or off (false).
void intel_bb_set_debug (struct intel_bb *ibb, bool debug);

Sets debug to true / false. Execbuf is then called synchronously and the object/reloc arrays are printed after execution.

void intel_bb_set_dump_base64 (struct intel_bb *ibb, bool dump);

Dumps the bb as a base64 string before the execbuf call.

void intel_bb_set_pxp (struct intel_bb *ibb, bool new_state, uint32_t apptype, uint32_t appid);
struct drm_i915_gem_exec_object2 *intel_bb_add_object (struct intel_bb *ibb, uint32_t handle, uint64_t size, uint64_t offset, uint64_t alignment, bool write);

Adds or updates an execobj slot in the bb objects array and in the object tree. When an object is a render target it has to be marked with the EXEC_OBJECT_WRITE flag.
bool intel_bb_remove_object (struct intel_bb *ibb, uint32_t handle, uint64_t offset, uint64_t size);

struct drm_i915_gem_exec_object2 *intel_bb_add_intel_buf (struct intel_bb *ibb, struct intel_buf *buf, bool write);

struct drm_i915_gem_exec_object2 *intel_bb_add_intel_buf_with_alignment (struct intel_bb *ibb, struct intel_buf *buf, uint64_t alignment, bool write);

bool intel_bb_remove_intel_buf (struct intel_bb *ibb, struct intel_buf *buf);

struct drm_i915_gem_exec_object2 *intel_bb_find_object (struct intel_bb *ibb, uint32_t handle);

bool intel_bb_object_set_flag (struct intel_bb *ibb, uint32_t handle, uint64_t flag);

bool intel_bb_object_clear_flag (struct intel_bb *ibb, uint32_t handle, uint64_t flag);
uint64_t intel_bb_emit_reloc (struct intel_bb *ibb, uint32_t handle, uint32_t read_domains, uint32_t write_domain, uint64_t delta, uint64_t presumed_offset);

Prepares a relocation (an execobj if required, plus the reloc) and emits the offset into the bb. With I915_EXEC_NO_RELOC, presumed_offset is a hint that the object is already at a valid address, in which case the relocation step can be skipped.

Note: delta is a value added to the address, mostly used when an instruction requires a modify-bit set to apply a change. Which delta is valid depends on the instruction (see the instruction specification).
ibb | pointer to intel_bb
handle | object handle whose address will be taken to patch the bb
read_domains | gem domain bits for the relocation
write_domain | gem domain bit for the relocation
delta | delta value to add to the object address
presumed_offset | address of the object in the address space. If -1 is passed then the final offset of the object will be randomized (for a no-reloc bb) or 0 (for a reloc bb, in which case reloc.presumed_offset will be -1). If the address is known it should be passed in
write | whether the handle is a render target
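As a hedged sketch (gen8+ style addressing assumed), emitting an MI_STORE_DWORD_IMM whose address field is patched to point at a target intel_buf; MI_STORE_DWORD_IMM and the two-DWORD address layout are assumptions based on the i-g-t register headers:

intel_bb_out(ibb, MI_STORE_DWORD_IMM);

/* Emits the (presumed) address of target into the bb and records the
 * relocation so the kernel can fix it up if the object moves. */
intel_bb_emit_reloc(ibb, target->handle,
		    I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		    0, target->addr.offset);

intel_bb_out(ibb, 0xc0ffee);	/* immediate value to store */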
uint64_t intel_bb_emit_reloc_fenced (struct intel_bb *ibb, uint32_t handle, uint32_t read_domains, uint32_t write_domain, uint64_t delta, uint64_t presumed_offset);

uint64_t intel_bb_offset_reloc (struct intel_bb *ibb, uint32_t handle, uint32_t read_domains, uint32_t write_domain, uint32_t offset, uint64_t presumed_offset);
Prepares a relocation (an execobj if required, plus the reloc). It is used for editing the batchbuffer by modifying structures: when preparing a batchbuffer it is more descriptive to edit a structure than to emit raw dwords, but that requires some fields to point to a relocation. For that case offset is passed by the user and points to the offset in the bb where the relocation will be applied.
ibb | pointer to intel_bb
handle | object handle whose address will be taken to patch the bb
read_domains | gem domain bits for the relocation
write_domain | gem domain bit for the relocation
offset | offset within bb to be patched
presumed_offset | address of the object in the address space. If -1 is passed then the final offset of the object will be randomized (for a no-reloc bb) or 0 (for a reloc bb, in which case reloc.presumed_offset will be -1). If the address is known it should be passed in
uint64_t intel_bb_offset_reloc_with_delta (struct intel_bb *ibb, uint32_t handle, uint32_t read_domains, uint32_t write_domain, uint32_t delta, uint32_t offset, uint64_t presumed_offset);

uint64_t intel_bb_offset_reloc_to_object (struct intel_bb *ibb, uint32_t handle, uint32_t to_handle, uint32_t read_domains, uint32_t write_domain, uint32_t delta, uint32_t offset, uint64_t presumed_offset);

void intel_bb_exec (struct intel_bb *ibb, uint32_t end_offset, uint64_t flags, bool sync);
Does an execbuf on the context selected during bb creation. Asserts on failure.
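A typical submission sketch (assuming the intel_bb_emit_bbe() helper, which closes the batch with MI_BATCH_BUFFER_END, and the execbuf flags from the i915 uapi):

/* Close the batch, then execute it on the render engine and wait for
 * completion (sync == true). */
intel_bb_emit_bbe(ibb);
intel_bb_exec(ibb, intel_bb_offset(ibb),
	      I915_EXEC_RENDER | I915_EXEC_NO_RELOC, true);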
uint64_t intel_bb_get_object_offset (struct intel_bb *ibb, uint32_t handle);

bool intel_bb_object_offset_to_buf (struct intel_bb *ibb, struct intel_buf *buf);

uint32_t intel_bb_copy_data (struct intel_bb *ibb, const void *data, unsigned int bytes, uint32_t align);
void intel_bb_emit_blt_copy (struct intel_bb *ibb, struct intel_buf *src, int src_x1, int src_y1, int src_pitch, struct intel_buf *dst, int dst_x1, int dst_y1, int dst_pitch, int width, int height, int bpp);

void intel_bb_blt_copy (struct intel_bb *ibb, struct intel_buf *src, int src_x1, int src_y1, int src_pitch, struct intel_buf *dst, int dst_x1, int dst_y1, int dst_pitch, int width, int height, int bpp);
void intel_bb_copy_intel_buf (struct intel_bb *ibb, struct intel_buf *dst, struct intel_buf *src, long int size);

Emits a copy operation using blitter commands into the supplied batch. A total of size bytes from the start of src is copied over to dst. Note that size must be page-aligned.
void (*igt_huc_copyfunc_t) (int fd, uint64_t ahnd, struct drm_i915_gem_exec_object2 *obj, uint64_t *objsize);

This is the type of the per-platform huc copy functions.
The huc copy function emits a batchbuffer to the VDBOX engine to invoke the HuC Copy kernel to copy 4K bytes from the source buffer to the destination buffer.
struct intel_batchbuffer {
	drm_intel_bufmgr *bufmgr;
	uint32_t devid;
	unsigned int gen;
	drm_intel_context *ctx;
	drm_intel_bo *bo;

	uint8_t buffer[BATCH_SZ];
	uint8_t *ptr, *end;
};
struct igt_buf {
	drm_intel_bo *bo;
	uint32_t tiling;
	enum i915_compression compression;
	uint32_t bpp;
	uint32_t yuv_semiplanar_bpp;
	uint32_t *data;
	bool format_is_yuv:1;
	bool format_is_yuv_semiplanar:1;
	struct {
		uint32_t offset;
		uint32_t stride;
		uint32_t size;
	} surface[2];
	struct {
		uint32_t offset;
		uint32_t stride;
	} ccs[2];
	struct {
		uint32_t offset;
	} cc;
};
This is an i-g-t buffer object wrapper structure which augments the baseline libdrm buffer object with suitable data needed by the render/vebox copy and the fill functions.
bo | underlying libdrm buffer object
tiling | tiling mode bits
compression | memory compression mode (enum i915_compression)
bpp | bits per pixel, 8, 16 or 32
data | pointer to the memory mapping of the buffer
struct intel_bb {
	struct igt_list_head link;

	uint64_t allocator_handle;
	uint64_t allocator_start, allocator_end;
	uint8_t allocator_type;
	enum allocator_strategy allocator_strategy;

	int i915;
	unsigned int gen;
	bool debug;
	bool dump_base64;
	bool enforce_relocs;
	uint32_t devid;
	uint32_t handle;
	uint32_t size;
	uint32_t *batch;
	uint32_t *ptr;
	uint64_t alignment;
	int fence;

	uint64_t gtt_size;
	bool supports_48b_address;
	bool uses_full_ppgtt;
	bool allows_obj_alignment;

	struct igt_pxp pxp;

	uint32_t ctx;
	uint32_t vm_id;

	/* Cache */
	void *root;

	/* Current objects for execbuf */
	void *current;

	/* Objects for current execbuf */
	struct drm_i915_gem_exec_object2 **objects;
	uint32_t num_objects;
	uint32_t allocated_objects;
	uint64_t batch_offset;

	struct drm_i915_gem_relocation_entry *relocs;
	uint32_t num_relocs;
	uint32_t allocated_relocs;

	/* Tracked intel_bufs */
	struct igt_list_head intel_bufs;

	/*
	 * BO recreate in reset path only when refcount == 0.
	 * Currently we don't need to use atomics because intel_bb
	 * is not thread-safe.
	 */
	int32_t refcount;
};
struct intel_buf {
	struct buf_ops *bops;

	bool is_owner;
	uint32_t handle;
	uint64_t size;
	uint32_t tiling;
	uint32_t bpp;
	uint32_t compression;
	uint32_t swizzle_mode;
	uint32_t yuv_semiplanar_bpp;
	bool format_is_yuv;
	bool format_is_yuv_semiplanar;
	struct {
		uint32_t offset;
		uint32_t stride;
		uint64_t size;
	} surface[2];
	struct {
		uint32_t offset;
		uint32_t stride;
	} ccs[2];
	struct {
		uint32_t offset;
	} cc;
	struct {
		uint64_t offset;
		uint32_t ctx;
	} addr;

	uint64_t bo_size;

	/* Tracking */
	struct intel_bb *ibb;
	struct igt_list_head link;

	/* CPU mapping */
	uint32_t *ptr;
	bool cpu_write;

	/* Content Protection */
	bool is_protected;

	/* For debugging purposes */
	char name[INTEL_BUF_NAME_MAXSIZE + 1];
};