Am 19.08.2016 um 15:50 schrieb Marek Olšák: > From: Marek Olšák <marek.olsak at amd.com> > > This reverts commit 2ce9dde0d47f2f94ab25c73a30596a7328bcdf1f. > > See the comment in the code. Basically, don't do cleanups in this header. > > Signed-off-by: Marek Olšák <marek.olsak at amd.com>
I completely agree with you that this was a bad move, but I fear that we will run into opposition over this. Adding Mikko Rapeli, who made the reverted patch, so that he can comment. Regards, Christian. > --- > include/uapi/drm/amdgpu_drm.h | 295 > +++++++++++++++++++++--------------------- > 1 file changed, 150 insertions(+), 145 deletions(-) > > diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h > index 462246a..b39e07c 100644 > --- a/include/uapi/drm/amdgpu_drm.h > +++ b/include/uapi/drm/amdgpu_drm.h > @@ -29,6 +29,11 @@ > * Keith Whitwell <keith at tungstengraphics.com> > */ > > +/* IT IS NOT ALLOWED TO CHANGE THIS HEADER WITHOUT DOING THE SAME IN LIBDRM > !!! > + * THIS IS NOT A UAPI HEADER. IT IS ONLY A MIRROR OF ITS COUNTERPART IN > LIBDRM. > + * USERSPACE SHOULD USE THE HEADERS FROM LIBDRM. NOT THIS ONE. > + */ > + > #ifndef __AMDGPU_DRM_H__ > #define __AMDGPU_DRM_H__ > > @@ -80,19 +85,19 @@ extern "C" { > > struct drm_amdgpu_gem_create_in { > /** the requested memory size */ > - __u64 bo_size; > + uint64_t bo_size; > /** physical start_addr alignment in bytes for some HW requirements */ > - __u64 alignment; > + uint64_t alignment; > /** the requested memory domains */ > - __u64 domains; > + uint64_t domains; > /** allocation flags */ > - __u64 domain_flags; > + uint64_t domain_flags; > }; > > struct drm_amdgpu_gem_create_out { > /** returned GEM object handle */ > - __u32 handle; > - __u32 _pad; > + uint32_t handle; > + uint32_t _pad; > }; > > union drm_amdgpu_gem_create { > @@ -109,28 +114,28 @@ union drm_amdgpu_gem_create { > > struct drm_amdgpu_bo_list_in { > /** Type of operation */ > - __u32 operation; > + uint32_t operation; > /** Handle of list or 0 if we want to create one */ > - __u32 list_handle; > + uint32_t list_handle; > /** Number of BOs in list */ > - __u32 bo_number; > + uint32_t bo_number; > /** Size of each element describing BO */ > - __u32 bo_info_size; > + uint32_t bo_info_size; > /** Pointer to array describing BOs */ > - 
__u64 bo_info_ptr; > + uint64_t bo_info_ptr; > }; > > struct drm_amdgpu_bo_list_entry { > /** Handle of BO */ > - __u32 bo_handle; > + uint32_t bo_handle; > /** New (if specified) BO priority to be used during migration */ > - __u32 bo_priority; > + uint32_t bo_priority; > }; > > struct drm_amdgpu_bo_list_out { > /** Handle of resource list */ > - __u32 list_handle; > - __u32 _pad; > + uint32_t list_handle; > + uint32_t _pad; > }; > > union drm_amdgpu_bo_list { > @@ -154,26 +159,26 @@ union drm_amdgpu_bo_list { > > struct drm_amdgpu_ctx_in { > /** AMDGPU_CTX_OP_* */ > - __u32 op; > + uint32_t op; > /** For future use, no flags defined so far */ > - __u32 flags; > - __u32 ctx_id; > - __u32 _pad; > + uint32_t flags; > + uint32_t ctx_id; > + uint32_t _pad; > }; > > union drm_amdgpu_ctx_out { > struct { > - __u32 ctx_id; > - __u32 _pad; > + uint32_t ctx_id; > + uint32_t _pad; > } alloc; > > struct { > /** For future use, no flags defined so far */ > - __u64 flags; > + uint64_t flags; > /** Number of resets caused by this context so far. */ > - __u32 hangs; > + uint32_t hangs; > /** Reset status since the last call of the ioctl. 
*/ > - __u32 reset_status; > + uint32_t reset_status; > } state; > }; > > @@ -193,12 +198,12 @@ union drm_amdgpu_ctx { > #define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) > > struct drm_amdgpu_gem_userptr { > - __u64 addr; > - __u64 size; > + uint64_t addr; > + uint64_t size; > /* AMDGPU_GEM_USERPTR_* */ > - __u32 flags; > + uint32_t flags; > /* Resulting GEM handle */ > - __u32 handle; > + uint32_t handle; > }; > > /* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */ > @@ -230,28 +235,28 @@ struct drm_amdgpu_gem_userptr { > /** The same structure is shared for input/output */ > struct drm_amdgpu_gem_metadata { > /** GEM Object handle */ > - __u32 handle; > + uint32_t handle; > /** Do we want get or set metadata */ > - __u32 op; > + uint32_t op; > struct { > /** For future use, no flags defined so far */ > - __u64 flags; > + uint64_t flags; > /** family specific tiling info */ > - __u64 tiling_info; > - __u32 data_size_bytes; > - __u32 data[64]; > + uint64_t tiling_info; > + uint32_t data_size_bytes; > + uint32_t data[64]; > } data; > }; > > struct drm_amdgpu_gem_mmap_in { > /** the GEM object handle */ > - __u32 handle; > - __u32 _pad; > + uint32_t handle; > + uint32_t _pad; > }; > > struct drm_amdgpu_gem_mmap_out { > /** mmap offset from the vma offset manager */ > - __u64 addr_ptr; > + uint64_t addr_ptr; > }; > > union drm_amdgpu_gem_mmap { > @@ -261,18 +266,18 @@ union drm_amdgpu_gem_mmap { > > struct drm_amdgpu_gem_wait_idle_in { > /** GEM object handle */ > - __u32 handle; > + uint32_t handle; > /** For future use, no flags defined so far */ > - __u32 flags; > + uint32_t flags; > /** Absolute timeout to wait */ > - __u64 timeout; > + uint64_t timeout; > }; > > struct drm_amdgpu_gem_wait_idle_out { > /** BO status: 0 - BO is idle, 1 - BO is busy */ > - __u32 status; > + uint32_t status; > /** Returned current memory domain */ > - __u32 domain; > + uint32_t domain; > }; > > union drm_amdgpu_gem_wait_idle { > @@ -282,18 +287,18 @@ union 
drm_amdgpu_gem_wait_idle { > > struct drm_amdgpu_wait_cs_in { > /** Command submission handle */ > - __u64 handle; > + uint64_t handle; > /** Absolute timeout to wait */ > - __u64 timeout; > - __u32 ip_type; > - __u32 ip_instance; > - __u32 ring; > - __u32 ctx_id; > + uint64_t timeout; > + uint32_t ip_type; > + uint32_t ip_instance; > + uint32_t ring; > + uint32_t ctx_id; > }; > > struct drm_amdgpu_wait_cs_out { > /** CS status: 0 - CS completed, 1 - CS still busy */ > - __u64 status; > + uint64_t status; > }; > > union drm_amdgpu_wait_cs { > @@ -307,11 +312,11 @@ union drm_amdgpu_wait_cs { > /* Sets or returns a value associated with a buffer. */ > struct drm_amdgpu_gem_op { > /** GEM object handle */ > - __u32 handle; > + uint32_t handle; > /** AMDGPU_GEM_OP_* */ > - __u32 op; > + uint32_t op; > /** Input or return value */ > - __u64 value; > + uint64_t value; > }; > > #define AMDGPU_VA_OP_MAP 1 > @@ -330,18 +335,18 @@ struct drm_amdgpu_gem_op { > > struct drm_amdgpu_gem_va { > /** GEM object handle */ > - __u32 handle; > - __u32 _pad; > + uint32_t handle; > + uint32_t _pad; > /** AMDGPU_VA_OP_* */ > - __u32 operation; > + uint32_t operation; > /** AMDGPU_VM_PAGE_* */ > - __u32 flags; > + uint32_t flags; > /** va address to assign . Must be correctly aligned.*/ > - __u64 va_address; > + uint64_t va_address; > /** Specify offset inside of BO to assign. Must be correctly aligned.*/ > - __u64 offset_in_bo; > + uint64_t offset_in_bo; > /** Specify mapping size. Must be correctly aligned. 
*/ > - __u64 map_size; > + uint64_t map_size; > }; > > #define AMDGPU_HW_IP_GFX 0 > @@ -358,24 +363,24 @@ struct drm_amdgpu_gem_va { > #define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 > > struct drm_amdgpu_cs_chunk { > - __u32 chunk_id; > - __u32 length_dw; > - __u64 chunk_data; > + uint32_t chunk_id; > + uint32_t length_dw; > + uint64_t chunk_data; > }; > > struct drm_amdgpu_cs_in { > /** Rendering context id */ > - __u32 ctx_id; > + uint32_t ctx_id; > /** Handle of resource list associated with CS */ > - __u32 bo_list_handle; > - __u32 num_chunks; > - __u32 _pad; > - /** this points to __u64 * which point to cs chunks */ > - __u64 chunks; > + uint32_t bo_list_handle; > + uint32_t num_chunks; > + uint32_t _pad; > + /** this points to uint64_t * which point to cs chunks */ > + uint64_t chunks; > }; > > struct drm_amdgpu_cs_out { > - __u64 handle; > + uint64_t handle; > }; > > union drm_amdgpu_cs { > @@ -392,32 +397,32 @@ union drm_amdgpu_cs { > #define AMDGPU_IB_FLAG_PREAMBLE (1<<1) > > struct drm_amdgpu_cs_chunk_ib { > - __u32 _pad; > + uint32_t _pad; > /** AMDGPU_IB_FLAG_* */ > - __u32 flags; > + uint32_t flags; > /** Virtual address to begin IB execution */ > - __u64 va_start; > + uint64_t va_start; > /** Size of submission */ > - __u32 ib_bytes; > + uint32_t ib_bytes; > /** HW IP to submit to */ > - __u32 ip_type; > + uint32_t ip_type; > /** HW IP index of the same type to submit to */ > - __u32 ip_instance; > + uint32_t ip_instance; > /** Ring index to submit to */ > - __u32 ring; > + uint32_t ring; > }; > > struct drm_amdgpu_cs_chunk_dep { > - __u32 ip_type; > - __u32 ip_instance; > - __u32 ring; > - __u32 ctx_id; > - __u64 handle; > + uint32_t ip_type; > + uint32_t ip_instance; > + uint32_t ring; > + uint32_t ctx_id; > + uint64_t handle; > }; > > struct drm_amdgpu_cs_chunk_fence { > - __u32 handle; > - __u32 offset; > + uint32_t handle; > + uint32_t offset; > }; > > struct drm_amdgpu_cs_chunk_data { > @@ -489,53 +494,53 @@ struct drm_amdgpu_cs_chunk_data { > > 
struct drm_amdgpu_query_fw { > /** AMDGPU_INFO_FW_* */ > - __u32 fw_type; > + uint32_t fw_type; > /** > * Index of the IP if there are more IPs of > * the same type. > */ > - __u32 ip_instance; > + uint32_t ip_instance; > /** > * Index of the engine. Whether this is used depends > * on the firmware type. (e.g. MEC, SDMA) > */ > - __u32 index; > - __u32 _pad; > + uint32_t index; > + uint32_t _pad; > }; > > /* Input structure for the INFO ioctl */ > struct drm_amdgpu_info { > /* Where the return value will be stored */ > - __u64 return_pointer; > + uint64_t return_pointer; > /* The size of the return value. Just like "size" in "snprintf", > * it limits how many bytes the kernel can write. */ > - __u32 return_size; > + uint32_t return_size; > /* The query request id. */ > - __u32 query; > + uint32_t query; > > union { > struct { > - __u32 id; > - __u32 _pad; > + uint32_t id; > + uint32_t _pad; > } mode_crtc; > > struct { > /** AMDGPU_HW_IP_* */ > - __u32 type; > + uint32_t type; > /** > * Index of the IP if there are more IPs of the same > * type. Ignored by AMDGPU_INFO_HW_IP_COUNT. 
> */ > - __u32 ip_instance; > + uint32_t ip_instance; > } query_hw_ip; > > struct { > - __u32 dword_offset; > + uint32_t dword_offset; > /** number of registers to read */ > - __u32 count; > - __u32 instance; > + uint32_t count; > + uint32_t instance; > /** For future use, no flags defined so far */ > - __u32 flags; > + uint32_t flags; > } read_mmr_reg; > > struct drm_amdgpu_query_fw query_fw; > @@ -544,31 +549,31 @@ struct drm_amdgpu_info { > > struct drm_amdgpu_info_gds { > /** GDS GFX partition size */ > - __u32 gds_gfx_partition_size; > + uint32_t gds_gfx_partition_size; > /** GDS compute partition size */ > - __u32 compute_partition_size; > + uint32_t compute_partition_size; > /** total GDS memory size */ > - __u32 gds_total_size; > + uint32_t gds_total_size; > /** GWS size per GFX partition */ > - __u32 gws_per_gfx_partition; > + uint32_t gws_per_gfx_partition; > /** GSW size per compute partition */ > - __u32 gws_per_compute_partition; > + uint32_t gws_per_compute_partition; > /** OA size per GFX partition */ > - __u32 oa_per_gfx_partition; > + uint32_t oa_per_gfx_partition; > /** OA size per compute partition */ > - __u32 oa_per_compute_partition; > - __u32 _pad; > + uint32_t oa_per_compute_partition; > + uint32_t _pad; > }; > > struct drm_amdgpu_info_vram_gtt { > - __u64 vram_size; > - __u64 vram_cpu_accessible_size; > - __u64 gtt_size; > + uint64_t vram_size; > + uint64_t vram_cpu_accessible_size; > + uint64_t gtt_size; > }; > > struct drm_amdgpu_info_firmware { > - __u32 ver; > - __u32 feature; > + uint32_t ver; > + uint32_t feature; > }; > > #define AMDGPU_VRAM_TYPE_UNKNOWN 0 > @@ -582,61 +587,61 @@ struct drm_amdgpu_info_firmware { > > struct drm_amdgpu_info_device { > /** PCI Device ID */ > - __u32 device_id; > + uint32_t device_id; > /** Internal chip revision: A0, A1, etc.) 
*/ > - __u32 chip_rev; > - __u32 external_rev; > + uint32_t chip_rev; > + uint32_t external_rev; > /** Revision id in PCI Config space */ > - __u32 pci_rev; > - __u32 family; > - __u32 num_shader_engines; > - __u32 num_shader_arrays_per_engine; > + uint32_t pci_rev; > + uint32_t family; > + uint32_t num_shader_engines; > + uint32_t num_shader_arrays_per_engine; > /* in KHz */ > - __u32 gpu_counter_freq; > - __u64 max_engine_clock; > - __u64 max_memory_clock; > + uint32_t gpu_counter_freq; > + uint64_t max_engine_clock; > + uint64_t max_memory_clock; > /* cu information */ > - __u32 cu_active_number; > - __u32 cu_ao_mask; > - __u32 cu_bitmap[4][4]; > + uint32_t cu_active_number; > + uint32_t cu_ao_mask; > + uint32_t cu_bitmap[4][4]; > /** Render backend pipe mask. One render backend is CB+DB. */ > - __u32 enabled_rb_pipes_mask; > - __u32 num_rb_pipes; > - __u32 num_hw_gfx_contexts; > - __u32 _pad; > - __u64 ids_flags; > + uint32_t enabled_rb_pipes_mask; > + uint32_t num_rb_pipes; > + uint32_t num_hw_gfx_contexts; > + uint32_t _pad; > + uint64_t ids_flags; > /** Starting virtual address for UMDs. */ > - __u64 virtual_address_offset; > + uint64_t virtual_address_offset; > /** The maximum virtual address */ > - __u64 virtual_address_max; > + uint64_t virtual_address_max; > /** Required alignment of virtual addresses. 
*/ > - __u32 virtual_address_alignment; > + uint32_t virtual_address_alignment; > /** Page table entry - fragment size */ > - __u32 pte_fragment_size; > - __u32 gart_page_size; > + uint32_t pte_fragment_size; > + uint32_t gart_page_size; > /** constant engine ram size*/ > - __u32 ce_ram_size; > + uint32_t ce_ram_size; > /** video memory type info*/ > - __u32 vram_type; > + uint32_t vram_type; > /** video memory bit width*/ > - __u32 vram_bit_width; > + uint32_t vram_bit_width; > /* vce harvesting instance */ > - __u32 vce_harvest_config; > + uint32_t vce_harvest_config; > }; > > struct drm_amdgpu_info_hw_ip { > /** Version of h/w IP */ > - __u32 hw_ip_version_major; > - __u32 hw_ip_version_minor; > + uint32_t hw_ip_version_major; > + uint32_t hw_ip_version_minor; > /** Capabilities */ > - __u64 capabilities_flags; > + uint64_t capabilities_flags; > /** command buffer address start alignment*/ > - __u32 ib_start_alignment; > + uint32_t ib_start_alignment; > /** command buffer size alignment*/ > - __u32 ib_size_alignment; > + uint32_t ib_size_alignment; > /** Bitmask of available rings. Bit 0 means ring 0, etc. */ > - __u32 available_rings; > - __u32 _pad; > + uint32_t available_rings; > + uint32_t _pad; > }; > > /*