On Thu, 2017-04-27 at 20:36 -0700, Jason Ekstrand wrote:
> On Thu, Apr 27, 2017 at 9:23 AM, Juan A. Suarez Romero
> <jasua...@igalia.com> wrote:
> > On Wed, 2017-04-26 at 07:35 -0700, Jason Ekstrand wrote:
> > > We should only use size_t when referring to sizes of bits of CPU memory.
> > > Anything on the GPU or just a regular array length should be a type that
> > > has the same size on both 32 and 64-bit architectures.  For state
> > > objects, we use a uint32_t because we'll never allocate a piece of
> > > driver-internal GPU state larger than 2GB (more like 16KB).
> > > ---
> > >  src/intel/vulkan/anv_allocator.c | 12 ++++++------
> > >  src/intel/vulkan/anv_gem.c       |  2 +-
> > >  src/intel/vulkan/anv_gem_stubs.c |  2 +-
> > >  src/intel/vulkan/anv_private.h   | 12 ++++++------
> > >  4 files changed, 14 insertions(+), 14 deletions(-)
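To make the portability point concrete: size_t follows the build's pointer
width, while the fixed-width types are identical everywhere. A standalone
sketch (nothing here is driver code):

   #include <stdint.h>
   #include <stdio.h>

   int main(void)
   {
      printf("sizeof(size_t)   = %zu\n", sizeof(size_t));   /* 4 on i386, 8 on x86_64 */
      printf("sizeof(uint32_t) = %zu\n", sizeof(uint32_t)); /* always 4 */
      printf("sizeof(uint64_t) = %zu\n", sizeof(uint64_t)); /* always 8 */
      return 0;
   }

So a field describing GPU state or an array length keeps the same layout on
32- and 64-bit builds only if it uses uint32_t/uint64_t.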
> > > diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
> > > index 1c94d1b..2dad400 100644
> > > --- a/src/intel/vulkan/anv_allocator.c
> > > +++ b/src/intel/vulkan/anv_allocator.c
> > > @@ -342,7 +342,7 @@ anv_block_pool_finish(struct anv_block_pool *pool)
> > >  static uint32_t
> > >  anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
> > >  {
> > > -   size_t size;
> > > +   uint32_t size;
> > >     void *map;
> > >     uint32_t gem_handle;
> > >     struct anv_mmap_cleanup *cleanup;
> > > @@ -367,7 +367,7 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
> > >
> > >     assert(state == &pool->state || back_used > 0);
> > >
> > > -   size_t old_size = pool->bo.size;
> > > +   uint32_t old_size = pool->bo.size;
> > >
> > >     if (old_size != 0 &&
> > >         back_used * 2 <= pool->center_bo_offset &&
> >
> > While checking this patch, I've noticed we have the following comment in
> > anv_block_pool_grow():
> >
> >    /* We can't have a block pool bigger than 1GB because we use signed
> >     * 32-bit offsets in the free list and we don't want overflow.  We
> >     * should never need a block pool bigger than 1GB anyway.
> >     */
> >    assert(size <= (1u << 31));
> >
> > Is the comment correct?  Shouldn't it say 2GB?
>
> 1 or 2; I can't remember at the moment.  I thought there was something
> weird with Android that required us to drop it to 1.

If it's 1GB, I think the assertion should be "assert(size <= (1u << 30));".
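For reference, the arithmetic behind the question, as a standalone sketch
(plain C, nothing driver-specific):

   #include <assert.h>
   #include <stdint.h>

   int main(void)
   {
      assert((1u << 30) == 1024u * 1024u * 1024u);  /* 1 GiB */
      assert((1u << 31) == 2048u * 1024u * 1024u);  /* 2 GiB */

      /* A signed 32-bit offset tops out at INT32_MAX bytes (2 GiB - 1),
       * so "size <= (1u << 31)" permits a 2 GB pool while the comment
       * promises 1 GB; "size <= (1u << 30)" would match the comment. */
      assert((uint32_t)INT32_MAX == (1u << 31) - 1);
      return 0;
   }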
> > Other than that,
> >
> > Reviewed-by: Juan A. Suarez Romero <jasua...@igalia.com>
> >
> > > @@ -613,7 +613,7 @@ anv_block_pool_free(struct anv_block_pool *pool, int32_t offset)
> > >
> > >  static void
> > >  anv_fixed_size_state_pool_init(struct anv_fixed_size_state_pool *pool,
> > > -                               size_t state_size)
> > > +                               uint32_t state_size)
> > >  {
> > >     /* At least a cache line and must divide the block size. */
> > >     assert(state_size >= 64 && util_is_power_of_two(state_size));
> > > @@ -672,7 +672,7 @@ anv_state_pool_init(struct anv_state_pool *pool,
> > >  {
> > >     pool->block_pool = block_pool;
> > >     for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
> > > -      size_t size = 1 << (ANV_MIN_STATE_SIZE_LOG2 + i);
> > > +      uint32_t size = 1 << (ANV_MIN_STATE_SIZE_LOG2 + i);
> > >        anv_fixed_size_state_pool_init(&pool->buckets[i], size);
> > >     }
> > >     VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
> > > @@ -686,7 +686,7 @@ anv_state_pool_finish(struct anv_state_pool *pool)
> > >
> > >  static struct anv_state
> > >  anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
> > > -                           size_t size, size_t align)
> > > +                           uint32_t size, uint32_t align)
> > >  {
> > >     unsigned size_log2 = ilog2_round_up(size < align ? align : size);
> > >     assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
> > > @@ -703,7 +703,7 @@ anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
> > >  }
> > >
> > >  struct anv_state
> > > -anv_state_pool_alloc(struct anv_state_pool *pool, size_t size, size_t align)
> > > +anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
> > >  {
> > >     if (size == 0)
> > >        return ANV_STATE_NULL;
> > > diff --git a/src/intel/vulkan/anv_gem.c b/src/intel/vulkan/anv_gem.c
> > > index 185086f..4b6ee58 100644
> > > --- a/src/intel/vulkan/anv_gem.c
> > > +++ b/src/intel/vulkan/anv_gem.c
> > > @@ -48,7 +48,7 @@ anv_ioctl(int fd, unsigned long request, void *arg)
> > >   * Return gem handle, or 0 on failure. Gem handles are never 0.
> > >   */
> > >  uint32_t
> > > -anv_gem_create(struct anv_device *device, size_t size)
> > > +anv_gem_create(struct anv_device *device, uint64_t size)
> > >  {
> > >     struct drm_i915_gem_create gem_create = {
> > >        .size = size,
> > > diff --git a/src/intel/vulkan/anv_gem_stubs.c b/src/intel/vulkan/anv_gem_stubs.c
> > > index a63e96d..8d81eb5 100644
> > > --- a/src/intel/vulkan/anv_gem_stubs.c
> > > +++ b/src/intel/vulkan/anv_gem_stubs.c
> > > @@ -34,7 +34,7 @@ memfd_create(const char *name, unsigned int flags)
> > >  }
> > >
> > >  uint32_t
> > > -anv_gem_create(struct anv_device *device, size_t size)
> > > +anv_gem_create(struct anv_device *device, uint64_t size)
> > >  {
> > >     int fd = memfd_create("fake bo", MFD_CLOEXEC);
> > >     if (fd == -1)
> > > diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
> > > index 216c6eb..c709d39 100644
> > > --- a/src/intel/vulkan/anv_private.h
> > > +++ b/src/intel/vulkan/anv_private.h
> > > @@ -493,7 +493,7 @@ struct anv_state {
> > >  #define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })
> > >
> > >  struct anv_fixed_size_state_pool {
> > > -   size_t state_size;
> > > +   uint32_t state_size;
> > >     union anv_free_list free_list;
> > >     struct anv_block_state block;
> > >  };
> > > @@ -565,7 +565,7 @@ void anv_state_pool_init(struct anv_state_pool *pool,
> > >                          struct anv_block_pool *block_pool);
> > >  void anv_state_pool_finish(struct anv_state_pool *pool);
> > >  struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
> > > -                                      size_t state_size, size_t alignment);
> > > +                                      uint32_t state_size, uint32_t alignment);
> > >  void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
> > >  void anv_state_stream_init(struct anv_state_stream *stream,
> > >                             struct anv_state_pool *state_pool,
> > > @@ -752,7 +752,7 @@ VkResult anv_device_wait(struct anv_device *device, struct anv_bo *bo,
> > >  void* anv_gem_mmap(struct anv_device *device,
> > >                     uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
> > >  void anv_gem_munmap(void *p, uint64_t size);
> > > -uint32_t anv_gem_create(struct anv_device *device, size_t size);
> > > +uint32_t anv_gem_create(struct anv_device *device, uint64_t size);
> > >  void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
> > >  uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
> > >  int anv_gem_busy(struct anv_device *device, uint32_t gem_handle);
> > > @@ -780,8 +780,8 @@ int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
> > >  VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
> > >
> > >  struct anv_reloc_list {
> > > -   size_t num_relocs;
> > > -   size_t array_length;
> > > +   uint32_t num_relocs;
> > > +   uint32_t array_length;
> > >     struct drm_i915_gem_relocation_entry * relocs;
> > >     struct anv_bo ** reloc_bos;
> > >  };
> > > @@ -803,7 +803,7 @@ struct anv_batch_bo {
> > >     struct anv_bo bo;
> > >
> > >     /* Bytes actually consumed in this batch BO */
> > > -   size_t length;
> > > +   uint32_t length;
> > >
> > >     struct anv_reloc_list relocs;
> > >  };
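One note on anv_gem_create() moving to uint64_t rather than uint32_t: GEM
object sizes are 64-bit in the kernel ABI (struct drm_i915_gem_create
carries a __u64 size), so a size_t parameter would silently truncate a
request above 4 GiB on a 32-bit build before it ever reached the ioctl.
A sketch of the hazard, with a hypothetical wrapper name:

   #include <stdint.h>
   #include <stdio.h>

   /* Hypothetical stand-in for a wrapper that, like the old code, takes
    * size_t and forwards it into the kernel's 64-bit size field. */
   static uint64_t old_style_create(size_t size)
   {
      return size;   /* what the kernel would see as gem_create.size */
   }

   int main(void)
   {
      uint64_t wanted = 5ull << 30;   /* request 5 GiB */
      /* On a 32-bit build, the argument conversion to a 4-byte size_t
       * wraps modulo 2^32, so the kernel sees 1 GiB; with a uint64_t
       * parameter the full value survives on every architecture. */
      printf("requested %llu, kernel sees %llu\n",
             (unsigned long long)wanted,
             (unsigned long long)old_style_create(wanted));
      return 0;
   }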
_______________________________________________
mesa-dev mailing list
mesa-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/mesa-dev