On Mon, 2014-04-14 at 01:06 -0600, Daniel Vetter wrote: > On Mon, Apr 14, 2014 at 12:19:58PM +0800, Zhao Yakui wrote: > > The Broadwell GT3 machine has two independent BSD rings in kernel driver > > while > > it is transparent to the user-space driver. In such case it needs to check > > the CPU<->GPU sync for the second BSD ring. Multi drm_fd can assure that the > > second BSD ring has the opportunity to dispatch the GPU command. > > > > Signed-off-by: Zhao Yakui <yakui.z...@intel.com> > > --- > > tests/Makefile.sources | 1 + > > tests/gem_dummy_reloc_multi_bsd.c | 258 > > +++++++++++++++++++++++++++++++++++++ > > I've meant that you add a new subtest to the existing gem_dummy_reloc > test. With your patch here we essentially duplicate all the tests for the > other rings. > > > 2 files changed, 259 insertions(+) > > create mode 100644 tests/gem_dummy_reloc_multi_bsd.c > > > > diff --git a/tests/Makefile.sources b/tests/Makefile.sources > > index 254a5c5..98f277f 100644 > > --- a/tests/Makefile.sources > > +++ b/tests/Makefile.sources > > @@ -105,6 +105,7 @@ TESTS_progs = \ > > gem_ring_sync_copy \ > > gem_ring_sync_loop \ > > gem_multi_bsd_sync_loop \ > > + gem_dummy_reloc_multi_bsd \ > > Tests with subtests must be added to the TESTS_progs_M variable, otherwise > piglit won't be able to enumerate the subtests. That's just an fyi for the > next testcase, like I've said here it's imo better to just add a new > subtest. >
Thanks for explaining the rules about how to add a test with subtests. (Sorry, I wasn't aware of this rule.) OK, I will follow your comment and add it as subtests. Thanks. Yakui > Also you've forgotten to update .gitignore, when building with your patch > git status shows some not-added binaries. > -Daniel > > > gem_seqno_wrap \ > > gem_set_tiling_vs_gtt \ > > gem_set_tiling_vs_pwrite \ > > diff --git a/tests/gem_dummy_reloc_multi_bsd.c > > b/tests/gem_dummy_reloc_multi_bsd.c > > new file mode 100644 > > index 0000000..ef8213e > > --- /dev/null > > +++ b/tests/gem_dummy_reloc_multi_bsd.c > > @@ -0,0 +1,258 @@ > > +/* > > + * Copyright © 2014 Intel Corporation > > + * > > + * Permission is hereby granted, free of charge, to any person obtaining a > > + * copy of this software and associated documentation files (the > > "Software"), > > + * to deal in the Software without restriction, including without > > limitation > > + * the rights to use, copy, modify, merge, publish, distribute, sublicense, > > + * and/or sell copies of the Software, and to permit persons to whom the > > + * Software is furnished to do so, subject to the following conditions: > > + * > > + * The above copyright notice and this permission notice (including the > > next > > + * paragraph) shall be included in all copies or substantial portions of > > the > > + * Software. > > + * > > + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS > > OR > > + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, > > + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL > > + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR > > OTHER > > + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING > > + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER > > DEALINGS > > + * IN THE SOFTWARE. 
> > + * > > + * Authors: > > + * Daniel Vetter <daniel.vet...@ffwll.ch> (based on > > gem_dummy_reloc_loop*.c) > > + * Zhao Yakui <yakui.z...@intel.com> > > + * > > + */ > > + > > +#include <stdlib.h> > > +#include <stdio.h> > > +#include <string.h> > > +#include <fcntl.h> > > +#include <inttypes.h> > > +#include <errno.h> > > +#include <sys/stat.h> > > +#include <sys/time.h> > > +#include "drm.h" > > +#include "ioctl_wrappers.h" > > +#include "drmtest.h" > > +#include "intel_bufmgr.h" > > +#include "intel_batchbuffer.h" > > +#include "intel_io.h" > > +#include "i830_reg.h" > > +#include "intel_chipset.h" > > + > > +#define LOCAL_I915_EXEC_VEBOX (4<<0) > > + > > +static drm_intel_bufmgr *bufmgr; > > +struct intel_batchbuffer *batch; > > +static drm_intel_bo *target_buffer; > > + > > +#define NUM_FD 50 > > + > > +static int mfd[NUM_FD]; > > +static drm_intel_bufmgr *mbufmgr[NUM_FD]; > > +static struct intel_batchbuffer *mbatch[NUM_FD]; > > +static drm_intel_bo *mbuffer[NUM_FD]; > > + > > + > > +/* > > + * Testcase: Basic check of ring<->cpu sync using a dummy reloc under > > multi-fd > > + * > > + * The last test (that randomly switches the ring) seems to be pretty > > effective > > + * at hitting the missed irq bug that's worked around with the HWSTAM irq > > write. 
> > + */ > > + > > + > > +#define MI_COND_BATCH_BUFFER_END (0x36<<23 | 1) > > +#define MI_DO_COMPARE (1<<21) > > +static void > > +dummy_reloc_loop(int ring) > > +{ > > + int i; > > + srandom(0xdeadbeef); > > + > > + for (i = 0; i < 0x100000; i++) { > > + int mindex = random() % NUM_FD; > > + > > + batch = mbatch[mindex]; > > + if (ring == I915_EXEC_RENDER) { > > + BEGIN_BATCH(4); > > + OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE); > > + OUT_BATCH(0xffffffff); /* compare dword */ > > + OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER, > > + I915_GEM_DOMAIN_RENDER, 0); > > + OUT_BATCH(MI_NOOP); > > + ADVANCE_BATCH(); > > + } else { > > + BEGIN_BATCH(4); > > + OUT_BATCH(MI_FLUSH_DW | 1); > > + OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER, > > + I915_GEM_DOMAIN_RENDER, 0); > > + OUT_BATCH(0); /* reserved */ > > + OUT_BATCH(MI_NOOP | (1<<22) | (0xf)); > > + ADVANCE_BATCH(); > > + } > > + intel_batchbuffer_flush_on_ring(batch, ring); > > + > > + drm_intel_bo_map(target_buffer, 0); > > + // map to force completion > > + drm_intel_bo_unmap(target_buffer); > > + } > > +} > > + > > +static void > > +dummy_reloc_loop_random_ring(int num_rings) > > +{ > > + int i; > > + > > + srandom(0xdeadbeef); > > + > > + for (i = 0; i < 0x100000; i++) { > > + int mindex; > > + int ring = random() % num_rings + 1; > > + > > + mindex = random() % NUM_FD; > > + > > + batch = mbatch[mindex]; > > + if (ring == I915_EXEC_RENDER) { > > + BEGIN_BATCH(4); > > + OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE); > > + OUT_BATCH(0xffffffff); /* compare dword */ > > + OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER, > > + I915_GEM_DOMAIN_RENDER, 0); > > + OUT_BATCH(MI_NOOP); > > + ADVANCE_BATCH(); > > + } else { > > + BEGIN_BATCH(4); > > + OUT_BATCH(MI_FLUSH_DW | 1); > > + OUT_BATCH(0); /* reserved */ > > + OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER, > > + I915_GEM_DOMAIN_RENDER, 0); > > + OUT_BATCH(MI_NOOP | (1<<22) | (0xf)); > > + ADVANCE_BATCH(); > > + } > > + 
intel_batchbuffer_flush_on_ring(batch, ring); > > + > > + drm_intel_bo_map(target_buffer, 0); > > + // map to force waiting on rendering > > + drm_intel_bo_unmap(target_buffer); > > + } > > +} > > + > > +int fd; > > +int devid; > > +int num_rings; > > + > > +igt_main > > +{ > > + igt_skip_on_simulation(); > > + > > + igt_fixture { > > + fd = drm_open_any(); > > + devid = intel_get_drm_devid(fd); > > + num_rings = gem_get_num_rings(fd); > > + /* Not yet implemented on pre-snb. */ > > + igt_require(HAS_BLT_RING(devid)); > > + > > + bufmgr = drm_intel_bufmgr_gem_init(fd, 4096); > > + igt_assert(bufmgr); > > + drm_intel_bufmgr_gem_enable_reuse(bufmgr); > > + > > + target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, > > 4096); > > + igt_assert(target_buffer); > > + } > > + /* Create multi drm_fd and map one gem object to multi gem_contexts */ > > + { > > + int i; > > + unsigned int target_flink; > > + char buffer_name[32]; > > + if (dri_bo_flink(target_buffer, &target_flink)) { > > + igt_assert(0); > > + printf("fail to get flink for target buffer\n"); > > + goto fail_flink; > > + } > > + for (i = 0; i < NUM_FD; i++) { > > + mfd[i] = 0; > > + mbufmgr[i] = NULL; > > + mbuffer[i] = NULL; > > + } > > + for (i = 0; i < NUM_FD; i++) { > > + sprintf(buffer_name, "Target buffer %d\n", i); > > + mfd[i] = drm_open_any(); > > + mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096); > > + igt_assert(mbufmgr[i]); > > + drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]); > > + mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid); > > + igt_assert(mbufmgr[i]); > > + mbuffer[i] = intel_bo_gem_create_from_name(mbufmgr[i], > > buffer_name, target_flink); > > + igt_assert(mbuffer[i]); > > + } > > + } > > + igt_subtest("render") { > > + printf("running dummy loop on render\n"); > > + dummy_reloc_loop(I915_EXEC_RENDER); > > + printf("dummy loop run on render completed\n"); > > + } > > + > > + igt_subtest("bsd") { > > + gem_require_ring(fd, I915_EXEC_BSD); > > + sleep(2); > > + 
printf("running dummy loop on bsd\n"); > > + dummy_reloc_loop(I915_EXEC_BSD); > > + printf("dummy loop run on bsd completed\n"); > > + } > > + > > + igt_subtest("blt") { > > + gem_require_ring(fd, I915_EXEC_BLT); > > + sleep(2); > > + printf("running dummy loop on blt\n"); > > + dummy_reloc_loop(I915_EXEC_BLT); > > + printf("dummy loop run on blt completed\n"); > > + } > > + > > +#ifdef I915_EXEC_VEBOX > > + igt_subtest("vebox") { > > + gem_require_ring(fd, I915_EXEC_VEBOX); > > + sleep(2); > > + printf("running dummy loop on vebox\n"); > > + dummy_reloc_loop(LOCAL_I915_EXEC_VEBOX); > > + printf("dummy loop run on vebox completed\n"); > > + } > > +#endif > > + > > + igt_subtest("mixed") { > > + if (num_rings > 1) { > > + sleep(2); > > + printf("running dummy loop on random rings\n"); > > + dummy_reloc_loop_random_ring(num_rings); > > + printf("dummy loop run on random rings completed\n"); > > + } > > + } > > + > > + /* Free the buffer/batchbuffer/buffer mgr for multi-fd */ > > + { > > + int i; > > + for (i = 0; i < NUM_FD; i++) { > > + dri_bo_unreference(mbuffer[i]); > > + intel_batchbuffer_free(mbatch[i]); > > + drm_intel_bufmgr_destroy(mbufmgr[i]); > > + close(mfd[i]); > > + } > > + } > > + igt_fixture { > > + drm_intel_bo_unreference(target_buffer); > > + drm_intel_bufmgr_destroy(bufmgr); > > + > > + close(fd); > > + } > > + return; > > +fail_flink: > > + igt_fixture { > > + drm_intel_bo_unreference(target_buffer); > > + drm_intel_bufmgr_destroy(bufmgr); > > + > > + close(fd); > > + } > > +} > > -- > > 1.7.10.1 > > > > _______________________________________________ > > Intel-gfx mailing list > > Intel-gfx@lists.freedesktop.org > > http://lists.freedesktop.org/mailman/listinfo/intel-gfx > _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org http://lists.freedesktop.org/mailman/listinfo/intel-gfx