author     Dave Airlie <airlied@redhat.com>  2017-11-02 11:29:28 +1000
committer  Dave Airlie <airlied@redhat.com>  2017-11-02 11:29:28 +1000
commit     87331c83797b5d5763a82f09f26fbb6e1a7e6661 (patch)
tree       0fbd0da83eeadfd50751bfbeee65a60f8aa9ca5d /drivers/gpu/drm/msm/msm_gpu.h
parent     43106e25ab37580f54df99c512d1d66ae0fe1c67 (diff)
parent     39ae0d3e561d360e41f2a3d1c427d5d9142468da (diff)
Merge tag 'drm-msm-next-2017-11-01' of git://people.freedesktop.org/~robclark/linux into drm-next
+ preemption support for a5xx[1][2]
+ display fixes for 8x96 (snapdragon 820), including fixes for 4k scanout
  (hwpipe assignment re-work to handle multiple hwpipes assigned to a plane
  for wide scanout)
+ async cursor plane updates and fixes
+ refactor adreno_bind/hwinit.. still defer fw loading until device open,
  but move clk/irq/etc setup to probe/bind time to fix issues when fw isn't
  present in the filesystem (see the first sketch below)
+ clk/dt bindings cleanups w/ backward compat via msm_clk_get() (dt docs
  part ack'ed by Rob Herring; the fallback lookup is sketched below)
+ fw loading re-work with a helper to handle either /lib/firmware/qcom/$fw
  or /lib/firmware/$fw.. background: we've started landing fw for some of the
  GPU generations in linux-firmware, but there is a preference to put fw files
  under the 'qcom' subdirectory, which is not what was done on android or for
  people who copied fw from android.  So now we first look in the qcom subdir
  and then fall back to the original location (path fallback sketched below)
+ bunch of GPU debugging enhancements: dump the full cmdline of processes
  that trigger faults, and add a new debugfs file to capture the cmdstream of
  just the submits that triggered faults.. both quite useful for piglit ;-)
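A rough sketch of the bind/hwinit split from the refactor bullet above: things
that do not need firmware (clk/irq/mmio) are set up at bind/probe time, and
firmware loading plus one-time hw init wait for the first device open.  The
example_* function names are hypothetical, not the driver's API; only the
needs_hw_init flag and the hw_init() hook come from msm_gpu.h itself.

/* Illustrative only; assumes the msm_gpu definitions from this header. */
static int example_adreno_bind(struct msm_gpu *gpu)
{
	/* clk/irq/mmio setup would happen here -- no firmware required yet */
	gpu->needs_hw_init = true;
	return 0;
}

static int example_adreno_open(struct msm_gpu *gpu)
{
	/* first open: load firmware and perform the deferred hw init */
	if (gpu->needs_hw_init)
		return gpu->funcs->hw_init(gpu);
	return 0;
}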
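The msm_clk_get() backward compat mentioned above amounts to trying the new
bare clock name first and falling back to the legacy "_clk"-suffixed name used
by older device trees.  A minimal sketch of that idea follows; it uses the
stock clk APIs but is illustrative, not a copy of the driver's helper.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct clk *example_msm_clk_get(struct platform_device *pdev,
				       const char *name)
{
	struct clk *clk;
	char legacy[32];

	/* preferred: the new binding's bare clock name */
	clk = devm_clk_get(&pdev->dev, name);
	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	/* fallback: legacy "<name>_clk" name from older device trees */
	snprintf(legacy, sizeof(legacy), "%s_clk", name);
	clk = devm_clk_get(&pdev->dev, legacy);
	if (!IS_ERR(clk))
		dev_warn(&pdev->dev, "using legacy clk name \"%s\"\n", legacy);

	return clk;
}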
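The fw loading fallback from the last bullet works the same way: look in the
preferred qcom/ subdirectory first, then fall back to the original flat path so
firmware copied over from android keeps working.  A minimal sketch using the
stock request_firmware() API; the helper name below is illustrative, not the
driver's actual function.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>

static int example_request_adreno_fw(struct device *dev, const char *fwname,
				     const struct firmware **fw)
{
	char newname[64];
	int ret;

	/* preferred location: /lib/firmware/qcom/$fw */
	snprintf(newname, sizeof(newname), "qcom/%s", fwname);
	ret = request_firmware(fw, newname, dev);
	if (ret == 0)
		return 0;

	/* fallback: the original /lib/firmware/$fw location */
	ret = request_firmware(fw, fwname, dev);
	if (ret)
		dev_err(dev, "failed to load %s: %d\n", fwname, ret);

	return ret;
}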
* tag 'drm-msm-next-2017-11-01' of git://people.freedesktop.org/~robclark/linux: (38 commits)
drm/msm: use %z format modifier for printing size_t
drm/msm/mdp5: Don't use async plane update path if plane visibility changes
drm/msm/mdp5: mdp5_crtc: Restore cursor state only if LM cursors are enabled
drm/msm/mdp5: Update mdp5_pipe_assign to spit out both planes
drm/msm/mdp5: Prepare mdp5_pipe_assign for some rework
drm/msm: remove mdp5_cursor_plane_funcs
drm/msm: update cursors asynchronously through atomic
drm/msm/atomic: switch to drm_atomic_helper_check
drm/msm/mdp5: restore cursor state when enabling crtc
drm/msm/mdp5: don't use autosuspend
drm/msm/mdp5: ignore planes that are not visible
drm/msm: dump submits which triggered gpu hang
drm/msm: preserve IOVAs in submit's bo table
drm/msm/rd: allow adding addition msg to top of dump
drm/msm: split rd debugfs file
drm/msm: add special _get_vaddr_active() for cmdstream dumps
drm/msm: show task cmdline in gpu recovery messages
drm/msm: dump a rd GPUADDR header for all buffers in the command
drm/msm: Removed unused struct_mutex_task
drm/msm: Implement preemption for A5XX targets
...
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gpu.h')
-rw-r--r--   drivers/gpu/drm/msm/msm_gpu.h | 51
1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index df4e2771fb85..e113d64574d3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -33,7 +33,7 @@ struct msm_gpu_config {
 	const char *irqname;
 	uint64_t va_start;
 	uint64_t va_end;
-	unsigned int ringsz;
+	unsigned int nr_rings;
 };
 
 /* So far, with hardware that I've seen to date, we can have:
@@ -57,9 +57,9 @@ struct msm_gpu_funcs {
 	int (*pm_resume)(struct msm_gpu *gpu);
 	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 			struct msm_file_private *ctx);
-	void (*flush)(struct msm_gpu *gpu);
+	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 	irqreturn_t (*irq)(struct msm_gpu *irq);
-	uint32_t (*last_fence)(struct msm_gpu *gpu);
+	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
 	void (*recover)(struct msm_gpu *gpu);
 	void (*destroy)(struct msm_gpu *gpu);
 #ifdef CONFIG_DEBUG_FS
@@ -86,16 +86,12 @@ struct msm_gpu {
 	const struct msm_gpu_perfcntr *perfcntrs;
 	uint32_t num_perfcntrs;
 
-	/* ringbuffer: */
-	struct msm_ringbuffer *rb;
-	uint64_t rb_iova;
+	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
+	int nr_rings;
 
 	/* list of GEM active objects: */
 	struct list_head active_list;
 
-	/* fencing: */
-	struct msm_fence_context *fctx;
-
 	/* does gpu need hw_init? */
 	bool needs_hw_init;
 
@@ -126,15 +122,31 @@ struct msm_gpu {
 #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
 #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
 	struct timer_list hangcheck_timer;
-	uint32_t hangcheck_fence;
 	struct work_struct recover_work;
 
-	struct list_head submit_list;
+	struct drm_gem_object *memptrs_bo;
 };
 
+/* It turns out that all targets use the same ringbuffer size */
+#define MSM_GPU_RINGBUFFER_SZ SZ_32K
+#define MSM_GPU_RINGBUFFER_BLKSIZE 32
+
+#define MSM_GPU_RB_CNTL_DEFAULT \
+		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
+		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
+
 static inline bool msm_gpu_active(struct msm_gpu *gpu)
 {
-	return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
+	int i;
+
+	for (i = 0; i < gpu->nr_rings; i++) {
+		struct msm_ringbuffer *ring = gpu->rb[i];
+
+		if (ring->seqno > ring->memptrs->fence)
+			return true;
+	}
+
+	return false;
 }
 
 /* Perf-Counters:
@@ -150,6 +162,15 @@ struct msm_gpu_perfcntr {
 	const char *name;
 };
 
+struct msm_gpu_submitqueue {
+	int id;
+	u32 flags;
+	u32 prio;
+	int faults;
+	struct list_head node;
+	struct kref ref;
+};
+
 static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
 {
 	msm_writel(data, gpu->mmio + (reg << 2));
 }
@@ -223,4 +244,10 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
 void __init adreno_register(void);
 void __exit adreno_unregister(void);
 
+static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
+{
+	if (queue)
+		kref_put(&queue->ref, msm_submitqueue_destroy);
+}
+
 #endif /* __MSM_GPU_H__ */
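The new msm_gpu_submitqueue above is reference counted: users hold a kref and
drop it with msm_submitqueue_put(), which frees the queue through
msm_submitqueue_destroy() once the last reference goes away.  A minimal sketch
of the implied lookup side, assuming queues hang off a per-file list via the
'node' member; the helper below (and its lock-free list walk) is a
hypothetical illustration, not the driver's actual API.

#include <linux/kref.h>
#include <linux/list.h>

static struct msm_gpu_submitqueue *
example_submitqueue_get(struct list_head *queues, u32 id)
{
	struct msm_gpu_submitqueue *queue;

	list_for_each_entry(queue, queues, node) {
		if (queue->id == id) {
			kref_get(&queue->ref);	/* caller now owns a reference */
			return queue;
		}
	}

	return NULL;
}

The caller would later drop its reference with msm_submitqueue_put(), matching
the kref_get() taken here.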