author | Maxime Ripard <maxime@cerno.tech> | 2020-04-04 11:02:15 +0200
committer | Maxime Ripard <maxime@cerno.tech> | 2020-04-04 11:02:15 +0200
commit | cc46c03397c1865a181f1a4f66d4645806e5a943 (patch)
tree | e3a762d8961219a61b96ae6e5ec875ba9fbe2314 /drivers/gpu/drm/msm
parent | d8a26d8fc37c5b8b9e95f2fa194f287cf8cab3f4 (diff)
parent | 0e7e6198af28c1573267aba1be33dd0b7fb35691 (diff)
download | kernel_replicant_linux-cc46c03397c1865a181f1a4f66d4645806e5a943.tar.gz kernel_replicant_linux-cc46c03397c1865a181f1a4f66d4645806e5a943.tar.bz2 kernel_replicant_linux-cc46c03397c1865a181f1a4f66d4645806e5a943.zip
Merge drm/drm-next into drm-misc-next-fixes
Alex needs v5.6 into drm-misc-next-fixes to merge a fix for a regression in
the scatterlist processing in PRIME.
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/gpu/drm/msm')
29 files changed, 729 insertions, 722 deletions
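One recurring change in the msm portion of this merge is the a6xx GMU dropping its private IOMMU domain in favour of the DMA API: after the probe path runs of_dma_configure() and dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31)), GMU buffers are obtained with dma_alloc_wc() and released with dma_free_wc(), replacing the old per-page alloc_page()/iommu_map()/vmap() sequence. The following is a minimal sketch of that allocation pattern; the struct and helper names are illustrative rather than the exact symbols from the patch, and error handling is trimmed.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/*
 * Mirrors struct a6xx_gmu_bo after this series: iova is a dma_addr_t
 * handed back by the DMA API instead of a hand-managed IOMMU address.
 */
struct gmu_buf {
	void *virt;
	size_t size;
	dma_addr_t iova;
};

/*
 * Allocate a write-combined, device-visible buffer in one call,
 * instead of allocating pages, mapping each one with iommu_map(),
 * and vmap()ing them for CPU access.
 */
static struct gmu_buf *gmu_buf_alloc(struct device *dev, size_t size)
{
	struct gmu_buf *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);
	bo->virt = dma_alloc_wc(dev, bo->size, &bo->iova, GFP_KERNEL);
	if (!bo->virt) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	return bo;
}

static void gmu_buf_free(struct device *dev, struct gmu_buf *bo)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	dma_free_wc(dev, bo->size, bo->virt, bo->iova);
	kfree(bo);
}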
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 7d9e63e20ded..724024a2243a 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1446,18 +1446,31 @@ static const struct adreno_gpu_funcs funcs = { static void check_speed_bin(struct device *dev) { struct nvmem_cell *cell; - u32 bin, val; + u32 val; + + /* + * If the OPP table specifies a opp-supported-hw property then we have + * to set something with dev_pm_opp_set_supported_hw() or the table + * doesn't get populated so pick an arbitrary value that should + * ensure the default frequencies are selected but not conflict with any + * actual bins + */ + val = 0x80; cell = nvmem_cell_get(dev, "speed_bin"); - /* If a nvmem cell isn't defined, nothing to do */ - if (IS_ERR(cell)) - return; + if (!IS_ERR(cell)) { + void *buf = nvmem_cell_read(cell, NULL); + + if (!IS_ERR(buf)) { + u8 bin = *((u8 *) buf); - bin = *((u32 *) nvmem_cell_read(cell, NULL)); - nvmem_cell_put(cell); + val = (1 << bin); + kfree(buf); + } - val = (1 << bin); + nvmem_cell_put(cell); + } dev_pm_opp_set_supported_hw(dev, &val, 1); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 983afeaee737..c4e71abbdd53 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -2,6 +2,7 @@ /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ #include <linux/clk.h> +#include <linux/dma-mapping.h> #include <linux/interconnect.h> #include <linux/pm_domain.h> #include <linux/pm_opp.h> @@ -796,12 +797,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) return true; } +#define GBIF_CLIENT_HALT_MASK BIT(0) +#define GBIF_ARB_HALT_MASK BIT(1) + +static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) +{ + struct msm_gpu *gpu = &adreno_gpu->base; + + if (!a6xx_has_gbif(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); + spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & + 0xf) == 0xf); + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); + + return; + } + + /* Halt new client requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); + + /* Halt all AXI requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); + + /* The GBIF halt needs to be explicitly cleared */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); +} + /* Gracefully try to shut down the GMU and by extension the GPU */ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) { struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; - struct msm_gpu *gpu = &adreno_gpu->base; u32 val; /* @@ -819,11 +849,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) return; } - /* Clear the VBIF pipe before shutting down */ - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); - spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) - == 0xf); - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); + a6xx_bus_clear_pending_transactions(adreno_gpu); /* tell the GMU we want to slumber */ a6xx_gmu_notify_slumber(gmu); @@ -895,21 +921,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo) { - int count, i; - u64 iova; - if 
(IS_ERR_OR_NULL(bo)) return; - count = bo->size >> PAGE_SHIFT; - iova = bo->iova; - - for (i = 0; i < count; i++, iova += PAGE_SIZE) { - iommu_unmap(gmu->domain, iova, PAGE_SIZE); - __free_pages(bo->pages[i], 0); - } - - kfree(bo->pages); + dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova); kfree(bo); } @@ -917,7 +932,6 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, size_t size) { struct a6xx_gmu_bo *bo; - int ret, count, i; bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) @@ -925,86 +939,14 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, bo->size = PAGE_ALIGN(size); - count = bo->size >> PAGE_SHIFT; + bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL); - bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL); - if (!bo->pages) { + if (!bo->virt) { kfree(bo); return ERR_PTR(-ENOMEM); } - for (i = 0; i < count; i++) { - bo->pages[i] = alloc_page(GFP_KERNEL); - if (!bo->pages[i]) - goto err; - } - - bo->iova = gmu->uncached_iova_base; - - for (i = 0; i < count; i++) { - ret = iommu_map(gmu->domain, - bo->iova + (PAGE_SIZE * i), - page_to_phys(bo->pages[i]), PAGE_SIZE, - IOMMU_READ | IOMMU_WRITE); - - if (ret) { - DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n"); - - for (i = i - 1 ; i >= 0; i--) - iommu_unmap(gmu->domain, - bo->iova + (PAGE_SIZE * i), - PAGE_SIZE); - - goto err; - } - } - - bo->virt = vmap(bo->pages, count, VM_IOREMAP, - pgprot_writecombine(PAGE_KERNEL)); - if (!bo->virt) - goto err; - - /* Align future IOVA addresses on 1MB boundaries */ - gmu->uncached_iova_base += ALIGN(size, SZ_1M); - return bo; - -err: - for (i = 0; i < count; i++) { - if (bo->pages[i]) - __free_pages(bo->pages[i], 0); - } - - kfree(bo->pages); - kfree(bo); - - return ERR_PTR(-ENOMEM); -} - -static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) -{ - int ret; - - /* - * The GMU address space is hardcoded to treat the range - * 0x60000000 - 0x80000000 as un-cached memory. 
All buffers shared - * between the GMU and the CPU will live in this space - */ - gmu->uncached_iova_base = 0x60000000; - - - gmu->domain = iommu_domain_alloc(&platform_bus_type); - if (!gmu->domain) - return -ENODEV; - - ret = iommu_attach_device(gmu->domain, gmu->dev); - - if (ret) { - iommu_domain_free(gmu->domain); - gmu->domain = NULL; - } - - return ret; } /* Return the 'arc-level' for the given frequency */ @@ -1264,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) a6xx_gmu_memory_free(gmu, gmu->hfi); - iommu_detach_device(gmu->domain, gmu->dev); - - iommu_domain_free(gmu->domain); - free_irq(gmu->gmu_irq, gmu); free_irq(gmu->hfi_irq, gmu); @@ -1288,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) gmu->dev = &pdev->dev; - of_dma_configure(gmu->dev, node, true); + /* Pass force_dma false to require the DT to set the dma region */ + ret = of_dma_configure(gmu->dev, node, false); + if (ret) + return ret; + + /* Set the mask after the of_dma_configure() */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31)); + if (ret) + return ret; /* Fow now, don't do anything fancy until we get our feet under us */ gmu->idle_level = GMU_IDLE_STATE_ACTIVE; @@ -1300,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (ret) goto err_put_device; - /* Set up the IOMMU context bank */ - ret = a6xx_gmu_memory_probe(gmu); - if (ret) - goto err_put_device; - /* Allocate memory for for the HFI queues */ gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K); if (IS_ERR(gmu->hfi)) @@ -1350,11 +1291,6 @@ err_mmio: err_memory: a6xx_gmu_memory_free(gmu, gmu->hfi); - if (gmu->domain) { - iommu_detach_device(gmu->domain, gmu->dev); - - iommu_domain_free(gmu->domain); - } ret = -ENODEV; err_put_device: diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 2af91ed7ed0c..4af65a36d5ca 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -12,8 +12,7 @@ struct a6xx_gmu_bo { void *virt; size_t size; - u64 iova; - struct page **pages; + dma_addr_t iova; }; /* @@ -49,9 +48,6 @@ struct a6xx_gmu { int hfi_irq; int gmu_irq; - struct iommu_domain *domain; - u64 uncached_iova_base; - struct device *gxpd; int idle_level; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index daf07800cde0..68af24150de5 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -378,18 +378,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); int ret; - /* - * During a previous slumber, GBIF halt is asserted to ensure - * no further transaction can go through GPU before GPU - * headswitch is turned off. - * - * This halt is deasserted once headswitch goes off but - * incase headswitch doesn't goes off clear GBIF halt - * here to ensure GPU wake-up doesn't fail because of - * halted GPU transactions. 
- */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); - /* Make sure the GMU keeps the GPU on while we set it up */ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); @@ -470,10 +458,12 @@ static int a6xx_hw_init(struct msm_gpu *gpu) /* Select CP0 to always count cycles */ gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT); - gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1); - gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1); - gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1); - gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21); + if (adreno_is_a630(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1); + gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1); + gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1); + gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21); + } /* Enable fault detection */ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, @@ -748,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL), }; -#define GBIF_CLIENT_HALT_MASK BIT(0) -#define GBIF_ARB_HALT_MASK BIT(1) - -static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) -{ - struct msm_gpu *gpu = &adreno_gpu->base; - - if(!a6xx_has_gbif(adreno_gpu)){ - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); - spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & - 0xf) == 0xf); - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); - - return; - } - - /* Halt new client requests on GBIF */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); - spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & - (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); - - /* Halt all AXI requests on GBIF */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); - spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & - (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); - - /* - * GMU needs DDR access in slumber path. Deassert GBIF halt now - * to allow for GMU to access system memory. - */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); -} - static int a6xx_pm_resume(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -805,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) devfreq_suspend_device(gpu->devfreq.devfreq); - /* - * Make sure the GMU is idle before continuing (because some transitions - * may use VBIF - */ - a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu); - - /* Clear the VBIF pipe before shutting down */ - /* FIXME: This accesses the GPU - do we need to make sure it is on? 
*/ - a6xx_bus_clear_pending_transactions(adreno_gpu); - return a6xx_gmu_stop(a6xx_gpu); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h index e67c20c415af..24c974c293e5 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -379,7 +379,7 @@ static const struct a6xx_indexed_registers { }; static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = { - "CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, + "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, }; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index eda11abc5f01..e450e0b97211 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -7,6 +7,7 @@ #include "a6xx_gmu.h" #include "a6xx_gmu.xml.h" +#include "a6xx_gpu.h" #define HFI_MSG_ID(val) [val] = #val @@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu) NULL, 0); } -static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) +static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) { - struct a6xx_hfi_msg_bw_table msg = { 0 }; + /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x01; + + msg->ddr_cmds_addrs[0] = 0x50000; + msg->ddr_cmds_addrs[1] = 0x5003c; + msg->ddr_cmds_addrs[2] = 0x5000c; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; /* - * The sdm845 GMU doesn't do bus frequency scaling on its own but it - * does need at least one entry in the list because it might be accessed - * when the GMU is shutting down. Send a single "off" entry. + * These are the CX (CNOC) votes - these are used by the GMU but the + * votes are known and fixed for the target */ + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x5007c; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[1][0] = 0x60000001; +} - msg.bw_level_num = 1; +static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */ + msg->bw_level_num = 1; - msg.ddr_cmds_num = 3; - msg.ddr_wait_bitmask = 0x07; + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x07; - msg.ddr_cmds_addrs[0] = 0x50000; - msg.ddr_cmds_addrs[1] = 0x5005c; - msg.ddr_cmds_addrs[2] = 0x5000c; + msg->ddr_cmds_addrs[0] = 0x50000; + msg->ddr_cmds_addrs[1] = 0x5005c; + msg->ddr_cmds_addrs[2] = 0x5000c; - msg.ddr_cmds_data[0][0] = 0x40000000; - msg.ddr_cmds_data[0][1] = 0x40000000; - msg.ddr_cmds_data[0][2] = 0x40000000; + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; /* * These are the CX (CNOC) votes. This is used but the values for the * sdm845 GMU are known and fixed so we can hard code them. 
*/ - msg.cnoc_cmds_num = 3; - msg.cnoc_wait_bitmask = 0x05; + msg->cnoc_cmds_num = 3; + msg->cnoc_wait_bitmask = 0x05; - msg.cnoc_cmds_addrs[0] = 0x50034; - msg.cnoc_cmds_addrs[1] = 0x5007c; - msg.cnoc_cmds_addrs[2] = 0x5004c; + msg->cnoc_cmds_addrs[0] = 0x50034; + msg->cnoc_cmds_addrs[1] = 0x5007c; + msg->cnoc_cmds_addrs[2] = 0x5004c; - msg.cnoc_cmds_data[0][0] = 0x40000000; - msg.cnoc_cmds_data[0][1] = 0x00000000; - msg.cnoc_cmds_data[0][2] = 0x40000000; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[0][1] = 0x00000000; + msg->cnoc_cmds_data[0][2] = 0x40000000; + + msg->cnoc_cmds_data[1][0] = 0x60000001; + msg->cnoc_cmds_data[1][1] = 0x20000001; + msg->cnoc_cmds_data[1][2] = 0x60000001; +} + + +static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_bw_table msg = { 0 }; + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; - msg.cnoc_cmds_data[1][0] = 0x60000001; - msg.cnoc_cmds_data[1][1] = 0x20000001; - msg.cnoc_cmds_data[1][2] = 0x60000001; + if (adreno_is_a618(adreno_gpu)) + a618_build_bw_table(&msg); + else + a6xx_build_bw_table(&msg); return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg), NULL, 0); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 7fd29829b2fa..1d5c43c22269 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -673,7 +673,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len) return NULL; for (i = 0; i < l; i++) - buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s", + buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s", ascii85_encode(src[i], out)); return buf; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 58d3400668f5..a1b79ee2bd9d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -164,7 +164,6 @@ enum dpu_enc_rc_states { * clks and resources after IDLE_TIMEOUT time. 
* @vsync_event_work: worker to handle vsync event for autorefresh * @topology: topology of the display - * @mode_set_complete: flag to indicate modeset completion * @idle_timeout: idle timeout duration in milliseconds */ struct dpu_encoder_virt { @@ -202,7 +201,6 @@ struct dpu_encoder_virt { struct delayed_work delayed_off_work; struct kthread_work vsync_event_work; struct msm_display_topology topology; - bool mode_set_complete; u32 idle_timeout; }; @@ -461,7 +459,7 @@ void dpu_encoder_helper_split_config( struct msm_display_info *disp_info; if (!phys_enc->hw_mdptop || !phys_enc->parent) { - DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); + DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); return; } @@ -562,12 +560,13 @@ static int dpu_encoder_virt_atomic_check( const struct drm_display_mode *mode; struct drm_display_mode *adj_mode; struct msm_display_topology topology; + struct dpu_global_state *global_state; int i = 0; int ret = 0; if (!drm_enc || !crtc_state || !conn_state) { DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n", - drm_enc != 0, crtc_state != 0, conn_state != 0); + drm_enc != NULL, crtc_state != NULL, conn_state != NULL); return -EINVAL; } @@ -578,6 +577,7 @@ static int dpu_encoder_virt_atomic_check( dpu_kms = to_dpu_kms(priv->kms); mode = &crtc_state->mode; adj_mode = &crtc_state->adjusted_mode; + global_state = dpu_kms_get_existing_global_state(dpu_kms); trace_dpu_enc_atomic_check(DRMID(drm_enc)); /* @@ -609,17 +609,15 @@ static int dpu_encoder_virt_atomic_check( topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); - /* Reserve dynamic resources now. Indicating AtomicTest phase */ + /* Reserve dynamic resources now. */ if (!ret) { /* * Avoid reserving resources when mode set is pending. Topology * info may not be available to complete reservation. */ - if (drm_atomic_crtc_needs_modeset(crtc_state) - && dpu_enc->mode_set_complete) { - ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, - topology, true); - dpu_enc->mode_set_complete = false; + if (drm_atomic_crtc_needs_modeset(crtc_state)) { + ret = dpu_rm_reserve(&dpu_kms->rm, global_state, + drm_enc, crtc_state, topology); } } @@ -956,12 +954,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, struct drm_connector *conn = NULL, *conn_iter; struct drm_crtc *drm_crtc; struct dpu_crtc_state *cstate; - struct dpu_rm_hw_iter hw_iter; + struct dpu_global_state *global_state; struct msm_display_topology topology; - struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL }; - struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL }; - int num_lm = 0, num_ctl = 0; - int i, j, ret; + struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; + int num_lm, num_ctl, num_pp; + int i, j; if (!drm_enc) { DPU_ERROR("invalid encoder\n"); @@ -975,6 +974,12 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, dpu_kms = to_dpu_kms(priv->kms); connector_list = &dpu_kms->dev->mode_config.connector_list; + global_state = dpu_kms_get_existing_global_state(dpu_kms); + if (IS_ERR_OR_NULL(global_state)) { + DPU_ERROR("Failed to get global state"); + return; + } + trace_dpu_enc_mode_set(DRMID(drm_enc)); list_for_each_entry(conn_iter, connector_list, head) @@ -995,77 +1000,57 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); - /* Reserve dynamic resources now. 
Indicating non-AtomicTest phase */ - ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state, - topology, false); - if (ret) { - DPU_ERROR_ENC(dpu_enc, - "failed to reserve hw resources, %d\n", ret); - return; - } - - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG); - for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { - dpu_enc->hw_pp[i] = NULL; - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) - break; - dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw; - } - - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL); - for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) - break; - hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw; - num_ctl++; - } + /* Query resource that have been reserved in atomic check step. */ + num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp, + ARRAY_SIZE(hw_pp)); + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); + num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM); - for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) - break; - hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw; - num_lm++; - } + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) + dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i]) + : NULL; cstate = to_dpu_crtc_state(drm_crtc->state); for (i = 0; i < num_lm; i++) { int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); - cstate->mixers[i].hw_lm = hw_lm[i]; - cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx]; + cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); + cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); } cstate->num_mixers = num_lm; for (i = 0; i < dpu_enc->num_phys_encs; i++) { + int num_blk; + struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC]; struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; if (!dpu_enc->hw_pp[i]) { DPU_ERROR_ENC(dpu_enc, "no pp block assigned at idx: %d\n", i); - goto error; + return; } if (!hw_ctl[i]) { DPU_ERROR_ENC(dpu_enc, "no ctl block assigned at idx: %d\n", i); - goto error; + return; } phys->hw_pp = dpu_enc->hw_pp[i]; - phys->hw_ctl = hw_ctl[i]; + phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, - DPU_HW_BLK_INTF); - for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) { + num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm, + global_state, drm_enc->base.id, DPU_HW_BLK_INTF, + hw_blk, ARRAY_SIZE(hw_blk)); + for (j = 0; j < num_blk; j++) { struct dpu_hw_intf *hw_intf; - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) - break; - - hw_intf = (struct dpu_hw_intf *)hw_iter.hw; + hw_intf = to_dpu_hw_intf(hw_blk[i]); if (hw_intf->idx == phys->intf_idx) phys->hw_intf = hw_intf; } @@ -1073,18 +1058,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, if (!phys->hw_intf) { DPU_ERROR_ENC(dpu_enc, "no intf block assigned at idx: %d\n", i); - goto error; + return; } phys->connector = conn->state->connector; if (phys->ops.mode_set) phys->ops.mode_set(phys, mode, adj_mode); } - - dpu_enc->mode_set_complete = true; - -error: - dpu_rm_release(&dpu_kms->rm, drm_enc); } static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) @@ -1181,6 +1161,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) struct dpu_encoder_virt *dpu_enc = NULL; struct msm_drm_private 
*priv; struct dpu_kms *dpu_kms; + struct dpu_global_state *global_state; int i = 0; if (!drm_enc) { @@ -1199,6 +1180,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); + global_state = dpu_kms_get_existing_global_state(dpu_kms); trace_dpu_enc_disable(DRMID(drm_enc)); @@ -1228,7 +1210,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); - dpu_rm_release(&dpu_kms->rm, drm_enc); + dpu_rm_release(global_state, drm_enc); mutex_unlock(&dpu_enc->enc_lock); } @@ -1964,7 +1946,7 @@ static int dpu_encoder_virt_add_phys_encs( if (IS_ERR_OR_NULL(enc)) { DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", PTR_ERR(enc)); - return enc == 0 ? -EINVAL : PTR_ERR(enc); + return enc == NULL ? -EINVAL : PTR_ERR(enc); } dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; @@ -1977,7 +1959,7 @@ static int dpu_encoder_virt_add_phys_encs( if (IS_ERR_OR_NULL(enc)) { DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", PTR_ERR(enc)); - return enc == 0 ? -EINVAL : PTR_ERR(enc); + return enc == NULL ? -EINVAL : PTR_ERR(enc); } dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; @@ -2008,7 +1990,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, struct dpu_enc_phys_init_params phys_params; if (!dpu_enc) { - DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0); + DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 39e1e280ba44..8493d68ad841 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -411,7 +411,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config( to_dpu_encoder_phys_cmd(phys_enc); if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) { - DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0); + DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL); return; } @@ -440,7 +440,7 @@ static void dpu_encoder_phys_cmd_enable_helper( u32 flush_mask = 0; if (!phys_enc->hw_pp) { - DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); + DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); return; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index c71c18de5966..b5a49050d131 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -239,7 +239,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine( struct dpu_hw_intf_cfg intf_cfg = { 0 }; if (!phys_enc->hw_ctl->ops.setup_intf_cfg) { - DPU_ERROR("invalid encoder %d\n", phys_enc != 0); + DPU_ERROR("invalid encoder %d\n", phys_enc != NULL); return; } @@ -559,7 +559,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) if (!phys_enc->hw_intf) { DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", - phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0); + phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL); return; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index 528632690f1e..a05282dede91 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = { INTERLEAVED_RGB_FMT(RGB565, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - 
C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, false, 2, 0, DPU_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(BGR565, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, false, 2, 0, DPU_FETCH_LINEAR, 1), diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index 85468981632d..0ead64d3f63d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -90,6 +90,16 @@ struct dpu_hw_intf { }; /** + * to_dpu_hw_intf - convert base object dpu_hw_base to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_intf, base); +} + +/** * dpu_hw_intf_init(): Initializes the intf driver for the passed * interface idx. * @idx: interface index for which driver object is required diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h index 3d6f46b1db30..d73cb73e938b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h @@ -97,6 +97,16 @@ struct dpu_hw_pingpong { }; /** + * to_dpu_hw_pingpong - convert base object dpu_hw_base to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_pingpong, base); +} + +/** * dpu_hw_pingpong_init - initializes the pingpong driver for the passed * pingpong idx. * @idx: Pingpong index for which driver object is required diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index cb08fafb1dc1..ce19f1d39367 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -138,16 +138,12 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data) { struct dpu_debugfs_regset32 *regset = s->private; struct dpu_kms *dpu_kms = regset->dpu_kms; - struct drm_device *dev; - struct msm_drm_private *priv; void __iomem *base; uint32_t i, addr; if (!dpu_kms->mmio) return 0; - dev = dpu_kms->dev; - priv = dev->dev_private; base = dpu_kms->mmio + regset->offset; /* insert padding spaces, if needed */ @@ -228,6 +224,85 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) } #endif +/* Global/shared object state funcs */ + +/* + * This is a helper that returns the private state currently in operation. + * Note that this would return the "old_state" if called in the atomic check + * path, and the "new_state" after the atomic swap has been done. + */ +struct dpu_global_state * +dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms) +{ + return to_dpu_global_state(dpu_kms->global_state.state); +} + +/* + * This acquires the modeset lock set aside for global state, creates + * a new duplicated private object state. 
+ */ +struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); + struct drm_private_state *priv_state; + int ret; + + ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + priv_state = drm_atomic_get_private_obj_state(s, + &dpu_kms->global_state); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_dpu_global_state(priv_state); +} + +static struct drm_private_state * +dpu_kms_global_duplicate_state(struct drm_private_obj *obj) +{ + struct dpu_global_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void dpu_kms_global_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct dpu_global_state *dpu_state = to_dpu_global_state(state); + + kfree(dpu_state); +} + +static const struct drm_private_state_funcs dpu_kms_global_state_funcs = { + .atomic_duplicate_state = dpu_kms_global_duplicate_state, + .atomic_destroy_state = dpu_kms_global_destroy_state, +}; + +static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms) +{ + struct dpu_global_state *state; + + drm_modeset_lock_init(&dpu_kms->global_state_lock); + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state, + &state->base, + &dpu_kms_global_state_funcs); + return 0; +} + static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { return dpu_crtc_vblank(crtc, true); @@ -267,8 +342,6 @@ static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc) static void dpu_kms_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { - struct dpu_kms *dpu_kms; - struct drm_device *dev; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; struct drm_encoder *encoder; @@ -276,8 +349,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms, if (!kms) return; - dpu_kms = to_dpu_kms(kms); - dev = dpu_kms->dev; /* Call prepare_commit for all affected encoders */ for_each_new_crtc_in_state(state, crtc, crtc_state, i) { @@ -552,11 +623,8 @@ static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate, static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) { - struct drm_device *dev; int i; - dev = dpu_kms->dev; - if (dpu_kms->hw_intr) dpu_hw_intr_destroy(dpu_kms->hw_intr); dpu_kms->hw_intr = NULL; @@ -760,7 +828,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms) { struct dpu_kms *dpu_kms; struct drm_device *dev; - struct msm_drm_private *priv; int i, rc = -EINVAL; if (!kms) { @@ -770,7 +837,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_kms = to_dpu_kms(kms); dev = dpu_kms->dev; - priv = dev->dev_private; + + rc = dpu_kms_global_obj_init(dpu_kms); + if (rc) + return rc; atomic_set(&dpu_kms->bandwidth_ref, 0); @@ -1018,10 +1088,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev) int rc = -1; struct platform_device *pdev = to_platform_device(dev); struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); - struct drm_device *ddev; struct dss_module_power *mp = &dpu_kms->mp; - ddev = dpu_kms->dev; rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); if (rc) DPU_ERROR("clock disable failed rc:%d\n", rc); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index c6169e7df19d..211f5de99a44 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -111,6 +111,13 @@ struct dpu_kms { struct dpu_core_perf perf; + /* + * Global private object state, Do not access directly, use + * dpu_kms_global_get_state() + */ + struct drm_modeset_lock global_state_lock; + struct drm_private_obj global_state; + struct dpu_rm rm; bool rm_init; @@ -139,6 +146,25 @@ struct vsync_info { #define to_dpu_kms(x) container_of(x, struct dpu_kms, base) +#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base) + +/* Global private object state for tracking resources that are shared across + * multiple kms objects (planes/crtcs/etc). + */ +struct dpu_global_state { + struct drm_private_state base; + + uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0]; + uint32_t mixer_to_enc_id[LM_MAX - LM_0]; + uint32_t ctl_to_enc_id[CTL_MAX - CTL_0]; + uint32_t intf_to_enc_id[INTF_MAX - INTF_0]; +}; + +struct dpu_global_state + *dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms); +struct dpu_global_state + *__must_check dpu_kms_get_global_state(struct drm_atomic_state *s); + /** * Debugfs functions - extra helper functions for debugfs support * diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c index 29705e773a4b..80d3cfc14007 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c @@ -12,6 +12,7 @@ #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base) +#define HW_REV 0x0 #define HW_INTR_STATUS 0x0010 /* Max BW defined in KBps */ @@ -22,6 +23,17 @@ struct dpu_irq_controller { struct irq_domain *domain; }; +struct dpu_hw_cfg { + u32 val; + u32 offset; +}; + +struct dpu_mdss_hw_init_handler { + u32 hw_rev; + u32 hw_reg_count; + struct dpu_hw_cfg* hw_cfg; +}; + struct dpu_mdss { struct msm_mdss base; void __iomem *mmio; @@ -32,6 +44,44 @@ struct dpu_mdss { u32 num_paths; }; +static struct dpu_hw_cfg hw_cfg[] = { + { + /* UBWC global settings */ + .val = 0x1E, + .offset = 0x144, + } +}; + +static struct dpu_mdss_hw_init_handler cfg_handler[] = { + { .hw_rev = DPU_HW_VER_620, + .hw_reg_count = ARRAY_SIZE(hw_cfg), + .hw_cfg = hw_cfg + }, +}; + +static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev) +{ + int i; + u32 count = 0; + struct dpu_hw_cfg *hw_cfg = NULL; + + for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) { + if (cfg_handler[i].hw_rev == hw_rev) { + hw_cfg = cfg_handler[i].hw_cfg; + count = cfg_handler[i].hw_reg_count; + break; + } + } + + for (i = 0; i < count; i++ ) { + writel_relaxed(hw_cfg->val, + dpu_mdss->mmio + hw_cfg->offset); + hw_cfg++; + } + + return; +} + static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev, struct dpu_mdss *dpu_mdss) { @@ -174,12 +224,18 @@ static int dpu_mdss_enable(struct msm_mdss *mdss) struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss); struct dss_module_power *mp = &dpu_mdss->mp; int ret; + u32 mdss_rev; dpu_mdss_icc_request_bw(mdss); ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); - if (ret) + if (ret) { DPU_ERROR("clock enable failed, ret:%d\n", ret); + return ret; + } + + mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV); + dpu_mdss_hw_init(dpu_mdss, mdss_rev); return ret; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 23f5b1433b35..9b62451b01ee 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -12,8 
+12,12 @@ #include "dpu_encoder.h" #include "dpu_trace.h" -#define RESERVED_BY_OTHER(h, r) \ - ((h)->enc_id && (h)->enc_id != r) + +static inline bool reserved_by_other(uint32_t *res_map, int idx, + uint32_t enc_id) +{ + return res_map[idx] && res_map[idx] != enc_id; +} /** * struct dpu_rm_requirements - Reservation requirements parameter bundle @@ -25,171 +29,43 @@ struct dpu_rm_requirements { struct dpu_encoder_hw_resources hw_res; }; - -/** - * struct dpu_rm_hw_blk - hardware block tracking list member - * @list: List head for list of all hardware blocks tracking items - * @id: Hardware ID number, within it's own space, ie. LM_X - * @enc_id: Encoder id to which this blk is binded - * @hw: Pointer to the hardware register access object for this block - */ -struct dpu_rm_hw_blk { - struct list_head list; - uint32_t id; - uint32_t enc_id; - struct dpu_hw_blk *hw; -}; - -void dpu_rm_init_hw_iter( - struct dpu_rm_hw_iter *iter, - uint32_t enc_id, - enum dpu_hw_blk_type type) -{ - memset(iter, 0, sizeof(*iter)); - iter->enc_id = enc_id; - iter->type = type; -} - -static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) +int dpu_rm_destroy(struct dpu_rm *rm) { - struct list_head *blk_list; - - if (!rm || !i || i->type >= DPU_HW_BLK_MAX) { - DPU_ERROR("invalid rm\n"); - return false; - } + int i; - i->hw = NULL; - blk_list = &rm->hw_blks[i->type]; + for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { + struct dpu_hw_pingpong *hw; - if (i->blk && (&i->blk->list == blk_list)) { - DPU_DEBUG("attempt resume iteration past last\n"); - return false; - } - - i->blk = list_prepare_entry(i->blk, blk_list, list); - - list_for_each_entry_continue(i->blk, blk_list, list) { - if (i->enc_id == i->blk->enc_id) { - i->hw = i->blk->hw; - DPU_DEBUG("found type %d id %d for enc %d\n", - i->type, i->blk->id, i->enc_id); - return true; + if (rm->pingpong_blks[i]) { + hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]); + dpu_hw_pingpong_destroy(hw); } } + for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) { + struct dpu_hw_mixer *hw; - DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id); - - return false; -} - -bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) -{ - bool ret; - - mutex_lock(&rm->rm_lock); - ret = _dpu_rm_get_hw_locked(rm, i); - mutex_unlock(&rm->rm_lock); - - return ret; -} - -static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw) -{ - switch (type) { - case DPU_HW_BLK_LM: - dpu_hw_lm_destroy(hw); - break; - case DPU_HW_BLK_CTL: - dpu_hw_ctl_destroy(hw); - break; - case DPU_HW_BLK_PINGPONG: - dpu_hw_pingpong_destroy(hw); - break; - case DPU_HW_BLK_INTF: - dpu_hw_intf_destroy(hw); - break; - case DPU_HW_BLK_SSPP: - /* SSPPs are not managed by the resource manager */ - case DPU_HW_BLK_TOP: - /* Top is a singleton, not managed in hw_blks list */ - case DPU_HW_BLK_MAX: - default: - DPU_ERROR("unsupported block type %d\n", type); - break; - } -} - -int dpu_rm_destroy(struct dpu_rm *rm) -{ - struct dpu_rm_hw_blk *hw_cur, *hw_nxt; - enum dpu_hw_blk_type type; - - for (type = 0; type < DPU_HW_BLK_MAX; type++) { - list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type], - list) { - list_del(&hw_cur->list); - _dpu_rm_hw_destroy(type, hw_cur->hw); - kfree(hw_cur); + if (rm->mixer_blks[i]) { + hw = to_dpu_hw_mixer(rm->mixer_blks[i]); + dpu_hw_lm_destroy(hw); } } + for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) { + struct dpu_hw_ctl *hw; - mutex_destroy(&rm->rm_lock); - - return 0; -} - -static int _dpu_rm_hw_blk_create( - struct dpu_rm *rm, - const 
struct dpu_mdss_cfg *cat, - void __iomem *mmio, - enum dpu_hw_blk_type type, - uint32_t id, - const void *hw_catalog_info) -{ - struct dpu_rm_hw_blk *blk; - void *hw; - - switch (type) { - case DPU_HW_BLK_LM: - hw = dpu_hw_lm_init(id, mmio, cat); - break; - case DPU_HW_BLK_CTL: - hw = dpu_hw_ctl_init(id, mmio, cat); - break; - case DPU_HW_BLK_PINGPONG: - hw = dpu_hw_pingpong_init(id, mmio, cat); - break; - case DPU_HW_BLK_INTF: - hw = dpu_hw_intf_init(id, mmio, cat); - break; - case DPU_HW_BLK_SSPP: - /* SSPPs are not managed by the resource manager */ - case DPU_HW_BLK_TOP: - /* Top is a singleton, not managed in hw_blks list */ - case DPU_HW_BLK_MAX: - default: - DPU_ERROR("unsupported block type %d\n", type); - return -EINVAL; - } - - if (IS_ERR_OR_NULL(hw)) { - DPU_ERROR("failed hw object creation: type %d, err %ld\n", - type, PTR_ERR(hw)); - return -EFAULT; + if (rm->ctl_blks[i]) { + hw = to_dpu_hw_ctl(rm->ctl_blks[i]); + dpu_hw_ctl_destroy(hw); + } } + for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) { + struct dpu_hw_intf *hw; - blk = kzalloc(sizeof(*blk), GFP_KERNEL); - if (!blk) { - _dpu_rm_hw_destroy(type, hw); - return -ENOMEM; + if (rm->intf_blks[i]) { + hw = to_dpu_hw_intf(rm->intf_blks[i]); + dpu_hw_intf_destroy(hw); + } } - blk->id = id; - blk->hw = hw; - blk->enc_id = 0; - list_add_tail(&blk->list, &rm->hw_blks[type]); - return 0; } @@ -198,7 +74,6 @@ int dpu_rm_init(struct dpu_rm *rm, void __iomem *mmio) { int rc, i; - enum dpu_hw_blk_type type; if (!rm || !cat || !mmio) { DPU_ERROR("invalid kms\n"); @@ -208,13 +83,9 @@ int dpu_rm_init(struct dpu_rm *rm, /* Clear, setup lists */ memset(rm, 0, sizeof(*rm)); - mutex_init(&rm->rm_lock); - - for (type = 0; type < DPU_HW_BLK_MAX; type++) - INIT_LIST_HEAD(&rm->hw_blks[type]); - /* Interrogate HW catalog and create tracking items for hw blocks */ for (i = 0; i < cat->mixer_count; i++) { + struct dpu_hw_mixer *hw; const struct dpu_lm_cfg *lm = &cat->mixer[i]; if (lm->pingpong == PINGPONG_MAX) { @@ -222,12 +93,17 @@ int dpu_rm_init(struct dpu_rm *rm, continue; } - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM, - cat->mixer[i].id, &cat->mixer[i]); - if (rc) { - DPU_ERROR("failed: lm hw not available\n"); + if (lm->id < LM_0 || lm->id >= LM_MAX) { + DPU_ERROR("skip mixer %d with invalid id\n", lm->id); + continue; + } + hw = dpu_hw_lm_init(lm->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed lm object creation: err %d\n", rc); goto fail; } + rm->mixer_blks[lm->id - LM_0] = &hw->base; if (!rm->lm_max_width) { rm->lm_max_width = lm->sblk->maxwidth; @@ -243,35 +119,59 @@ int dpu_rm_init(struct dpu_rm *rm, } for (i = 0; i < cat->pingpong_count; i++) { - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG, - cat->pingpong[i].id, &cat->pingpong[i]); - if (rc) { - DPU_ERROR("failed: pp hw not available\n"); + struct dpu_hw_pingpong *hw; + const struct dpu_pingpong_cfg *pp = &cat->pingpong[i]; + + if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) { + DPU_ERROR("skip pingpong %d with invalid id\n", pp->id); + continue; + } + hw = dpu_hw_pingpong_init(pp->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed pingpong object creation: err %d\n", + rc); goto fail; } + rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base; } for (i = 0; i < cat->intf_count; i++) { - if (cat->intf[i].type == INTF_NONE) { + struct dpu_hw_intf *hw; + const struct dpu_intf_cfg *intf = &cat->intf[i]; + + if (intf->type == INTF_NONE) { DPU_DEBUG("skip intf %d with type none\n", i); 
continue; } - - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF, - cat->intf[i].id, &cat->intf[i]); - if (rc) { - DPU_ERROR("failed: intf hw not available\n"); + if (intf->id < INTF_0 || intf->id >= INTF_MAX) { + DPU_ERROR("skip intf %d with invalid id\n", intf->id); + continue; + } + hw = dpu_hw_intf_init(intf->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed intf object creation: err %d\n", rc); goto fail; } + rm->intf_blks[intf->id - INTF_0] = &hw->base; } for (i = 0; i < cat->ctl_count; i++) { - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL, - cat->ctl[i].id, &cat->ctl[i]); - if (rc) { - DPU_ERROR("failed: ctl hw not available\n"); + struct dpu_hw_ctl *hw; + const struct dpu_ctl_cfg *ctl = &cat->ctl[i]; + + if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) { + DPU_ERROR("skip ctl %d with invalid id\n", ctl->id); + continue; + } + hw = dpu_hw_ctl_init(ctl->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed ctl object creation: err %d\n", rc); goto fail; } + rm->ctl_blks[ctl->id - CTL_0] = &hw->base; } return 0; @@ -279,7 +179,7 @@ int dpu_rm_init(struct dpu_rm *rm, fail: dpu_rm_destroy(rm); - return rc; + return rc ? rc : -EFAULT; } static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) @@ -288,85 +188,81 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) } /** + * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary + * @rm: dpu resource manager handle + * @primary_idx: index of primary mixer in rm->mixer_blks[] + * @peer_idx: index of other mixer in rm->mixer_blks[] + * @Return: true if rm->mixer_blks[peer_idx] is a peer of + * rm->mixer_blks[primary_idx] + */ +static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx, + int peer_idx) +{ + const struct dpu_lm_cfg *prim_lm_cfg; + const struct dpu_lm_cfg *peer_cfg; + + prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap; + peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap; + + if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) { + DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id, + peer_cfg->id); + return false; + } + return true; +} + +/** * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets * proposed use case requirements, incl. hardwired dependent blocks like * pingpong * @rm: dpu resource manager handle * @enc_id: encoder id requesting for allocation - * @reqs: proposed use case requirements - * @lm: proposed layer mixer, function checks if lm, and all other hardwired - * blocks connected to the lm (pp) is available and appropriate - * @pp: output parameter, pingpong block attached to the layer mixer. - * NULL if pp was not available, or not matching requirements. - * @primary_lm: if non-null, this function check if lm is compatible primary_lm - * as well as satisfying all other requirements + * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks + * if lm, and all other hardwired blocks connected to the lm (pp) is + * available and appropriate + * @pp_idx: output parameter, index of pingpong block attached to the layer + * mixer in rm->pongpong_blks[]. 
* @Return: true if lm matches all requirements, false otherwise */ -static bool _dpu_rm_check_lm_and_get_connected_blks( - struct dpu_rm *rm, - uint32_t enc_id, - struct dpu_rm_requirements *reqs, - struct dpu_rm_hw_blk *lm, - struct dpu_rm_hw_blk **pp, - struct dpu_rm_hw_blk *primary_lm) +static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, + struct dpu_global_state *global_state, + uint32_t enc_id, int lm_idx, int *pp_idx) { - const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap; - struct dpu_rm_hw_iter iter; - - *pp = NULL; - - DPU_DEBUG("check lm %d pp %d\n", - lm_cfg->id, lm_cfg->pingpong); - - /* Check if this layer mixer is a peer of the proposed primary LM */ - if (primary_lm) { - const struct dpu_lm_cfg *prim_lm_cfg = - to_dpu_hw_mixer(primary_lm->hw)->cap; - - if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) { - DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id, - prim_lm_cfg->id); - return false; - } - } + const struct dpu_lm_cfg *lm_cfg; + int idx; /* Already reserved? */ - if (RESERVED_BY_OTHER(lm, enc_id)) { - DPU_DEBUG("lm %d already reserved\n", lm_cfg->id); + if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) { + DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0); return false; } - dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG); - while (_dpu_rm_get_hw_locked(rm, &iter)) { - if (iter.blk->id == lm_cfg->pingpong) { - *pp = iter.blk; - break; - } - } - - if (!*pp) { + lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap; + idx = lm_cfg->pingpong - PINGPONG_0; + if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) { DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong); return false; } - if (RESERVED_BY_OTHER(*pp, enc_id)) { - DPU_DEBUG("lm %d pp %d already reserved\n", lm->id, - (*pp)->id); + if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) { + DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id, + lm_cfg->pingpong); return false; } - + *pp_idx = idx; return true; } -static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, +static int _dpu_rm_reserve_lms(struct dpu_rm *rm, + struct dpu_global_state *global_state, + uint32_t enc_id, struct dpu_rm_requirements *reqs) { - struct dpu_rm_hw_blk *lm[MAX_BLOCKS]; - struct dpu_rm_hw_blk *pp[MAX_BLOCKS]; - struct dpu_rm_hw_iter iter_i, iter_j; - int lm_count = 0; - int i, rc = 0; + int lm_idx[MAX_BLOCKS]; + int pp_idx[MAX_BLOCKS]; + int i, j, lm_count = 0; if (!reqs->topology.num_lm) { DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm); @@ -374,36 +270,40 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, } /* Find a primary mixer */ - dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM); - while (lm_count != reqs->topology.num_lm && - _dpu_rm_get_hw_locked(rm, &iter_i)) { - memset(&lm, 0, sizeof(lm)); - memset(&pp, 0, sizeof(pp)); + for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) && + lm_count < reqs->topology.num_lm; i++) { + if (!rm->mixer_blks[i]) + continue; lm_count = 0; - lm[lm_count] = iter_i.blk; + lm_idx[lm_count] = i; - if (!_dpu_rm_check_lm_and_get_connected_blks( - rm, enc_id, reqs, lm[lm_count], - &pp[lm_count], NULL)) + if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state, + enc_id, i, &pp_idx[lm_count])) { continue; + } ++lm_count; /* Valid primary mixer found, find matching peers */ - dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM); + for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) && + lm_count < reqs->topology.num_lm; j++) { + if (!rm->mixer_blks[j]) + continue; - while (lm_count != 
reqs->topology.num_lm && - _dpu_rm_get_hw_locked(rm, &iter_j)) { - if (iter_i.blk == iter_j.blk) + if (!_dpu_rm_check_lm_peer(rm, i, j)) { + DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j, + LM_0 + i); continue; + } - if (!_dpu_rm_check_lm_and_get_connected_blks( - rm, enc_id, reqs, iter_j.blk, - &pp[lm_count], iter_i.blk)) + if (!_dpu_rm_check_lm_and_get_connected_blks(rm, + global_state, enc_id, j, + &pp_idx[lm_count])) { continue; + } - lm[lm_count] = iter_j.blk; + lm_idx[lm_count] = j; ++lm_count; } } @@ -413,65 +313,65 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, return -ENAVAIL; } - for (i = 0; i < ARRAY_SIZE(lm); i++) { - if (!lm[i]) - break; + for (i = 0; i < lm_count; i++) { + global_state->mixer_to_enc_id[lm_idx[i]] = enc_id; + global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id; - lm[i]->enc_id = enc_id; - pp[i]->enc_id = enc_id; - - trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id); + trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id, + pp_idx[i] + PINGPONG_0); } - return rc; + return 0; } static int _dpu_rm_reserve_ctls( struct dpu_rm *rm, + struct dpu_global_state *global_state, uint32_t enc_id, const struct msm_display_topology *top) { - struct dpu_rm_hw_blk *ctls[MAX_BLOCKS]; - struct dpu_rm_hw_iter iter; - int i = 0, num_ctls = 0; - bool needs_split_display = false; - - memset(&ctls, 0, sizeof(ctls)); + int ctl_idx[MAX_BLOCKS]; + int i = 0, j, num_ctls; + bool needs_split_display; /* each hw_intf needs its own hw_ctrl to program its control path */ num_ctls = top->num_intf; needs_split_display = _dpu_rm_needs_split_display(top); - dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL); - while (_dpu_rm_get_hw_locked(rm, &iter)) { - const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw); - unsigned long features = ctl->caps->features; + for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) { + const struct dpu_hw_ctl *ctl; + unsigned long features; bool has_split_display; - if (RESERVED_BY_OTHER(iter.blk, enc_id)) + if (!rm->ctl_blks[j]) + continue; + if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id)) continue; + ctl = to_dpu_hw_ctl(rm->ctl_blks[j]); + features = ctl->caps->features; has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features; - DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features); + DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features); if (needs_split_display != has_split_display) continue; - ctls[i] = iter.blk; - DPU_DEBUG("ctl %d match\n", iter.blk->id); + ctl_idx[i] = j; + DPU_DEBUG("ctl %d match\n", j + CTL_0); if (++i == num_ctls) break; + } if (i != num_ctls) return -ENAVAIL; - for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) { - ctls[i]->enc_id = enc_id; - trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id); + for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) { + global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id; + trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id); } return 0; @@ -479,40 +379,34 @@ static int _dpu_rm_reserve_ctls( static int _dpu_rm_reserve_intf( struct dpu_rm *rm, + struct dpu_global_state *global_state, uint32_t enc_id, - uint32_t id, - enum dpu_hw_blk_type type) + uint32_t id) { - struct dpu_rm_hw_iter iter; - int ret = 0; - - /* Find the block entry in the rm, and note the reservation */ - dpu_rm_init_hw_iter(&iter, 0, type); - while (_dpu_rm_get_hw_locked(rm, &iter)) { - if (iter.blk->id != id) - continue; + int idx = id - INTF_0; - if (RESERVED_BY_OTHER(iter.blk, enc_id)) { - DPU_ERROR("type %d id %d already reserved\n", type, id); - return -ENAVAIL; - } - - iter.blk->enc_id = 
enc_id; - trace_dpu_rm_reserve_intf(iter.blk->id, enc_id); - break; + if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) { + DPU_ERROR("invalid intf id: %d", id); + return -EINVAL; } - /* Shouldn't happen since intfs are fixed at probe */ - if (!iter.hw) { - DPU_ERROR("couldn't find type %d id %d\n", type, id); + if (!rm->intf_blks[idx]) { + DPU_ERROR("couldn't find intf id %d\n", id); return -EINVAL; } - return ret; + if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) { + DPU_ERROR("intf id %d already reserved\n", id); + return -ENAVAIL; + } + + global_state->intf_to_enc_id[idx] = enc_id; + return 0; } static int _dpu_rm_reserve_intf_related_hw( struct dpu_rm *rm, + struct dpu_global_state *global_state, uint32_t enc_id, struct dpu_encoder_hw_resources *hw_res) { @@ -523,8 +417,7 @@ static int _dpu_rm_reserve_intf_related_hw( if (hw_res->intfs[i] == INTF_MODE_NONE) continue; id = i + INTF_0; - ret = _dpu_rm_reserve_intf(rm, enc_id, id, - DPU_HW_BLK_INTF); + ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id); if (ret) return ret; } @@ -534,25 +427,27 @@ static int _dpu_rm_reserve_intf_related_hw( static int _dpu_rm_make_reservation( struct dpu_rm *rm, + struct dpu_global_state *global_state, struct drm_encoder *enc, - struct drm_crtc_state *crtc_state, struct dpu_rm_requirements *reqs) { int ret; - ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs); + ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs); if (ret) { DPU_ERROR("unable to find appropriate mixers\n"); return ret; } - ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology); + ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id, + &reqs->topology); if (ret) { DPU_ERROR("unable to find appropriate CTL\n"); return ret; } - ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res); + ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id, + &reqs->hw_res); if (ret) return ret; @@ -560,9 +455,7 @@ static int _dpu_rm_make_reservation( } static int _dpu_rm_populate_requirements( - struct dpu_rm *rm, struct drm_encoder *enc, - struct drm_crtc_state *crtc_state, struct dpu_rm_requirements *reqs, struct msm_display_topology req_topology) { @@ -577,37 +470,36 @@ static int _dpu_rm_populate_requirements( return 0; } -static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id) +static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, + uint32_t enc_id) { - struct dpu_rm_hw_blk *blk; - enum dpu_hw_blk_type type; - - for (type = 0; type < DPU_HW_BLK_MAX; type++) { - list_for_each_entry(blk, &rm->hw_blks[type], list) { - if (blk->enc_id == enc_id) { - blk->enc_id = 0; - DPU_DEBUG("rel enc %d %d %d\n", enc_id, - type, blk->id); - } - } + int i; + + for (i = 0; i < cnt; i++) { + if (res_mapping[i] == enc_id) + res_mapping[i] = 0; } } -void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc) +void dpu_rm_release(struct dpu_global_state *global_state, + struct drm_encoder *enc) { - mutex_lock(&rm->rm_lock); - - _dpu_rm_release_reservation(rm, enc->base.id); - - mutex_unlock(&rm->rm_lock); + _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id, + ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->mixer_to_enc_id, + ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->ctl_to_enc_id, + ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->intf_to_enc_id, + ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id); } int 
dpu_rm_reserve( struct dpu_rm *rm, + struct dpu_global_state *global_state, struct drm_encoder *enc, struct drm_crtc_state *crtc_state, - struct msm_display_topology topology, - bool test_only) + struct msm_display_topology topology) { struct dpu_rm_requirements reqs; int ret; @@ -616,31 +508,75 @@ int dpu_rm_reserve( if (!drm_atomic_crtc_needs_modeset(crtc_state)) return 0; - DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n", - enc->base.id, crtc_state->crtc->base.id, test_only); + if (IS_ERR(global_state)) { + DPU_ERROR("failed to global state\n"); + return PTR_ERR(global_state); + } - mutex_lock(&rm->rm_lock); + DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n", + enc->base.id, crtc_state->crtc->base.id); - ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs, - topology); + ret = _dpu_rm_populate_requirements(enc, &reqs, topology); if (ret) { DPU_ERROR("failed to populate hw requirements\n"); - goto end; + return ret; } - ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs); - if (ret) { + ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs); + if (ret) DPU_ERROR("failed to reserve hw resources: %d\n", ret); - _dpu_rm_release_reservation(rm, enc->base.id); - } else if (test_only) { - /* test_only: test the reservation and then undo */ - DPU_DEBUG("test_only: discard test [enc: %d]\n", - enc->base.id); - _dpu_rm_release_reservation(rm, enc->base.id); - } -end: - mutex_unlock(&rm->rm_lock); + return ret; } + +int dpu_rm_get_assigned_resources(struct dpu_rm *rm, + struct dpu_global_state *global_state, uint32_t enc_id, + enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size) +{ + struct dpu_hw_blk **hw_blks; + uint32_t *hw_to_enc_id; + int i, num_blks, max_blks; + + switch (type) { + case DPU_HW_BLK_PINGPONG: + hw_blks = rm->pingpong_blks; + hw_to_enc_id = global_state->pingpong_to_enc_id; + max_blks = ARRAY_SIZE(rm->pingpong_blks); + break; + case DPU_HW_BLK_LM: + hw_blks = rm->mixer_blks; + hw_to_enc_id = global_state->mixer_to_enc_id; + max_blks = ARRAY_SIZE(rm->mixer_blks); + break; + case DPU_HW_BLK_CTL: + hw_blks = rm->ctl_blks; + hw_to_enc_id = global_state->ctl_to_enc_id; + max_blks = ARRAY_SIZE(rm->ctl_blks); + break; + case DPU_HW_BLK_INTF: + hw_blks = rm->intf_blks; + hw_to_enc_id = global_state->intf_to_enc_id; + max_blks = ARRAY_SIZE(rm->intf_blks); + break; + default: + DPU_ERROR("blk type %d not managed by rm\n", type); + return 0; + } + + num_blks = 0; + for (i = 0; i < max_blks; i++) { + if (hw_to_enc_id[i] != enc_id) + continue; + + if (num_blks == blks_size) { + DPU_ERROR("More than %d resources assigned to enc %d\n", + blks_size, enc_id); + break; + } + blks[num_blks++] = hw_blks[i]; + } + + return num_blks; +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index 9c580a017094..6d2b04f306f0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -11,37 +11,24 @@ #include "msm_kms.h" #include "dpu_hw_top.h" +struct dpu_global_state; + /** * struct dpu_rm - DPU dynamic hardware resource manager - * @hw_blks: array of lists of hardware resources present in the system, one - * list per type of hardware block + * @pingpong_blks: array of pingpong hardware resources + * @mixer_blks: array of layer mixer hardware resources + * @ctl_blks: array of ctl hardware resources + * @intf_blks: array of intf hardware resources * @lm_max_width: cached layer mixer maximum width * @rm_lock: resource manager mutex */ struct dpu_rm { - struct list_head 
hw_blks[DPU_HW_BLK_MAX]; - uint32_t lm_max_width; - struct mutex rm_lock; -}; + struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0]; + struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0]; + struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0]; + struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0]; -/** - * struct dpu_rm_hw_blk - resource manager internal structure - * forward declaration for single iterator definition without void pointer - */ -struct dpu_rm_hw_blk; - -/** - * struct dpu_rm_hw_iter - iterator for use with dpu_rm - * @hw: dpu_hw object requested, or NULL on failure - * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator. - * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder - * @type: Hardware Block Type client wishes to search for. - */ -struct dpu_rm_hw_iter { - void *hw; - struct dpu_rm_hw_blk *blk; - uint32_t enc_id; - enum dpu_hw_blk_type type; + uint32_t lm_max_width; }; /** @@ -74,14 +61,13 @@ int dpu_rm_destroy(struct dpu_rm *rm); * @drm_enc: DRM Encoder handle * @crtc_state: Proposed Atomic DRM CRTC State handle * @topology: Pointer to topology info for the display - * @test_only: Atomic-Test phase, discard results (unless property overrides) * @Return: 0 on Success otherwise -ERROR */ int dpu_rm_reserve(struct dpu_rm *rm, + struct dpu_global_state *global_state, struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state, - struct msm_display_topology topology, - bool test_only); + struct msm_display_topology topology); /** * dpu_rm_reserve - Given the encoder for the display chain, release any @@ -90,31 +76,14 @@ int dpu_rm_reserve(struct dpu_rm *rm, * @enc: DRM Encoder handle * @Return: 0 on Success otherwise -ERROR */ -void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc); +void dpu_rm_release(struct dpu_global_state *global_state, + struct drm_encoder *enc); /** - * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list - * using dpu_rm_get_hw - * @iter: iter object to initialize - * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder - * @type: Hardware Block Type client wishes to search for. - */ -void dpu_rm_init_hw_iter( - struct dpu_rm_hw_iter *iter, - uint32_t enc_id, - enum dpu_hw_blk_type type); -/** - * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type - * Meant to do a single pass through the hardware list to iteratively - * retrieve hardware blocks of a given type for a given encoder. - * Initialize an iterator object. - * Set hw block type of interest. Set encoder id of interest, 0 for any. - * Function returns first hw of type for that encoder. - * Subsequent calls will return the next reserved hw of that type in-order. - * Iterator HW pointer will be null on failure to find hw. - * @rm: DPU Resource Manager handle - * @iter: iterator object - * @Return: true on match found, false on no match found + * Get hw resources of the given type that are assigned to this encoder. 
*/ -bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter); +int dpu_rm_get_assigned_resources(struct dpu_rm *rm, + struct dpu_global_state *global_state, uint32_t enc_id, + enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size); #endif /* __DPU_RM_H__ */ + diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 93ab36bd8df3..5e8c3f3e6625 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -24,7 +24,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id) int rc; if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) { - DPU_ERROR("invalid arguments vbif %d\n", vbif != 0); + DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL); return -EINVAL; } @@ -106,7 +106,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif, u32 val; if (!vbif || !vbif->cap) { - DPU_ERROR("invalid arguments vbif %d\n", vbif != 0); + DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL); return -EINVAL; } @@ -164,7 +164,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, if (!vbif || !mdp) { DPU_DEBUG("invalid arguments vbif %d mdp %d\n", - vbif != 0, mdp != 0); + vbif != NULL, mdp != NULL); return; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index b3d0a0fe76b9..998bef1190a3 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -1191,8 +1191,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, msecs_to_jiffies(50)); if (ret == 0) - dev_warn(dev->dev, "pp done time out, lm=%d\n", - mdp5_cstate->pipeline.mixer->lm); + dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n", + mdp5_cstate->pipeline.mixer->lm); } static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 6af26ab5b09d..4b363bd7ddff 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector) return num; } -static int dsi_mgr_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { int id = dsi_mgr_connector_get_id(connector); @@ -506,6 +506,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; + struct msm_dsi_pll *src_pll; bool is_dual_dsi = IS_DUAL_DSI(); int ret; @@ -539,6 +540,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) id, ret); } + /* Save PLL status if it is a clock source */ + src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); + msm_dsi_pll_save_state(src_pll); + ret = msm_dsi_host_power_off(host); if (ret) pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index b0cfa67d2a57..f509ebd77500 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) if (!phy || !phy->cfg->ops.disable) return; - /* Save PLL status if it is a clock source */ - if (phy->usecase != MSM_DSI_PHY_SLAVE) 
- msm_dsi_pll_save_state(phy->pll); - phy->cfg->ops.disable(phy); dsi_phy_regulator_disable(phy); diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index 1c894548dd72..6ac04fc303f5 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) if (pll_10nm->slave) dsi_pll_enable_pll_bias(pll_10nm->slave); + rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0); + if (rc) { + pr_err("vco_set_rate failed, rc=%d\n", rc); + return rc; + } + /* Start PLL */ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x01); diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c index a78d6077802b..106a67473af5 100644 --- a/drivers/gpu/drm/msm/edp/edp.c +++ b/drivers/gpu/drm/msm/edp/edp.c @@ -178,10 +178,6 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, goto fail; } - ret = drm_bridge_attach(encoder, edp->bridge, NULL, 0); - if (ret) - goto fail; - priv->bridges[priv->num_bridges++] = edp->bridge; priv->connectors[priv->num_connectors++] = edp->connector; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 3a8646535c14..737453b6e596 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -327,10 +327,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - ret = drm_bridge_attach(encoder, hdmi->bridge, NULL, 0); - if (ret) - goto fail; - priv->bridges[priv->num_bridges++] = hdmi->bridge; priv->connectors[priv->num_connectors++] = hdmi->connector; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 2a82c23a6e4d..29295dee2a2e 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -444,8 +444,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (!dev->dma_parms) { dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); - if (!dev->dma_parms) - return -ENOMEM; + if (!dev->dma_parms) { + ret = -ENOMEM; + goto err_msm_uninit; + } } dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 9e0953c2b7ce..30584eaf8cc8 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -157,7 +157,17 @@ struct msm_gem_submit { uint32_t handle; }; uint64_t iova; - } bos[0]; + } bos[]; }; +/* helper to determine of a buffer in submit should be dumped, used for both + * devcoredump and debugfs cmdstream dumping: + */ +static inline bool +should_dump(struct msm_gem_submit *submit, int idx) +{ + extern bool rd_full; + return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP); +} + #endif /* __MSM_GEM_H__ */ diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 18f3a5c53ffb..615c5cda5389 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -355,16 +355,34 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, state->cmd = kstrdup(cmd, GFP_KERNEL); if (submit) { - int i; - - state->bos = kcalloc(submit->nr_cmds, + int i, nr = 0; + + /* count # of buffers to dump: */ + for (i = 0; i < submit->nr_bos; i++) + if (should_dump(submit, i)) + nr++; + /* always dump cmd bo's, but don't double count them: */ + for (i = 0; i < submit->nr_cmds; i++) + if (!should_dump(submit, submit->cmd[i].idx)) + nr++; + + state->bos = kcalloc(nr, sizeof(struct msm_gpu_state_bo), GFP_KERNEL); + 
for (i = 0; i < submit->nr_bos; i++) { + if (should_dump(submit, i)) { + msm_gpu_crashstate_get_bo(state, submit->bos[i].obj, + submit->bos[i].iova, submit->bos[i].flags); + } + } + for (i = 0; state->bos && i < submit->nr_cmds; i++) { int idx = submit->cmd[i].idx; - msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj, - submit->bos[idx].iova, submit->bos[idx].flags); + if (!should_dump(submit, submit->cmd[i].idx)) { + msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj, + submit->bos[idx].iova, submit->bos[idx].flags); + } } } diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index af7ceb246c7c..732f65df5c4f 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -43,7 +43,7 @@ #include "msm_gpu.h" #include "msm_gem.h" -static bool rd_full = false; +bool rd_full = false; MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents"); module_param_named(rd_full, rd_full, bool, 0600); @@ -336,12 +336,6 @@ static void snapshot_buf(struct msm_rd_state *rd, msm_gem_put_vaddr(&obj->base); } -static bool -should_dump(struct msm_gem_submit *submit, int idx) -{ - return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP); -} - /* called under struct_mutex */ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, const char *fmt, ...) |
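
The dpu_rm hunks above replace the per-type block lists and iterator with fixed arrays in struct dpu_rm plus a dpu_global_state object whose arrays map each block slot to the encoder id that currently owns it: reserving a block first checks reserved_by_other(), releasing an encoder clears every slot that maps to its id, and dpu_rm_get_assigned_resources() collects the owned slots for a given block type. The following standalone sketch uses illustrative names only (it is not the kernel API) to show the same reserve/release/lookup pattern on one block-type array:

/*
 * Minimal sketch of the index-to-encoder-id reservation scheme.
 * All names here are illustrative, not the DPU driver's symbols.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_BLOCKS 8

struct global_state {
	uint32_t blk_to_enc_id[MAX_BLOCKS];	/* 0 means the slot is free */
};

/* A slot is unavailable only if a different encoder already owns it. */
static int reserved_by_other(const uint32_t *map, int idx, uint32_t enc_id)
{
	return map[idx] && map[idx] != enc_id;
}

static int reserve_blk(struct global_state *gs, int idx, uint32_t enc_id)
{
	if (idx < 0 || idx >= MAX_BLOCKS ||
	    reserved_by_other(gs->blk_to_enc_id, idx, enc_id))
		return -1;
	gs->blk_to_enc_id[idx] = enc_id;
	return 0;
}

/* Release: clear every slot that maps back to this encoder. */
static void release_enc(struct global_state *gs, uint32_t enc_id)
{
	for (int i = 0; i < MAX_BLOCKS; i++)
		if (gs->blk_to_enc_id[i] == enc_id)
			gs->blk_to_enc_id[i] = 0;
}

/* Lookup: gather the slot indices owned by an encoder, bounded by out_size. */
static int get_assigned(const struct global_state *gs, uint32_t enc_id,
			int *out, int out_size)
{
	int n = 0;

	for (int i = 0; i < MAX_BLOCKS; i++) {
		if (gs->blk_to_enc_id[i] != enc_id)
			continue;
		if (n == out_size)
			break;
		out[n++] = i;
	}
	return n;
}

int main(void)
{
	struct global_state gs;
	int idx[MAX_BLOCKS];

	memset(&gs, 0, sizeof(gs));
	reserve_blk(&gs, 2, 31);
	reserve_blk(&gs, 3, 31);
	release_enc(&gs, 31);
	printf("slots still assigned after release: %d\n",
	       get_assigned(&gs, 31, idx, MAX_BLOCKS));
	return 0;
}

Because the ownership map lives in a separate state object rather than in the hardware-block structures, the same arrays can be duplicated per atomic state, which is the point of dropping the rm_lock mutex from the reserve and release paths.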
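
The msm_gpu.c and msm_rd.c hunks move should_dump() into msm_gem.h so the crash-state capture can reuse it: a buffer is captured when rd_full is set or its DUMP flag is set, and command buffers are always captured but must not be counted twice when sizing the allocation. A small illustrative sketch of that counting logic, with assumed names rather than the driver's structures, is below:

/*
 * Sketch of the buffer-selection rule used when sizing a crash dump.
 * Names and layouts are illustrative, not the msm driver's types.
 */
#include <stdbool.h>
#include <stdio.h>

#define BO_DUMP	0x1

struct bo  { unsigned flags; };
struct cmd { int idx; };		/* index into the bo array */

static bool rd_full;			/* module-parameter-style override */

static bool should_dump(const struct bo *bos, int idx)
{
	return rd_full || (bos[idx].flags & BO_DUMP);
}

static int count_dumped(const struct bo *bos, int nr_bos,
			const struct cmd *cmds, int nr_cmds)
{
	int i, nr = 0;

	/* buffers explicitly flagged for dumping (or everything if rd_full) */
	for (i = 0; i < nr_bos; i++)
		if (should_dump(bos, i))
			nr++;

	/* command buffers are always dumped, but only count the ones
	 * the first pass did not already include */
	for (i = 0; i < nr_cmds; i++)
		if (!should_dump(bos, cmds[i].idx))
			nr++;

	return nr;
}

int main(void)
{
	struct bo  bos[3]  = { { BO_DUMP }, { 0 }, { 0 } };
	struct cmd cmds[1] = { { 1 } };

	printf("%d buffers to capture\n", count_dumped(bos, 3, cmds, 1));
	return 0;
}

The capture loops in the patch follow the same split: one pass records the flagged buffers, a second pass records command buffers that the first pass skipped.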