From a1ad35233345e7ddd9ea3ea7b841432f4723d743 Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Fri, 11 Jul 2014 11:59:22 -0400
Subject: [PATCH] drm/msm: fix potential deadlock in gpu init

Somewhere along the way, the firmware loader sprouted another lock
dependency, resulting in possible deadlock scenario:

  &dev->struct_mutex --> &sb->s_type->i_mutex_key#2 --> &mm->mmap_sem

which is problematic vs things like gem mmap.

So introduce a separate mutex to synchronize gpu init.

Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/adreno/adreno_gpu.c |  8 +++++---
 drivers/gpu/drm/msm/msm_drv.c           | 13 ++++++++-----
 drivers/gpu/drm/msm/msm_gpu.c           |  3 +++
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 76c1df73e74..655ce5b14ad 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -95,7 +95,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
 	if (ret) {
 		gpu->rb_iova = 0;
 		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -370,8 +370,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
+	mutex_lock(&drm->struct_mutex);
 	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
 			MSM_BO_UNCACHED);
+	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(gpu->memptrs_bo)) {
 		ret = PTR_ERR(gpu->memptrs_bo);
 		gpu->memptrs_bo = NULL;
@@ -379,13 +381,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+	gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo);
 	if (!gpu->memptrs) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
 	}
 
-	ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+	ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id,
 			&gpu->memptrs_iova);
 	if (ret) {
 		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a2f5bf6da6f..b447c01ad89 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -181,7 +181,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 	struct msm_kms *kms;
 	int ret;
 
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		dev_err(dev->dev, "failed to allocate private data\n");
@@ -314,13 +313,15 @@ fail:
 
 static void load_gpu(struct drm_device *dev)
 {
+	static DEFINE_MUTEX(init_lock);
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gpu *gpu;
 
+	mutex_lock(&init_lock);
+
 	if (priv->gpu)
-		return;
+		goto out;
 
-	mutex_lock(&dev->struct_mutex);
 	gpu = a3xx_gpu_init(dev);
 	if (IS_ERR(gpu)) {
 		dev_warn(dev->dev, "failed to load a3xx gpu\n");
@@ -330,7 +331,9 @@ static void load_gpu(struct drm_device *dev)
 
 	if (gpu) {
 		int ret;
+		mutex_lock(&dev->struct_mutex);
 		gpu->funcs->pm_resume(gpu);
+		mutex_unlock(&dev->struct_mutex);
 		ret = gpu->funcs->hw_init(gpu);
 		if (ret) {
 			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
@@ -340,12 +343,12 @@ static void load_gpu(struct drm_device *dev)
 			/* give inactive pm a chance to kick in: */
 			msm_gpu_retire(gpu);
 		}
-
 	}
 
 	priv->gpu = gpu;
 
-	mutex_unlock(&dev->struct_mutex);
+out:
+	mutex_unlock(&init_lock);
 }
 
 static int msm_open(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 915240b4b80..4a0dce58774 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -612,8 +612,11 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}
 
 	gpu->id = msm_register_mmu(drm, gpu->mmu);
+
 	/* Create ringbuffer: */
+	mutex_lock(&drm->struct_mutex);
 	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
+	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(gpu->rb)) {
 		ret = PTR_ERR(gpu->rb);
 		gpu->rb = NULL;
-- 
2.41.0
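
[Editor's note, not part of the patch: the core of the fix is the load_gpu() rework, where one-time GPU init is serialized by its own static mutex instead of dev->struct_mutex, and struct_mutex is then taken only around the few calls that still need it. Below is a rough user-space sketch of that pattern in plain C with pthreads; shared_lock, hypothetical_gpu_init() and the printf are illustrative stand-ins, not driver code.]

/*
 * Sketch of the "dedicated init lock" pattern used by the patch:
 * a function-local static mutex serializes one-time init, so the
 * widely shared lock (dev->struct_mutex in the driver) is only held
 * around the short sections that actually require it.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for dev->struct_mutex */
static void *gpu;                                               /* stands in for priv->gpu */

static void *hypothetical_gpu_init(void)
{
	/* heavy setup (e.g. firmware load) happens WITHOUT shared_lock held */
	static int dummy;
	return &dummy;
}

static void load_gpu(void)
{
	/* dedicated init lock, like the static DEFINE_MUTEX(init_lock) in the patch */
	static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&init_lock);

	if (gpu)			/* already initialized by an earlier caller */
		goto out;

	gpu = hypothetical_gpu_init();

	/* only the steps that genuinely need the shared lock take it */
	pthread_mutex_lock(&shared_lock);
	printf("resume/init under shared lock\n");
	pthread_mutex_unlock(&shared_lock);

out:
	pthread_mutex_unlock(&init_lock);
}

int main(void)
{
	load_gpu();
	load_gpu();	/* second call hits the guard and returns immediately */
	return 0;
}

The second call mirrors the "check priv->gpu under init_lock, goto out" shape the patch adds, and because init is no longer done under the shared lock, the shared lock never sits below the filesystem/mmap locks the firmware loader may take.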