diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 2072228cf75b..0c60a3b0f02f 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -21,6 +21,8 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
@@ -90,6 +92,9 @@ struct rk_iommu {
 	struct iommu iommu_wrapper;
 	struct list_head node; /* entry in rk_iommu_domain.iommus */
 	struct iommu_domain *domain; /* domain to which iommu is attached */
+	struct notifier_block genpd_nb;
+	spinlock_t hw_lock; /* lock for register access */
+	bool is_powered; /* power domain is on and register clock is enabled */
 };
 
 static inline void rk_table_flush(u32 *va, unsigned int count)
@@ -285,6 +290,9 @@ static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
 			       size_t size)
 {
 	dma_addr_t iova_end = iova + size;
+
+	assert_spin_locked(&iommu->hw_lock);
+
 	/*
 	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
 	 * entire iotlb rather than iterate over individual iovas.
@@ -295,11 +303,15 @@
 
 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
 {
+	assert_spin_locked(&iommu->hw_lock);
+
 	return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
 }
 
 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
 {
+	assert_spin_locked(&iommu->hw_lock);
+
 	return rk_iommu_read(iommu, RK_MMU_STATUS) &
 	       RK_MMU_STATUS_PAGING_ENABLED;
 }
@@ -308,6 +320,8 @@
 static int rk_iommu_enable_stall(struct rk_iommu *iommu)
 {
 	int ret;
 
+	assert_spin_locked(&iommu->hw_lock);
+
 	if (rk_iommu_is_stall_active(iommu))
 		return 0;
@@ -329,6 +343,8 @@
 static int rk_iommu_disable_stall(struct rk_iommu *iommu)
 {
 	int ret;
 
+	assert_spin_locked(&iommu->hw_lock);
+
 	if (!rk_iommu_is_stall_active(iommu))
 		return 0;
@@ -346,6 +362,8 @@
 static int rk_iommu_enable_paging(struct rk_iommu *iommu)
 {
 	int ret;
 
+	assert_spin_locked(&iommu->hw_lock);
+
 	if (rk_iommu_is_paging_enabled(iommu))
 		return 0;
@@ -363,6 +381,8 @@
 static int rk_iommu_disable_paging(struct rk_iommu *iommu)
 {
 	int ret;
 
+	assert_spin_locked(&iommu->hw_lock);
+
 	if (!rk_iommu_is_paging_enabled(iommu))
 		return 0;
@@ -381,6 +401,8 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
 	int ret;
 	u32 dte_addr;
 
+	assert_spin_locked(&iommu->hw_lock);
+
 	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that upper 5 nybbles are read back.
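The hunks above establish the convention the rest of the patch relies on: every low-level helper that touches MMU registers asserts that hw_lock is held, and the new is_powered flag (valid only under that lock) records whether the IOMMU's power domain is up. A minimal sketch of the resulting call pattern follows; the example_* names are hypothetical and not part of the driver:

#include <linux/spinlock.h>

struct example_iommu {
	spinlock_t hw_lock;	/* serializes all register access */
	bool is_powered;	/* may only be read/written under hw_lock */
};

/* Low-level helper: never takes the lock itself, only asserts it. */
static void example_touch_hw(struct example_iommu *iommu)
{
	assert_spin_locked(&iommu->hw_lock);
	/* ... MMIO reads/writes would go here ... */
}

/* Entry point: takes the lock, checks power state, calls the helper. */
static void example_entry(struct example_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->hw_lock, flags);
	if (iommu->is_powered)
		example_touch_hw(iommu);
	spin_unlock_irqrestore(&iommu->hw_lock, flags);
}

Keeping the assertion in the helpers rather than locking there avoids recursive locking, since the helpers call one another (rk_iommu_enable_stall() calls rk_iommu_is_stall_active(), for instance).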
@@ -403,6 +425,50 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
 	return ret;
 }
 
+static int rk_iommu_startup(struct rk_iommu *iommu)
+{
+	struct iommu_domain *domain = iommu->domain;
+	struct rk_iommu_domain *rk_domain;
+	phys_addr_t dte_addr;
+	int ret;
+
+	assert_spin_locked(&iommu->hw_lock);
+
+	ret = rk_iommu_enable_stall(iommu);
+	if (ret)
+		return ret;
+
+	ret = rk_iommu_force_reset(iommu);
+	if (ret)
+		return ret;
+
+	rk_domain = domain->priv;
+	dte_addr = virt_to_phys(rk_domain->dt);
+	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
+	rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+	rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+
+	ret = rk_iommu_enable_paging(iommu);
+	if (ret)
+		return ret;
+
+	rk_iommu_disable_stall(iommu);
+
+	return ret;
+}
+
+static void rk_iommu_shutdown(struct rk_iommu *iommu)
+{
+	assert_spin_locked(&iommu->hw_lock);
+
+	/* Ignore error while disabling, just keep going */
+	rk_iommu_enable_stall(iommu);
+	rk_iommu_disable_paging(iommu);
+	rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
+	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+	rk_iommu_disable_stall(iommu);
+}
+
 static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
 {
 	u32 dte_index, pte_index, page_offset;
@@ -416,6 +482,8 @@ static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
 	phys_addr_t page_addr_phys = 0;
 	u32 page_flags = 0;
 
+	assert_spin_locked(&iommu->hw_lock);
+
 	dte_index = rk_iova_dte_index(iova);
 	pte_index = rk_iova_pte_index(iova);
 	page_offset = rk_iova_page_offset(iova);
@@ -452,13 +520,22 @@ print_it:
 
 static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 {
 	struct rk_iommu *iommu = dev_id;
+	irqreturn_t ret = IRQ_HANDLED;
+	unsigned long flags;
 	u32 status;
 	u32 int_status;
 	dma_addr_t iova;
 
+	spin_lock_irqsave(&iommu->hw_lock, flags);
+
+	if (!iommu->is_powered)
+		goto done;
+
 	int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
-	if (int_status == 0)
-		return IRQ_NONE;
+	if (int_status == 0) {
+		ret = IRQ_NONE;
+		goto done;
+	}
 
 	iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
@@ -499,7 +576,10 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 
 	rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
 
-	return IRQ_HANDLED;
+done:
+	spin_unlock_irqrestore(&iommu->hw_lock, flags);
+
+	return ret;
 }
 
 static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -539,9 +619,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
 	/* shootdown these iova from all iommus using this domain */
 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
 	list_for_each(pos, &rk_domain->iommus) {
-		struct rk_iommu *iommu;
-		iommu = list_entry(pos, struct rk_iommu, node);
-		rk_iommu_zap_lines(iommu, iova, size);
+		struct rk_iommu *iommu = list_entry(pos, struct rk_iommu, node);
+
+		spin_lock(&iommu->hw_lock);
+
+		/* Only zap TLBs of IOMMUs that are powered on. */
+		if (iommu->is_powered)
+			rk_iommu_zap_lines(iommu, iova, size);
+
+		spin_unlock(&iommu->hw_lock);
 	}
 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 }
@@ -730,8 +816,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	struct rk_iommu *iommu;
 	struct rk_iommu_domain *rk_domain = domain->priv;
 	unsigned long flags;
-	int ret;
-	phys_addr_t dte_addr;
+	int ret = 0;
 
 	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
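Two details in the hunks above are worth calling out. First, rk_iommu_startup() and rk_iommu_shutdown() factor the hardware programming sequence out of attach/detach so the genpd notifier added later can replay it across power transitions. Second, rk_iommu_zap_iova() now nests the per-IOMMU hw_lock inside the domain's iommus_lock; because the outer lock is taken with spin_lock_irqsave(), interrupts are already disabled when the inner lock is acquired, so plain spin_lock() suffices there. A condensed sketch of that nesting, again with hypothetical example_* names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_iommu {
	struct list_head node;		/* entry in example_domain.iommus */
	spinlock_t hw_lock;
	bool is_powered;
};

struct example_domain {
	struct list_head iommus;	/* all IOMMUs attached to this domain */
	spinlock_t iommus_lock;		/* protects the list */
};

/* Register-level TLB zap; caller must hold hw_lock. */
static void example_zap_lines(struct example_iommu *iommu)
{
	assert_spin_locked(&iommu->hw_lock);
	/* ... write zap commands to the hardware here ... */
}

static void example_zap_all(struct example_domain *domain)
{
	struct example_iommu *iommu;
	unsigned long flags;

	/* Outer lock disables interrupts for the whole list walk. */
	spin_lock_irqsave(&domain->iommus_lock, flags);
	list_for_each_entry(iommu, &domain->iommus, node) {
		/* Inner lock can skip irqsave: IRQs are already off. */
		spin_lock(&iommu->hw_lock);
		if (iommu->is_powered)
			example_zap_lines(iommu);
		spin_unlock(&iommu->hw_lock);
	}
	spin_unlock_irqrestore(&domain->iommus_lock, flags);
}

Skipping powered-down IOMMUs is safe because their TLBs lose state anyway, and rk_iommu_startup() issues RK_MMU_CMD_ZAP_CACHE when power returns.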
@@ -741,29 +826,22 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	if (!iommu)
 		return 0;
 
-	ret = rk_iommu_enable_stall(iommu);
-	if (ret)
-		return ret;
-
-	ret = rk_iommu_force_reset(iommu);
-	if (ret)
-		return ret;
-
+	spin_lock_irqsave(&iommu->hw_lock, flags);
 	iommu->domain = domain;
+	if (iommu->is_powered) {
+		ret = rk_iommu_startup(iommu);
+		if (ret)
+			iommu->domain = NULL;
+	}
+	spin_unlock_irqrestore(&iommu->hw_lock, flags);
+
+	if (ret)
+		return ret;
 
 	ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
 			       IRQF_SHARED, dev_name(dev), iommu);
 	if (ret)
-		return ret;
-
-	dte_addr = virt_to_phys(rk_domain->dt);
-	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
-	rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
-	rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
-
-	ret = rk_iommu_enable_paging(iommu);
-	if (ret)
-		return ret;
+		goto err_shutdown;
 
 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
 	list_add_tail(&iommu->node, &rk_domain->iommus);
@@ -771,9 +849,16 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 
 	dev_info(dev, "Attached to iommu domain\n");
 
-	rk_iommu_disable_stall(iommu);
-
 	return 0;
+
+err_shutdown:
+	spin_lock_irqsave(&iommu->hw_lock, flags);
+	if (iommu->is_powered)
+		rk_iommu_shutdown(iommu);
+	iommu->domain = NULL;
+	spin_unlock_irqrestore(&iommu->hw_lock, flags);
+
+	return ret;
 }
 
 static void rk_iommu_detach_device(struct iommu_domain *domain,
@@ -792,17 +877,14 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 	list_del_init(&iommu->node);
 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 
-	/* Ignore error while disabling, just keep going */
-	rk_iommu_enable_stall(iommu);
-	rk_iommu_disable_paging(iommu);
-	rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
-	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
-	rk_iommu_disable_stall(iommu);
+	spin_lock_irqsave(&iommu->hw_lock, flags);
+	if (iommu->is_powered)
+		rk_iommu_shutdown(iommu);
+	iommu->domain = NULL;
+	spin_unlock_irqrestore(&iommu->hw_lock, flags);
 
 	devm_free_irq(dev, iommu->irq, iommu);
 
-	iommu->domain = NULL;
-
 	dev_info(dev, "Detached from iommu domain\n");
 }
 
@@ -966,6 +1048,57 @@ static const struct iommu_ops rk_iommu_ops = {
 	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
 };
 
+static int rk_iommu_power_on(struct rk_iommu *iommu)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&iommu->hw_lock, flags);
+
+	iommu->is_powered = true;
+	if (iommu->domain)
+		ret = rk_iommu_startup(iommu);
+
+	spin_unlock_irqrestore(&iommu->hw_lock, flags);
+
+	return ret;
+}
+
+static void rk_iommu_power_off(struct rk_iommu *iommu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&iommu->hw_lock, flags);
+
+	if (iommu->domain)
+		rk_iommu_shutdown(iommu);
+	iommu->is_powered = false;
+
+	spin_unlock_irqrestore(&iommu->hw_lock, flags);
+}
+
+static int rk_iommu_genpd_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct rk_iommu *iommu = container_of(nb, struct rk_iommu, genpd_nb);
+	int ret = 0;
+
+	switch (action) {
+	case PM_GENPD_POST_POWER_ON:
+		ret = rk_iommu_power_on(iommu);
+		break;
+
+	case PM_GENPD_POWER_OFF_PREPARE:
+		rk_iommu_power_off(iommu);
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return notifier_from_errno(ret);
+}
+
 static int rk_iommu_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -978,6 +1111,8 @@
 
 	platform_set_drvdata(pdev, iommu);
 	iommu->dev = dev;
+	iommu->is_powered = true;
+	spin_lock_init(&iommu->hw_lock);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	iommu->base = devm_ioremap_resource(&pdev->dev, res);
@@ -990,6 +1125,13 @@
 		return -ENXIO;
 	}
 
+	pm_runtime_no_callbacks(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	iommu->genpd_nb.notifier_call = rk_iommu_genpd_notify;
+	pm_genpd_register_notifier(dev, &iommu->genpd_nb);
+
 	iommu->iommu_wrapper.dev = iommu->dev;
 	iommu_add(&iommu->iommu_wrapper);
 
@@ -999,7 +1141,11 @@
 static int rk_iommu_remove(struct platform_device *pdev)
 {
 	struct rk_iommu *iommu = platform_get_drvdata(pdev);
+
 	iommu_del(&iommu->iommu_wrapper);
+	pm_genpd_unregister_notifier(iommu->dev, &iommu->genpd_nb);
+	pm_runtime_disable(&pdev->dev);
+
 	return 0;
 }
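In probe(), runtime PM is wired up purely for bookkeeping: pm_runtime_no_callbacks() tells the PM core the device has no runtime PM callbacks of its own, so runtime PM activity only propagates toward the power domain, and the real save/restore work happens in the genpd notifier. Note that pm_genpd_register_notifier() and the PM_GENPD_POWER_OFF_PREPARE / PM_GENPD_POST_POWER_ON events are genpd extensions carried in this tree rather than a mainline interface. Under that same assumption, a skeleton consumer of the notifier looks like this (illustrative only, not part of the patch):

#include <linux/notifier.h>
#include <linux/pm_domain.h>

static int example_genpd_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	switch (action) {
	case PM_GENPD_POWER_OFF_PREPARE:
		/*
		 * The domain is still powered: quiesce the hardware and
		 * save any state that will be lost across power-off.
		 */
		break;

	case PM_GENPD_POST_POWER_ON:
		/*
		 * The domain is powered again, but registers now hold
		 * reset values: reprogram them before first use.
		 */
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

This ordering is what makes the shutdown/startup split above safe: rk_iommu_shutdown() runs before power is cut, while registers are still accessible, and rk_iommu_startup() runs after power returns, reprogramming the page-table base and interrupt mask from iommu->domain.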