30 files changed, 1469 insertions, 637 deletions
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index f50309081ac..e59480db9ee 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -91,3 +91,12 @@ transferred to 'device' domain.
 This attribute can be also used for dma_unmap_{single,page,sg} functions
 family to force buffer to stay in device domain after releasing a mapping
 for it. Use this attribute with care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default the DMA-mapping subsystem is allowed to assemble the buffer
+allocated by the dma_alloc_attrs() function from individual pages, as long
+as it can be mapped as a contiguous chunk into the device's DMA address
+space. By specifying this attribute, the allocated buffer is forced to be
+contiguous in physical memory as well.
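The attribute is consumed through the standard dma_attrs plumbing, exactly as the Exynos changes further down in this diff do it. A minimal usage sketch based on those calls (dev, size and the error handling are placeholders):

	struct dma_attrs attrs;
	dma_addr_t dma_handle;
	void *vaddr;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	/* buffer is now contiguous in physical memory as well as in the
	 * device's DMA address space */
	vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
	if (!vaddr)
		return -ENOMEM;

	/* ... use the buffer ... */

	dma_free_attrs(dev, size, vaddr, dma_handle, &attrs);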
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 58bc3e4d3bd..f076f209c7a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1036,7 +1036,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+					  gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct page **pages;
 	int count = size >> PAGE_SHIFT;
@@ -1050,6 +1051,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
 	if (!pages)
 		return NULL;
 
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+	{
+		unsigned long order = get_order(size);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, count, order);
+		if (!page)
+			goto error;
+
+		__dma_clear_buffer(page, size);
+
+		for (i = 0; i < count; i++)
+			pages[i] = page + i;
+
+		return pages;
+	}
+
 	while (count) {
 		int j, order = __fls(count);
@@ -1083,14 +1101,21 @@ error:
 	return NULL;
 }
 
-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+			       size_t size, struct dma_attrs *attrs)
 {
 	int count = size >> PAGE_SHIFT;
 	int array_size = count * sizeof(struct page *);
 	int i;
-	for (i = 0; i < count; i++)
-		if (pages[i])
-			__free_pages(pages[i], 0);
+
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+		dma_release_from_contiguous(dev, pages[0], count);
+	} else {
+		for (i = 0; i < count; i++)
+			if (pages[i])
+				__free_pages(pages[i], 0);
+	}
+
 	if (array_size <= PAGE_SIZE)
 		kfree(pages);
 	else
@@ -1252,7 +1277,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (gfp & GFP_ATOMIC)
 		return __iommu_alloc_atomic(dev, size, handle);
 
-	pages = __iommu_alloc_buffer(dev, size, gfp);
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
 	if (!pages)
 		return NULL;
 
@@ -1273,7 +1298,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 err_mapping:
 	__iommu_remove_mapping(dev, *handle, size);
 err_buffer:
-	__iommu_free_buffer(dev, pages, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
 	return NULL;
 }
 
@@ -1329,7 +1354,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
-	__iommu_free_buffer(dev, pages, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
 }
 
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2ba9d7fac34..19c01ca3cc7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1021,6 +1021,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 	/* Send any queued vblank events, lest the natives grow disquiet */
 	seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+	spin_lock(&dev->event_lock);
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
 		if (e->pipe != crtc)
 			continue;
@@ -1031,6 +1033,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 		drm_vblank_put(dev, e->pipe);
 		send_vblank_event(dev, e, seq, &now);
 	}
+	spin_unlock(&dev->event_lock);
 
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
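The drm_irq.c hunks close a race on dev->vblank_event_list: the vblank interrupt handler already walks that list under dev->event_lock, so drm_vblank_off() must take the same lock before draining queued events (vbl_lock is held with interrupts disabled at this point, so a plain spin_lock() suffices). The exynos_drm_crtc.c change further down applies the same rule to the driver's own pageflip_event_list. Schematically (not a literal excerpt):

	/* any reader or writer of a vblank/page-flip event list must hold
	 * dev->event_lock, because the IRQ handler takes it when sending
	 * completed events */
	spin_lock_irqsave(&dev->event_lock, flags);
	list_add_tail(&event->base.link, &dev_priv->pageflip_event_list);
	spin_unlock_irqrestore(&dev->event_lock, flags);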
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb0..86fb75d3fca 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
 	  Choose this option if you have a Samsung SoC EXYNOS chipset.
 	  If M is selected the module will be called exynosdrm.
 
+config DRM_EXYNOS_IOMMU
+	bool "EXYNOS DRM IOMMU Support"
+	depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+	help
+	  Choose this option if you want to use IOMMU feature for DRM.
+
 config DRM_EXYNOS_DMABUF
 	bool "EXYNOS DRM DMABUF"
 	depends on DRM_EXYNOS
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a..26813b8a505 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
 		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
 		exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU)	+= exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF)	+= exynos_drm_dmabuf.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o \
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b322..72bf97b96ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,73 +33,42 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-	dma_addr_t start_addr;
-	unsigned int npages, i = 0;
-	struct scatterlist *sgl;
 	int ret = 0;
+	enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return -EINVAL;
-	}
-
 	if (buf->dma_addr) {
 		DRM_DEBUG_KMS("already allocated.\n");
 		return 0;
 	}
 
-	if (buf->size >= SZ_1M) {
-		npages = buf->size >> SECTION_SHIFT;
-		buf->page_size = SECTION_SIZE;
-	} else if (buf->size >= SZ_64K) {
-		npages = buf->size >> 16;
-		buf->page_size = SZ_64K;
-	} else {
-		npages = buf->size >> PAGE_SHIFT;
-		buf->page_size = PAGE_SIZE;
-	}
+	init_dma_attrs(&buf->dma_attrs);
 
-	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!buf->sgt) {
-		DRM_ERROR("failed to allocate sg table.\n");
-		return -ENOMEM;
-	}
+	if (flags & EXYNOS_BO_NONCONTIG)
+		attr = DMA_ATTR_WRITE_COMBINE;
 
-	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-	if (ret < 0) {
-		DRM_ERROR("failed to initialize sg table.\n");
-		kfree(buf->sgt);
-		buf->sgt = NULL;
-		return -ENOMEM;
-	}
+	dma_set_attr(attr, &buf->dma_attrs);
 
-	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
-			&buf->dma_addr, GFP_KERNEL);
+	buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+			&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
 	if (!buf->kvaddr) {
 		DRM_ERROR("failed to allocate buffer.\n");
-		ret = -ENOMEM;
-		goto err1;
+		return -ENOMEM;
 	}
 
-	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
-	if (!buf->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
+	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!buf->sgt) {
+		DRM_ERROR("failed to allocate sg table.\n");
 		ret = -ENOMEM;
-		goto err2;
+		goto err_free_attrs;
 	}
 
-	sgl = buf->sgt->sgl;
-	start_addr = buf->dma_addr;
-
-	while (i < npages) {
-		buf->pages[i] = phys_to_page(start_addr);
-		sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
-		sg_dma_address(sgl) = start_addr;
-		start_addr += buf->page_size;
-		sgl = sg_next(sgl);
-		i++;
+	ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr,
+			buf->size);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgtable.\n");
+		goto err_free_sgt;
 	}
 
 	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
@@ -108,14 +77,14 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 			buf->size);
 
 	return ret;
-err2:
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
-	buf->dma_addr = (dma_addr_t)NULL;
-err1:
-	sg_free_table(buf->sgt);
+
+err_free_sgt:
 	kfree(buf->sgt);
 	buf->sgt = NULL;
+err_free_attrs:
+	dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+	buf->dma_addr = (dma_addr_t)NULL;
 
 	return ret;
 }
@@ -125,16 +94,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 {
 	DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-	/*
-	 * release only physically continuous memory and
-	 * non-continuous memory would be released by exynos
-	 * gem framework.
-	 */
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return;
-	}
-
 	if (!buf->dma_addr) {
 		DRM_DEBUG_KMS("dma_addr is invalid.\n");
 		return;
@@ -150,11 +109,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 	kfree(buf->sgt);
 	buf->sgt = NULL;
 
-	kfree(buf->pages);
-	buf->pages = NULL;
-
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
+	dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
 }
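With this change lowlevel_buffer_allocate() no longer hand-builds a scatterlist from a pages array; it asks the DMA-mapping core to describe whatever dma_alloc_attrs() returned. A condensed sketch of that export step in isolation (names borrowed from the hunk above):

	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return -ENOMEM;

	/* fill sgt with entries covering the dma_alloc_attrs() buffer;
	 * this works for both the contiguous and the page-assembled case */
	ret = dma_get_sgtable(dev->dev, sgt, buf->kvaddr, buf->dma_addr,
			buf->size);
	if (ret < 0) {
		kfree(sgt);
		return ret;
	}

	/* ... hand sgt to a consumer, e.g. the dma-buf exporter ... */

	sg_free_table(sgt);
	kfree(sgt);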
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba..25cf1628503 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 void exynos_drm_fini_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buffer);
 
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
 int exynos_drm_alloc_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buf,
 				unsigned int flags);
 
-/* release physical memory region, sgt and pages. */
+/* release physical memory region and sgt. */
 void exynos_drm_free_buf(struct drm_device *dev,
 				unsigned int flags,
 				struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4..2efa4b031d7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
 			goto out;
 		}
 
+		spin_lock_irq(&dev->event_lock);
 		list_add_tail(&event->base.link,
 				&dev_priv->pageflip_event_list);
+		spin_unlock_irq(&dev->event_lock);
 
 		crtc->fb = fb;
 		ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
 						NULL);
 		if (ret) {
 			crtc->fb = old_fb;
+
+			spin_lock_irq(&dev->event_lock);
 			drm_vblank_put(dev, exynos_crtc->pipe);
 			list_del(&event->base.link);
+			spin_unlock_irq(&dev->event_lock);
 
 			goto out;
 		}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886..539da9f4eb9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,26 +30,22 @@
 
 #include <linux/dma-buf.h>
 
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
-		unsigned int page_size)
+static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
+					struct exynos_drm_gem_buf *buf)
 {
 	struct sg_table *sgt = NULL;
-	struct scatterlist *sgl;
-	int i, ret;
+	int ret;
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt)
 		goto out;
 
-	ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
-	if (ret)
+	ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
+				buf->dma_addr, buf->size);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgtable.\n");
 		goto err_free_sgt;
-
-	if (page_size < PAGE_SIZE)
-		page_size = PAGE_SIZE;
-
-	for_each_sg(sgt->sgl, sgl, nr_pages, i)
-		sg_set_page(sgl, pages[i], page_size, 0);
+	}
 
 	return sgt;
 
@@ -68,32 +64,30 @@ static struct sg_table *
 	struct drm_device *dev = gem_obj->base.dev;
 	struct exynos_drm_gem_buf *buf;
 	struct sg_table *sgt = NULL;
-	unsigned int npages;
 	int nents;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
 
-	mutex_lock(&dev->struct_mutex);
-
 	buf = gem_obj->buffer;
-
-	/* there should always be pages allocated. */
-	if (!buf->pages) {
-		DRM_ERROR("pages is null.\n");
-		goto err_unlock;
+	if (!buf) {
+		DRM_ERROR("buffer is null.\n");
+		return sgt;
 	}
 
-	npages = buf->size / buf->page_size;
+	mutex_lock(&dev->struct_mutex);
 
-	sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
-	if (!sgt) {
-		DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+	sgt = exynos_get_sgt(dev, buf);
+	if (!sgt)
 		goto err_unlock;
-	}
+
+	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with iommu.\n");
+		sgt = NULL;
+		goto err_unlock;
+	}
 
-	DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
-			npages, buf->size, buf->page_size);
+	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -105,6 +99,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 						enum dma_data_direction dir)
 {
 	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+	sg_free_table(sgt);
 	kfree(sgt);
 	sgt = NULL;
@@ -196,7 +191,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 	struct scatterlist *sgl;
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct exynos_drm_gem_buf *buffer;
-	struct page *page;
 	int ret;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +227,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 		goto err_unmap_attach;
 	}
 
-	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
-	if (!buffer->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err_free_buffer;
-	}
-
 	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
 	if (!exynos_gem_obj) {
 		ret = -ENOMEM;
-		goto err_free_pages;
+		goto err_free_buffer;
 	}
 
 	sgl = sgt->sgl;
 
-	if (sgt->nents == 1) {
-		buffer->dma_addr = sg_dma_address(sgt->sgl);
-		buffer->size = sg_dma_len(sgt->sgl);
+	buffer->size = dma_buf->size;
+	buffer->dma_addr = sg_dma_address(sgl);
 
+	if (sgt->nents == 1) {
 		/* always physically continuous memory if sgt->nents is 1. */
 		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
 	} else {
-		unsigned int i = 0;
-
-		buffer->dma_addr = sg_dma_address(sgl);
-		while (i < sgt->nents) {
-			buffer->pages[i] = sg_page(sgl);
-			buffer->size += sg_dma_len(sgl);
-			sgl = sg_next(sgl);
-			i++;
-		}
-
+		/*
+		 * this case could be CONTIG or NONCONTIG type but for now
+		 * sets NONCONTIG.
+		 * TODO: find a way for the exporter to notify the importer
+		 * of its buffer type.
+		 */
 		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
 	}
 
@@ -277,9 +260,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
 	return &exynos_gem_obj->base;
 
-err_free_pages:
-	kfree(buffer->pages);
-	buffer->pages = NULL;
 err_free_buffer:
 	kfree(buffer);
 	buffer = NULL;
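These map/unmap callbacks are reached through the generic PRIME ioctls. A hypothetical userspace round-trip that would exercise them (using libdrm's drmIoctl(); gem_handle comes from an earlier GEM create):

	struct drm_prime_handle prime = {
		.handle = gem_handle,
		.flags = DRM_CLOEXEC,
	};

	/* exporter side: wrap the GEM object in a dma-buf fd */
	if (drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime) < 0)
		return -errno;

	/* importing prime.fd on another device eventually calls back
	 * into exynos_gem_map_dma_buf() / exynos_gem_unmap_dma_buf() */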
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd..2b287d2fc92 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,7 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
@@ -66,6 +67,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	INIT_LIST_HEAD(&private->pageflip_event_list);
 	dev->dev_private = (void *)private;
 
+	/*
+	 * create a mapping to manage the iommu table and set a pointer to
+	 * the iommu mapping structure in iommu_mapping of the private data.
+	 * this iommu_mapping can also be used to check whether iommu is
+	 * supported or not.
+	 */
+	ret = drm_create_iommu_mapping(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to create iommu mapping.\n");
+		goto err_crtc;
+	}
+
 	drm_mode_config_init(dev);
 
 	/* init kms poll for handling hpd */
@@ -80,7 +93,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	for (nr = 0; nr < MAX_CRTC; nr++) {
 		ret = exynos_drm_crtc_create(dev, nr);
 		if (ret)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +102,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
 		plane = exynos_plane_init(dev, possible_crtcs, false);
 		if (!plane)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	ret = drm_vblank_init(dev, MAX_CRTC);
 	if (ret)
-		goto err_crtc;
+		goto err_release_iommu_mapping;
 
 	/*
 	 * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +139,8 @@ err_drm_device:
 	exynos_drm_device_unregister(dev);
 err_vblank:
 	drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+	drm_release_iommu_mapping(dev);
 err_crtc:
 	drm_mode_config_cleanup(dev);
 	kfree(private);
@@ -142,6 +157,8 @@ static int exynos_drm_unload(struct drm_device *dev)
 	drm_vblank_cleanup(dev);
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
+
+	drm_release_iommu_mapping(dev);
 	kfree(dev->dev_private);
 
 	dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a3423103649..9c9c2dc7582 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -231,8 +231,7 @@ struct exynos_drm_g2d_private {
 	struct device		*dev;
 	struct list_head	inuse_cmdlist;
 	struct list_head	event_list;
-	struct list_head	gem_list;
-	unsigned int		gem_nr;
+	struct list_head	userptr_list;
 };
 
 struct drm_exynos_file_private {
@@ -241,6 +240,13 @@ struct drm_exynos_file_private {
 
 /*
  * Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ *	with iommu, the device address space starts from this address;
+ *	otherwise a default one is used.
+ * @da_space_size: size of the device address space.
+ *	if 0, a default value is used.
+ * @da_space_order: order of the device address space.
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -255,6 +261,10 @@ struct exynos_drm_private {
 	struct drm_crtc *crtc[MAX_CRTC];
 	struct drm_property *plane_zpos_property;
 	struct drm_property *crtc_mode_property;
+
+	unsigned long da_start;
+	unsigned long da_space_size;
+	unsigned long da_space_order;
 };
 
 /*
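exynos_drm_iommu.c itself is not part of this excerpt, so the following is only a plausible reconstruction of drm_create_iommu_mapping() and is_drm_iommu_supported(), inferred from the da_* fields added above and the ARM_DMA_USE_IOMMU dependency in Kconfig; the arm_iommu_* helpers are the stock ARM dma-mapping API, and the default values here are made up:

	int drm_create_iommu_mapping(struct drm_device *drm_dev)
	{
		struct exynos_drm_private *priv = drm_dev->dev_private;
		struct device *dev = drm_dev->dev;
		struct dma_iommu_mapping *mapping;

		/* hypothetical defaults for the device address space */
		if (!priv->da_start)
			priv->da_start = 0x20000000;
		if (!priv->da_space_size)
			priv->da_space_size = SZ_256M;
		if (!priv->da_space_order)
			priv->da_space_order = 4;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						priv->da_start,
						priv->da_space_size,
						priv->da_space_order);
		if (IS_ERR(mapping))
			return PTR_ERR(mapping);

		dev->archdata.mapping = mapping;
		return 0;
	}

	/* a non-NULL ARM IOMMU mapping doubles as the "iommu supported"
	 * flag consulted by check_fb_gem_memory_type() further down */
	bool is_drm_iommu_supported(struct drm_device *drm_dev)
	{
		return drm_dev->dev->archdata.mapping != NULL;
	}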
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 241ad1eeec6..d9afb11aac7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -226,8 +226,47 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 	 * already updated or not by exynos_drm_encoder_dpms function.
 	 */
 	exynos_encoder->updated = true;
+
+	/*
+	 * In case of setcrtc, there is no way to update encoder's dpms
+	 * so update it here.
+	 */
+	exynos_encoder->dpms = DRM_MODE_DPMS_ON;
+}
+
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_encoder *exynos_encoder;
+	struct exynos_drm_overlay_ops *overlay_ops;
+	struct exynos_drm_manager *manager;
+	struct drm_device *dev = fb->dev;
+	struct drm_encoder *encoder;
+
+	/*
+	 * make sure that overlay data are updated to real hardware
+	 * for all encoders.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		exynos_encoder = to_exynos_encoder(encoder);
+
+		/* if the encoder was disabled, just ignore it. */
+		if (exynos_encoder->dpms > DRM_MODE_DPMS_ON)
+			continue;
+
+		manager = exynos_encoder->manager;
+		overlay_ops = manager->overlay_ops;
+
+		/*
+		 * wait for vblank interrupt
+		 * - this makes sure that overlay data are updated to
+		 *	real hardware.
+		 */
+		if (overlay_ops->wait_for_vblank)
+			overlay_ops->wait_for_vblank(manager->dev);
+	}
 }
+
 static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
 {
 	struct drm_plane *plane;
@@ -499,14 +538,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
 
 	if (overlay_ops && overlay_ops->disable)
 		overlay_ops->disable(manager->dev, zpos);
-
-	/*
-	 * wait for vblank interrupt
-	 * - this makes sure that hardware overlay is disabled to avoid
-	 * for the dma accesses to memory after gem buffer was released
-	 * because the setting for disabling the overlay will be updated
-	 * at vsync.
-	 */
-	if (overlay_ops->wait_for_vblank)
-		overlay_ops->wait_for_vblank(manager->dev);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a..88bb25a2a91 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
 
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f993..7413f4b729b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,13 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
 
 #define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
 
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
 	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };
 
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+				struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+	unsigned int flags;
+
+	/*
+	 * if exynos drm driver supports iommu then framebuffer can use
+	 * all the buffer types.
+	 */
+	if (is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	flags = exynos_gem_obj->flags;
+
+	/*
+	 * without iommu support, physically non-continuous memory is not
+	 * supported for framebuffer.
+	 */
+	if (IS_NONCONTIG_BUFFER(flags)) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	/* make sure that overlay data are updated before releasing fb. */
+	exynos_drm_encoder_complete_scanout(fb);
+
 	drm_framebuffer_cleanup(fb);
 
 	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,14 +160,25 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 				struct drm_gem_object *obj)
 {
 	struct exynos_drm_fb *exynos_fb;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
 	int ret;
 
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+	if (ret < 0) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb) {
 		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
+	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer\n");
@@ -143,7 +186,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 	}
 
 	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
 
 	return &exynos_fb->fb;
 }
@@ -214,6 +256,9 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
 
 	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+		struct exynos_drm_gem_obj *exynos_gem_obj;
+		int ret;
+
 		obj = drm_gem_object_lookup(dev, file_priv,
 				mode_cmd->handles[i]);
 		if (!obj) {
@@ -222,6 +267,15 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 			return ERR_PTR(-ENOENT);
 		}
 
+		exynos_gem_obj = to_exynos_gem_obj(obj);
+
+		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+		if (ret < 0) {
+			DRM_ERROR("cannot use this gem memory type for fb.\n");
+			exynos_drm_fb_destroy(fb);
+			return ERR_PTR(ret);
+		}
+
 		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 67eb6ba56ed..a2232792e0c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/
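The memory-type restriction enforced by check_fb_gem_memory_type() is decided in userspace at allocation time. A hypothetical allocation through the Exynos GEM ioctl (struct and flag names taken from the exynos_drm.h uapi header; untested sketch):

	struct drm_exynos_gem_create req = {
		.size = width * height * 4,
		/*
		 * EXYNOS_BO_NONCONTIG buffers are only scanout-capable
		 * when the driver runs with IOMMU support; without it,
		 * framebuffers must be EXYNOS_BO_CONTIG.
		 */
		.flags = EXYNOS_BO_NONCONTIG,
	};

	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) < 0)
		return -errno;

	/* req.handle now names the GEM object backing the framebuffer */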