drm/msm: add support for "stolen" mem
Add support for using the VRAM carveout (if specified in the dtb) for the fbdev scanout buffer. This allows drm/msm to take over a bootloader splash-screen, and avoids the on-screen corruption that results if the kernel reuses, for its own purposes, memory that is still being scanned out.

Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 5bf9c0b614
commit 072f1f9168
4 changed files with 66 additions and 11 deletions
drivers/gpu/drm/msm/msm_drv.c

@@ -182,21 +182,57 @@ static int get_mdp_ver(struct platform_device *pdev)
 	return 4;
 }
 
+#include <linux/of_address.h>
+
 static int msm_init_vram(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	unsigned long size = 0;
+	int ret = 0;
+
+#ifdef CONFIG_OF
+	/* In the device-tree world, we could have a 'memory-region'
+	 * phandle, which gives us a link to our "vram".  Allocating
+	 * is all nicely abstracted behind the dma api, but we need
+	 * to know the entire size to allocate it all in one go. There
+	 * are two cases:
+	 *  1) device with no IOMMU, in which case we need exclusive
+	 *     access to a VRAM carveout big enough for all gpu
+	 *     buffers
+	 *  2) device with IOMMU, but where the bootloader puts up
+	 *     a splash screen.  In this case, the VRAM carveout
+	 *     need only be large enough for fbdev fb.  But we need
+	 *     exclusive access to the buffer to avoid the kernel
+	 *     using those pages for other purposes (which appears
+	 *     as corruption on screen before we have a chance to
+	 *     load and do initial modeset)
+	 */
+	struct device_node *node;
+
+	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
+	if (node) {
+		struct resource r;
+		ret = of_address_to_resource(node, 0, &r);
+		if (ret)
+			return ret;
+		size = r.end - r.start;
+		DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start);
+	} else
+#endif
 
 	/* if we have no IOMMU, then we need to use carveout allocator.
 	 * Grab the entire CMA chunk carved out in early startup in
 	 * mach-msm:
 	 */
 	if (!iommu_present(&platform_bus_type)) {
+		DRM_INFO("using %s VRAM carveout\n", vram);
+		size = memparse(vram, NULL);
+	}
+
+	if (size) {
 		DEFINE_DMA_ATTRS(attrs);
-		unsigned long size;
 		void *p;
 
-		DBG("using %s VRAM carveout", vram);
-		size = memparse(vram, NULL);
 		priv->vram.size = size;
 
 		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
@@ -220,7 +256,7 @@ static int msm_init_vram(struct drm_device *dev)
 				(uint32_t)(priv->vram.paddr + size));
 	}
 
-	return 0;
+	return ret;
 }
 
 static int msm_load(struct drm_device *dev, unsigned long flags)
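The interleaved #ifdef CONFIG_OF / else construction above is easy to misread, so here is a minimal sketch of the size-selection order the reworked msm_init_vram() follows. It is illustrative only: the helper name and parameters are invented for this example, error propagation and of_node_put() are elided, and it simply reuses the OF helpers the hunk itself calls.

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative helper (not part of the patch): pick the carveout size the
 * same way the reworked msm_init_vram() does.
 */
static unsigned long example_vram_carveout_size(struct device_node *of_node,
						const char *vram_param,
						bool have_iommu)
{
	struct device_node *node;
	struct resource r;

	/* 1) a dtb-provided "memory-region" phandle wins */
	node = of_parse_phandle(of_node, "memory-region", 0);
	if (node && !of_address_to_resource(node, 0, &r))
		return r.end - r.start;

	/* 2) no IOMMU: fall back to the legacy 'vram' module parameter */
	if (!have_iommu)
		return memparse(vram_param, NULL);

	/* 3) IOMMU present and no splash carveout: allocate nothing up front */
	return 0;
}

Only when the resulting size is non-zero does msm_init_vram() go on to grab the whole carveout through the DMA API and seed priv->vram, which is what gives the kernel exclusive use of the region the bootloader is still scanning out.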
drivers/gpu/drm/msm/msm_fbdev.c

@@ -110,7 +110,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
 	mutex_lock(&dev->struct_mutex);
-	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
+			MSM_BO_WC | MSM_BO_STOLEN);
 	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(fbdev->bo)) {
 		ret = PTR_ERR(fbdev->bo);
drivers/gpu/drm/msm/msm_gem.c

@@ -32,6 +32,12 @@ static dma_addr_t physaddr(struct drm_gem_object *obj)
 			priv->vram.paddr;
 }
 
+static bool use_pages(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	return !msm_obj->vram_node;
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj,
 		int npages)
@@ -72,7 +78,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		struct page **p;
 		int npages = obj->size >> PAGE_SHIFT;
 
-		if (iommu_present(&platform_bus_type))
+		if (use_pages(obj))
 			p = drm_gem_get_pages(obj);
 		else
 			p = get_pages_vram(obj, npages);
@@ -116,7 +122,7 @@ static void put_pages(struct drm_gem_object *obj)
 		sg_free_table(msm_obj->sgt);
 		kfree(msm_obj->sgt);
 
-		if (iommu_present(&platform_bus_type))
+		if (use_pages(obj))
 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
 		else {
 			drm_mm_remove_node(msm_obj->vram_node);
@@ -580,6 +586,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
 	unsigned sz;
+	bool use_vram = false;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
@@ -592,15 +599,23 @@ static int msm_gem_new_impl(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	sz = sizeof(*msm_obj);
 	if (!iommu_present(&platform_bus_type))
+		use_vram = true;
+	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+		use_vram = true;
+
+	if (WARN_ON(use_vram && !priv->vram.size))
+		return -EINVAL;
+
+	sz = sizeof(*msm_obj);
+	if (use_vram)
 		sz += sizeof(struct drm_mm_node);
 
 	msm_obj = kzalloc(sz, GFP_KERNEL);
 	if (!msm_obj)
 		return -ENOMEM;
 
-	if (!iommu_present(&platform_bus_type))
+	if (use_vram)
 		msm_obj->vram_node = (void *)&msm_obj[1];
 
 	msm_obj->flags = flags;
@@ -630,7 +645,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	if (iommu_present(&platform_bus_type)) {
+	if (use_pages(obj)) {
 		ret = drm_gem_object_init(dev, obj, size);
 		if (ret)
 			goto fail;
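Condensed into one place, the allocation-path decision that msm_gem_new_impl() now makes (and that use_pages() later reflects) looks roughly like the sketch below. The function name and parameters are invented for illustration; only the MSM_BO_STOLEN flag and the two conditions come from the patch.

#include <linux/types.h>

#define MSM_BO_STOLEN 0x10000000	/* mirrors the flag added in msm_gem.h below */

/* Sketch only: a BO is backed by the contiguous VRAM carveout either when
 * the platform has no IOMMU at all, or when the caller asked for the
 * stolen/splash region (MSM_BO_STOLEN) and a carveout was actually set up.
 */
static bool bo_uses_vram_carveout(bool have_iommu, u32 flags,
				  unsigned long vram_size)
{
	if (!have_iommu)
		return true;
	if ((flags & MSM_BO_STOLEN) && vram_size)
		return true;
	return false;
}

Once that choice is recorded by pointing msm_obj->vram_node at the trailing drm_mm_node, the new use_pages() helper is all that get_pages(), put_pages() and msm_gem_new() need to pick between drm_gem_get_pages() and the carveout allocator, instead of re-querying iommu_present() each time.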
drivers/gpu/drm/msm/msm_gem.h

@@ -21,6 +21,9 @@
 #include <linux/reservation.h>
 #include "msm_drv.h"
 
+/* Additional internal-use only BO flags: */
+#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
+
 struct msm_gem_object {
 	struct drm_gem_object base;
 
@@ -59,7 +62,7 @@ struct msm_gem_object {
 	struct reservation_object _resv;
 
 	/* For physically contiguous buffers.  Used when we don't have
-	 * an IOMMU.
+	 * an IOMMU.  Also used for stolen/splashscreen buffer.
 	 */
 	struct drm_mm_node *vram_node;
 };