linux/drivers/gpu/drm/omapdrm/omap_fbdev.c
Alexander A. Klimov 1b409fda60 drm: omapdrm: Replace HTTP links with HTTPS ones
Rationale:
Reduces attack surface on kernel devs opening the links for MITM
as HTTPS traffic is much harder to manipulate.

Deterministic algorithm:
For each file:
  If not .svg:
    For each line:
      If doesn't contain `\bxmlns\b`:
        For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`:
          If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`:
            If both the HTTP and HTTPS versions
            return 200 OK and serve the same content:
              Replace HTTP with HTTPS.

Signed-off-by: Alexander A. Klimov <grandmaster@al2klimov.de>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200713122859.34135-1-grandmaster@al2klimov.de
2020-11-10 14:41:22 +02:00
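
For illustration, the per-line matching rules above can be sketched as a small standalone filter. The following is a minimal, hypothetical C sketch (not the tooling actually used for this commit): it reads a file on stdin, applies the xmlns and license-link exceptions, and prints each candidate https:// rewrite. The final check that both the HTTP and HTTPS versions return 200 OK and serve identical content is assumed to happen out of band (for example with a separate HTTP client), and the POSIX extended regex only approximates the Perl-style pattern quoted in the commit message.

/* hypothetical sketch: emit candidate https:// rewrites for review */
#include <regex.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* approximation of `\bhttp://[^# \t\r\n]*(?:\w|/)` as a POSIX ERE */
	static const char pattern[] = "http://[^# \t\r\n]*[[:alnum:]_/]";
	regex_t re;
	char line[4096];

	if (regcomp(&re, pattern, REG_EXTENDED))
		return 1;

	while (fgets(line, sizeof(line), stdin)) {
		regmatch_t m;

		if (strstr(line, "xmlns"))		/* skip XML namespace lines */
			continue;
		if (strstr(line, "gnu.org/license") ||	/* leave the license-link exceptions as http */
		    strstr(line, "mozilla.org/MPL"))
			continue;
		if (regexec(&re, line, 1, &m, 0))	/* no http:// link on this line */
			continue;

		/* print the candidate https:// URL; content comparison happens elsewhere */
		printf("https://%.*s\n",
		       (int)(m.rm_eo - m.rm_so) - 7 /* strlen("http://") */,
		       line + m.rm_so + 7);
	}

	regfree(&re);
	return 0;
}

A wrapper around such a filter would then fetch each printed URL over both http and https and only perform the substitution when the responses match, which is how the copyright link in the file header below ended up pointing at https://www.ti.com/.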

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob@ti.com>
 */

#include <drm/drm_crtc.h>
#include <drm/drm_util.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "omap_drv.h"

MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')");
static bool ywrap_enabled = true;
module_param_named(ywrap, ywrap_enabled, bool, 0644);

/*
 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
 */

#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)

struct omap_fbdev {
	struct drm_fb_helper base;
	struct drm_framebuffer *fb;
	struct drm_gem_object *bo;
	bool ywrap_enabled;

	/* for deferred dmm roll when getting called in atomic ctx */
	struct work_struct work;
};

static struct drm_fb_helper *get_fb(struct fb_info *fbi);
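
/*
 * Worker to apply a pending DMM roll; pan_display defers the roll here
 * when it is called in atomic context and cannot sleep.
 */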
static void pan_worker(struct work_struct *work)
{
	struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
	struct fb_info *fbi = fbdev->base.fbdev;
	int npages;

	/* DMM roll shifts in 4K pages: */
	npages = fbi->fix.line_length >> PAGE_SHIFT;
	omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
}
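
/*
 * Pan by rolling the DMM-backed buffer when ywrap is enabled, falling
 * back to the generic helper otherwise; the roll is deferred to the
 * worker above when we cannot sleep here.
 */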
static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
		struct fb_info *fbi)
{
	struct drm_fb_helper *helper = get_fb(fbi);
	struct omap_fbdev *fbdev = to_omap_fbdev(helper);

	if (!helper)
		goto fallback;

	if (!fbdev->ywrap_enabled)
		goto fallback;

	if (drm_can_sleep()) {
		pan_worker(&fbdev->work);
	} else {
		struct omap_drm_private *priv = helper->dev->dev_private;
		queue_work(priv->wq, &fbdev->work);
	}

	return 0;

fallback:
	return drm_fb_helper_pan_display(var, fbi);
}
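
/* fbdev ops: pan is handled above, everything else by the generic fb helpers */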
static const struct fb_ops omap_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_blank = drm_fb_helper_blank,
	.fb_pan_display = omap_fbdev_pan_display,
	.fb_ioctl = drm_fb_helper_ioctl,
	.fb_read = drm_fb_helper_sys_read,
	.fb_write = drm_fb_helper_sys_write,
	.fb_fillrect = drm_fb_helper_sys_fillrect,
	.fb_copyarea = drm_fb_helper_sys_copyarea,
	.fb_imageblit = drm_fb_helper_sys_imageblit,
};
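
/*
 * .fb_probe callback: allocate the backing GEM buffer, wrap it in a DRM
 * framebuffer and fill in the fb_info used by the fbdev emulation.
 */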
static int omap_fbdev_create(struct drm_fb_helper *helper,
		struct drm_fb_helper_surface_size *sizes)
{
	struct omap_fbdev *fbdev = to_omap_fbdev(helper);
	struct drm_device *dev = helper->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb = NULL;
	union omap_gem_size gsize;
	struct fb_info *fbi = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = {0};
	dma_addr_t dma_addr;
	int ret;

	sizes->surface_bpp = 32;
	sizes->surface_depth = 24;

	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
			sizes->surface_height, sizes->surface_bpp,
			sizes->fb_width, sizes->fb_height);

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
			sizes->surface_depth);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] =
			DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8);

	fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
	if (fbdev->ywrap_enabled) {
		/* need to align pitch to page size if using DMM scrolling */
		mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
	}

	/* allocate backing bo */
	gsize = (union omap_gem_size){
		.bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height),
	};
	DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
	fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
	if (!fbdev->bo) {
		dev_err(dev->dev, "failed to allocate buffer object\n");
		ret = -ENOMEM;
		goto fail;
	}

	fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
	if (IS_ERR(fb)) {
		dev_err(dev->dev, "failed to allocate fb\n");
		/* note: if fb creation failed, we can't rely on fb destroy
		 * to unref the bo:
		 */
		drm_gem_object_put(fbdev->bo);
		ret = PTR_ERR(fb);
		goto fail;
	}

	/* note: this keeps the bo pinned.. which is perhaps not ideal,
	 * but is needed as long as we use fb_mmap() to mmap to userspace
	 * (since this happens using fix.smem_start). Possibly we could
	 * implement our own mmap using GEM mmap support to avoid this
	 * (non-tiled buffer doesn't need to be pinned for fbcon to write
	 * to it). Then we just need to be sure that we are able to re-
	 * pin it in case of an oops.
	 */
	ret = omap_gem_pin(fbdev->bo, &dma_addr);
	if (ret) {
		dev_err(dev->dev, "could not pin framebuffer\n");
		ret = -ENOMEM;
		goto fail;
	}

	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi)) {
		dev_err(dev->dev, "failed to allocate fb info\n");
		ret = PTR_ERR(fbi);
		goto fail;
	}

	DBG("fbi=%p, dev=%p", fbi, dev);

	fbdev->fb = fb;
	helper->fb = fb;

	fbi->fbops = &omap_fb_ops;

	drm_fb_helper_fill_info(fbi, helper, sizes);

	dev->mode_config.fb_base = dma_addr;

	fbi->screen_buffer = omap_gem_vaddr(fbdev->bo);
	fbi->screen_size = fbdev->bo->size;
	fbi->fix.smem_start = dma_addr;
	fbi->fix.smem_len = fbdev->bo->size;

	/* if we have DMM, then we can use it for scrolling by just
	 * shuffling pages around in DMM rather than doing sw blit.
	 */
	if (fbdev->ywrap_enabled) {
		DRM_INFO("Enabling DMM ywrap scrolling\n");
		fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
		fbi->fix.ywrapstep = 1;
	}

	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);

	return 0;

fail:
	if (ret) {
		if (fb)
			drm_framebuffer_remove(fb);
	}

	return ret;
}

static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
	.fb_probe = omap_fbdev_create,
};

static struct drm_fb_helper *get_fb(struct fb_info *fbi)
{
	if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
		/* these are not the fb's you're looking for */
		return NULL;
	}
	return fbi->par;
}

/* initialize fbdev helper */
void omap_fbdev_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_fbdev *fbdev = NULL;
	struct drm_fb_helper *helper;
	int ret = 0;

	if (!priv->num_pipes)
		return;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		goto fail;

	INIT_WORK(&fbdev->work, pan_worker);

	helper = &fbdev->base;

	drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, helper);
	if (ret)
		goto fail;

	ret = drm_fb_helper_initial_config(helper, 32);
	if (ret)
		goto fini;

	priv->fbdev = helper;

	return;

fini:
	drm_fb_helper_fini(helper);
fail:
	kfree(fbdev);

	dev_warn(dev->dev, "omap_fbdev_init failed\n");
}
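
/* tear down the fbdev helper set up in omap_fbdev_init() */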
void omap_fbdev_fini(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct drm_fb_helper *helper = priv->fbdev;
	struct omap_fbdev *fbdev;

	DBG();

	if (!helper)
		return;

	drm_fb_helper_unregister_fbi(helper);

	drm_fb_helper_fini(helper);

	fbdev = to_omap_fbdev(helper);

	/* unpin the GEM object pinned in omap_fbdev_create() */
	if (fbdev->bo)
		omap_gem_unpin(fbdev->bo);

	/* this will free the backing object */
	if (fbdev->fb)
		drm_framebuffer_remove(fbdev->fb);

	kfree(fbdev);

	priv->fbdev = NULL;
}