drm/udl: Use drm_fb_helper deferred_io support
Use the fbdev deferred io support in drm_fb_helper. The
(struct fb_ops *)->fb_{fillrect,copyarea,imageblit} functions will now
schedule a worker instead of being flushed directly like it was
previously (recorded when in atomic).

This patch has only been compile tested.

Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Tested-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1461856717-6476-8-git-send-email-noralf@tronnes.org
parent 6819c3c251
commit e375882406
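For context, the sketch below shows the wiring pattern this patch adopts, in driver-neutral form and assuming the drm_fb_helper deferred-io support added earlier in this series: the fb_ops entries draw into the shadow buffer through the sys_* helpers and schedule a worker, and fb_defio later hands the touched pages to drm_fb_helper_deferred_io, which turns them into a damage rectangle for the framebuffer's ->dirty() callback. The example_* identifiers are illustrative only and are not part of this patch.

/*
 * Minimal, driver-neutral sketch of the deferred-io wiring (illustrative,
 * not taken from this patch).  The drm_fb_helper_* and fb_deferred_io
 * symbols are the shared helpers; the example_* names are made up.
 */
#include <linux/fb.h>
#include <linux/module.h>
#include <drm/drm_fb_helper.h>

static struct fb_deferred_io example_defio = {
        .delay          = HZ / 5,                       /* example: collect damage for up to ~200 ms */
        .deferred_io    = drm_fb_helper_deferred_io,    /* worker: page list -> dirty rect -> fb->funcs->dirty() */
};

static struct fb_ops example_fb_ops = {
        .owner          = THIS_MODULE,
        .fb_check_var   = drm_fb_helper_check_var,
        .fb_set_par     = drm_fb_helper_set_par,
        /* draw into the shadow buffer, then schedule the helper's dirty worker */
        .fb_fillrect    = drm_fb_helper_sys_fillrect,
        .fb_copyarea    = drm_fb_helper_sys_copyarea,
        .fb_imageblit   = drm_fb_helper_sys_imageblit,
};

/* info->par must point at the driver's struct drm_fb_helper for these helpers to work */
static void example_fbdev_setup(struct fb_info *info)
{
        info->fbops = &example_fb_ops;
        info->fbdefio = &example_defio;
        fb_deferred_io_init(info);      /* start tracking page faults on the shadow buffer */
}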
@@ -81,8 +81,6 @@ struct udl_framebuffer {
         struct drm_framebuffer base;
         struct udl_gem_object *obj;
         bool active_16; /* active on the 16-bit channel */
-        int x1, y1, x2, y2; /* dirty rect */
-        spinlock_t dirty_lock;
 };
 
 #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
@@ -77,68 +77,6 @@ static uint16_t rgb16(uint32_t col)
 }
 #endif
 
-/*
- * NOTE: fb_defio.c is holding info->fbdefio.mutex
- *   Touching ANY framebuffer memory that triggers a page fault
- *   in fb_defio will cause a deadlock, when it also tries to
- *   grab the same mutex.
- */
-static void udlfb_dpy_deferred_io(struct fb_info *info,
-                                  struct list_head *pagelist)
-{
-        struct page *cur;
-        struct fb_deferred_io *fbdefio = info->fbdefio;
-        struct udl_fbdev *ufbdev = info->par;
-        struct drm_device *dev = ufbdev->ufb.base.dev;
-        struct udl_device *udl = dev->dev_private;
-        struct urb *urb;
-        char *cmd;
-        cycles_t start_cycles, end_cycles;
-        int bytes_sent = 0;
-        int bytes_identical = 0;
-        int bytes_rendered = 0;
-
-        if (!fb_defio)
-                return;
-
-        start_cycles = get_cycles();
-
-        urb = udl_get_urb(dev);
-        if (!urb)
-                return;
-
-        cmd = urb->transfer_buffer;
-
-        /* walk the written page list and render each to device */
-        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-
-                if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
-                                     &urb, (char *) info->fix.smem_start,
-                                     &cmd, cur->index << PAGE_SHIFT,
-                                     cur->index << PAGE_SHIFT,
-                                     PAGE_SIZE, &bytes_identical, &bytes_sent))
-                        goto error;
-                bytes_rendered += PAGE_SIZE;
-        }
-
-        if (cmd > (char *) urb->transfer_buffer) {
-                /* Send partial buffer remaining before exiting */
-                int len = cmd - (char *) urb->transfer_buffer;
-                udl_submit_urb(dev, urb, len);
-                bytes_sent += len;
-        } else
-                udl_urb_completion(urb);
-
-error:
-        atomic_add(bytes_sent, &udl->bytes_sent);
-        atomic_add(bytes_identical, &udl->bytes_identical);
-        atomic_add(bytes_rendered, &udl->bytes_rendered);
-        end_cycles = get_cycles();
-        atomic_add(((unsigned int) ((end_cycles - start_cycles)
-                    >> 10)), /* Kcycles */
-                   &udl->cpu_kcycles_used);
-}
-
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                       int width, int height)
 {
@@ -152,9 +90,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
         struct urb *urb;
         int aligned_x;
         int bpp = (fb->base.bits_per_pixel / 8);
-        int x2, y2;
-        bool store_for_later = false;
-        unsigned long flags;
 
         if (!fb->active_16)
                 return 0;
@@ -180,38 +115,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
             (y + height > fb->base.height))
                 return -EINVAL;
 
-        /* if we are in atomic just store the info
-           can't test inside spin lock */
-        if (in_atomic())
-                store_for_later = true;
-
-        x2 = x + width - 1;
-        y2 = y + height - 1;
-
-        spin_lock_irqsave(&fb->dirty_lock, flags);
-
-        if (fb->y1 < y)
-                y = fb->y1;
-        if (fb->y2 > y2)
-                y2 = fb->y2;
-        if (fb->x1 < x)
-                x = fb->x1;
-        if (fb->x2 > x2)
-                x2 = fb->x2;
-
-        if (store_for_later) {
-                fb->x1 = x;
-                fb->x2 = x2;
-                fb->y1 = y;
-                fb->y2 = y2;
-                spin_unlock_irqrestore(&fb->dirty_lock, flags);
-                return 0;
-        }
-
-        fb->x1 = fb->y1 = INT_MAX;
-        fb->x2 = fb->y2 = 0;
-
-        spin_unlock_irqrestore(&fb->dirty_lock, flags);
         start_cycles = get_cycles();
 
         urb = udl_get_urb(dev);
@@ -219,14 +122,14 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                 return 0;
         cmd = urb->transfer_buffer;
 
-        for (i = y; i <= y2 ; i++) {
+        for (i = y; i < height ; i++) {
                 const int line_offset = fb->base.pitches[0] * i;
                 const int byte_offset = line_offset + (x * bpp);
                 const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
                 if (udl_render_hline(dev, bpp, &urb,
                                      (char *) fb->obj->vmapping,
                                      &cmd, byte_offset, dev_byte_offset,
-                                     (x2 - x + 1) * bpp,
+                                     width * bpp,
                                      &bytes_identical, &bytes_sent))
                         goto error;
         }
@@ -283,36 +186,6 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
         return 0;
 }
 
-static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
-        struct udl_fbdev *ufbdev = info->par;
-
-        sys_fillrect(info, rect);
-
-        udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
-                          rect->height);
-}
-
-static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
-{
-        struct udl_fbdev *ufbdev = info->par;
-
-        sys_copyarea(info, region);
-
-        udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
-                          region->height);
-}
-
-static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
-        struct udl_fbdev *ufbdev = info->par;
-
-        sys_imageblit(info, image);
-
-        udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
-                          image->height);
-}
-
 /*
  * It's common for several clients to have framebuffer open simultaneously.
  * e.g. both fbcon and X. Makes things interesting.
@@ -339,7 +212,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 
         if (fbdefio) {
                 fbdefio->delay = DL_DEFIO_WRITE_DELAY;
-                fbdefio->deferred_io = udlfb_dpy_deferred_io;
+                fbdefio->deferred_io = drm_fb_helper_deferred_io;
         }
 
         info->fbdefio = fbdefio;
@@ -379,9 +252,9 @@ static struct fb_ops udlfb_ops = {
         .owner = THIS_MODULE,
         .fb_check_var = drm_fb_helper_check_var,
         .fb_set_par = drm_fb_helper_set_par,
-        .fb_fillrect = udl_fb_fillrect,
-        .fb_copyarea = udl_fb_copyarea,
-        .fb_imageblit = udl_fb_imageblit,
+        .fb_fillrect = drm_fb_helper_sys_fillrect,
+        .fb_copyarea = drm_fb_helper_sys_copyarea,
+        .fb_imageblit = drm_fb_helper_sys_imageblit,
         .fb_pan_display = drm_fb_helper_pan_display,
         .fb_blank = drm_fb_helper_blank,
         .fb_setcmap = drm_fb_helper_setcmap,
@@ -458,7 +331,6 @@ udl_framebuffer_init(struct drm_device *dev,
 {
         int ret;
 
-        spin_lock_init(&ufb->dirty_lock);
         ufb->obj = obj;
         drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
         ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);