linux-next/drivers/video/cfbfillrect.c
Pavel Pisa 779121e9f1 fbdev: Support for byte-reversed framebuffer formats
Allow the generic frame-buffer code to correctly write text and blit images for
1, 2 and 4 bits-per-pixel frame-buffer organizations in which the pixels within
a byte are ordered opposite to the bytes within a long.

Overhead should be reasonable.  If the option is not selected, the compiler
should eliminate all of the overhead.

The feature is disabled at compile time if CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is
not set.

[adaplas]
Convert helper functions to macros if feature is not enabled.

Signed-off-by: Pavel Pisa <pisa@cmp.felk.cvut.cz>
Signed-off-by: Antonino Daplas <adaplas@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-16 09:43:19 -07:00
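
The commit above concerns sub-byte depths (1, 2 and 4 bpp) where the pixel order inside each byte is mirrored with respect to the byte order inside a long. As a rough standalone illustration of the idea only (not the kernel helper itself; the function name pixel_bit_offset and the program layout are invented for this sketch), the reversal amounts to mirroring a pixel's bit offset within its byte:

/*
 * Illustrative sketch only, not kernel code: shows how the bit position of a
 * pixel inside a byte changes when a CONFIG_FB_CFB_REV_PIXELS_IN_BYTE-style
 * reversal is in effect.  All names here are made up for the example.
 */
#include <stdio.h>

/* Bit offset of pixel 'idx' within its byte for a sub-byte depth 'bpp'. */
static unsigned pixel_bit_offset(unsigned idx, unsigned bpp, int reversed)
{
	unsigned within_byte = (idx * bpp) % 8;	/* normal ordering */

	if (reversed)	/* mirror the pixel position inside its byte */
		within_byte = 8 - bpp - within_byte;
	return within_byte;
}

int main(void)
{
	unsigned bpp = 2, i;

	for (i = 0; i < 8 / bpp; i++)
		printf("pixel %u: normal bit %u, byte-reversed bit %u\n",
		       i, pixel_bit_offset(i, bpp, 0),
		       pixel_bit_offset(i, bpp, 1));
	return 0;
}

In the driver below the same concern is handled through the bswapmask value computed by fb_compute_bswapmask() and passed to fb_shifted_pixels_mask_long().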

/*
 *  Generic fillrect for frame buffers with packed pixels of any depth.
 *
 *  Copyright (C) 2000 James Simmons (jsimmons@linux-fbdev.org)
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive for
 *  more details.
 *
 *  NOTES:
 *
 *  The code for depths like 24 that do not have an integer number of pixels
 *  per long is broken and needs to be fixed.  For now I have turned these
 *  modes off.
 *
 *  Code also needs to be added to deal with cards whose endianness differs
 *  from the native CPU's.  I also need to deal with the MSB position in the
 *  word.
 *
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/types.h>
#include "fb_draw.h"

#if BITS_PER_LONG == 32
# define FB_WRITEL fb_writel
# define FB_READL fb_readl
#else
# define FB_WRITEL fb_writeq
# define FB_READL fb_readq
#endif

/*
 *  Aligned pattern fill using 32/64-bit memory accesses
 */

static void
bitfill_aligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
		unsigned n, int bits, u32 bswapmask)
{
	unsigned long first, last;

	if (!n)
		return;
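
	/* Pixel masks for the partial leading and trailing longs of the span. */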
	first = fb_shifted_pixels_mask_long(dst_idx, bswapmask);
	last = ~fb_shifted_pixels_mask_long((dst_idx+n) % bits, bswapmask);

	if (dst_idx+n <= bits) {
		// Single word
		if (last)
			first &= last;
		FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
	} else {
		// Multiple destination words

		// Leading bits
		if (first != ~0UL) {
			FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
			dst++;
			n -= bits - dst_idx;
		}

		// Main chunk
		n /= bits;
		while (n >= 8) {
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			n -= 8;
		}
		while (n--)
			FB_WRITEL(pat, dst++);

		// Trailing bits
		if (last)
			FB_WRITEL(comp(pat, FB_READL(dst), last), dst);
	}
}

/*
 *  Unaligned generic pattern fill using 32/64-bit memory accesses
 *  The pattern must have been expanded to a full 32/64-bit value
 *  Left/right are the appropriate shifts to rotate the pattern into the form
 *  used for the next 32/64-bit word
 */

static void
bitfill_unaligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
		  int left, int right, unsigned n, int bits)
{
	unsigned long first, last;

	if (!n)
		return;

	first = FB_SHIFT_HIGH(~0UL, dst_idx);
	last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));

	if (dst_idx+n <= bits) {
		// Single word
		if (last)
			first &= last;
		FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
	} else {
		// Multiple destination words
		// Leading bits
		if (first) {
			FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
			dst++;
			pat = pat << left | pat >> right;
			n -= bits - dst_idx;
		}

		// Main chunk
		n /= bits;
		while (n >= 4) {
			FB_WRITEL(pat, dst++);
			pat = pat << left | pat >> right;
			FB_WRITEL(pat, dst++);
			pat = pat << left | pat >> right;
			FB_WRITEL(pat, dst++);
			pat = pat << left | pat >> right;
			FB_WRITEL(pat, dst++);
			pat = pat << left | pat >> right;
			n -= 4;
		}
		while (n--) {
			FB_WRITEL(pat, dst++);
			pat = pat << left | pat >> right;
		}

		// Trailing bits
		if (last)
			FB_WRITEL(comp(pat, FB_READL(dst), last), dst);
	}
}

/*
 *  Aligned pattern invert using 32/64-bit memory accesses
 */

static void
bitfill_aligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
		    unsigned n, int bits, u32 bswapmask)
{
	unsigned long val = pat, dat;
	unsigned long first, last;

	if (!n)
		return;

	first = fb_shifted_pixels_mask_long(dst_idx, bswapmask);
	last = ~fb_shifted_pixels_mask_long((dst_idx+n) % bits, bswapmask);

	if (dst_idx+n <= bits) {
		// Single word
		if (last)
			first &= last;
		dat = FB_READL(dst);
		FB_WRITEL(comp(dat ^ val, dat, first), dst);
	} else {
		// Multiple destination words
		// Leading bits
		if (first != 0UL) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ val, dat, first), dst);
			dst++;
			n -= bits - dst_idx;
		}

		// Main chunk
		n /= bits;
		while (n >= 8) {
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			n -= 8;
		}
		while (n--) {
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
		}

		// Trailing bits
		if (last) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ val, dat, last), dst);
		}
	}
}

/*
 *  Unaligned generic pattern invert using 32/64-bit memory accesses
 *  The pattern must have been expanded to a full 32/64-bit value
 *  Left/right are the appropriate shifts to rotate the pattern into the form
 *  used for the next 32/64-bit word
 */

static void
bitfill_unaligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
		      int left, int right, unsigned n, int bits)
{
	unsigned long first, last, dat;

	if (!n)
		return;

	first = FB_SHIFT_HIGH(~0UL, dst_idx);
	last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));

	if (dst_idx+n <= bits) {
		// Single word
		if (last)
			first &= last;
		dat = FB_READL(dst);
		FB_WRITEL(comp(dat ^ pat, dat, first), dst);
	} else {
		// Multiple destination words
		// Leading bits
		if (first != 0UL) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ pat, dat, first), dst);
			dst++;
			pat = pat << left | pat >> right;
			n -= bits - dst_idx;
		}

		// Main chunk
		n /= bits;
		while (n >= 4) {
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			n -= 4;
		}
		while (n--) {
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
		}

		// Trailing bits
		if (last) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ pat, dat, last), dst);
		}
	}
}

void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
	unsigned long pat, fg;
	unsigned long width = rect->width, height = rect->height;
	int bits = BITS_PER_LONG, bytes = bits >> 3;
	u32 bpp = p->var.bits_per_pixel;
	unsigned long __iomem *dst;
	int dst_idx, left;

	if (p->state != FBINFO_STATE_RUNNING)
		return;

	if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
	    p->fix.visual == FB_VISUAL_DIRECTCOLOR)
		fg = ((u32 *) (p->pseudo_palette))[rect->color];
	else
		fg = rect->color;

	pat = pixel_to_pat(bpp, fg);
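
	/* Round screen_base down to a long boundary; carry the remainder as a bit index. */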
	dst = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
	dst_idx = ((unsigned long)p->screen_base & (bytes - 1))*8;
	dst_idx += rect->dy*p->fix.line_length*8+rect->dx*bpp;
	/* FIXME For now we support 1-32 bpp only */
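	/* left == 0 when bpp divides BITS_PER_LONG, i.e. a whole number of pixels fits in a long. */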
	left = bits % bpp;
	if (p->fbops->fb_sync)
		p->fbops->fb_sync(p);
	if (!left) {
		u32 bswapmask = fb_compute_bswapmask(p);
		void (*fill_op32)(unsigned long __iomem *dst, int dst_idx,
				  unsigned long pat, unsigned n, int bits,
				  u32 bswapmask) = NULL;

		switch (rect->rop) {
		case ROP_XOR:
			fill_op32 = bitfill_aligned_rev;
			break;
		case ROP_COPY:
			fill_op32 = bitfill_aligned;
			break;
		default:
			printk(KERN_ERR "cfb_fillrect(): unknown rop, defaulting to ROP_COPY\n");
			fill_op32 = bitfill_aligned;
			break;
		}

		while (height--) {
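			/* Convert the accumulated bit offset into whole longs plus an in-word index. */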
			dst += dst_idx >> (ffs(bits) - 1);
			dst_idx &= (bits - 1);
			fill_op32(dst, dst_idx, pat, width*bpp, bits, bswapmask);
			dst_idx += p->fix.line_length*8;
		}
	} else {
		int right;
		int r;
		int rot = (left-dst_idx) % bpp;
		void (*fill_op)(unsigned long __iomem *dst, int dst_idx,
				unsigned long pat, int left, int right,
				unsigned n, int bits) = NULL;

		/* rotate pattern to correct start position */
		pat = pat << rot | pat >> (bpp-rot);

		right = bpp-left;
		switch (rect->rop) {
		case ROP_XOR:
			fill_op = bitfill_unaligned_rev;
			break;
		case ROP_COPY:
			fill_op = bitfill_unaligned;
			break;
		default:
			printk(KERN_ERR "cfb_fillrect(): unknown rop, defaulting to ROP_COPY\n");
			fill_op = bitfill_unaligned;
			break;
		}

		while (height--) {
			dst += dst_idx >> (ffs(bits) - 1);
			dst_idx &= (bits - 1);
			fill_op(dst, dst_idx, pat, left, right,
				width*bpp, bits);
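			/* line_length*8 need not be a multiple of bpp; rotate the pattern so the next scanline starts in phase. */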
			r = (p->fix.line_length*8) % bpp;
			pat = pat << (bpp-r) | pat >> r;
			dst_idx += p->fix.line_length*8;
		}
	}
}

EXPORT_SYMBOL(cfb_fillrect);
MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
MODULE_DESCRIPTION("Generic software accelerated fill rectangle");
MODULE_LICENSE("GPL");