Merge branch 'for-airlied' of git://people.freedesktop.org/~danvet/drm into drm-core-next
* 'for-airlied' of git://people.freedesktop.org/~danvet/drm:
  drm/i810: don't acces hw regs in lastclose
  drm/i810: cleanup reclaim_buffers
  drm: kill drm_sman
  drm/sis: use drm_mm instead of drm_sman
  drm/via: use drm_mm instead of drm_sman
  drm/sman: kill user_hash_tab
  drm/sis: track user->memblock mapping with idr
  drm/via: track user->memblock mapping with idr
  drm/sman: rip out owner tracking
  drm/sman: kill owner tracking interface functions
  drm/via: track obj->drm_fd relations in the driver
  drm/sis: track obj->drm_fd relations in the driver
commit 5c72765ed0
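The sis and via patches in this series all apply the same conversion: each allocation becomes a small driver-private struct wrapping a drm_mm_node, a per-file obj_list replaces drm_sman's owner tracking, and an idr replaces the user_hash_tab as the map from userspace keys to memory blocks. A condensed sketch of that pattern, with hypothetical names (memblock, alloc_block) and the error paths trimmed to the essentials, assuming the pre-3.9 idr_pre_get()/idr_get_new_above() API used throughout this series:

/* assumes the usual in-kernel drm driver context: "drmP.h"/"drm_mm.h",
 * <linux/idr.h>, <linux/list.h>, <linux/slab.h> */

struct memblock {
	struct drm_mm_node mm_node;	/* backing range inside the vram/agp pool */
	struct list_head owner_list;	/* links the block into file_priv->obj_list */
};

/* Carve a block out of "pool", publish it in "object_idr" under *user_key,
 * and remember it on the owning file's obj_list so postclose/reclaim can
 * find it again. */
static int alloc_block(struct drm_mm *pool, struct idr *object_idr,
		       struct list_head *obj_list, unsigned long pages,
		       int *user_key)
{
	struct memblock *item;
	int ret;

	item = kzalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	ret = drm_mm_insert_node(pool, &item->mm_node, pages, 0);
	if (ret)
		goto out_free;

again:
	if (idr_pre_get(object_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto out_remove;
	}
	ret = idr_get_new_above(object_idr, item, 1, user_key);
	if (ret == -EAGAIN)
		goto again;
	if (ret)
		goto out_remove;

	list_add(&item->owner_list, obj_list);
	return 0;

out_remove:
	drm_mm_remove_node(&item->mm_node);
out_free:
	kfree(item);
	return ret;
}

Freeing reverses the steps: idr_remove() the key, list_del() the owner link, drm_mm_remove_node() the range, kfree() the wrapper; reclaim_buffers walks obj_list with list_for_each_entry_safe() and does the same for every block the closing file still owns. This is the shape of sis_drm_alloc() and via_mem_alloc() in the diff below.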
@@ -9,7 +9,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
		drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
		drm_crtc.o drm_modes.o drm_edid.o \
		drm_info.o drm_debugfs.o drm_encoder_slave.o \
		drm_trace_points.o drm_global.o drm_usb.o
@@ -1,351 +0,0 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Simple memory manager interface that keeps track on allocate regions on a
 * per "owner" basis. All regions associated with an "owner" can be released
 * with a simple call. Typically if the "owner" exists. The owner is any
 * "unsigned long" identifier. Can typically be a pointer to a file private
 * struct or a context identifier.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include "drm_sman.h"

struct drm_owner_item {
	struct drm_hash_item owner_hash;
	struct list_head sman_list;
	struct list_head mem_blocks;
};

void drm_sman_takedown(struct drm_sman * sman)
{
	drm_ht_remove(&sman->user_hash_tab);
	drm_ht_remove(&sman->owner_hash_tab);
	kfree(sman->mm);
}

EXPORT_SYMBOL(drm_sman_takedown);

int
drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
	      unsigned int user_order, unsigned int owner_order)
{
	int ret = 0;

	sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
	if (!sman->mm) {
		ret = -ENOMEM;
		goto out;
	}
	sman->num_managers = num_managers;
	INIT_LIST_HEAD(&sman->owner_items);
	ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
	if (ret)
		goto out1;
	ret = drm_ht_create(&sman->user_hash_tab, user_order);
	if (!ret)
		goto out;

	drm_ht_remove(&sman->owner_hash_tab);
out1:
	kfree(sman->mm);
out:
	return ret;
}

EXPORT_SYMBOL(drm_sman_init);

static void *drm_sman_mm_allocate(void *private, unsigned long size,
				  unsigned alignment)
{
	struct drm_mm *mm = (struct drm_mm *) private;
	struct drm_mm_node *tmp;

	tmp = drm_mm_search_free(mm, size, alignment, 1);
	if (!tmp) {
		return NULL;
	}
	tmp = drm_mm_get_block(tmp, size, alignment);
	return tmp;
}

static void drm_sman_mm_free(void *private, void *ref)
{
	struct drm_mm_node *node = (struct drm_mm_node *) ref;

	drm_mm_put_block(node);
}

static void drm_sman_mm_destroy(void *private)
{
	struct drm_mm *mm = (struct drm_mm *) private;
	drm_mm_takedown(mm);
	kfree(mm);
}

static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
	struct drm_mm_node *node = (struct drm_mm_node *) ref;
	return node->start;
}

int
drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
		   unsigned long start, unsigned long size)
{
	struct drm_sman_mm *sman_mm;
	struct drm_mm *mm;
	int ret;

	BUG_ON(manager >= sman->num_managers);

	sman_mm = &sman->mm[manager];
	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm) {
		return -ENOMEM;
	}
	sman_mm->private = mm;
	ret = drm_mm_init(mm, start, size);

	if (ret) {
		kfree(mm);
		return ret;
	}

	sman_mm->allocate = drm_sman_mm_allocate;
	sman_mm->free = drm_sman_mm_free;
	sman_mm->destroy = drm_sman_mm_destroy;
	sman_mm->offset = drm_sman_mm_offset;

	return 0;
}

EXPORT_SYMBOL(drm_sman_set_range);

int
drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
		     struct drm_sman_mm * allocator)
{
	BUG_ON(manager >= sman->num_managers);
	sman->mm[manager] = *allocator;

	return 0;
}
EXPORT_SYMBOL(drm_sman_set_manager);

static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
						      unsigned long owner)
{
	int ret;
	struct drm_hash_item *owner_hash_item;
	struct drm_owner_item *owner_item;

	ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
	if (!ret) {
		return drm_hash_entry(owner_hash_item, struct drm_owner_item,
				      owner_hash);
	}

	owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
	if (!owner_item)
		goto out;

	INIT_LIST_HEAD(&owner_item->mem_blocks);
	owner_item->owner_hash.key = owner;
	if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
		goto out1;

	list_add_tail(&owner_item->sman_list, &sman->owner_items);
	return owner_item;

out1:
	kfree(owner_item);
out:
	return NULL;
}

struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
					 unsigned long size, unsigned alignment,
					 unsigned long owner)
{
	void *tmp;
	struct drm_sman_mm *sman_mm;
	struct drm_owner_item *owner_item;
	struct drm_memblock_item *memblock;

	BUG_ON(manager >= sman->num_managers);

	sman_mm = &sman->mm[manager];
	tmp = sman_mm->allocate(sman_mm->private, size, alignment);

	if (!tmp) {
		return NULL;
	}

	memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);

	if (!memblock)
		goto out;

	memblock->mm_info = tmp;
	memblock->mm = sman_mm;
	memblock->sman = sman;

	if (drm_ht_just_insert_please
	    (&sman->user_hash_tab, &memblock->user_hash,
	     (unsigned long)memblock, 32, 0, 0))
		goto out1;

	owner_item = drm_sman_get_owner_item(sman, owner);
	if (!owner_item)
		goto out2;

	list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);

	return memblock;

out2:
	drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
out1:
	kfree(memblock);
out:
	sman_mm->free(sman_mm->private, tmp);

	return NULL;
}

EXPORT_SYMBOL(drm_sman_alloc);

static void drm_sman_free(struct drm_memblock_item *item)
{
	struct drm_sman *sman = item->sman;

	list_del(&item->owner_list);
	drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
	item->mm->free(item->mm->private, item->mm_info);
	kfree(item);
}

int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
{
	struct drm_hash_item *hash_item;
	struct drm_memblock_item *memblock_item;

	if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
		return -EINVAL;

	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
				       user_hash);
	drm_sman_free(memblock_item);
	return 0;
}

EXPORT_SYMBOL(drm_sman_free_key);

static void drm_sman_remove_owner(struct drm_sman *sman,
				  struct drm_owner_item *owner_item)
{
	list_del(&owner_item->sman_list);
	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
	kfree(owner_item);
}

int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
{

	struct drm_hash_item *hash_item;
	struct drm_owner_item *owner_item;

	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
		return -1;
	}

	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
	if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
		drm_sman_remove_owner(sman, owner_item);
		return -1;
	}

	return 0;
}

EXPORT_SYMBOL(drm_sman_owner_clean);

static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
				      struct drm_owner_item *owner_item)
{
	struct drm_memblock_item *entry, *next;

	list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
				 owner_list) {
		drm_sman_free(entry);
	}
	drm_sman_remove_owner(sman, owner_item);
}

void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
{

	struct drm_hash_item *hash_item;
	struct drm_owner_item *owner_item;

	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {

		return;
	}

	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
	drm_sman_do_owner_cleanup(sman, owner_item);
}

EXPORT_SYMBOL(drm_sman_owner_cleanup);

void drm_sman_cleanup(struct drm_sman *sman)
{
	struct drm_owner_item *entry, *next;
	unsigned int i;
	struct drm_sman_mm *sman_mm;

	list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
		drm_sman_do_owner_cleanup(sman, entry);
	}
	if (sman->mm) {
		for (i = 0; i < sman->num_managers; ++i) {
			sman_mm = &sman->mm[i];
			if (sman_mm->private) {
				sman_mm->destroy(sman_mm->private);
				sman_mm->private = NULL;
			}
		}
	}
}

EXPORT_SYMBOL(drm_sman_cleanup);
@@ -222,8 +222,6 @@ static int i810_dma_cleanup(struct drm_device *dev)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    dev_priv->hw_status_page,
				    dev_priv->dma_status_page);
		/* Need to rewrite hardware status page */
		I810_WRITE(0x02080, 0x1ffff000);
	}
	kfree(dev->dev_private);
	dev->dev_private = NULL;
@@ -888,7 +886,7 @@ static int i810_flush_queue(struct drm_device *dev)
}

/* Must be called with the lock held */
static void i810_reclaim_buffers(struct drm_device *dev,
void i810_driver_reclaim_buffers(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
@@ -1225,12 +1223,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
		if (dev_priv->page_flipping)
			i810_do_cleanup_pageflip(dev);
	}
}

void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
					struct drm_file *file_priv)
{
	i810_reclaim_buffers(dev, file_priv);
	if (file_priv->master && file_priv->master->lock.hw_lock) {
		drm_idlelock_take(&file_priv->master->lock);
		i810_driver_reclaim_buffers(dev, file_priv);
		drm_idlelock_release(&file_priv->master->lock);
	} else {
		/* master disappeared, clean up stuff anyway and hope nothing
		 * goes wrong */
		i810_driver_reclaim_buffers(dev, file_priv);
	}

}

int i810_driver_dma_quiescent(struct drm_device *dev)
@@ -63,7 +63,6 @@ static struct drm_driver driver = {
	.lastclose = i810_driver_lastclose,
	.preclose = i810_driver_preclose,
	.device_is_agp = i810_driver_device_is_agp,
	.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
	.dma_quiescent = i810_driver_dma_quiescent,
	.ioctls = i810_ioctls,
	.fops = &i810_driver_fops,
@@ -116,14 +116,12 @@ typedef struct drm_i810_private {

/* i810_dma.c */
extern int i810_driver_dma_quiescent(struct drm_device *dev);
extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
					       struct drm_file *file_priv);
void i810_driver_reclaim_buffers(struct drm_device *dev,
				 struct drm_file *file_priv);
extern int i810_driver_load(struct drm_device *, unsigned long flags);
extern void i810_driver_lastclose(struct drm_device *dev);
extern void i810_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
					       struct drm_file *file_priv);
extern int i810_driver_device_is_agp(struct drm_device *dev);

extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -48,9 +48,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)

	dev->dev_private = (void *)dev_priv;
	dev_priv->chipset = chipset;
	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
	if (ret)
		kfree(dev_priv);
	idr_init(&dev->object_name_idr);

	return ret;
}
@@ -59,7 +57,9 @@ static int sis_driver_unload(struct drm_device *dev)
{
	drm_sis_private_t *dev_priv = dev->dev_private;

	drm_sman_takedown(&dev_priv->sman);
	idr_remove_all(&dev_priv->object_idr);
	idr_destroy(&dev_priv->object_idr);

	kfree(dev_priv);

	return 0;
@@ -76,10 +76,35 @@ static const struct file_operations sis_driver_fops = {
	.llseek = noop_llseek,
};

static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct sis_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	INIT_LIST_HEAD(&file_priv->obj_list);

	return 0;
}

void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct sis_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

static struct drm_driver driver = {
	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
	.load = sis_driver_load,
	.unload = sis_driver_unload,
	.open = sis_driver_open,
	.postclose = sis_driver_postclose,
	.dma_quiescent = sis_idle,
	.reclaim_buffers = NULL,
	.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
@@ -44,7 +44,7 @@ enum sis_family {
	SIS_CHIP_315 = 1,
};

#include "drm_sman.h"
#include "drm_mm.h"


#define SIS_BASE (dev_priv->mmio)
@@ -54,12 +54,15 @@ enum sis_family {
typedef struct drm_sis_private {
	drm_local_map_t *mmio;
	unsigned int idle_fault;
	struct drm_sman sman;
	unsigned int chipset;
	int vram_initialized;
	int agp_initialized;
	unsigned long vram_offset;
	unsigned long agp_offset;
	struct drm_mm vram_mm;
	struct drm_mm agp_mm;
	/** Mapping of userspace keys to mm objects */
	struct idr object_idr;
} drm_sis_private_t;

extern int sis_idle(struct drm_device *dev);
@@ -41,40 +41,18 @@
#define AGP_TYPE 1


struct sis_memblock {
	struct drm_mm_node mm_node;
	struct sis_memreq req;
	struct list_head owner_list;
};

#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
/* fb management via fb device */

#define SIS_MM_ALIGN_SHIFT 0
#define SIS_MM_ALIGN_MASK 0

static void *sis_sman_mm_allocate(void *private, unsigned long size,
				  unsigned alignment)
{
	struct sis_memreq req;

	req.size = size;
	sis_malloc(&req);
	if (req.size == 0)
		return NULL;
	else
		return (void *)(unsigned long)~req.offset;
}

static void sis_sman_mm_free(void *private, void *ref)
{
	sis_free(~((unsigned long)ref));
}

static void sis_sman_mm_destroy(void *private)
{
	;
}

static unsigned long sis_sman_mm_offset(void *private, void *ref)
{
	return ~((unsigned long)ref);
}

#else /* CONFIG_FB_SIS[_MODULE] */

#define SIS_MM_ALIGN_SHIFT 4
@@ -86,30 +64,11 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_fb_t *fb = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
	{
		struct drm_sman_mm sman_mm;
		sman_mm.private = (void *)0xFFFFFFFF;
		sman_mm.allocate = sis_sman_mm_allocate;
		sman_mm.free = sis_sman_mm_free;
		sman_mm.destroy = sis_sman_mm_destroy;
		sman_mm.offset = sis_sman_mm_offset;
		ret =
		    drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
	}
#else
	ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
				 fb->size >> SIS_MM_ALIGN_SHIFT);
#endif

	if (ret) {
		DRM_ERROR("VRAM memory manager initialisation error\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	/* Unconditionally init the drm_mm, even though we don't use it when the
	 * fb sis driver is available - make cleanup easier. */
	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);

	dev_priv->vram_initialized = 1;
	dev_priv->vram_offset = fb->offset;
@@ -120,13 +79,15 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
	return 0;
}

static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
			 void *data, int pool)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_mem_t *mem = data;
	int retval = 0;
	struct drm_memblock_item *item;
	int retval = 0, user_key;
	struct sis_memblock *item;
	struct sis_file_private *file_priv = file->driver_priv;
	unsigned long offset;

	mutex_lock(&dev->struct_mutex);

@@ -138,25 +99,68 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
		return -EINVAL;
	}

	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
	item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
			      (unsigned long)file_priv);

	mutex_unlock(&dev->struct_mutex);
	if (item) {
		mem->offset = ((pool == 0) ?
			      dev_priv->vram_offset : dev_priv->agp_offset) +
		    (item->mm->
		     offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
		mem->free = item->user_hash.key;
		mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
	} else {
		mem->offset = 0;
		mem->size = 0;
		mem->free = 0;
	item = kzalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		retval = -ENOMEM;
		goto fail_alloc;
	}

	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
	if (pool == AGP_TYPE) {
		retval = drm_mm_insert_node(&dev_priv->agp_mm,
					    &item->mm_node,
					    mem->size, 0);
		offset = item->mm_node.start;
	} else {
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
		item->req.size = mem->size;
		sis_malloc(&item->req);
		if (item->req.size == 0)
			retval = -ENOMEM;
		offset = item->req.offset;
#else
		retval = drm_mm_insert_node(&dev_priv->vram_mm,
					    &item->mm_node,
					    mem->size, 0);
		offset = item->mm_node.start;
#endif
	}
	if (retval)
		goto fail_alloc;

again:
	if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
		retval = -ENOMEM;
		goto fail_idr;
	}

	retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
	if (retval == -EAGAIN)
		goto again;
	if (retval)
		goto fail_idr;

	list_add(&item->owner_list, &file_priv->obj_list);
	mutex_unlock(&dev->struct_mutex);

	mem->offset = ((pool == 0) ?
		      dev_priv->vram_offset : dev_priv->agp_offset) +
	    (offset << SIS_MM_ALIGN_SHIFT);
	mem->free = user_key;
	mem->size = mem->size << SIS_MM_ALIGN_SHIFT;

	return 0;

fail_idr:
	drm_mm_remove_node(&item->mm_node);
fail_alloc:
	kfree(item);
	mutex_unlock(&dev->struct_mutex);

	mem->offset = 0;
	mem->size = 0;
	mem->free = 0;

	DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
		  mem->offset);

@@ -167,10 +171,25 @@ static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *fil
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_mem_t *mem = data;
	struct sis_memblock *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_sman_free_key(&dev_priv->sman, mem->free);
	obj = idr_find(&dev_priv->object_idr, mem->free);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	idr_remove(&dev_priv->object_idr, mem->free);
	list_del(&obj->owner_list);
	if (drm_mm_node_allocated(&obj->mm_node))
		drm_mm_remove_node(&obj->mm_node);
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
	else
		sis_free(obj->req.offset);
#endif
	kfree(obj);
	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("free = 0x%lx\n", mem->free);

@@ -188,18 +207,10 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_agp_t *agp = data;
	int ret;
	dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
				 agp->size >> SIS_MM_ALIGN_SHIFT);

	if (ret) {
		DRM_ERROR("AGP memory manager initialisation error\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);

	dev_priv->agp_initialized = 1;
	dev_priv->agp_offset = agp->offset;
@@ -293,20 +304,26 @@ void sis_lastclose(struct drm_device *dev)
		return;

	mutex_lock(&dev->struct_mutex);
	drm_sman_cleanup(&dev_priv->sman);
	dev_priv->vram_initialized = 0;
	dev_priv->agp_initialized = 0;
	if (dev_priv->vram_initialized) {
		drm_mm_takedown(&dev_priv->vram_mm);
		dev_priv->vram_initialized = 0;
	}
	if (dev_priv->agp_initialized) {
		drm_mm_takedown(&dev_priv->agp_mm);
		dev_priv->agp_initialized = 0;
	}
	dev_priv->mmio = NULL;
	mutex_unlock(&dev->struct_mutex);
}

void sis_reclaim_buffers_locked(struct drm_device *dev,
				struct drm_file *file_priv)
				struct drm_file *file)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	struct sis_file_private *file_priv = file->driver_priv;
	struct sis_memblock *entry, *next;

	mutex_lock(&dev->struct_mutex);
	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
	if (list_empty(&file_priv->obj_list)) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
@@ -314,7 +331,18 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
	if (dev->driver->dma_quiescent)
		dev->driver->dma_quiescent(dev);

	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);

	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
				 owner_list) {
		list_del(&entry->owner_list);
		if (drm_mm_node_allocated(&entry->mm_node))
			drm_mm_remove_node(&entry->mm_node);
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
		else
			sis_free(entry->req.offset);
#endif
		kfree(entry);
	}
	mutex_unlock(&dev->struct_mutex);
	return;
}
@@ -30,6 +30,29 @@

#include "drm_pciids.h"

static int via_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct via_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	INIT_LIST_HEAD(&file_priv->obj_list);

	return 0;
}

void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct via_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

static struct pci_device_id pciidlist[] = {
	viadrv_PCI_IDS
};
@@ -51,6 +74,8 @@ static struct drm_driver driver = {
	    DRIVER_IRQ_SHARED,
	.load = via_driver_load,
	.unload = via_driver_unload,
	.open = via_driver_open,
	.postclose = via_driver_postclose,
	.context_dtor = via_final_context,
	.get_vblank_counter = via_get_vblank_counter,
	.enable_vblank = via_enable_vblank,
@@ -24,7 +24,7 @@
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_

#include "drm_sman.h"
#include "drm_mm.h"
#define DRIVER_AUTHOR "Various"

#define DRIVER_NAME "via"
@@ -88,9 +88,12 @@ typedef struct drm_via_private {
	uint32_t irq_pending_mask;
	int *irq_map;
	unsigned int idle_fault;
	struct drm_sman sman;
	int vram_initialized;
	struct drm_mm vram_mm;
	int agp_initialized;
	struct drm_mm agp_mm;
	/** Mapping of userspace keys to mm objects */
	struct idr object_idr;
	unsigned long vram_offset;
	unsigned long agp_offset;
	drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
@@ -104,15 +104,10 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)

	dev_priv->chipset = chipset;

	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
	if (ret) {
		kfree(dev_priv);
		return ret;
	}
	idr_init(&dev->object_name_idr);

	ret = drm_vblank_init(dev, 1);
	if (ret) {
		drm_sman_takedown(&dev_priv->sman);
		kfree(dev_priv);
		return ret;
	}
@@ -124,7 +119,8 @@ int via_driver_unload(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = dev->dev_private;

	drm_sman_takedown(&dev_priv->sman);
	idr_remove_all(&dev_priv->object_idr);
	idr_destroy(&dev_priv->object_idr);

	kfree(dev_priv);

@@ -28,26 +28,22 @@
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "drm_sman.h"

#define VIA_MM_ALIGN_SHIFT 4
#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)

struct via_memblock {
	struct drm_mm_node mm_node;
	struct list_head owner_list;
};

int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_agp_t *agp = data;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
				 agp->size >> VIA_MM_ALIGN_SHIFT);

	if (ret) {
		DRM_ERROR("AGP memory manager initialisation error\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);

	dev_priv->agp_initialized = 1;
	dev_priv->agp_offset = agp->offset;
@@ -61,17 +57,9 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_fb_t *fb = data;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
				 fb->size >> VIA_MM_ALIGN_SHIFT);

	if (ret) {
		DRM_ERROR("VRAM memory manager initialisation error\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);

	dev_priv->vram_initialized = 1;
	dev_priv->vram_offset = fb->offset;
@@ -108,19 +96,25 @@ void via_lastclose(struct drm_device *dev)
		return;

	mutex_lock(&dev->struct_mutex);
	drm_sman_cleanup(&dev_priv->sman);
	dev_priv->vram_initialized = 0;
	dev_priv->agp_initialized = 0;
	if (dev_priv->vram_initialized) {
		drm_mm_takedown(&dev_priv->vram_mm);
		dev_priv->vram_initialized = 0;
	}
	if (dev_priv->agp_initialized) {
		drm_mm_takedown(&dev_priv->agp_mm);
		dev_priv->agp_initialized = 0;
	}
	mutex_unlock(&dev->struct_mutex);
}

int via_mem_alloc(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
		  struct drm_file *file)
{
	drm_via_mem_t *mem = data;
	int retval = 0;
	struct drm_memblock_item *item;
	int retval = 0, user_key;
	struct via_memblock *item;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	struct via_file_private *file_priv = file->driver_priv;
	unsigned long tmpSize;

	if (mem->type > VIA_MEM_AGP) {
@@ -136,24 +130,57 @@ int via_mem_alloc(struct drm_device *dev, void *data,
		return -EINVAL;
	}

	tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
	item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
			      (unsigned long)file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (item) {
		mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
			      dev_priv->vram_offset : dev_priv->agp_offset) +
		    (item->mm->
		     offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
		mem->index = item->user_hash.key;
	} else {
		mem->offset = 0;
		mem->size = 0;
		mem->index = 0;
		DRM_DEBUG("Video memory allocation failed\n");
	item = kzalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		retval = -ENOMEM;
		goto fail_alloc;
	}

	tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
	if (mem->type == VIA_MEM_AGP)
		retval = drm_mm_insert_node(&dev_priv->agp_mm,
					    &item->mm_node,
					    tmpSize, 0);
	else
		retval = drm_mm_insert_node(&dev_priv->vram_mm,
					    &item->mm_node,
					    tmpSize, 0);
	if (retval)
		goto fail_alloc;

again:
	if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
		retval = -ENOMEM;
		goto fail_idr;
	}

	retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
	if (retval == -EAGAIN)
		goto again;
	if (retval)
		goto fail_idr;

	list_add(&item->owner_list, &file_priv->obj_list);
	mutex_unlock(&dev->struct_mutex);

	mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
		      dev_priv->vram_offset : dev_priv->agp_offset) +
	    ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
	mem->index = user_key;

	return 0;

fail_idr:
	drm_mm_remove_node(&item->mm_node);
fail_alloc:
	kfree(item);
	mutex_unlock(&dev->struct_mutex);

	mem->offset = 0;
	mem->size = 0;
	mem->index = 0;
	DRM_DEBUG("Video memory allocation failed\n");

	return retval;
}
@@ -161,11 +188,22 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	drm_via_mem_t *mem = data;
	struct via_memblock *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_sman_free_key(&dev_priv->sman, mem->index);
	obj = idr_find(&dev_priv->object_idr, mem->index);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	idr_remove(&dev_priv->object_idr, mem->index);
	list_del(&obj->owner_list);
	drm_mm_remove_node(&obj->mm_node);
	kfree(obj);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("free = 0x%lx\n", mem->index);

	return ret;
@@ -173,12 +211,13 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)


void via_reclaim_buffers_locked(struct drm_device *dev,
				struct drm_file *file_priv)
				struct drm_file *file)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	struct via_file_private *file_priv = file->driver_priv;
	struct via_memblock *entry, *next;

	mutex_lock(&dev->struct_mutex);
	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
	if (list_empty(&file_priv->obj_list)) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
@@ -186,7 +225,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
	if (dev->driver->dma_quiescent)
		dev->driver->dma_quiescent(dev);

	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
				 owner_list) {
		list_del(&entry->owner_list);
		drm_mm_remove_node(&entry->mm_node);
		kfree(entry);
	}
	mutex_unlock(&dev->struct_mutex);
	return;
}
@@ -1,176 +0,0 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Simple memory MANager interface that keeps track on allocate regions on a
 * per "owner" basis. All regions associated with an "owner" can be released
 * with a simple call. Typically if the "owner" exists. The owner is any
 * "unsigned long" identifier. Can typically be a pointer to a file private
 * struct or a context identifier.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#ifndef DRM_SMAN_H
#define DRM_SMAN_H

#include "drmP.h"
#include "drm_hashtab.h"

/*
 * A class that is an abstration of a simple memory allocator.
 * The sman implementation provides a default such allocator
 * using the drm_mm.c implementation. But the user can replace it.
 * See the SiS implementation, which may use the SiS FB kernel module
 * for memory management.
 */

struct drm_sman_mm {
	/* private info. If allocated, needs to be destroyed by the destroy
	   function */
	void *private;

	/* Allocate a memory block with given size and alignment.
	   Return an opaque reference to the memory block */

	void *(*allocate) (void *private, unsigned long size,
			   unsigned alignment);

	/* Free a memory block. "ref" is the opaque reference that we got from
	   the "alloc" function */

	void (*free) (void *private, void *ref);

	/* Free all resources associated with this allocator */

	void (*destroy) (void *private);

	/* Return a memory offset from the opaque reference returned from the
	   "alloc" function */

	unsigned long (*offset) (void *private, void *ref);
};

struct drm_memblock_item {
	struct list_head owner_list;
	struct drm_hash_item user_hash;
	void *mm_info;
	struct drm_sman_mm *mm;
	struct drm_sman *sman;
};

struct drm_sman {
	struct drm_sman_mm *mm;
	int num_managers;
	struct drm_open_hash owner_hash_tab;
	struct drm_open_hash user_hash_tab;
	struct list_head owner_items;
};

/*
 * Take down a memory manager. This function should only be called after a
 * successful init and after a call to drm_sman_cleanup.
 */

extern void drm_sman_takedown(struct drm_sman * sman);

/*
 * Allocate structures for a manager.
 * num_managers are the number of memory pools to manage. (VRAM, AGP, ....)
 * user_order is the log2 of the number of buckets in the user hash table.
 * set this to approximately log2 of the max number of memory regions
 * that will be allocated for _all_ pools together.
 * owner_order is the log2 of the number of buckets in the owner hash table.
 * set this to approximately log2 of
 * the number of client file connections that will
 * be using the manager.
 *
 */

extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
			 unsigned int user_order, unsigned int owner_order);

/*
 * Initialize a drm_mm.c allocator. Should be called only once for each
 * manager unless a customized allogator is used.
 */

extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
			      unsigned long start, unsigned long size);

/*
 * Initialize a customized allocator for one of the managers.
 * (See the SiS module). The object pointed to by "allocator" is copied,
 * so it can be destroyed after this call.
 */

extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
				struct drm_sman_mm * allocator);

/*
 * Allocate a memory block. Aligment is not implemented yet.
 */

extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
						unsigned int manager,
						unsigned long size,
						unsigned alignment,
						unsigned long owner);
/*
 * Free a memory block identified by its user hash key.
 */

extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);

/*
 * returns 1 iff there are no stale memory blocks associated with this owner.
 * Typically called to determine if we need to idle the hardware and call
 * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
 * resources associated with owner.
 */

extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);

/*
 * Frees all stale memory blocks associated with this owner. Note that this
 * requires that the hardware is finished with all blocks, so the graphics engine
 * should be idled before this call is made. This function also frees
 * any resources associated with "owner" and should be called when owner
 * is not going to be referenced anymore.
 */

extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);

/*
 * Frees all stale memory blocks associated with the memory manager.
 * See idling above.
 */

extern void drm_sman_cleanup(struct drm_sman * sman);

#endif
@@ -64,4 +64,8 @@ typedef struct {
	unsigned int offset, size;
} drm_sis_fb_t;

struct sis_file_private {
	struct list_head obj_list;
};

#endif /* __SIS_DRM_H__ */
@@ -274,4 +274,8 @@ typedef struct drm_via_dmablit {
	drm_via_blitsync_t sync;
} drm_via_dmablit_t;

struct via_file_private {
	struct list_head obj_list;
};

#endif /* _VIA_DRM_H_ */