MIPS: VPE: Get rid of BKL.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
commit 1bbfc20d01
parent c0648e02db
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -72,8 +72,9 @@ static void rtlx_dispatch(void)
  */
 static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
 {
+        unsigned int vpeflags;
+        unsigned long flags;
         int i;
-        unsigned int flags, vpeflags;

         /* Ought not to be strictly necessary for SMTC builds */
         local_irq_save(flags);
@@ -392,20 +393,12 @@ out:

 static int file_open(struct inode *inode, struct file *filp)
 {
-        int minor = iminor(inode);
-        int err;
-
-        lock_kernel();
-        err = rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
-        unlock_kernel();
-        return err;
+        return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1);
 }

 static int file_release(struct inode *inode, struct file *filp)
 {
-        int minor = iminor(inode);
-
-        return rtlx_release(minor);
+        return rtlx_release(iminor(inode));
 }

 static unsigned int file_poll(struct file *file, poll_table * wait)
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -144,14 +144,15 @@ struct tc {
 };

 struct {
-        /* Virtual processing elements */
-        struct list_head vpe_list;
-
-        /* Thread contexts */
-        struct list_head tc_list;
+        spinlock_t vpe_list_lock;
+        struct list_head vpe_list;      /* Virtual processing elements */
+        spinlock_t tc_list_lock;
+        struct list_head tc_list;       /* Thread contexts */
 } vpecontrol = {
-        .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
-        .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
+        .vpe_list_lock = SPIN_LOCK_UNLOCKED,
+        .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
+        .tc_list_lock = SPIN_LOCK_UNLOCKED,
+        .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
 };

 static void release_progmem(void *ptr);
@@ -159,28 +160,38 @@ static void release_progmem(void *ptr);
 /* get the vpe associated with this minor */
 static struct vpe *get_vpe(int minor)
 {
-        struct vpe *v;
+        struct vpe *res, *v;

         if (!cpu_has_mipsmt)
                 return NULL;

+        res = NULL;
+        spin_lock(&vpecontrol.vpe_list_lock);
         list_for_each_entry(v, &vpecontrol.vpe_list, list) {
-                if (v->minor == minor)
-                        return v;
+                if (v->minor == minor) {
+                        res = v;
+                        break;
+                }
         }
+        spin_unlock(&vpecontrol.vpe_list_lock);

-        return NULL;
+        return res;
 }

 /* get the vpe associated with this minor */
 static struct tc *get_tc(int index)
 {
-        struct tc *t;
+        struct tc *res, *t;

+        res = NULL;
+        spin_lock(&vpecontrol.tc_list_lock);
         list_for_each_entry(t, &vpecontrol.tc_list, list) {
-                if (t->index == index)
-                        return t;
+                if (t->index == index) {
+                        res = t;
+                        break;
+                }
         }
+        spin_unlock(&vpecontrol.tc_list_lock);

         return NULL;
 }
@@ -190,15 +201,17 @@ static struct vpe *alloc_vpe(int minor)
 {
         struct vpe *v;

-        if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
+        if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
                 return NULL;
-        }

         INIT_LIST_HEAD(&v->tc);
+        spin_lock(&vpecontrol.vpe_list_lock);
         list_add_tail(&v->list, &vpecontrol.vpe_list);
+        spin_unlock(&vpecontrol.vpe_list_lock);

         INIT_LIST_HEAD(&v->notify);
         v->minor = minor;
+
         return v;
 }

@@ -212,7 +225,10 @@ static struct tc *alloc_tc(int index)

         INIT_LIST_HEAD(&tc->tc);
         tc->index = index;
+
+        spin_lock(&vpecontrol.tc_list_lock);
         list_add_tail(&tc->list, &vpecontrol.tc_list);
+        spin_unlock(&vpecontrol.tc_list_lock);

 out:
         return tc;
@@ -227,7 +243,7 @@ static void release_vpe(struct vpe *v)
         kfree(v);
 }

-static void dump_mtregs(void)
+static void __maybe_unused dump_mtregs(void)
 {
         unsigned long val;

@@ -1048,20 +1064,19 @@ static int vpe_open(struct inode *inode, struct file *filp)
         enum vpe_state state;
         struct vpe_notifications *not;
         struct vpe *v;
-        int ret, err = 0;
+        int ret;

-        lock_kernel();
         if (minor != iminor(inode)) {
                 /* assume only 1 device at the moment. */
-                printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
-                err = -ENODEV;
-                goto out;
+                pr_warning("VPE loader: only vpe1 is supported\n");
+
+                return -ENODEV;
         }

         if ((v = get_vpe(tclimit)) == NULL) {
-                printk(KERN_WARNING "VPE loader: unable to get vpe\n");
-                err = -ENODEV;
-                goto out;
+                pr_warning("VPE loader: unable to get vpe\n");
+
+                return -ENODEV;
         }

         state = xchg(&v->state, VPE_STATE_INUSE);
@@ -1101,8 +1116,8 @@ static int vpe_open(struct inode *inode, struct file *filp)
         v->shared_ptr = NULL;
         v->__start = 0;

-out:
-        unlock_kernel();
         return 0;
 }

@@ -1594,14 +1609,14 @@ static void __exit vpe_module_exit(void)
 {
         struct vpe *v, *n;

-        list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
-                if (v->state != VPE_STATE_UNUSED) {
-                        release_vpe(v);
-                }
-        }
-
         device_del(&vpe_device);
         unregister_chrdev(major, module_name);
+
+        /* No locking needed here */
+        list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
+                if (v->state != VPE_STATE_UNUSED)
+                        release_vpe(v);
+        }
 }

 module_init(vpe_module_init);
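
For readers who want the locking pattern from the vpe.c hunks in isolation: the reworked get_vpe()/get_tc() take the list lock, remember the matching entry in a local variable, break out of the loop, drop the lock, and only then return, rather than returning from inside the critical section. Below is a minimal user-space sketch of that shape. The pthread mutex, the hand-rolled singly linked list, and the main() driver are illustrative stand-ins, not kernel code.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's vpe list entry. */
struct vpe {
        int minor;
        struct vpe *next;
};

static pthread_mutex_t vpe_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vpe *vpe_list;    /* head of a simple singly linked list */

/*
 * Same shape as the reworked get_vpe(): take the list lock, remember the
 * match in 'res', break out, unlock once, and only then return.  Returning
 * from inside the locked region would leave the lock held.
 */
static struct vpe *get_vpe(int minor)
{
        struct vpe *res = NULL;
        struct vpe *v;

        pthread_mutex_lock(&vpe_list_lock);
        for (v = vpe_list; v != NULL; v = v->next) {
                if (v->minor == minor) {
                        res = v;
                        break;
                }
        }
        pthread_mutex_unlock(&vpe_list_lock);

        return res;
}

int main(void)
{
        static struct vpe v1 = { .minor = 1, .next = NULL };

        /* Publish one entry, then look it up through the locked helper. */
        pthread_mutex_lock(&vpe_list_lock);
        vpe_list = &v1;
        pthread_mutex_unlock(&vpe_list_lock);

        printf("lookup(1): %s\n", get_vpe(1) ? "found" : "missing");
        printf("lookup(2): %s\n", get_vpe(2) ? "found" : "missing");
        return 0;
}

The kernel patch applies this shape twice, once under vpecontrol.vpe_list_lock and once under vpecontrol.tc_list_lock, and the same locks also cover the list_add_tail() calls in alloc_vpe() and alloc_tc().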