2017-11-15 01:38:02 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2009-06-16 16:30:22 +08:00
|
|
|
* driver for channel subsystem
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2010-10-25 22:10:28 +08:00
|
|
|
* Copyright IBM Corp. 2002, 2010
|
2009-06-16 16:30:22 +08:00
|
|
|
*
|
|
|
|
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
|
|
|
|
* Cornelia Huck (cornelia.huck@de.ibm.com)
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2008-12-25 20:39:36 +08:00
|
|
|
|
|
|
|
#define KMSG_COMPONENT "cio"
|
|
|
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
|
|
|
|
2016-10-31 04:37:24 +08:00
|
|
|
#include <linux/export.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/list.h>
|
2007-10-12 22:11:20 +08:00
|
|
|
#include <linux/reboot.h>
|
2010-02-27 05:37:25 +08:00
|
|
|
#include <linux/proc_fs.h>
|
2019-04-03 00:47:29 +08:00
|
|
|
#include <linux/genalloc.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
2008-07-14 15:58:58 +08:00
|
|
|
#include <asm/isc.h>
|
2009-03-26 22:24:01 +08:00
|
|
|
#include <asm/crw.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include "css.h"
|
|
|
|
#include "cio.h"
|
2018-06-26 21:09:32 +08:00
|
|
|
#include "blacklist.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
#include "cio_debug.h"
|
|
|
|
#include "ioasm.h"
|
|
|
|
#include "chsc.h"
|
2006-06-29 20:57:03 +08:00
|
|
|
#include "device.h"
|
2007-04-27 22:01:34 +08:00
|
|
|
#include "idset.h"
|
2007-04-27 22:01:35 +08:00
|
|
|
#include "chp.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Set once the channel subsystem has been fully initialized. */
int css_init_done = 0;
/* Highest valid subchannel-set id; bounds the ssid loop in for_each_subchannel(). */
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static const struct bus_type css_bus_type;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-02-06 04:18:53 +08:00
|
|
|
/*
 * Invoke @fn for every subchannel id, iterating subchannel numbers
 * 0..__MAX_SUBCHANNEL within each subchannel set 0..max_ssid.
 * Iteration stops early when @fn returns non-zero; that value is
 * returned to the caller (0 when all ids were visited).
 */
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		/* Restart subchannel numbering for the next set. */
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
|
|
|
|
|
2008-01-26 21:10:48 +08:00
|
|
|
/*
 * Callback context for for_each_subchannel_staged(): tracks which
 * subchannel ids still need the "unknown" pass.
 */
struct cb_data {
	void *data;			/* opaque caller data passed to the callbacks */
	struct idset *set;		/* ids not yet handled by the known-subchannel pass */
	int (*fn_known_sch)(struct subchannel *, void *);	/* for registered subchannels */
	int (*fn_unknown_sch)(struct subchannel_id, void *);	/* for unregistered ids */
};
|
|
|
|
|
|
|
|
static int call_fn_known_sch(struct device *dev, void *data)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
struct cb_data *cb = data;
|
|
|
|
int rc = 0;
|
|
|
|
|
2013-11-26 21:59:21 +08:00
|
|
|
if (cb->set)
|
|
|
|
idset_sch_del(cb->set, sch->schid);
|
2008-01-26 21:10:48 +08:00
|
|
|
if (cb->fn_known_sch)
|
|
|
|
rc = cb->fn_known_sch(sch, cb->data);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
|
|
|
|
{
|
|
|
|
struct cb_data *cb = data;
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
if (idset_sch_contains(cb->set, schid))
|
|
|
|
rc = cb->fn_unknown_sch(schid, cb->data);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2009-03-26 22:24:11 +08:00
|
|
|
static int call_fn_all_sch(struct subchannel_id schid, void *data)
|
|
|
|
{
|
|
|
|
struct cb_data *cb = data;
|
|
|
|
struct subchannel *sch;
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
sch = get_subchannel_by_schid(schid);
|
|
|
|
if (sch) {
|
|
|
|
if (cb->fn_known_sch)
|
|
|
|
rc = cb->fn_known_sch(sch, cb->data);
|
|
|
|
put_device(&sch->dev);
|
|
|
|
} else {
|
|
|
|
if (cb->fn_unknown_sch)
|
|
|
|
rc = cb->fn_unknown_sch(schid, cb->data);
|
|
|
|
}
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2008-01-26 21:10:48 +08:00
|
|
|
/*
 * Two-stage iteration over all subchannels: first apply @fn_known to every
 * registered subchannel on the css bus, then apply @fn_unknown to every
 * remaining subchannel id. An idset tracks which ids were already handled
 * by the first stage; if it cannot be allocated, fall back to probing each
 * id individually via call_fn_all_sch().
 */
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	/* Start with every id marked as unhandled. */
	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
|
|
|
|
|
2009-12-07 19:51:18 +08:00
|
|
|
/* Worker that performs deferred per-subchannel actions (defined below). */
static void css_sch_todo(struct work_struct *work);
|
|
|
|
|
2023-11-01 19:57:51 +08:00
|
|
|
/* Initialize the per-subchannel lock and the registration mutex. */
static void css_sch_create_locks(struct subchannel *sch)
{
	spin_lock_init(&sch->lock);
	mutex_init(&sch->reg_mutex);
}
|
|
|
|
|
2013-04-13 18:58:55 +08:00
|
|
|
/*
 * Device-core release callback: invoked when the last reference to the
 * subchannel's struct device is dropped. Clears the interruption parameter
 * in the hardware config before freeing the memory.
 */
static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch);
}
|
|
|
|
|
2018-06-26 21:09:32 +08:00
|
|
|
/*
 * Decide whether a subchannel described by @schib should get a device.
 * I/O and message subchannels are rejected (-ENODEV) when invalid or when
 * the attached device number is on the cio blacklist; all other subchannel
 * types are accepted unconditionally.
 */
static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Allocate and initialize a struct subchannel for @schid based on @schib.
 * Validates the subchannel first, sets up locks, the todo work item, the
 * device-core embedding and DMA masks. Returns the new subchannel or an
 * ERR_PTR on failure.
 */
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	/* GFP_DMA: the structure itself must be 31-bit addressable. */
	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	css_sch_create_locks(sch);

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	sch->dev.dma_mask = &sch->dma_mask;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses for some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

	return sch;

err:
	/* No references handed out yet, so a plain kfree is sufficient here. */
	kfree(sch);
	return ERR_PTR(ret);
}
|
|
|
|
|
2007-07-27 18:29:10 +08:00
|
|
|
/*
 * Name the subchannel device ("0.<ssid>.<sch_no>") and add it to the
 * device core. reg_mutex serializes against concurrent unregistration.
 */
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}
|
|
|
|
|
2008-07-14 15:58:47 +08:00
|
|
|
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	/* Only unregister once; unregistration may race with registration. */
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
|
2006-07-12 22:39:50 +08:00
|
|
|
|
2007-04-27 22:01:35 +08:00
|
|
|
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int mask;
|
|
|
|
|
|
|
|
memset(ssd, 0, sizeof(struct chsc_ssd_info));
|
|
|
|
ssd->path_mask = pmcw->pim;
|
|
|
|
for (i = 0; i < 8; i++) {
|
|
|
|
mask = 0x80 >> i;
|
|
|
|
if (pmcw->pim & mask) {
|
|
|
|
chp_id_init(&ssd->chpid[i]);
|
|
|
|
ssd->chpid[i].id = pmcw->chpid[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int mask;
|
|
|
|
|
|
|
|
for (i = 0; i < 8; i++) {
|
|
|
|
mask = 0x80 >> i;
|
|
|
|
if (ssd->path_mask & mask)
|
2018-06-13 22:26:23 +08:00
|
|
|
chp_new(ssd->chpid[i]);
|
2007-04-27 22:01:35 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Refresh the subchannel-description info for @sch via CHSC; on failure,
 * derive it from the PMCW instead. Afterwards make sure channel-path
 * objects exist for all paths in the description.
 */
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}
|
|
|
|
|
2008-07-14 15:58:44 +08:00
|
|
|
/* sysfs "type" attribute: the subchannel type (st field) as one hex digit. */
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sysfs_emit(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);
|
2008-07-14 15:58:44 +08:00
|
|
|
|
|
|
|
/* sysfs "modalias" attribute: module alias of the form "css:t<type>". */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sysfs_emit(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);
|
2008-07-14 15:58:44 +08:00
|
|
|
|
2019-06-13 19:08:15 +08:00
|
|
|
/*
 * sysfs "driver_override" store: pin the subchannel to a specific driver
 * name (or clear the override), using the common driver_set_override()
 * helper for allocation and locking.
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = driver_set_override(dev, &sch->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}
|
|
|
|
|
|
|
|
/* sysfs "driver_override" show: the device lock guards against a
 * concurrent store replacing/freeing the string while we print it. */
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);
|
|
|
|
|
2008-07-14 15:58:44 +08:00
|
|
|
/* Default sysfs attributes present on every subchannel device. */
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
|
|
|
|
|
2017-05-15 21:49:07 +08:00
|
|
|
static ssize_t chpids_show(struct device *dev,
|
|
|
|
struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
struct chsc_ssd_info *ssd = &sch->ssd_info;
|
|
|
|
ssize_t ret = 0;
|
|
|
|
int mask;
|
|
|
|
int chp;
|
|
|
|
|
|
|
|
for (chp = 0; chp < 8; chp++) {
|
|
|
|
mask = 0x80 >> chp;
|
|
|
|
if (ssd->path_mask & mask)
|
|
|
|
ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
|
|
|
|
else
|
|
|
|
ret += sprintf(buf + ret, "00 ");
|
|
|
|
}
|
|
|
|
ret += sprintf(buf + ret, "\n");
|
|
|
|
return ret;
|
|
|
|
}
|
2017-12-20 02:15:08 +08:00
|
|
|
static DEVICE_ATTR_RO(chpids);
|
2017-05-15 21:49:07 +08:00
|
|
|
|
|
|
|
/* sysfs "pimpampom" attribute: PIM, PAM and POM path masks from the PMCW. */
static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sysfs_emit(buf, "%02x %02x %02x\n",
			  pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);
|
2017-05-15 21:49:07 +08:00
|
|
|
|
2021-04-25 16:52:38 +08:00
|
|
|
/*
 * sysfs "dev_busid" attribute: bus id of the device attached to this
 * subchannel, or "none" if no device is attached. Attachment is indicated
 * by the device-number-valid bit (I/O) or the word bit (message).
 */
static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);
|
|
|
|
|
2017-05-15 21:49:07 +08:00
|
|
|
/* Extra sysfs attributes only present on I/O-type subchannels. */
static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};
|
|
|
|
|
2013-04-13 19:03:54 +08:00
|
|
|
int css_register_subchannel(struct subchannel *sch)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Initialize the subchannel structure */
|
2007-10-12 22:11:13 +08:00
|
|
|
sch->dev.parent = &channel_subsystems[0]->device;
|
2005-04-17 06:20:36 +08:00
|
|
|
sch->dev.bus = &css_bus_type;
|
2008-07-14 15:58:44 +08:00
|
|
|
sch->dev.groups = default_subch_attr_groups;
|
2017-05-15 21:49:07 +08:00
|
|
|
|
|
|
|
if (sch->st == SUBCHANNEL_TYPE_IO)
|
|
|
|
sch->dev.type = &io_subchannel_type;
|
|
|
|
|
2007-04-27 22:01:35 +08:00
|
|
|
css_update_ssd_info(sch);
|
2005-04-17 06:20:36 +08:00
|
|
|
/* make it known to the system */
|
2006-07-12 22:39:50 +08:00
|
|
|
ret = css_sch_device_register(sch);
|
2006-12-08 22:54:21 +08:00
|
|
|
if (ret) {
|
2007-07-27 18:29:19 +08:00
|
|
|
CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
|
|
|
|
sch->schid.ssid, sch->schid.sch_no, ret);
|
2006-12-08 22:54:21 +08:00
|
|
|
return ret;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-06-26 21:09:32 +08:00
|
|
|
/*
 * Allocate and register a subchannel for @schid. On registration failure
 * drop the initial device reference, which triggers
 * css_subchannel_release() and frees the structure.
 */
static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}
|
|
|
|
|
2005-06-26 05:55:27 +08:00
|
|
|
static int
|
2019-06-15 01:53:59 +08:00
|
|
|
check_subchannel(struct device *dev, const void *data)
|
2005-06-26 05:55:27 +08:00
|
|
|
{
|
|
|
|
struct subchannel *sch;
|
2019-06-15 01:53:59 +08:00
|
|
|
struct subchannel_id *schid = (void *)data;
|
2005-06-26 05:55:27 +08:00
|
|
|
|
|
|
|
sch = to_subchannel(dev);
|
2006-01-06 16:19:21 +08:00
|
|
|
return schid_equal(&sch->schid, schid);
|
2005-06-26 05:55:27 +08:00
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
struct subchannel *
|
2006-01-06 16:19:21 +08:00
|
|
|
get_subchannel_by_schid(struct subchannel_id schid)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct device *dev;
|
|
|
|
|
2005-06-26 05:55:27 +08:00
|
|
|
dev = bus_find_device(&css_bus_type, NULL,
|
2006-10-11 21:31:47 +08:00
|
|
|
&schid, check_subchannel);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-06-26 05:55:27 +08:00
|
|
|
return dev ? to_subchannel(dev) : NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2008-01-26 21:10:45 +08:00
|
|
|
/**
|
|
|
|
* css_sch_is_valid() - check if a subchannel is valid
|
|
|
|
* @schib: subchannel information block for the subchannel
|
|
|
|
*/
|
|
|
|
int css_sch_is_valid(struct schib *schib)
|
|
|
|
{
|
|
|
|
if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
|
|
|
|
return 0;
|
2008-07-14 15:58:48 +08:00
|
|
|
if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
|
|
|
|
return 0;
|
2008-01-26 21:10:45 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(css_sch_is_valid);
|
|
|
|
|
2006-09-20 22:00:01 +08:00
|
|
|
/*
 * Evaluate a subchannel id that has no registered device yet: store its
 * schib and, if accessible, probe a new device for it. When called on the
 * fast path (@slow == 0), defer with -EAGAIN so the slow-path worker
 * handles it.
 */
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}
|
|
|
|
|
2008-07-14 15:58:45 +08:00
|
|
|
/*
 * Forward a machine-check triggered evaluation to the subchannel driver's
 * sch_event() callback, if one is registered. Returns the callback's
 * result (0 if no driver/handler); unexpected errors are logged.
 */
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	/* -EAGAIN just means "retry on the slow path", so don't log it. */
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}
|
|
|
|
|
2007-04-27 22:01:34 +08:00
|
|
|
/*
 * Evaluate subchannel id @schid: dispatch to the known- or new-subchannel
 * path depending on whether a device is already registered, and reschedule
 * on the slow path when either returns -EAGAIN.
 */
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		/* Drop the lookup reference. */
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}
|
|
|
|
|
2011-12-01 20:32:19 +08:00
|
|
|
/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	/* Higher enum values take precedence over lower ones. */
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
|
2011-12-01 20:32:19 +08:00
|
|
|
|
|
|
|
/*
 * Workqueue handler for deferred subchannel operations. Fetches and clears
 * the pending todo under the subchannel lock, performs it, then drops the
 * device reference taken by css_sched_sch_todo().
 */
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(&sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(&sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			/* Re-queue the evaluation; requires the lock. */
			spin_lock_irq(&sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(&sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}
|
|
|
|
|
2007-04-27 22:01:34 +08:00
|
|
|
/* Ids queued for slow-path evaluation; protected by slow_subchannel_lock. */
static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
/* Waiters for "all scheduled evaluations completed" (css_eval_scheduled == 0). */
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;
|
2007-04-27 22:01:34 +08:00
|
|
|
|
|
|
|
/* Allocate the slow-path idset during boot; -ENOMEM on failure. */
static int __init slow_subchannel_init(void)
{
	atomic_set(&css_eval_scheduled, 0);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
|
|
|
|
|
2008-01-26 21:10:48 +08:00
|
|
|
/*
 * Slow-path stage 1 (registered subchannels): if @sch is marked in the
 * slow set, clear the mark and evaluate it; requeue on -EAGAIN. Always
 * returns 0 so the bus iteration continues.
 */
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take long time for platforms with lots of
		 * known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Slow-path stage 2 (unregistered ids): if @schid is marked in the slow
 * set, clear the mark and try to probe it. -EAGAIN requeues the id;
 * -ENXIO/-ENOMEM/-EIO abort the remainder of the current subchannel set;
 * other errors are swallowed so scanning continues.
 */
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while. */
		cond_resched();
	}
	return rc;
}
|
|
|
|
|
|
|
|
/*
 * Slow-path worker: run the staged evaluation over all subchannels, then
 * wake up waiters if no further evaluations are pending.
 */
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
|
|
|
|
|
2013-11-26 21:58:08 +08:00
|
|
|
/* Delayed work item driving the slow-path evaluation. */
static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
/* Workqueue used for all deferred cio work (slow path and sch todo). */
struct workqueue_struct *cio_work_q;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-04-27 22:01:34 +08:00
|
|
|
/* Queue a single subchannel id for slow-path evaluation. */
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
|
|
|
|
|
|
|
|
/* Queue every possible subchannel id for slow-path evaluation. */
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
|
|
|
|
|
s390/cio: evaluate devices with non-operational paths
css_schedule_reprobe() function calls the evaluation for CSS_EVAL_UNREG
which is specific to the idset of unregistered subchannels. This
evaluation was introduced because, previously, if the underlying device
become not-accessible, the subchannel was unregistered. But, in the recent
changes in cio,with the commit '2297791c92d0 s390/cio: dont unregister
subchannel from child-drivers', we no longer unregister the subchannels
just because of a non-operational device. This allows to have subchannels
without any operational device connected on it. So, a css_schedule_reprobe
function on unregistered subchannel does not have any effect.
Change this functionality to evaluate the subchannels which do not
have a working path to the device. This could be due to the erroneous
device or due to the erroneous path. Evaluate based on the values of OPM
and PAM&POM.
Here we introduced a new idset function,to keep I/O subchannels in the
idset when the last seen status indicates that the device has no working
path. A device has no working path if all available paths have been tried
without success.A failed I/O attempt on a path is indicated as a 0 bit
value in the POM mask. By looking at the POM mask bit values of available
paths (1 in PAM) that Linux is supposed to use (1 in vary mask OPM), we
can identify a non-working device as a device where the bit-wise and of
the PAM, POM and OPM mask return 0.
css_schedule_reprobe() is being used by dasd-driver and chsc-cio
component. dasd driver, when it detects a change in the pathgroup, invokes
the re-evaluation of the subchannel. And chsc-cio component upon a CRW
event, (resource accessibility event). In both the cases, it makes much
better sense to re-evalute the subchannel with no-valid path.
Signed-off-by: Vineeth Vijayan <vneethv@linux.ibm.com>
Reported-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Peter Oberparleiter <oberpar@linux.ibm.com>
Tested-by: Eric Farman <farman@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2022-11-17 14:31:50 +08:00
|
|
|
/* bus_for_each_dev() callback: drop subchannels that still have a working
 * path to their device from the idset passed in @data. */
static int __unset_validpath(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	/*
	 * Here we want to make sure that we are considering only those
	 * subchannels which do not have an operational device attached to
	 * it. This can be found with the help of the PAM and POM values of
	 * the pmcw. OPM provides the information about any path which is
	 * currently vary-off, so that we should not consider it.
	 */
	if (sch->st == SUBCHANNEL_TYPE_IO &&
	    (sch->opm & pmcw->pam & pmcw->pom))
		idset_sch_del(set, sch->schid);

	return 0;
}
|
|
|
|
|
2021-09-10 19:45:24 +08:00
|
|
|
static int __unset_online(struct device *dev, void *data)
|
|
|
|
{
|
|
|
|
struct idset *set = data;
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
|
2022-10-14 18:24:58 +08:00
|
|
|
if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
|
|
|
|
idset_sch_del(set, sch->schid);
|
2021-09-10 19:45:24 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Schedule evaluation, after @delay jiffies, of all subchannels matching
 * the condition @cond. Falls back to evaluating all subchannels if no
 * idset can be allocated.
 */
void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
	unsigned long flags;
	struct idset *set;

	/* Collect the subchannels matching the condition. */
	set = idset_sch_new();
	if (!set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(set);
	switch (cond) {
	case CSS_EVAL_NO_PATH:
		/* Keep only subchannels without a working path. */
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
		break;
	case CSS_EVAL_NOT_ONLINE:
		/* Keep only I/O subchannels that are not enabled. */
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
		break;
	default:
		break;
	}

	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(set);
}
|
|
|
|
|
2009-09-23 04:58:38 +08:00
|
|
|
/* Wait until all queued subchannel evaluation work has been processed. */
void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}
|
2006-06-29 20:57:03 +08:00
|
|
|
|
s390/cio: evaluate devices with non-operational paths
css_schedule_reprobe() function calls the evaluation for CSS_EVAL_UNREG
which is specific to the idset of unregistered subchannels. This
evaluation was introduced because, previously, if the underlying device
become not-accessible, the subchannel was unregistered. But, in the recent
changes in cio,with the commit '2297791c92d0 s390/cio: dont unregister
subchannel from child-drivers', we no longer unregister the subchannels
just because of a non-operational device. This allows to have subchannels
without any operational device connected on it. So, a css_schedule_reprobe
function on unregistered subchannel does not have any effect.
Change this functionality to evaluate the subchannels which does not
have a working path to the device. This could be due the erroneous
device or due to the erraneous path. Evaluate based on the values of OPM
and PAM&POM.
Here we introduced a new idset function,to keep I/O subchannels in the
idset when the last seen status indicates that the device has no working
path. A device has no working path if all available paths have been tried
without success.A failed I/O attempt on a path is indicated as a 0 bit
value in the POM mask. By looking at the POM mask bit values of available
paths (1 in PAM) that Linux is supposed to use (1 in vary mask OPM), we
can identify a non-working device as a device where the bit-wise and of
the PAM, POM and OPM mask return 0.
css_schedule_reprobe() is being used by dasd-driver and chsc-cio
component. dasd driver, when it detects a change in the pathgroup, invokes
the re-evaluation of the subchannel. And chsc-cio component upon a CRW
event, (resource accessibility event). In both the cases, it makes much
better sense to re-evalute the subchannel with no-valid path.
Signed-off-by: Vineeth Vijayan <vneethv@linux.ibm.com>
Reported-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Peter Oberparleiter <oberpar@linux.ibm.com>
Tested-by: Eric Farman <farman@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2022-11-17 14:31:50 +08:00
|
|
|
/* Schedule reprobing of all subchannels with no valid operational path. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		/* CRWs were lost; re-evaluate everything. */
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		/* A chained CRW carries the ssid in its rsid field. */
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		/* Path-mode change: refresh the stored ssd info. */
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
|
|
|
|
|
|
|
|
/* Initialize the global path-group ID of @css from the CPU identity and
 * the high word of the TOD clock (@tod_high). */
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		/* Multiple channel subsystems: use the extended cssid form. */
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}
|
|
|
|
|
2016-10-25 20:05:08 +08:00
|
|
|
/* Device release callback: free a struct channel_subsystem once its last
 * reference is dropped. */
static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}
|
|
|
|
|
2016-10-12 00:21:36 +08:00
|
|
|
/* sysfs: show the real channel-subsystem id, if one could be determined. */
static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sysfs_emit(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);
|
|
|
|
|
2021-04-25 17:27:59 +08:00
|
|
|
/* sysfs: trigger a full subchannel re-evaluation and wait for completion. */
static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
			    const char *buf, size_t count)
{
	CIO_TRACE_EVENT(4, "usr-rescan");

	css_schedule_eval_all();
	css_complete_work();

	return count;
}
static DEVICE_ATTR_WO(rescan);
|
|
|
|
|
2016-10-11 22:37:43 +08:00
|
|
|
/* sysfs: show whether channel measurement is enabled for this css. */
static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	/* cm_enabled is protected by the css mutex. */
	mutex_lock(&css->mutex);
	ret = sysfs_emit(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}
|
|
|
|
|
2016-10-11 22:37:43 +08:00
|
|
|
/* sysfs: enable (1) or disable (0) channel measurement for this css.
 * Writing the current state is a no-op; any other value is rejected. */
static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);
|
|
|
|
|
|
|
|
/* Hide the cm_enable attribute when the SECM facility is not available. */
static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}
|
|
|
|
|
2016-10-12 00:21:36 +08:00
|
|
|
/* Attributes present on every css device. */
static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	&dev_attr_rescan.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

/* Channel-measurement attributes; visibility gated by cm_enable_mode(). */
static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};
|
2006-03-24 19:15:14 +08:00
|
|
|
|
2007-02-06 04:18:53 +08:00
|
|
|
/*
 * Allocate and register channel subsystem number @nr, including its
 * "defunct" pseudo subchannel. On failure, channel_subsystems[nr] is
 * reset to NULL and a negative errno is returned.
 */
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		/* Device not registered yet: free directly, no put_device(). */
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		/* After device_register(), release via put_device(). */
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	css_sch_create_locks(css->pseudo_subchannel);

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}
|
|
|
|
|
2007-10-12 22:11:20 +08:00
|
|
|
/* Reboot notifier: disable channel measurement on every css before reboot. */
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};
|
|
|
|
|
2019-04-03 00:47:29 +08:00
|
|
|
#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
/* Global DMA pool for css-wide allocations; set up by cio_dma_pool_init(). */
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}
|
|
|
|
|
|
|
|
/*
 * Create a gen_pool backed by up to @nr_pages DMA-coherent pages allocated
 * from @dma_dev. If a page allocation fails part way, the pool is returned
 * with fewer pages; NULL is returned only if the pool itself cannot be
 * created.
 */
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	/* Minimum allocation order 3: 8-byte allocation granularity. */
	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}
|
|
|
|
|
|
|
|
/* gen_pool_for_each_chunk() callback: free one chunk's coherent memory.
 * @data is the struct device * the chunk was allocated from. */
static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}
|
|
|
|
|
|
|
|
/* Free all backing memory of @gp_dma (allocated from @dma_dev) and destroy
 * the pool. NULL pool is a no-op. */
void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}
|
|
|
|
|
|
|
|
/* Set up the css-global DMA pool with one initial page. */
static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}
|
|
|
|
|
2024-03-07 20:28:08 +08:00
|
|
|
/*
 * Allocate @size bytes of zeroed DMA memory from @gp_dma, growing the pool
 * from @dma_dev on demand. If @dma_handle is non-NULL, the DMA address of
 * the allocation is stored there. Returns NULL on failure.
 */
void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			  size_t size, dma32_t *dma_handle)
{
	dma_addr_t dma_addr;
	size_t chunk_size;
	void *addr;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_dma_alloc(gp_dma, size, &dma_addr);
	while (!addr) {
		/* Pool exhausted: add a page-rounded chunk and retry. */
		chunk_size = round_up(size, PAGE_SIZE);
		addr = dma_alloc_coherent(dma_dev, chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, (unsigned long)addr, dma_addr, chunk_size, -1);
		addr = gen_pool_dma_alloc(gp_dma, size, dma_handle ? &dma_addr : NULL);
	}
	if (dma_handle)
		*dma_handle = (__force dma32_t)dma_addr;
	return addr;
}
|
|
|
|
|
|
|
|
/* Like __cio_gp_dma_zalloc(), but without returning the DMA address. */
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	return __cio_gp_dma_zalloc(gp_dma, dma_dev, size, NULL);
}
|
|
|
|
|
|
|
|
/* Return @size bytes at @cpu_addr to @gp_dma, zeroing them first so the
 * pool only ever hands out cleared memory. NULL is a no-op. */
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}
|
|
|
|
|
|
|
|
/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}
|
|
|
|
|
|
|
|
/* Return memory obtained from cio_dma_zalloc() to the css global pool. */
void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Now that the driver core is running, we can setup our channel subsystem.
|
2013-04-13 19:03:54 +08:00
|
|
|
* The struct subchannel's are created during probing.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2009-09-23 04:58:33 +08:00
|
|
|
/*
 * Initialize the css bus: chsc, the slow-path machinery, CRW handling,
 * the bus type itself and all channel subsystems. Unwinds in reverse
 * order on failure.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	/* Tear down the csses that were set up before the failure. */
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}
|
|
|
|
|
2009-09-23 04:58:33 +08:00
|
|
|
/* Undo css_bus_init(): unregister csses, the bus, CRW handling and chsc. */
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}
|
|
|
|
|
|
|
|
/* Top-level initcall: bring up the css bus, the cio workqueue and the I/O
 * subchannel layer, then kick off the initial subchannel evaluation. */
static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);
|
|
|
|
|
2009-09-23 04:58:35 +08:00
|
|
|
static int css_settle(struct device_driver *drv, void *unused)
|
|
|
|
{
|
|
|
|
struct css_driver *cssdrv = to_cssdriver(drv);
|
|
|
|
|
|
|
|
if (cssdrv->settle)
|
2010-02-27 05:37:27 +08:00
|
|
|
return cssdrv->settle();
|
2009-09-23 04:58:35 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-02-27 05:37:29 +08:00
|
|
|
/* Wait until subchannel evaluation and per-driver settling are complete.
 * Returns -EINTR if the wait was interrupted by a signal. */
int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
|
|
|
|
|
|
|
|
|
2009-09-23 04:58:33 +08:00
|
|
|
/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
|
|
|
|
|
2010-02-27 05:37:25 +08:00
|
|
|
#ifdef CONFIG_PROC_FS
/* /proc/cio_settle write handler: block until cio has fully settled. */
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}
|
|
|
|
|
2020-02-04 09:37:17 +08:00
|
|
|
/* proc_ops for /proc/cio_settle (write-only trigger file). */
static const struct proc_ops cio_settle_proc_ops = {
	.proc_open = nonseekable_open,
	.proc_write = cio_settle_write,
	.proc_lseek = no_llseek,
};
|
|
|
|
|
|
|
|
/*
 * Create the /proc/cio_settle interface (root-writable only).
 * Returns 0 on success, -ENOMEM if the proc entry cannot be created.
 */
static int __init cio_settle_init(void)
{
	if (!proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops))
		return -ENOMEM;

	return 0;
}
device_initcall(cio_settle_init);
|
|
|
|
#endif /*CONFIG_PROC_FS*/
|
|
|
|
|
2006-12-08 22:54:28 +08:00
|
|
|
int sch_is_pseudo_sch(struct subchannel *sch)
|
|
|
|
{
|
2019-09-19 21:55:17 +08:00
|
|
|
if (!sch->dev.parent)
|
|
|
|
return 0;
|
2006-12-08 22:54:28 +08:00
|
|
|
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
|
|
|
|
}
|
|
|
|
|
2008-07-14 15:59:03 +08:00
|
|
|
static int css_bus_match(struct device *dev, struct device_driver *drv)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2008-01-26 21:10:38 +08:00
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
struct css_driver *driver = to_cssdriver(drv);
|
2008-07-14 15:59:03 +08:00
|
|
|
struct css_device_id *id;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-06-13 19:08:15 +08:00
|
|
|
/* When driver_override is set, only bind to the matching driver */
|
|
|
|
if (sch->driver_override && strcmp(sch->driver_override, drv->name))
|
|
|
|
return 0;
|
|
|
|
|
2008-07-14 15:59:03 +08:00
|
|
|
for (id = driver->subchannel_type; id->match_flags; id++) {
|
|
|
|
if (sch->st == id->type)
|
|
|
|
return 1;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-01-26 21:10:40 +08:00
|
|
|
static int css_probe(struct device *dev)
|
2006-01-11 17:56:22 +08:00
|
|
|
{
|
|
|
|
struct subchannel *sch;
|
2008-01-26 21:10:40 +08:00
|
|
|
int ret;
|
2006-01-11 17:56:22 +08:00
|
|
|
|
|
|
|
sch = to_subchannel(dev);
|
2008-01-26 21:10:38 +08:00
|
|
|
sch->driver = to_cssdriver(dev->driver);
|
2008-01-26 21:10:40 +08:00
|
|
|
ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
|
|
|
|
if (ret)
|
|
|
|
sch->driver = NULL;
|
|
|
|
return ret;
|
2006-01-11 17:56:22 +08:00
|
|
|
}
|
|
|
|
|
2021-07-14 03:35:22 +08:00
|
|
|
static void css_remove(struct device *dev)
|
2006-01-11 17:56:22 +08:00
|
|
|
{
|
|
|
|
struct subchannel *sch;
|
|
|
|
|
|
|
|
sch = to_subchannel(dev);
|
2021-07-14 03:35:19 +08:00
|
|
|
if (sch->driver->remove)
|
|
|
|
sch->driver->remove(sch);
|
2008-01-26 21:10:40 +08:00
|
|
|
sch->driver = NULL;
|
2006-01-11 17:56:22 +08:00
|
|
|
}
|
|
|
|
|
2008-01-26 21:10:40 +08:00
|
|
|
static void css_shutdown(struct device *dev)
|
2006-01-11 17:56:22 +08:00
|
|
|
{
|
|
|
|
struct subchannel *sch;
|
|
|
|
|
|
|
|
sch = to_subchannel(dev);
|
2008-01-26 21:10:40 +08:00
|
|
|
if (sch->driver && sch->driver->shutdown)
|
2006-01-11 17:56:22 +08:00
|
|
|
sch->driver->shutdown(sch);
|
|
|
|
}
|
|
|
|
|
2023-01-11 19:30:17 +08:00
|
|
|
static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
|
2008-07-14 15:58:44 +08:00
|
|
|
{
|
2023-01-11 19:30:17 +08:00
|
|
|
const struct subchannel *sch = to_subchannel(dev);
|
2008-07-14 15:58:44 +08:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = add_uevent_var(env, "ST=%01X", sch->st);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2024-02-03 22:57:59 +08:00
|
|
|
/*
 * The bus type that all subchannel devices and css drivers attach to.
 * The callbacks are the css_* bus operations defined above.
 */
static const struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};
|
|
|
|
|
2008-01-26 21:10:41 +08:00
|
|
|
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 *
 * Returns 0 on success or a negative error code from driver_register().
 */
int css_driver_register(struct css_driver *cdrv)
{
	/* All css drivers attach to the single css bus. */
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
|
|
|
|
|
|
|
|
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
|