Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Merge the 'net' tree to get the recent set of netfilter bug fixes in order to assist with some merge hassles Pablo is going to have to deal with for upcoming changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c32f38619a
Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -387,6 +387,7 @@ acpi_get_table_with_size(char *signature,
 
	return (AE_NOT_FOUND);
 }
+ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
 
 acpi_status
 acpi_get_table(char *signature,
@@ -64,6 +64,7 @@
 #define I830_PTE_SYSTEM_CACHED	0x00000006
 /* GT PTE cache control fields */
 #define GEN6_PTE_UNCACHED	0x00000002
+#define HSW_PTE_UNCACHED	0x00000000
 #define GEN6_PTE_LLC		0x00000004
 #define GEN6_PTE_LLC_MLC	0x00000006
 #define GEN6_PTE_GFDT		0x00000008
@@ -1156,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags)
	return true;
 }
 
+static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
+				unsigned int flags)
+{
+	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+	u32 pte_flags;
+
+	if (type_mask == AGP_USER_MEMORY)
+		pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
+	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+		if (gfdt)
+			pte_flags |= GEN6_PTE_GFDT;
+	} else { /* set 'normal'/'cached' to LLC by default */
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+		if (gfdt)
+			pte_flags |= GEN6_PTE_GFDT;
+	}
+
+	/* gen6 has bit11-4 for physical addr bit39-32 */
+	addr |= (addr >> 28) & 0xff0;
+	writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
 static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
 {
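The last two lines of haswell_write_entry() show how gen6-era GTTs squeeze a 40-bit physical address into a 32-bit PTE: address bits 39:32 are folded into PTE bits 11:4, which is exactly what addr |= (addr >> 28) & 0xff0 computes. A standalone sketch of that packing (the sample address and flag value are made up; only the bit layout comes from the hunk above):

#include <stdint.h>
#include <stdio.h>

/* Pack a 40-bit page address the way haswell_write_entry() does:
 * bits 31:12 stay where they are, bits 39:32 are folded into PTE
 * bits 11:4, and the low bits remain free for valid/cache flags. */
static uint32_t pack_gen6_pte(uint64_t addr, uint32_t pte_flags)
{
	addr |= (addr >> 28) & 0xff0;	/* bit 32 lands on bit 4, etc. */
	return (uint32_t)addr | pte_flags;
}

int main(void)
{
	uint64_t addr = 0xab45678000ULL;	/* 40-bit, page-aligned (made up) */

	/* bits 39:32 = 0xab, so the PTE carries 0xab0 in bits 11:4 */
	printf("pte = 0x%08x\n", pack_gen6_pte(addr, 0x1 /* valid */));
	return 0;
}

Shifting right by 28 moves bit 32 onto bit 4, and the 0xff0 mask keeps just the eight relocated bits; page alignment guarantees PTE bits 11:4 were free.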
@@ -1382,6 +1406,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
	.check_flags = gen6_check_flags,
	.chipset_flush = i9xx_chipset_flush,
 };
+static const struct intel_gtt_driver haswell_gtt_driver = {
+	.gen = 6,
+	.setup = i9xx_setup,
+	.cleanup = gen6_cleanup,
+	.write_entry = haswell_write_entry,
+	.dma_mask_size = 40,
+	.check_flags = gen6_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
 static const struct intel_gtt_driver valleyview_gtt_driver = {
	.gen = 7,
	.setup = i9xx_setup,
@@ -1499,77 +1532,77 @@ static const struct intel_gtt_driver_description {
	{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
	    "ValleyView", &valleyview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
	{ 0, NULL, NULL }
 };
@@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
	p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-
-	p->crtc_hadjusted = false;
-	p->crtc_vadjusted = false;
 }
 EXPORT_SYMBOL(drm_mode_set_crtcinfo);
@@ -89,7 +89,7 @@ static const struct file_operations drm_proc_fops = {
  * Create a given set of proc files represented by an array of
  * gdm_proc_lists in the given root directory.
  */
-int drm_proc_create_files(struct drm_info_list *files, int count,
+static int drm_proc_create_files(struct drm_info_list *files, int count,
		   struct proc_dir_entry *root, struct drm_minor *minor)
 {
	struct drm_device *dev = minor->dev;
@@ -172,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
	return 0;
 }
 
-int drm_proc_remove_files(struct drm_info_list *files, int count,
+static int drm_proc_remove_files(struct drm_info_list *files, int count,
			  struct drm_minor *minor)
 {
	struct list_head *pos, *q;
@@ -2365,6 +2365,10 @@ int i915_gpu_idle(struct drm_device *dev)
 
	/* Flush everything onto the inactive list. */
	for_each_ring(ring, dev_priv, i) {
+		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+		if (ret)
+			return ret;
+
		ret = i915_ring_idle(ring);
		if (ret)
			return ret;
@@ -2372,10 +2376,6 @@ int i915_gpu_idle(struct drm_device *dev)
		/* Is the device fubar? */
		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
			return -EBUSY;
-
-		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
-		if (ret)
-			return ret;
	}
 
	return 0;
@@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
		pte_flags |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
-		pte_flags |= GEN6_PTE_UNCACHED;
+		if (IS_HASWELL(dev))
+			pte_flags |= HSW_PTE_UNCACHED;
+		else
+			pte_flags |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
@@ -115,6 +115,7 @@
 
 #define GEN6_PTE_VALID		(1 << 0)
 #define GEN6_PTE_UNCACHED	(1 << 1)
+#define HSW_PTE_UNCACHED	(0)
 #define GEN6_PTE_CACHE_LLC	(2 << 1)
 #define GEN6_PTE_CACHE_LLC_MLC	(3 << 1)
 #define GEN6_PTE_CACHE_BITS	(3 << 1)
@@ -326,6 +326,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
	return ret;
 }
 
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+				struct i2c_adapter *i2c)
+{
+	struct edid *edid;
+
+	edid = drm_get_edid(connector, i2c);
+
+	if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+		DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+		intel_gmbus_force_bit(i2c, true);
+		edid = drm_get_edid(connector, i2c);
+		intel_gmbus_force_bit(i2c, false);
+	}
+
+	return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+				struct i2c_adapter *adapter)
+{
+	struct edid *edid;
+
+	edid = intel_crt_get_edid(connector, adapter);
+	if (!edid)
+		return 0;
+
+	return intel_connector_update_modes(connector, edid);
+}
+
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
	struct intel_crt *crt = intel_attached_crt(connector);
@@ -336,7 +366,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
-	edid = drm_get_edid(connector, i2c);
+	edid = intel_crt_get_edid(connector, i2c);
 
	if (edid) {
		bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@@ -544,13 +574,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)
	struct i2c_adapter *i2c;
 
	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
-	ret = intel_ddc_get_modes(connector, i2c);
+	ret = intel_crt_ddc_get_modes(connector, i2c);
	if (ret || !IS_G4X(dev))
		return ret;
 
	/* Try to probe digital port for output in DVI-I -> VGA mode. */
	i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
-	return intel_ddc_get_modes(connector, i2c);
+	return intel_crt_ddc_get_modes(connector, i2c);
 }
 
 static int intel_crt_set_property(struct drm_connector *connector,
@@ -342,6 +342,8 @@ struct intel_fbc_work {
	int interval;
 };
 
+int intel_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 
 extern void intel_attach_force_audio_property(struct drm_connector *connector);
@@ -32,6 +32,25 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+/**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
+ */
+int intel_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid)
+{
+	int ret;
+
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	drm_edid_to_eld(connector, edid);
+	connector->display_info.raw_edid = NULL;
+	kfree(edid);
+
+	return ret;
+}
+
 /**
  * intel_ddc_get_modes - get modelist from monitor
  * @connector: DRM connector device to use
@@ -43,18 +62,12 @@ int intel_ddc_get_modes(struct drm_connector *connector,
			struct i2c_adapter *adapter)
 {
	struct edid *edid;
-	int ret = 0;
 
	edid = drm_get_edid(connector, adapter);
-	if (edid) {
-		drm_mode_connector_update_edid_property(connector, edid);
-		ret = drm_add_edid_modes(connector, edid);
-		drm_edid_to_eld(connector, edid);
-		connector->display_info.raw_edid = NULL;
-		kfree(edid);
-	}
+	if (!edid)
+		return 0;
 
-	return ret;
+	return intel_connector_update_modes(connector, edid);
 }
 
 static const struct drm_prop_enum_list force_audio_names[] = {
@@ -2441,17 +2441,10 @@ static void gen6_enable_rps(struct drm_device *dev)
		   dev_priv->max_delay << 24 |
		   dev_priv->min_delay << 16);
 
-	if (IS_HASWELL(dev)) {
-		I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-		I915_WRITE(GEN6_RP_UP_EI, 66000);
-		I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-	} else {
-		I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
-		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
-		I915_WRITE(GEN6_RP_UP_EI, 100000);
-		I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
-	}
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+	I915_WRITE(GEN6_RP_UP_EI, 66000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
@@ -1692,6 +1692,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
	edid = intel_sdvo_get_edid(connector);
	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
		has_audio = drm_detect_monitor_audio(edid);
+	kfree(edid);
 
	return has_audio;
 }
@@ -444,11 +444,28 @@ union atom_enable_ss {
 static void atombios_crtc_program_ss(struct radeon_device *rdev,
				     int enable,
				     int pll_id,
+				     int crtc_id,
				     struct radeon_atom_ss *ss)
 {
+	unsigned i;
	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
	union atom_enable_ss args;
 
+	if (!enable) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->mode_info.crtcs[i] &&
+			    rdev->mode_info.crtcs[i]->enabled &&
+			    i != crtc_id &&
+			    pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+				/* one other crtc is using this pll don't turn
+				 * off spread spectrum as it might turn off
+				 * display on active crtc
+				 */
+				return;
+			}
+		}
+	}
+
	memset(&args, 0, sizeof(args));
 
	if (ASIC_IS_DCE5(rdev)) {
@@ -1028,7 +1045,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
					  &ref_div, &post_div);
 
-	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
+	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
 
	atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
				  encoder_mode, radeon_encoder->encoder_id, mode->clock,
@@ -1051,7 +1068,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
			ss.step = step_size;
		}
 
-		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
+		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
	}
 }
 
@@ -1572,11 +1589,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
						       ASIC_INTERNAL_SS_ON_DCPLL,
						       rdev->clock.default_dispclk);
		if (ss_enabled)
-			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss);
+			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
		/* XXX: DCE5, make sure voltage, dispclk is high enough */
		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
		if (ss_enabled)
-			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss);
+			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
	}
 
 }
@@ -47,13 +47,17 @@ struct r600_cs_track {
	u32			npipes;
	/* value we track */
	u32			sq_config;
+	u32			log_nsamples;
	u32			nsamples;
	u32			cb_color_base_last[8];
	struct radeon_bo	*cb_color_bo[8];
	u64			cb_color_bo_mc[8];
-	u32			cb_color_bo_offset[8];
-	struct radeon_bo	*cb_color_frag_bo[8]; /* unused */
-	struct radeon_bo	*cb_color_tile_bo[8]; /* unused */
+	u64			cb_color_bo_offset[8];
+	struct radeon_bo	*cb_color_frag_bo[8];
+	u64			cb_color_frag_offset[8];
+	struct radeon_bo	*cb_color_tile_bo[8];
+	u64			cb_color_tile_offset[8];
+	u32			cb_color_mask[8];
	u32			cb_color_info[8];
	u32			cb_color_view[8];
	u32			cb_color_size_idx[8]; /* unused */
@@ -349,10 +353,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
	unsigned array_mode;
	u32 format;
 
-	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
-		dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
-		return -EINVAL;
-	}
	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	format = G_0280A0_FORMAT(track->cb_color_info[i]);
	if (!r600_fmt_is_valid_color(format)) {
@@ -420,7 +420,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
	}
 
	/* check offset */
-	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
+	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
+	      r600_fmt_get_blocksize(format) * track->nsamples;
	switch (array_mode) {
	default:
	case V_0280A0_ARRAY_LINEAR_GENERAL:
@@ -441,7 +442,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
			 * broken userspace.
			 */
		} else {
-			dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
+			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
				 __func__, i, array_mode,
				 track->cb_color_bo_offset[i], tmp,
				 radeon_bo_size(track->cb_color_bo[i]),
@@ -458,6 +459,51 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
		tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
			S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
		ib[track->cb_color_size_idx[i]] = tmp;
+
+	/* FMASK/CMASK */
+	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+	case V_0280A0_TILE_DISABLE:
+		break;
+	case V_0280A0_FRAG_ENABLE:
+		if (track->nsamples > 1) {
+			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
+			/* the tile size is 8x8, but the size is in units of bits.
+			 * for bytes, do just * 8. */
+			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
+
+			if (bytes + track->cb_color_frag_offset[i] >
+			    radeon_bo_size(track->cb_color_frag_bo[i])) {
+				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
+					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+					 __func__, tile_max, bytes,
+					 track->cb_color_frag_offset[i],
+					 radeon_bo_size(track->cb_color_frag_bo[i]));
+				return -EINVAL;
+			}
+		}
+		/* fall through */
+	case V_0280A0_CLEAR_ENABLE:
+	{
+		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
+		/* One block = 128x128 pixels, one 8x8 tile has 4 bits..
+		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
+		uint32_t bytes = (block_max + 1) * 128;
+
+		if (bytes + track->cb_color_tile_offset[i] >
+		    radeon_bo_size(track->cb_color_tile_bo[i])) {
+			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
+				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+				 __func__, block_max, bytes,
+				 track->cb_color_tile_offset[i],
+				 radeon_bo_size(track->cb_color_tile_bo[i]));
+			return -EINVAL;
+		}
+		break;
+	}
+	default:
+		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+		return -EINVAL;
+	}
	return 0;
 }
@@ -566,7 +612,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 
		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
-		tmp = ntiles * bpe * 64 * nviews;
+		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
				 array_mode,
@@ -1231,6 +1277,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+		track->log_nsamples = tmp;
		track->nsamples = 1 << tmp;
		track->cb_dirty = true;
		break;
@@ -1312,16 +1359,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
-			ib[idx] = track->cb_color_base_last[tmp];
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
+			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
-			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_frag_bo[tmp] = reloc->robj;
+			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
+			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
		break;
	case R_0280C0_CB_COLOR0_TILE:
@@ -1338,16 +1390,35 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
-			ib[idx] = track->cb_color_base_last[tmp];
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
+			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
-			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_tile_bo[tmp] = reloc->robj;
+			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
+			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
		break;
+	case R_028100_CB_COLOR0_MASK:
+	case R_028104_CB_COLOR1_MASK:
+	case R_028108_CB_COLOR2_MASK:
+	case R_02810C_CB_COLOR3_MASK:
+	case R_028110_CB_COLOR4_MASK:
+	case R_028114_CB_COLOR5_MASK:
+	case R_028118_CB_COLOR6_MASK:
+	case R_02811C_CB_COLOR7_MASK:
+		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
+		track->cb_color_mask[tmp] = ib[idx];
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
	case CB_COLOR0_BASE:
@@ -1492,7 +1563,7 @@ unsigned r600_mip_minify(unsigned size, unsigned level)
 }
 
 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
-			      unsigned w0, unsigned h0, unsigned d0, unsigned format,
+			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
			      unsigned block_align, unsigned height_align, unsigned base_align,
			      unsigned *l0_size, unsigned *mipmap_size)
 {
@@ -1520,7 +1591,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
 
		depth = r600_mip_minify(d0, i);
 
-		size = nbx * nby * blocksize;
+		size = nbx * nby * blocksize * nsamples;
		if (nfaces)
			size *= nfaces;
		else
@@ -1672,7 +1743,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
 
		nfaces = larray - barray + 1;
	}
-	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
+	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
@@ -92,6 +92,20 @@
 #define R_028094_CB_COLOR5_VIEW                      0x028094
 #define R_028098_CB_COLOR6_VIEW                      0x028098
 #define R_02809C_CB_COLOR7_VIEW                      0x02809C
+#define R_028100_CB_COLOR0_MASK                      0x028100
+#define   S_028100_CMASK_BLOCK_MAX(x)                (((x) & 0xFFF) << 0)
+#define   G_028100_CMASK_BLOCK_MAX(x)                (((x) >> 0) & 0xFFF)
+#define   C_028100_CMASK_BLOCK_MAX                   0xFFFFF000
+#define   S_028100_FMASK_TILE_MAX(x)                 (((x) & 0xFFFFF) << 12)
+#define   G_028100_FMASK_TILE_MAX(x)                 (((x) >> 12) & 0xFFFFF)
+#define   C_028100_FMASK_TILE_MAX                    0x00000FFF
+#define R_028104_CB_COLOR1_MASK                      0x028104
+#define R_028108_CB_COLOR2_MASK                      0x028108
+#define R_02810C_CB_COLOR3_MASK                      0x02810C
+#define R_028110_CB_COLOR4_MASK                      0x028110
+#define R_028114_CB_COLOR5_MASK                      0x028114
+#define R_028118_CB_COLOR6_MASK                      0x028118
+#define R_02811C_CB_COLOR7_MASK                      0x02811C
 #define CB_COLOR0_INFO                               0x280a0
 #	define CB_FORMAT(x)                          ((x) << 2)
 #	define CB_ARRAY_MODE(x)                      ((x) << 8)
@@ -1400,6 +1414,9 @@
 #define   S_0280A0_TILE_MODE(x)                      (((x) & 0x3) << 18)
 #define   G_0280A0_TILE_MODE(x)                      (((x) >> 18) & 0x3)
 #define   C_0280A0_TILE_MODE                         0xFFF3FFFF
+#define     V_0280A0_TILE_DISABLE                    0
+#define     V_0280A0_CLEAR_ENABLE                    1
+#define     V_0280A0_FRAG_ENABLE                     2
 #define   S_0280A0_BLEND_CLAMP(x)                    (((x) & 0x1) << 20)
 #define   G_0280A0_BLEND_CLAMP(x)                    (((x) >> 20) & 0x1)
 #define   C_0280A0_BLEND_CLAMP                       0xFFEFFFFF
@@ -142,21 +142,6 @@ struct radeon_device;
 /*
  * BIOS.
  */
-#define ATRM_BIOS_PAGE 4096
-
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_atrm_supported(struct pci_dev *pdev);
-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
-#else
-static inline bool radeon_atrm_supported(struct pci_dev *pdev)
-{
-	return false;
-}
-
-static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
-	return -EINVAL;
-}
-#endif
 bool radeon_get_bios(struct radeon_device *rdev);
 
 /*
@@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
	}
 
	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
-	if ((dev->pdev->device == 0x9802) &&
+	if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
	    (dev->pdev->subsystem_vendor == 0x1734) &&
	    (dev->pdev->subsystem_device == 0x11bd)) {
		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
@@ -30,57 +30,8 @@ static struct radeon_atpx_priv {
	/* handle for device - and atpx */
	acpi_handle dhandle;
	acpi_handle atpx_handle;
-	acpi_handle atrm_handle;
 } radeon_atpx_priv;
 
-/* retrieve the ROM in 4k blocks */
-static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
-			    int offset, int len)
-{
-	acpi_status status;
-	union acpi_object atrm_arg_elements[2], *obj;
-	struct acpi_object_list atrm_arg;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-
-	atrm_arg.count = 2;
-	atrm_arg.pointer = &atrm_arg_elements[0];
-
-	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
-	atrm_arg_elements[0].integer.value = offset;
-
-	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
-	atrm_arg_elements[1].integer.value = len;
-
-	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
-	if (ACPI_FAILURE(status)) {
-		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
-		return -ENODEV;
-	}
-
-	obj = (union acpi_object *)buffer.pointer;
-	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
-	len = obj->buffer.length;
-	kfree(buffer.pointer);
-	return len;
-}
-
-bool radeon_atrm_supported(struct pci_dev *pdev)
-{
-	/* get the discrete ROM only via ATRM */
-	if (!radeon_atpx_priv.atpx_detected)
-		return false;
-
-	if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
-		return false;
-	return true;
-}
-
-
-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
-	return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
-}
-
 static int radeon_atpx_get_version(acpi_handle handle)
 {
	acpi_status status;
@@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
 
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 {
-	acpi_handle dhandle, atpx_handle, atrm_handle;
+	acpi_handle dhandle, atpx_handle;
	acpi_status status;
 
	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
@@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
	if (ACPI_FAILURE(status))
		return false;
 
-	status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
-	if (ACPI_FAILURE(status))
-		return false;
-
	radeon_atpx_priv.dhandle = dhandle;
	radeon_atpx_priv.atpx_handle = atpx_handle;
-	radeon_atpx_priv.atrm_handle = atrm_handle;
	return true;
 }
@@ -32,6 +32,7 @@
 
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
+#include <linux/acpi.h>
 /*
  * BIOS.
  */
@@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
	return true;
 }
 
+#ifdef CONFIG_ACPI
+/* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
+/* retrieve the ROM in 4k blocks */
+#define ATRM_BIOS_PAGE 4096
+/**
+ * radeon_atrm_call - fetch a chunk of the vbios
+ *
+ * @atrm_handle: acpi ATRM handle
+ * @bios: vbios image pointer
+ * @offset: offset of vbios image data to fetch
+ * @len: length of vbios image data to fetch
+ *
+ * Executes ATRM to fetch a chunk of the discrete
+ * vbios image on PX systems (all asics).
+ * Returns the length of the buffer fetched.
+ */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+			    int offset, int len)
+{
+	acpi_status status;
+	union acpi_object atrm_arg_elements[2], *obj;
+	struct acpi_object_list atrm_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+	atrm_arg.count = 2;
+	atrm_arg.pointer = &atrm_arg_elements[0];
+
+	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[0].integer.value = offset;
+
+	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[1].integer.value = len;
+
+	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+	if (ACPI_FAILURE(status)) {
+		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+		return -ENODEV;
+	}
+
+	obj = (union acpi_object *)buffer.pointer;
+	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+	len = obj->buffer.length;
+	kfree(buffer.pointer);
+	return len;
+}
+
 static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 {
	int ret;
	int size = 256 * 1024;
	int i;
+	struct pci_dev *pdev = NULL;
+	acpi_handle dhandle, atrm_handle;
+	acpi_status status;
+	bool found = false;
 
-	if (!radeon_atrm_supported(rdev->pdev))
+	/* ATRM is for the discrete card only */
+	if (rdev->flags & RADEON_IS_IGP)
		return false;
 
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+		if (!dhandle)
+			continue;
+
+		status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+		if (!ACPI_FAILURE(status)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return false;
+
	rdev->bios = kmalloc(size, GFP_KERNEL);
@@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
	}
 
	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
-		ret = radeon_atrm_get_bios_chunk(rdev->bios,
-						 (i * ATRM_BIOS_PAGE),
-						 ATRM_BIOS_PAGE);
+		ret = radeon_atrm_call(atrm_handle,
+				       rdev->bios,
+				       (i * ATRM_BIOS_PAGE),
+				       ATRM_BIOS_PAGE);
		if (ret < ATRM_BIOS_PAGE)
			break;
	}
@@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
	}
	return true;
 }
+#else
+static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+	return false;
+}
+#endif
 
 static bool ni_read_disabled_bios(struct radeon_device *rdev)
 {
@@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
		return legacy_read_disabled_bios(rdev);
 }
 
+#ifdef CONFIG_ACPI
+static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+	bool ret = false;
+	struct acpi_table_header *hdr;
+	acpi_size tbl_size;
+	UEFI_ACPI_VFCT *vfct;
+	GOP_VBIOS_CONTENT *vbios;
+	VFCT_IMAGE_HEADER *vhdr;
+
+	if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+		return false;
+	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+		goto out_unmap;
+	}
+
+	vfct = (UEFI_ACPI_VFCT *)hdr;
+	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+		goto out_unmap;
+	}
+
+	vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
+	vhdr = &vbios->VbiosHeader;
+	DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
+		 vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
+		 vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+
+	if (vhdr->PCIBus != rdev->pdev->bus->number ||
+	    vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
+	    vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
+	    vhdr->VendorID != rdev->pdev->vendor ||
+	    vhdr->DeviceID != rdev->pdev->device) {
+		DRM_INFO("ACPI VFCT table is not for this card\n");
+		goto out_unmap;
+	};
+
+	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
+		DRM_ERROR("ACPI VFCT image truncated\n");
+		goto out_unmap;
+	}
+
+	rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+	ret = !!rdev->bios;
+
+out_unmap:
+	return ret;
+}
+#else
+static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+	return false;
+}
+#endif
+
 bool radeon_get_bios(struct radeon_device *rdev)
 {
@@ -483,6 +611,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
	uint16_t tmp;
 
	r = radeon_atrm_get_bios(rdev);
+	if (r == false)
+		r = radeon_acpi_vfct_bios(rdev);
	if (r == false)
		r = igp_read_bios_from_vram(rdev);
	if (r == false)
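radeon_acpi_vfct_bios() above is a careful consumer of firmware data: every offset and length read out of the VFCT table is validated against the mapped table size before being used, and the image is copied out with kmemdup() only once all checks pass. A minimal userspace sketch of the same validate-before-use pattern on a length-prefixed blob (the struct and names are hypothetical stand-ins, not the real VFCT layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct blob_header {		/* hypothetical stand-in for VFCT_IMAGE_HEADER */
	uint32_t image_offset;
	uint32_t image_length;
};

/* Hand out a payload pointer only after proving that the header,
 * the offset and the length all fit inside the mapped buffer. */
static const uint8_t *get_image(const uint8_t *tbl, size_t tbl_size,
				uint32_t *len_out)
{
	struct blob_header hdr;

	if (tbl_size < sizeof(hdr))
		return NULL;				/* "too short" */
	memcpy(&hdr, tbl, sizeof(hdr));
	if (hdr.image_offset > tbl_size ||
	    hdr.image_length > tbl_size - hdr.image_offset)
		return NULL;				/* "truncated" */
	*len_out = hdr.image_length;
	return tbl + hdr.image_offset;
}

int main(void)
{
	/* offset 8, length 4 (little-endian layout, for brevity) */
	uint8_t tbl[16] = { 8, 0, 0, 0, 4, 0, 0, 0, 'v', 'b', 'i', 'o' };
	uint32_t len;
	const uint8_t *img = get_image(tbl, sizeof(tbl), &len);

	if (img)
		printf("image: %u bytes at offset %td\n", len, img - tbl);
	else
		printf("rejected\n");
	return 0;
}

Checking the length against tbl_size - offset, rather than computing offset + length, also sidesteps integer overflow in the comparison.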
@@ -62,9 +62,10 @@
  *   2.18.0 - r600-eg: allow "invalid" DB formats
  *   2.19.0 - r600-eg: MSAA textures
  *   2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
+ *   2.21.0 - r600-r700: FMASK and CMASK
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	20
+#define KMS_DRIVER_MINOR	21
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -132,6 +132,7 @@ int radeon_bo_create(struct radeon_device *rdev,
	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));
 
+retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
@@ -145,8 +146,6 @@ int radeon_bo_create(struct radeon_device *rdev,
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
-
-retry:
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocation are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
@@ -706,6 +706,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
+	radeon_ring_lockup_update(ring);
	return 0;
 }
@@ -744,14 +744,6 @@ r600 0x9400
 0x00028C38 CB_CLRCMP_DST
 0x00028C3C CB_CLRCMP_MSK
 0x00028C34 CB_CLRCMP_SRC
-0x00028100 CB_COLOR0_MASK
-0x00028104 CB_COLOR1_MASK
-0x00028108 CB_COLOR2_MASK
-0x0002810C CB_COLOR3_MASK
-0x00028110 CB_COLOR4_MASK
-0x00028114 CB_COLOR5_MASK
-0x00028118 CB_COLOR6_MASK
-0x0002811C CB_COLOR7_MASK
 0x00028808 CB_COLOR_CONTROL
 0x0002842C CB_FOG_BLUE
 0x00028428 CB_FOG_GREEN
@@ -354,8 +354,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
 
 static void udl_crtc_disable(struct drm_crtc *crtc)
 {
-
-
+	udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 }
 
 static void udl_crtc_destroy(struct drm_crtc *crtc)
@@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
-	struct drm_file *file_priv = event->base.file_priv;
+	struct drm_file *file_priv;
	struct vmw_fence_obj *fence = NULL;
	struct drm_clip_rect clips;
	int ret;
 
+	if (event == NULL)
+		return -EINVAL;
+
	/* require ScreenObject support for page flipping */
	if (!dev_priv->sou_priv)
		return -ENOSYS;
 
+	file_priv = event->base.file_priv;
	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
		return -EINVAL;
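The vmwgfx change fixes a classic ordering bug: event was dereferenced in a declaration initializer before the event == NULL check could run, so the check never protected anything. The fix splits the declaration from the use. The shape of the bug, reduced to a few lines (types simplified to the bare minimum):

#include <stddef.h>

struct event { void *file_priv; };

/* Buggy shape: 'ev' is dereferenced in the initializer, so the
 * NULL check below comes too late to prevent the crash. */
static int flip_buggy(struct event *ev)
{
	void *file_priv = ev->file_priv;	/* crashes if ev == NULL */

	if (ev == NULL)
		return -1;
	(void)file_priv;
	return 0;
}

/* Fixed shape, as in the hunk above: defer the dereference. */
static int flip_fixed(struct event *ev)
{
	void *file_priv;

	if (ev == NULL)
		return -1;
	file_priv = ev->file_priv;
	(void)file_priv;
	return 0;
}

int main(void)
{
	return flip_fixed(NULL) == -1 ? 0 : 1;	/* rejects instead of crashing */
}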
@@ -405,6 +405,7 @@ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			}
		}
	}
+	ret = num;
 abort:
	sret = diolan_i2c_stop(dev);
	if (sret < 0 && ret >= 0)
@@ -350,10 +350,6 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
 
	i2c_clk = clk_get_rate(dev->clk);
 
-	/* fallback to std. mode if machine has not provided it */
-	if (dev->cfg.clk_freq == 0)
-		dev->cfg.clk_freq = 100000;
-
	/*
	 * The spec says, in case of std. mode the divider is
	 * 2 whereas it is 3 for fast and fastplus mode of
@@ -911,20 +907,32 @@ static const struct i2c_algorithm nmk_i2c_algo = {
	.functionality	= nmk_i2c_functionality
 };
 
+static struct nmk_i2c_controller u8500_i2c = {
+	/*
+	 * Slave data setup time; 250ns, 100ns, and 10ns, which
+	 * is 14, 6 and 2 respectively for a 48Mhz i2c clock.
+	 */
+	.slsu		= 0xe,
+	.tft		= 1,      /* Tx FIFO threshold */
+	.rft		= 8,      /* Rx FIFO threshold */
+	.clk_freq	= 400000, /* fast mode operation */
+	.timeout	= 200,    /* Slave response timeout(ms) */
+	.sm		= I2C_FREQ_MODE_FAST,
+};
+
 static atomic_t adapter_id = ATOMIC_INIT(0);
 
 static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
 {
	int ret = 0;
-	struct nmk_i2c_controller *pdata =
-			adev->dev.platform_data;
+	struct nmk_i2c_controller *pdata = adev->dev.platform_data;
	struct nmk_i2c_dev *dev;
	struct i2c_adapter *adap;
 
-	if (!pdata) {
-		dev_warn(&adev->dev, "no platform data\n");
-		return -ENODEV;
-	}
+	if (!pdata)
+		/* No i2c configuration found, using the default. */
+		pdata = &u8500_i2c;
+
	dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&adev->dev, "cannot allocate memory\n");
@@ -584,7 +584,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 
	r = pm_runtime_get_sync(dev->dev);
	if (IS_ERR_VALUE(r))
-		return r;
+		goto out;
 
	r = omap_i2c_wait_for_bb(dev);
	if (r < 0)
@@ -712,7 +712,7 @@ static int __devexit tegra_i2c_remove(struct platform_device *pdev)
	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int tegra_i2c_suspend(struct device *dev)
 {
	struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
	priv = netdev_priv(dev);
 
	dev->irq = res_irq->start;
-	priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
+	priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+	if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+		priv->irq_flags |= IRQF_SHARED;
	priv->reg_base = addr;
	/* The CAN clock frequency is half the oscillator clock frequency */
	priv->can.clock.freq = pdata->osc_freq / 2;
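The sja1000 fix is about mixing bit namespaces: res_irq->flags carries IORESOURCE_* bits, while request_irq() expects IRQF_* bits, so masking the resource flags with IRQF_SHARED tested a bit that means something else in that namespace. Shareability has to be translated explicitly, as the new code does. A sketch with illustrative constants (not the kernel's real values):

#include <stdio.h>

#define IRQF_TRIGGER_MASK	0x0000000f	/* irq-flag namespace */
#define IRQF_SHARED		0x00000080
#define IORESOURCE_IRQ_SHAREABLE 0x00000010	/* resource-flag namespace */

static unsigned int irq_flags_from_resource(unsigned int res_flags)
{
	/* the trigger bits happen to line up between the namespaces */
	unsigned int irq_flags = res_flags & IRQF_TRIGGER_MASK;

	/* sharing does not: translate the resource bit explicitly */
	if (res_flags & IORESOURCE_IRQ_SHAREABLE)
		irq_flags |= IRQF_SHARED;
	return irq_flags;
}

int main(void)
{
	printf("0x%x\n", irq_flags_from_resource(IORESOURCE_IRQ_SHAREABLE | 0x2));
	return 0;	/* prints 0x82: trigger bit kept, sharing translated */
}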
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
	const uint8_t *mem, *end, *dat;
	uint16_t type, len;
	uint32_t addr;
-	uint8_t *buf = NULL;
+	uint8_t *buf = NULL, *new_buf;
	int buflen = 0;
	int8_t type_end = 0;
 
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
		if (len > buflen) {
			/* align buflen */
			buflen = (len + (1024-1)) & ~(1024-1);
-			buf = krealloc(buf, buflen, GFP_KERNEL);
-			if (!buf) {
+			new_buf = krealloc(buf, buflen, GFP_KERNEL);
+			if (!new_buf) {
				ret = -ENOMEM;
				goto failed;
			}
+			buf = new_buf;
		}
		/* verify record data */
		memcpy_fromio(buf, &dpram[addr + offset], len);
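The softing change applies the standard krealloc idiom: writing buf = krealloc(buf, ...) directly loses the only pointer to the old buffer when the allocation fails and returns NULL, leaking it. Userspace realloc() has the identical pitfall; a minimal sketch:

#include <stdio.h>
#include <stdlib.h>

/* Grow '*buf' to 'new_len' bytes without leaking it on failure. */
static int grow(char **buf, size_t new_len)
{
	char *new_buf = realloc(*buf, new_len);	/* old block survives failure */

	if (!new_buf)
		return -1;	/* *buf is still valid and owned by the caller */
	*buf = new_buf;
	return 0;
}

int main(void)
{
	char *buf = NULL;	/* realloc(NULL, n) behaves like malloc(n) */

	if (grow(&buf, 1024) == 0)
		printf("grew to 1024 bytes\n");
	free(buf);
	return 0;
}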
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
			continue;		\
		else
 
-#define for_each_napi_rx_queue(bp, var) \
-	for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
-
 /* Skip OOO FP */
 #define for_each_tx_queue(bp, var) \
	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);
 
+	/* Add all NAPI objects */
+	bnx2x_add_all_napi(bp);
	bnx2x_napi_enable(bp);
 
	/* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 1);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
	/* Release IRQs */
	bnx2x_free_irq(bp);
@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
	bp->num_napi_queues = bp->num_queues;
 
	/* Add NAPI objects */
-	for_each_napi_rx_queue(bp, i)
+	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
	int i;
 
-	for_each_napi_rx_queue(bp, i)
+	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
  */
 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
 {
-	bnx2x_del_all_napi(bp);
	bnx2x_disable_msi(bp);
	BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
	bnx2x_set_int_mode(bp);
-	bnx2x_add_all_napi(bp);
 }
 
 /**
@@ -8427,6 +8427,8 @@ unload_error:
 
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 1);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
	/* Release IRQs */
	bnx2x_free_irq(bp);
@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static void poll_bnx2x(struct net_device *dev)
 {
	struct bnx2x *bp = netdev_priv(dev);
+	int i;
 
-	disable_irq(bp->pdev->irq);
-	bnx2x_interrupt(bp->pdev->irq, dev);
-	enable_irq(bp->pdev->irq);
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+	}
 }
 #endif
 
@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
	 */
	bnx2x_set_int_mode(bp);
 
-	/* Add all NAPI objects */
-	bnx2x_add_all_napi(bp);
-
	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 
	unregister_netdev(dev);
 
-	/* Delete all NAPI objects */
-	bnx2x_del_all_napi(bp);
-
	/* Power on: we can't let PCI layer write to us while we are in D3 */
	bnx2x_set_power_state(bp, PCI_D0);
 
@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
	bnx2x_tx_disable(bp);
 
	bnx2x_netif_stop(bp, 0);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
	del_timer_sync(&bp->timer);
 
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-	spin_lock_bh(&adapter->mcc_cq_lock);
+	spin_lock(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 
-	spin_unlock_bh(&adapter->mcc_cq_lock);
+	spin_unlock(&adapter->mcc_cq_lock);
	return status;
 }
 
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
		if (be_error(adapter))
			return -EIO;
 
+		local_bh_disable();
		status = be_process_mcc(adapter);
+		local_bh_enable();
 
		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
@@ -3765,7 +3765,9 @@ static void be_worker(struct work_struct *work)
	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
+		local_bh_disable();
		be_process_mcc(adapter);
+		local_bh_enable();
		goto reschedule;
	}
 
@@ -1040,7 +1040,7 @@ static int gfar_probe(struct platform_device *ofdev)
 
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+		dev->features |= NETIF_F_HW_VLAN_RX;
	}
 
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
@@ -310,6 +310,7 @@ struct e1000_adapter {
	 */
	struct e1000_ring *tx_ring /* One per active queue */
						____cacheline_aligned_in_smp;
+	u32 tx_fifo_limit;
 
	struct napi_struct napi;
 
@@ -3516,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
		break;
	}
 
+	/*
+	 * Alignment of Tx data is on an arbitrary byte boundary with the
+	 * maximum size per Tx descriptor limited only to the transmit
+	 * allocation of the packet buffer minus 96 bytes with an upper
+	 * limit of 24KB due to receive synchronization limitations.
+	 */
+	adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+				       24 << 10);
+
	/*
	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
	 * fit in receive buffer.
@@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
	return 1;
 }
 
-#define E1000_MAX_PER_TXD	8192
-#define E1000_MAX_TXD_PWR	12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
			unsigned int first, unsigned int max_per_txd,
-			unsigned int nr_frags, unsigned int mss)
+			unsigned int nr_frags)
 {
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct pci_dev *pdev = adapter->pdev;
@@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+	BUG_ON(size > tx_ring->count);
+
	if (e1000_desc_unused(tx_ring) >= size)
		return 0;
	return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
 {
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int first;
-	unsigned int max_per_txd = E1000_MAX_PER_TXD;
-	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
@@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
	}
 
	mss = skb_shinfo(skb)->gso_size;
-	/*
-	 * The controller does a simple calculation to
-	 * make sure there is enough room in the FIFO before
-	 * initiating the DMA for each buffer. The calc is:
-	 * 4 = ceil(buffer len/mss). To make sure we don't
-	 * overrun the FIFO, adjust the max buffer len if mss
-	 * drops.
-	 */
	if (mss) {
		u8 hdr_len;
-		max_per_txd = min(mss << 2, max_per_txd);
-		max_txd_pwr = fls(max_per_txd) - 1;
 
		/*
		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
	count++;
	count++;
 
-	count += TXD_USE_COUNT(len, max_txd_pwr);
+	count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
-		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-				       max_txd_pwr);
+		count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+				      adapter->tx_fifo_limit);
 
	if (adapter->hw.mac.tx_pkt_filtering)
		e1000_transfer_dhcp_info(adapter, skb);
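Swapping TXD_USE_COUNT() for DIV_ROUND_UP() here is more than a rename: the old macro assumed the per-descriptor limit was a power of two (X being its log2) and unconditionally added one, overcounting exact fits, while the FIFO-derived tx_fifo_limit introduced above need not be a power of two at all. A quick standalone check (the numbers are illustrative):

#include <stdio.h>

#define TXD_USE_COUNT(S, X)	(((S) >> (X)) + 1)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* power-of-two limit: 4096 bytes, X = 12 */
	printf("%d\n", TXD_USE_COUNT(4096, 12));	/* 2: overcounts an exact fit */
	printf("%d\n", DIV_ROUND_UP(4096, 4096));	/* 1 */
	/* the FIFO-derived limit need not be a power of two at all */
	printf("%d\n", DIV_ROUND_UP(9000, 5920));	/* 2 */
	return 0;
}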
@@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
		tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
	/* if count is 0 then mapping error has occurred */
-	count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+			     nr_frags);
	if (count) {
		skb_tx_timestamp(skb);
 
		netdev_sent_queue(netdev, skb->len);
		e1000_tx_queue(tx_ring, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
-		e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+		e1000_maybe_stop_tx(tx_ring,
+				    (MAX_SKB_FRAGS *
+				     DIV_ROUND_UP(PAGE_SIZE,
+						  adapter->tx_fifo_limit) + 2));
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
@@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
		adapter->hw.phy.autoneg_advertised = 0x2f;
 
	/* ring size defaults */
-	adapter->rx_ring->count = 256;
-	adapter->tx_ring->count = 256;
+	adapter->rx_ring->count = E1000_DEFAULT_RXD;
+	adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
@@ -861,8 +861,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
			&ip_entry->ip4dst, &ip_entry->pdst);
	if (rc != 0) {
		rc = efx_filter_get_ipv4_full(
-			&spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
-			&ip_entry->ip4dst, &ip_entry->pdst);
+			&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
+			&ip_entry->ip4src, &ip_entry->psrc);
		EFX_WARN_ON_PARANOID(rc);
		ip_mask->ip4src = ~0;
		ip_mask->psrc = ~0;
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
+
+#endif /* __COMMON_H__ */
@ -20,6 +20,10 @@

Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#ifndef __DESCS_H__
#define __DESCS_H__

struct dma_desc {
/* Receive descriptor */
union {
@ -166,3 +170,5 @@ enum tdes_csum_insertion {
* is not calculated */
cic_full = 3, /* IP header and pseudoheader */
};

#endif /* __DESCS_H__ */
@ -27,6 +27,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#ifndef __DESC_COM_H__
#define __DESC_COM_H__

#if defined(CONFIG_STMMAC_RING)
static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
{
@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
p->des01.tx.buffer1_size = len;
}
#endif

#endif /* __DESC_COM_H__ */
@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#ifndef __DWMAC100_H__
#define __DWMAC100_H__

#include <linux/phy.h>
#include "common.h"

@ -119,3 +122,5 @@ enum ttc_control {
#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */

extern const struct stmmac_dma_ops dwmac100_dma_ops;

#endif /* __DWMAC100_H__ */
@ -19,6 +19,8 @@

Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __DWMAC1000_H__
#define __DWMAC1000_H__

#include <linux/phy.h>
#include "common.h"
@ -229,6 +231,7 @@ enum rtc_control {
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208

/* Synopsys Core versions */
#define DWMAC_CORE_3_40 34
#define DWMAC_CORE_3_40 0x34

extern const struct stmmac_dma_ops dwmac1000_dma_ops;
#endif /* __DWMAC1000_H__ */
@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#ifndef __DWMAC_DMA_H__
#define __DWMAC_DMA_H__

/* DMA CRS Control and Status Register Mapping */
#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
extern int dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x);

#endif /* __DWMAC_DMA_H__ */
@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#ifndef __MMC_H__
#define __MMC_H__

/* MMC control register */
/* When set, all counters are reset */
#define MMC_CNTRL_COUNTER_RESET 0x1
@ -129,3 +132,5 @@ struct stmmac_counters {
extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);

#endif /* __MMC_H__ */
@ -33,7 +33,7 @@
#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
#define MMC_DEFAUL_MASK 0xffffffff
#define MMC_DEFAULT_MASK 0xffffffff

/* MMC TX counter registers */

@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
/* To mask all interrupts. */
void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
{
writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
}

/* This reads the MAC core counters (if actually supported).
@ -20,6 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#ifndef __STMMAC_H__
#define __STMMAC_H__

#define STMMAC_RESOURCE_NAME "stmmaceth"
#define DRV_MODULE_VERSION "March_2012"

@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
{
}
#endif /* CONFIG_STMMAC_PCI */

#endif /* __STMMAC_H__ */
@ -21,6 +21,8 @@

Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#ifndef __STMMAC_TIMER_H__
#define __STMMAC_TIMER_H__

struct stmmac_timer {
void (*timer_start) (unsigned int new_freq);
@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
extern int tmu2_register_user(void *fnt, void *data);
extern void tmu2_unregister_user(void);
#endif

#endif /* __STMMAC_TIMER_H__ */
@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
case AR5K_EEPROM_MODE_11A:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
rate_pcal_info = ee->ee_rate_tpwr_a;
ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN;
ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
break;
case AR5K_EEPROM_MODE_11B:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
@ -182,6 +182,7 @@
#define AR5K_EEPROM_EEP_DELTA 10
#define AR5K_EEPROM_N_MODES 3
#define AR5K_EEPROM_N_5GHZ_CHAN 10
#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8
#define AR5K_EEPROM_N_2GHZ_CHAN 3
#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
@ -1237,6 +1237,9 @@ uint brcms_reset(struct brcms_info *wl)
/* dpc will not be rescheduled */
wl->resched = false;

/* inform publicly that interface is down */
wl->pub->up = false;

return 0;
}
@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
return;
}
len = ETH_ALEN;
ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
&len);
if (ret) {
IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
__LINE__);
@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
const struct fw_img *img;
size_t bufsz;

if (!iwl_is_ready_rf(priv))
return -EAGAIN;

/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
priv->dbgfs_sram_offset = 0x800000;
@ -351,7 +351,7 @@ int iwl_queue_space(const struct iwl_queue *q);
/*****************************************************
* Error handling
******************************************************/
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
int iwl_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_dump_csr(struct iwl_trans *trans);

/*****************************************************
@ -565,7 +565,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
}

iwl_dump_csr(trans);
iwl_dump_fh(trans, NULL, false);
iwl_dump_fh(trans, NULL);

iwl_op_mode_nic_error(trans->op_mode);
}
@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
#undef IWL_CMD
}

int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
int iwl_dump_fh(struct iwl_trans *trans, char **buf)
{
int i;
#ifdef CONFIG_IWLWIFI_DEBUG
int pos = 0;
size_t bufsz = 0;
#endif
static const u32 fh_tbl[] = {
FH_RSCSR_CHNL0_STTS_WPTR_REG,
FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_ERROR_REG
};
#ifdef CONFIG_IWLWIFI_DEBUG
if (display) {
bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;

#ifdef CONFIG_IWLWIFI_DEBUGFS
if (buf) {
int pos = 0;
size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;

*buf = kmalloc(bufsz, GFP_KERNEL);
if (!*buf)
return -ENOMEM;

pos += scnprintf(*buf + pos, bufsz - pos,
"FH register values:\n");
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {

for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
pos += scnprintf(*buf + pos, bufsz - pos,
" %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i]));
}

return pos;
}
#endif

IWL_ERR(trans, "FH register values:\n");
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
IWL_ERR(trans, " %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i]));
}

return 0;
}

@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
char *buf;
char *buf = NULL;
int pos = 0;
ssize_t ret = -EFAULT;

ret = pos = iwl_dump_fh(trans, &buf, true);
ret = pos = iwl_dump_fh(trans, &buf);
if (buf) {
ret = simple_read_from_buffer(user_buf,
count, ppos, buf, pos);
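
iwl_dump_fh() above builds its report with the common pos += scnprintf(buf + pos, bufsz - pos, ...) idiom. A hedged userspace sketch of why that accumulation is safe: the kernel's scnprintf() returns the number of bytes actually written, so the stand-in below clamps snprintf()'s would-be length the same way.

#include <stdio.h>

static int scnprintf_like(char *buf, size_t size, const char *fmt, int v)
{
    int n;

    if (size == 0)
        return 0;
    n = snprintf(buf, size, fmt, v);
    if (n < 0)
        return 0;
    /* snprintf reports the length it wanted; clamp to what fit */
    return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
    char buf[64];
    int pos = 0, i;

    /* pos can never run past the buffer, even once entries stop fitting */
    for (i = 0; i < 8; i++)
        pos += scnprintf_like(buf + pos, sizeof(buf) - pos, "reg%d\n", i);
    fputs(buf, stdout);
    return 0;
}
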
@ -57,8 +57,7 @@
static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
struct page *page;
unsigned offset;
int pull_to;
};

#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
struct sk_buff *skb;

while ((skb = __skb_dequeue(rxq)) != NULL) {
struct page *page = NETFRONT_SKB_CB(skb)->page;
void *vaddr = page_address(page);
unsigned offset = NETFRONT_SKB_CB(skb)->offset;
int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

memcpy(skb->data, vaddr + offset,
skb_headlen(skb));

if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
__free_page(page);
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
struct sk_buff_head errq;
struct sk_buff_head tmpq;
unsigned long flags;
unsigned int len;
int err;

spin_lock(&np->rx_lock);
@ -955,24 +947,13 @@ err:
}
}

NETFRONT_SKB_CB(skb)->page =
skb_frag_page(&skb_shinfo(skb)->frags[0]);
NETFRONT_SKB_CB(skb)->offset = rx->offset;
NETFRONT_SKB_CB(skb)->pull_to = rx->status;
if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

len = rx->status;
if (len > RX_COPY_THRESHOLD)
len = RX_COPY_THRESHOLD;
skb_put(skb, len);

if (rx->status > len) {
skb_shinfo(skb)->frags[0].page_offset =
rx->offset + len;
skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
skb->data_len = rx->status - len;
} else {
__skb_fill_page_desc(skb, 0, NULL, 0, 0);
skb_shinfo(skb)->nr_frags = 0;
}
skb_shinfo(skb)->frags[0].page_offset = rx->offset;
skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
skb->data_len = rx->status;

i = xennet_fill_frags(np, skb, &tmpq);

@ -999,7 +980,7 @@ err:
* receive throughput using the standard receive
* buffer size was cut by 25%(!!!).
*/
skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
skb->len += skb->data_len;

if (rx->flags & XEN_NETRXF_csum_blank)
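
The netfront change above shrinks the per-packet state kept in skb->cb to a single pull_to value and defers the header copy to __pskb_pull_tail(). A hedged sketch of the cb-overlay pattern itself, with stand-in types (the real sk_buff provides a 48-byte cb[] scratch area that drivers may overlay while they own the skb):

#include <stdio.h>

struct fake_skb { char cb[48]; };      /* stand-in for struct sk_buff */
struct netfront_cb { int pull_to; };   /* per-packet private state */

#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))

int main(void)
{
    struct fake_skb skb;

    /* record the decision now, act on it later in the receive path */
    NETFRONT_SKB_CB(&skb)->pull_to = 256;
    printf("pull_to = %d\n", NETFRONT_SKB_CB(&skb)->pull_to);
    return 0;
}
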
@ -1,12 +1,31 @@
menuconfig PWM
bool "PWM Support"
bool "Pulse-Width Modulation (PWM) Support"
depends on !MACH_JZ4740 && !PUV3_PWM
help
This enables PWM support through the generic PWM framework.
You only need to enable this if you also want to enable
one or more of the PWM drivers below.
Generic Pulse-Width Modulation (PWM) support.

If unsure, say N.
In Pulse-Width Modulation, a variation of the width of pulses
in a rectangular pulse signal is used as a means to alter the
average power of the signal. Applications include efficient
power delivery and voltage regulation. In computer systems,
PWMs are commonly used to control fans or the brightness of
display backlights.

This framework provides a generic interface to PWM devices
within the Linux kernel. On the driver side it provides an API
to register and unregister a PWM chip, an abstraction of a PWM
controller, that supports one or more PWM devices. Client
drivers can request PWM devices and use the generic framework
to configure as well as enable and disable them.

This generic framework replaces the legacy PWM framework which
allows only a single driver implementing the required API. Not
all legacy implementations have been ported to the framework
yet. The framework provides an API that is backward compatible
with the legacy framework so that existing client drivers
continue to work as expected.

If unsure, say no.

if PWM
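
The new help text describes the client side of the framework. A hedged kernel-style sketch of that usage: pwm_get(), pwm_config(), pwm_enable() and pwm_put() are the framework's consumer API, while the device pointer, the consumer id "my-backlight", and the surrounding driver are assumptions made up for illustration.

#include <linux/err.h>
#include <linux/pwm.h>

static int example_backlight_on(struct device *dev)
{
    struct pwm_device *pwm;
    int ret;

    pwm = pwm_get(dev, "my-backlight");  /* matched via a pwm_lookup table */
    if (IS_ERR(pwm))
        return PTR_ERR(pwm);

    /* 50% duty cycle at 1 kHz: both arguments are in nanoseconds */
    ret = pwm_config(pwm, 500000, 1000000);
    if (ret == 0)
        ret = pwm_enable(pwm);
    if (ret)
        pwm_put(pwm);
    return ret;
}
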
@ -129,8 +129,8 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
return 0;
}

static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc,
const struct of_phandle_args *args)
static struct pwm_device *
of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
{
struct pwm_device *pwm;

@ -149,7 +149,7 @@ static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc,
return pwm;
}

void of_pwmchip_add(struct pwm_chip *chip)
static void of_pwmchip_add(struct pwm_chip *chip)
{
if (!chip->dev || !chip->dev->of_node)
return;
@ -162,7 +162,7 @@ void of_pwmchip_add(struct pwm_chip *chip)
of_node_get(chip->dev->of_node);
}

void of_pwmchip_remove(struct pwm_chip *chip)
static void of_pwmchip_remove(struct pwm_chip *chip)
{
if (chip->dev && chip->dev->of_node)
of_node_put(chip->dev->of_node);
@ -527,7 +527,7 @@ void __init pwm_add_table(struct pwm_lookup *table, size_t num)
struct pwm_device *pwm_get(struct device *dev, const char *con_id)
{
struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER);
const char *dev_id = dev ? dev_name(dev): NULL;
const char *dev_id = dev ? dev_name(dev) : NULL;
struct pwm_chip *chip = NULL;
unsigned int index = 0;
unsigned int best = 0;
@ -609,7 +609,7 @@ void pwm_put(struct pwm_device *pwm)
mutex_lock(&pwm_lock);

if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) {
pr_warning("PWM device already freed\n");
pr_warn("PWM device already freed\n");
goto out;
}
@ -225,6 +225,7 @@ static int s3c_pwm_probe(struct platform_device *pdev)

/* calculate base of control bits in TCON */
s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4;
s3c->chip.dev = &pdev->dev;
s3c->chip.ops = &s3c_pwm_ops;
s3c->chip.base = -1;
s3c->chip.npwm = 1;
@ -187,10 +187,8 @@ static int tegra_pwm_probe(struct platform_device *pdev)
}

pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
if (!pwm->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap() region\n");
if (!pwm->mmio_base)
return -EADDRNOTAVAIL;
}

platform_set_drvdata(pdev, pwm);
@ -192,10 +192,8 @@ static int __devinit ecap_pwm_probe(struct platform_device *pdev)
}

pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
if (!pc->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap() registers\n");
if (!pc->mmio_base)
return -EADDRNOTAVAIL;
}

ret = pwmchip_add(&pc->chip);
if (ret < 0) {
@ -371,10 +371,8 @@ static int __devinit ehrpwm_pwm_probe(struct platform_device *pdev)
}

pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
if (!pc->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap() registers\n");
if (!pc->mmio_base)
return -EADDRNOTAVAIL;
}

ret = pwmchip_add(&pc->chip);
if (ret < 0) {
@ -41,7 +41,7 @@ static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask)
cpu_relax();

if (unlikely(!loops))
pr_warning("Waiting for status bits 0x%x to clear timed out\n",
pr_warn("Waiting for status bits 0x%x to clear timed out\n",
bitmask);
}
@ -673,8 +673,15 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
struct scsi_device *sd = pdv->pdv_sd;
int result;
struct pscsi_plugin_task *pt = cmd->priv;
unsigned char *cdb = &pt->pscsi_cdb[0];
unsigned char *cdb;
/*
* Special case for REPORT_LUNs handling where pscsi_plugin_task has
* not been allocated because TCM is handling the emulation directly.
*/
if (!pt)
return 0;

cdb = &pt->pscsi_cdb[0];
result = pt->pscsi_result;
/*
* Hack to make sure that Write-Protect modepage is set if R/O mode is
@ -1165,8 +1165,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cmd->t_task_cdb[0]);

cmd->cmd_spdtl = size;

if (cmd->data_direction == DMA_TO_DEVICE) {
pr_err("Rejecting underflow/overflow"
" WRITE data\n");
@ -2294,9 +2292,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
return 0;

out:
while (i >= 0) {
__free_page(sg_page(&cmd->t_data_sg[i]));
while (i > 0) {
i--;
__free_page(sg_page(&cmd->t_data_sg[i]));
}
kfree(cmd->t_data_sg);
cmd->t_data_sg = NULL;
@ -2323,9 +2321,12 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
if (ret < 0)
goto out_fail;
}

/* Workaround for handling zero-length control CDBs */
if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) {
/*
* If this command doesn't have any payload and we don't have to call
* into the fabric for data transfers, go ahead and complete it right
* away.
*/
if (!cmd->data_length) {
spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_COMPLETE;
cmd->transport_state |= CMD_T_ACTIVE;
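
The transport_generic_get_mem() hunk above fixes a classic partial-allocation unwind: on failure, index i names the slot that was never allocated, so the cleanup must free only entries [0, i). A small self-contained sketch of the corrected shape; all names are illustrative.

#include <stdlib.h>

static void **alloc_all(size_t n)
{
    void **arr = calloc(n, sizeof(*arr));
    size_t i;

    if (!arr)
        return NULL;
    for (i = 0; i < n; i++) {
        arr[i] = malloc(64);
        if (!arr[i])
            goto out;
    }
    return arr;
out:
    while (i > 0) {     /* only entries [0, i) were allocated */
        i--;
        free(arr[i]);
    }
    free(arr);
    return NULL;
}

int main(void)
{
    void **arr = alloc_all(4);
    size_t i;

    if (arr) {
        for (i = 0; i < 4; i++)
            free(arr[i]);
        free(arr);
    }
    return 0;
}
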
@ -131,6 +131,7 @@ extern struct list_head ft_lport_list;
extern struct mutex ft_lport_lock;
extern struct fc4_prov ft_prov;
extern struct target_fabric_configfs *ft_configfs;
extern unsigned int ft_debug_logging;

/*
* Fabric methods.
@ -48,7 +48,7 @@
/*
* Dump cmd state for debugging.
*/
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
struct fc_exch *ep;
struct fc_seq *sp;
@ -80,6 +80,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
}
}

void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
if (unlikely(ft_debug_logging))
_ft_dump_cmd(cmd, caller);
}

static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
@ -456,7 +456,9 @@ static void ft_prlo(struct fc_rport_priv *rdata)
struct ft_tport *tport;

mutex_lock(&ft_lport_lock);
tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
lockdep_is_held(&ft_lport_lock));

if (!tport) {
mutex_unlock(&ft_lport_lock);
return;
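
The ft_prlo() hunk above swaps rcu_dereference() for rcu_dereference_protected() because the update-side ft_lport_lock is already held at that point. A hedged kernel-style sketch of the annotation; the slot and lock names are stand-ins, not tcm_fc code.

#include <linux/mutex.h>
#include <linux/rcupdate.h>

static DEFINE_MUTEX(table_lock);
static void __rcu *slot;

static void *lookup_locked(void)
{
    /*
     * Caller must hold table_lock; lockdep verifies the claim, and no
     * read-side barrier is emitted since the pointer cannot change here.
     */
    return rcu_dereference_protected(slot,
                                     lockdep_is_held(&table_lock));
}
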
@ -264,6 +264,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
struct vfio_group *group = container_of(kref, struct vfio_group, kref);
@ -287,13 +288,7 @@ static void vfio_group_release(struct kref *kref)

static void vfio_group_put(struct vfio_group *group)
{
mutex_lock(&vfio.group_lock);
/*
* Release needs to unlock to unregister the notifier, so only
* unlock if not released.
*/
if (!kref_put(&group->kref, vfio_group_release))
mutex_unlock(&vfio.group_lock);
kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

/* Assume group_lock or group reference is held */
@ -401,7 +396,6 @@ static void vfio_device_release(struct kref *kref)
struct vfio_device, kref);
struct vfio_group *group = device->group;

mutex_lock(&group->device_lock);
list_del(&device->group_next);
mutex_unlock(&group->device_lock);

@ -416,8 +410,9 @@ static void vfio_device_release(struct kref *kref)
/* Device reference always implies a group reference */
static void vfio_device_put(struct vfio_device *device)
{
kref_put(&device->kref, vfio_device_release);
vfio_group_put(device->group);
struct vfio_group *group = device->group;
kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
vfio_group_put(group);
}

static void vfio_device_get(struct vfio_device *device)
@ -1116,10 +1111,10 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
*/
filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

fd_install(ret, filep);

vfio_device_get(device);
atomic_inc(&group->container_users);

fd_install(ret, filep);
break;
}
mutex_unlock(&group->device_lock);
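
The vfio hunks above replace a hand-rolled lock-then-kref_put dance with kref_put_mutex(), which takes the mutex only when the count is about to reach zero and invokes the release function with that mutex held (the release function is expected to drop it). A hedged sketch of the calling convention; the object and lock are stand-ins.

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
    struct kref kref;
    struct mutex *list_lock;   /* assumed: protects the lookup list */
};

static void obj_release(struct kref *kref)
{
    struct obj *o = container_of(kref, struct obj, kref);

    /* unlink from the lookup list here; *o->list_lock is held */
    mutex_unlock(o->list_lock);
    kfree(o);
}

static void obj_put(struct obj *o)
{
    /*
     * If this drops the last reference, the mutex is acquired and
     * obj_release() runs with it held; otherwise the lock is never taken.
     */
    kref_put_mutex(&o->kref, obj_release, o->list_lock);
}

This closes the window where a lookup could re-acquire an object whose refcount had already reached zero but which had not yet been unlinked.
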
@ -53,9 +53,14 @@
#include "vhost.h"
#include "tcm_vhost.h"

enum {
VHOST_SCSI_VQ_CTL = 0,
VHOST_SCSI_VQ_EVT = 1,
VHOST_SCSI_VQ_IO = 2,
};

struct vhost_scsi {
atomic_t vhost_ref_cnt;
struct tcm_vhost_tpg *vs_tpg;
struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */
struct vhost_dev dev;
struct vhost_virtqueue vqs[3];

@ -131,8 +136,7 @@ static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
return 1;
}

static u32 tcm_vhost_get_pr_transport_id(
struct se_portal_group *se_tpg,
static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
@ -162,8 +166,7 @@ static u32 tcm_vhost_get_pr_transport_id(
format_code, buf);
}

static u32 tcm_vhost_get_pr_transport_id_len(
struct se_portal_group *se_tpg,
static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
@ -192,8 +195,7 @@ static u32 tcm_vhost_get_pr_transport_id_len(
format_code);
}

static char *tcm_vhost_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
@ -236,8 +238,7 @@ static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
return &nacl->se_node_acl;
}

static void tcm_vhost_release_fabric_acl(
struct se_portal_group *se_tpg,
static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl)
{
struct tcm_vhost_nacl *nacl = container_of(se_nacl,
@ -297,7 +298,16 @@ static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
return 0;
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *);
static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
struct vhost_scsi *vs = tv_cmd->tvc_vhost;

spin_lock_bh(&vs->vs_completion_lock);
list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
spin_unlock_bh(&vs->vs_completion_lock);

vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
@ -381,7 +391,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vs_completion_work);
struct tcm_vhost_cmd *tv_cmd;

while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) {
while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
struct virtio_scsi_cmd_resp v_rsp;
struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
int ret;
@ -408,19 +418,6 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vhost_signal(&vs->dev, &vs->vqs[2]);
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
struct vhost_scsi *vs = tv_cmd->tvc_vhost;

pr_debug("%s tv_cmd %p\n", __func__, tv_cmd);

spin_lock_bh(&vs->vs_completion_lock);
list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
spin_unlock_bh(&vs->vs_completion_lock);

vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
struct tcm_vhost_tpg *tv_tpg,
struct virtio_scsi_cmd_req *v_req,
@ -533,8 +530,8 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
if (!sg)
return -ENOMEM;
pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__,
sg, sgl_count, IS_ERR(sg));
pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
sg, sgl_count, !sg);
sg_init_table(sg, sgl_count);

tv_cmd->tvc_sgl = sg;
@ -787,12 +784,12 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
pr_err("%s: The handling func for control queue.\n", __func__);
pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
pr_err("%s: The handling func for event queue.\n", __func__);
pr_debug("%s: The handling func for event queue.\n", __func__);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
@ -825,11 +822,6 @@ static int vhost_scsi_set_endpoint(
return -EFAULT;
}
}

if (vs->vs_tpg) {
mutex_unlock(&vs->dev.mutex);
return -EEXIST;
}
mutex_unlock(&vs->dev.mutex);

mutex_lock(&tcm_vhost_mutex);
@ -839,7 +831,7 @@ static int vhost_scsi_set_endpoint(
mutex_unlock(&tv_tpg->tv_tpg_mutex);
continue;
}
if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) {
if (tv_tpg->tv_tpg_vhost_count != 0) {
mutex_unlock(&tv_tpg->tv_tpg_mutex);
continue;
}
@ -847,14 +839,20 @@ static int vhost_scsi_set_endpoint(

if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
(tv_tpg->tport_tpgt == t->vhost_tpgt)) {
atomic_inc(&tv_tpg->tv_tpg_vhost_count);
smp_mb__after_atomic_inc();
tv_tpg->tv_tpg_vhost_count++;
mutex_unlock(&tv_tpg->tv_tpg_mutex);
mutex_unlock(&tcm_vhost_mutex);

mutex_lock(&vs->dev.mutex);
if (vs->vs_tpg) {
mutex_unlock(&vs->dev.mutex);
mutex_lock(&tv_tpg->tv_tpg_mutex);
tv_tpg->tv_tpg_vhost_count--;
mutex_unlock(&tv_tpg->tv_tpg_mutex);
return -EEXIST;
}

vs->vs_tpg = tv_tpg;
atomic_inc(&vs->vhost_ref_cnt);
smp_mb__after_atomic_inc();
mutex_unlock(&vs->dev.mutex);
return 0;
@ -871,38 +869,42 @@ static int vhost_scsi_clear_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
int index;
int index, ret;

mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
for (index = 0; index < vs->dev.nvqs; ++index) {
if (!vhost_vq_access_ok(&vs->vqs[index])) {
mutex_unlock(&vs->dev.mutex);
return -EFAULT;
ret = -EFAULT;
goto err;
}
}

if (!vs->vs_tpg) {
mutex_unlock(&vs->dev.mutex);
return -ENODEV;
ret = -ENODEV;
goto err;
}
tv_tpg = vs->vs_tpg;
tv_tport = tv_tpg->tport;

if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
(tv_tpg->tport_tpgt != t->vhost_tpgt)) {
mutex_unlock(&vs->dev.mutex);
pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
tv_tport->tport_name, tv_tpg->tport_tpgt,
t->vhost_wwpn, t->vhost_tpgt);
return -EINVAL;
ret = -EINVAL;
goto err;
}
atomic_dec(&tv_tpg->tv_tpg_vhost_count);
tv_tpg->tv_tpg_vhost_count--;
vs->vs_tpg = NULL;
mutex_unlock(&vs->dev.mutex);

return 0;

err:
mutex_unlock(&vs->dev.mutex);
return ret;
}

static int vhost_scsi_open(struct inode *inode, struct file *f)
@ -918,9 +920,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
INIT_LIST_HEAD(&s->vs_completion_list);
spin_lock_init(&s->vs_completion_lock);

s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick;
s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick;
s->vqs[2].handle_kick = vhost_scsi_handle_kick;
s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
r = vhost_dev_init(&s->dev, s->vqs, 3);
if (r < 0) {
kfree(s);
@ -949,6 +951,18 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
return 0;
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
vhost_poll_flush(&vs->dev.vqs[index].poll);
}

static void vhost_scsi_flush(struct vhost_scsi *vs)
{
vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
}

static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
if (features & ~VHOST_FEATURES)
@ -961,7 +975,8 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
return -EFAULT;
}
vs->dev.acked_features = features;
/* TODO possibly smp_wmb() and flush vqs */
smp_wmb();
vhost_scsi_flush(vs);
mutex_unlock(&vs->dev.mutex);
return 0;
}
@ -974,26 +989,25 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
u64 features;
int r;
int r, abi_version = VHOST_SCSI_ABI_VERSION;

switch (ioctl) {
case VHOST_SCSI_SET_ENDPOINT:
if (copy_from_user(&backend, argp, sizeof backend))
return -EFAULT;
if (backend.reserved != 0)
return -EOPNOTSUPP;

return vhost_scsi_set_endpoint(vs, &backend);
case VHOST_SCSI_CLEAR_ENDPOINT:
if (copy_from_user(&backend, argp, sizeof backend))
return -EFAULT;
if (backend.reserved != 0)
return -EOPNOTSUPP;

return vhost_scsi_clear_endpoint(vs, &backend);
case VHOST_SCSI_GET_ABI_VERSION:
if (copy_from_user(&backend, argp, sizeof backend))
return -EFAULT;

backend.abi_version = VHOST_SCSI_ABI_VERSION;

if (copy_to_user(argp, &backend, sizeof backend))
if (copy_to_user(argp, &abi_version, sizeof abi_version))
return -EFAULT;
return 0;
case VHOST_GET_FEATURES:
@ -1013,11 +1027,21 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
}
}

#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
.owner = THIS_MODULE,
.release = vhost_scsi_release,
.unlocked_ioctl = vhost_scsi_ioctl,
/* TODO compat ioctl? */
#ifdef CONFIG_COMPAT
.compat_ioctl = vhost_scsi_compat_ioctl,
#endif
.open = vhost_scsi_open,
.llseek = noop_llseek,
};
@ -1054,28 +1078,28 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
return "Unknown";
}

static int tcm_vhost_port_link(
struct se_portal_group *se_tpg,
static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
struct se_lun *lun)
{
struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg);

atomic_inc(&tv_tpg->tv_tpg_port_count);
smp_mb__after_atomic_inc();
mutex_lock(&tv_tpg->tv_tpg_mutex);
tv_tpg->tv_tpg_port_count++;
mutex_unlock(&tv_tpg->tv_tpg_mutex);

return 0;
}

static void tcm_vhost_port_unlink(
struct se_portal_group *se_tpg,
static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
struct se_lun *se_lun)
{
struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg);

atomic_dec(&tv_tpg->tv_tpg_port_count);
smp_mb__after_atomic_dec();
mutex_lock(&tv_tpg->tv_tpg_mutex);
tv_tpg->tv_tpg_port_count--;
mutex_unlock(&tv_tpg->tv_tpg_mutex);
}

static struct se_node_acl *tcm_vhost_make_nodeacl(
@ -1122,8 +1146,7 @@ static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
kfree(nacl);
}

static int tcm_vhost_make_nexus(
struct tcm_vhost_tpg *tv_tpg,
static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
const char *name)
{
struct se_portal_group *se_tpg;
@ -1168,7 +1191,7 @@ static int tcm_vhost_make_nexus(
return -ENOMEM;
}
/*
* Now register the TCM vHost virtual I_T Nexus as active with the
* Now register the TCM vhost virtual I_T Nexus as active with the
* call to __transport_register_session()
*/
__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
@ -1179,8 +1202,7 @@ static int tcm_vhost_make_nexus(
return 0;
}

static int tcm_vhost_drop_nexus(
struct tcm_vhost_tpg *tpg)
static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
{
struct se_session *se_sess;
struct tcm_vhost_nexus *tv_nexus;
@ -1198,27 +1220,27 @@ static int tcm_vhost_drop_nexus(
return -ENODEV;
}

if (atomic_read(&tpg->tv_tpg_port_count)) {
if (tpg->tv_tpg_port_count != 0) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to remove TCM_vHost I_T Nexus with"
pr_err("Unable to remove TCM_vhost I_T Nexus with"
" active TPG port count: %d\n",
atomic_read(&tpg->tv_tpg_port_count));
return -EPERM;
tpg->tv_tpg_port_count);
return -EBUSY;
}

if (atomic_read(&tpg->tv_tpg_vhost_count)) {
if (tpg->tv_tpg_vhost_count != 0) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to remove TCM_vHost I_T Nexus with"
pr_err("Unable to remove TCM_vhost I_T Nexus with"
" active TPG vhost count: %d\n",
atomic_read(&tpg->tv_tpg_vhost_count));
return -EPERM;
tpg->tv_tpg_vhost_count);
return -EBUSY;
}

pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated"
pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
/*
* Release the SCSI I_T Nexus to the emulated vHost Target Port
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
transport_deregister_session(tv_nexus->tvn_se_sess);
tpg->tpg_nexus = NULL;
@ -1228,8 +1250,7 @@ static int tcm_vhost_drop_nexus(
return 0;
}

static ssize_t tcm_vhost_tpg_show_nexus(
struct se_portal_group *se_tpg,
static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
char *page)
{
struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
@ -1250,8 +1271,7 @@ static ssize_t tcm_vhost_tpg_show_nexus(
return ret;
}

static ssize_t tcm_vhost_tpg_store_nexus(
struct se_portal_group *se_tpg,
static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
@ -1336,8 +1356,7 @@ static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
NULL,
};

static struct se_portal_group *tcm_vhost_make_tpg(
struct se_wwn *wwn,
static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
@ -1385,7 +1404,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
list_del(&tpg->tv_tpg_list);
mutex_unlock(&tcm_vhost_mutex);
/*
* Release the virtual I_T Nexus for this vHost TPG
* Release the virtual I_T Nexus for this vhost TPG
*/
tcm_vhost_drop_nexus(tpg);
/*
@ -1395,8 +1414,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
kfree(tpg);
}

static struct se_wwn *tcm_vhost_make_tport(
struct target_fabric_configfs *tf,
static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
@ -1592,7 +1610,10 @@ static void tcm_vhost_deregister_configfs(void)
static int __init tcm_vhost_init(void)
{
int ret = -ENOMEM;

/*
* Use our own dedicated workqueue for submitting I/O into
* target core to avoid contention within system_wq.
*/
tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
if (!tcm_vhost_workqueue)
goto out;
@ -47,9 +47,9 @@ struct tcm_vhost_tpg {
/* Vhost port target portal group tag for TCM */
u16 tport_tpgt;
/* Used to track number of TPG Port/Lun Links wrt to explicit I_T Nexus shutdown */
atomic_t tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus */
atomic_t tv_tpg_vhost_count;
int tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
int tv_tpg_vhost_count;
/* list for tcm_vhost_list */
struct list_head tv_tpg_list;
/* Used to protect access for tpg_nexus */
@ -91,11 +91,13 @@ struct tcm_vhost_tport {

struct vhost_scsi_target {
int abi_version;
unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
char vhost_wwpn[TRANSPORT_IQN_LEN];
unsigned short vhost_tpgt;
unsigned short reserved;
};

/* VHOST_SCSI specific defines */
#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)
/* Changing this breaks userspace. */
#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
@ -374,6 +374,9 @@ static void fb_flashcursor(struct work_struct *work)
int mode;
int ret;

/* FIXME: we should sort out the unbind locking instead */
/* instead we just fail to flash the cursor if we can't get
* the lock instead of blocking fbcon deinit */
ret = console_trylock();
if (ret == 0)
return;
@ -201,6 +201,7 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
int err = -ENOMEM;

dout("ceph_fs_debugfs_init\n");
BUG_ON(!fsc->client->debugfs_dir);
fsc->debugfs_congestion_kb =
debugfs_create_file("writeback_congestion_kb",
0600,
@ -992,11 +992,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
if (rinfo->head->is_dentry) {
struct inode *dir = req->r_locked_dir;

err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
session, req->r_request_started, -1,
&req->r_caps_reservation);
if (err < 0)
return err;
if (dir) {
err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
session, req->r_request_started, -1,
&req->r_caps_reservation);
if (err < 0)
return err;
} else {
WARN_ON_ONCE(1);
}
}

/*
@ -1004,6 +1008,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
* will have trouble splicing in the virtual snapdir later
*/
if (rinfo->head->is_dentry && !req->r_aborted &&
req->r_locked_dir &&
(rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
fsc->mount_options->snapdir_name,
req->r_dentry->d_name.len))) {
@ -42,7 +42,8 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
/* validate striping parameters */
if ((l->object_size & ~PAGE_MASK) ||
(l->stripe_unit & ~PAGE_MASK) ||
((unsigned)l->object_size % (unsigned)l->stripe_unit))
(l->stripe_unit != 0 &&
((unsigned)l->object_size % (unsigned)l->stripe_unit)))
return -EINVAL;

/* make sure it's a valid data pool */
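
The ceph ioctl hunk above guards the modulo with stripe_unit != 0 because x % 0 is undefined behavior in C; && short-circuits, so the division never happens for a zero stripe unit. A tiny sketch of just that clause (the real validator also checks page alignment, which is omitted here):

#include <stdio.h>

static int stripe_ok(unsigned object_size, unsigned stripe_unit)
{
    /* reject only when stripe_unit is nonzero and does not divide evenly */
    return !(stripe_unit != 0 && object_size % stripe_unit);
}

int main(void)
{
    printf("%d %d %d\n",
           stripe_ok(4096, 0),      /* 1: zero stripe_unit passes this clause */
           stripe_ok(4096, 1024),   /* 1: divides evenly */
           stripe_ok(4096, 1000));  /* 0: remainder -> invalid */
    return 0;
}
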
@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
error = PTR_ERR(file);
goto out_free_fd;
}
fd_install(fd, file);
ep->file = file;
fd_install(fd, file);
return fd;

out_free_fd:
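
The epoll hunk above moves fd_install() after the ep->file assignment. The ordering rule it restores, sketched below in hedged pseudo-kernel form: fd_install() publishes the file in the process's descriptor table, another thread can operate on the fd the instant it runs, so every field those operations touch must already be set.

struct file;                    /* opaque stand-in */
struct eventpoll { struct file *file; };

extern void fd_install(int fd, struct file *filp);   /* kernel API */

static void publish(int fd, struct file *filp, struct eventpoll *ep)
{
    ep->file = filp;        /* finish initialization first ...      */
    fd_install(fd, filp);   /* ... only then make the fd reachable */
}
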
@ -352,6 +352,7 @@ int __inode_permission(struct inode *inode, int mask)
/**
* sb_permission - Check superblock-level permissions
* @sb: Superblock of inode to check permission on
* @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Separate out file-system wide checks from inode-specific permission checks.
@ -656,6 +657,7 @@ int sysctl_protected_hardlinks __read_mostly = 1;
/**
* may_follow_link - Check symlink following for unsafe situations
* @link: The path of the symlink
* @nd: nameidata pathwalk data
*
* In the case of the sysctl_protected_symlinks sysctl being enabled,
* CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
@ -12,19 +12,19 @@ nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o

obj-$(CONFIG_NFS_V2) += nfs2.o
nfs2-y := nfs2super.o proc.o nfs2xdr.o
obj-$(CONFIG_NFS_V2) += nfsv2.o
nfsv2-y := nfs2super.o proc.o nfs2xdr.o

obj-$(CONFIG_NFS_V3) += nfs3.o
nfs3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o
nfs3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
obj-$(CONFIG_NFS_V3) += nfsv3.o
nfsv3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o
nfsv3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o

obj-$(CONFIG_NFS_V4) += nfs4.o
nfs4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
obj-$(CONFIG_NFS_V4) += nfsv4.o
nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o nfs4getroot.o nfs4client.o
nfs4-$(CONFIG_SYSCTL) += nfs4sysctl.o
nfs4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o

obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
@ -105,7 +105,7 @@ struct nfs_subversion *get_nfs_version(unsigned int version)

if (IS_ERR(nfs)) {
mutex_lock(&nfs_version_mutex);
request_module("nfs%d", version);
request_module("nfsv%d", version);
nfs = find_nfs_version(version);
mutex_unlock(&nfs_version_mutex);
}
@ -61,6 +61,12 @@ struct idmap {
struct mutex idmap_mutex;
};

struct idmap_legacy_upcalldata {
struct rpc_pipe_msg pipe_msg;
struct idmap_msg idmap_msg;
struct idmap *idmap;
};

/**
* nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
* @fattr: fully initialised struct nfs_fattr
@ -324,6 +330,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
name, namelen, type, data,
data_size, idmap);
idmap->idmap_key_cons = NULL;
mutex_unlock(&idmap->idmap_mutex);
}
return ret;
@ -380,11 +387,13 @@ static const match_table_t nfs_idmap_tokens = {
static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
size_t);
static void idmap_release_pipe(struct inode *);
static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);

static const struct rpc_pipe_ops idmap_upcall_ops = {
.upcall = rpc_pipe_generic_upcall,
.downcall = idmap_pipe_downcall,
.release_pipe = idmap_release_pipe,
.destroy_msg = idmap_pipe_destroy_msg,
};

@ -616,7 +625,8 @@ void nfs_idmap_quit(void)
nfs_idmap_quit_keyring();
}

static int nfs_idmap_prepare_message(char *desc, struct idmap_msg *im,
static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
struct idmap_msg *im,
struct rpc_pipe_msg *msg)
{
substring_t substr;
@ -659,6 +669,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
const char *op,
void *aux)
{
struct idmap_legacy_upcalldata *data;
struct rpc_pipe_msg *msg;
struct idmap_msg *im;
struct idmap *idmap = (struct idmap *)aux;
@ -666,15 +677,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
int ret = -ENOMEM;

/* msg and im are freed in idmap_pipe_destroy_msg */
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
goto out0;

im = kmalloc(sizeof(*im), GFP_KERNEL);
if (!im)
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto out1;

ret = nfs_idmap_prepare_message(key->description, im, msg);
msg = &data->pipe_msg;
im = &data->idmap_msg;
data->idmap = idmap;

ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
if (ret < 0)
goto out2;

@ -683,15 +694,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,

ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
if (ret < 0)
goto out2;
goto out3;

return ret;

out3:
idmap->idmap_key_cons = NULL;
out2:
kfree(im);
kfree(data);
out1:
kfree(msg);
out0:
complete_request_key(cons, ret);
return ret;
}
@ -749,9 +760,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
}

if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
ret = mlen;
complete_request_key(cons, -ENOKEY);
goto out_incomplete;
ret = -ENOKEY;
goto out;
}

namelen_in = strnlen(im.im_name, IDMAP_NAMESZ);
@ -768,16 +778,32 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)

out:
complete_request_key(cons, ret);
out_incomplete:
return ret;
}

static void
idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
struct idmap_legacy_upcalldata *data = container_of(msg,
struct idmap_legacy_upcalldata,
pipe_msg);
struct idmap *idmap = data->idmap;
struct key_construction *cons;
if (msg->errno) {
cons = ACCESS_ONCE(idmap->idmap_key_cons);
idmap->idmap_key_cons = NULL;
complete_request_key(cons, msg->errno);
}
/* Free memory allocated in nfs_idmap_legacy_upcall() */
kfree(msg->data);
kfree(msg);
kfree(data);
}

static void
idmap_release_pipe(struct inode *inode)
{
struct rpc_inode *rpci = RPC_I(inode);
struct idmap *idmap = (struct idmap *)rpci->private;
idmap->idmap_key_cons = NULL;
}

int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
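
The idmap rework above folds the pipe message, its payload, and a back-pointer into a single idmap_legacy_upcalldata allocation, so idmap_pipe_destroy_msg() can recover the whole object from the rpc_pipe_msg it is handed via container_of(). A self-contained userspace sketch of that pattern with stand-in types:

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct pipe_msg { int err; };      /* stand-in for struct rpc_pipe_msg */
struct upcalldata {
    struct pipe_msg pipe_msg;      /* handed to the transport layer */
    int payload;                   /* travels with the message for free */
};

static void destroy(struct pipe_msg *msg)
{
    struct upcalldata *data =
        container_of(msg, struct upcalldata, pipe_msg);
    free(data);                    /* one free() covers everything */
}

int main(void)
{
    struct upcalldata *d = malloc(sizeof(*d));

    if (!d)
        return 1;
    d->payload = 42;
    destroy(&d->pipe_msg);         /* callee only ever sees the member */
    return 0;
}
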
@ -69,7 +69,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
nfs_fattr_init(info->fattr);
status = rpc_call_sync(client, &msg, 0);
dprintk("%s: reply fsinfo: %d\n", __func__, status);
if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {
msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
msg.rpc_resp = info->fattr;
status = rpc_call_sync(client, &msg, 0);
@ -205,6 +205,9 @@ extern const struct dentry_operations nfs4_dentry_operations;
int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
unsigned, umode_t, int *);

/* super.c */
extern struct file_system_type nfs4_fs_type;

/* nfs4namespace.c */
rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
@ -74,7 +74,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
return clp;

error:
kfree(clp);
nfs_free_client(clp);
return ERR_PTR(err);
}
@ -3737,9 +3737,10 @@ out:
static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
{
struct nfs4_cached_acl *acl;
size_t buflen = sizeof(*acl) + acl_len;

if (pages && acl_len <= PAGE_SIZE) {
acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
if (pages && buflen <= PAGE_SIZE) {
acl = kmalloc(buflen, GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 1;
@ -3819,7 +3820,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
if (ret)
goto out_free;

acl_len = res.acl_len - res.acl_data_offset;
acl_len = res.acl_len;
if (acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, 0, acl_len);
else
@ -6223,11 +6224,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
dprintk("<-- %s\n", __func__);
}

static size_t max_response_pages(struct nfs_server *server)
{
u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
return nfs_page_array_len(0, max_resp_sz);
}

static void nfs4_free_pages(struct page **pages, size_t size)
{
int i;

if (!pages)
return;

for (i = 0; i < size; i++) {
if (!pages[i])
break;
__free_page(pages[i]);
}
kfree(pages);
}

static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
{
struct page **pages;
int i;

pages = kcalloc(size, sizeof(struct page *), gfp_flags);
if (!pages) {
dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
return NULL;
}

for (i = 0; i < size; i++) {
pages[i] = alloc_page(gfp_flags);
if (!pages[i]) {
dprintk("%s: failed to allocate page\n", __func__);
nfs4_free_pages(pages, size);
return NULL;
}
}

return pages;
}

static void nfs4_layoutget_release(void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
size_t max_pages = max_response_pages(server);

dprintk("--> %s\n", __func__);
nfs4_free_pages(lgp->args.layout.pages, max_pages);
put_nfs_open_context(lgp->args.ctx);
kfree(calldata);
dprintk("<-- %s\n", __func__);
@ -6239,9 +6287,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
|
||||
.rpc_release = nfs4_layoutget_release,
|
||||
};
|
||||
|
||||
int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
|
||||
void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
|
||||
{
|
||||
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
|
||||
size_t max_pages = max_response_pages(server);
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
|
||||
@ -6259,12 +6308,19 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)

        dprintk("--> %s\n", __func__);

        lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
        if (!lgp->args.layout.pages) {
                nfs4_layoutget_release(lgp);
                return;
        }
        lgp->args.layout.pglen = max_pages * PAGE_SIZE;

        lgp->res.layoutp = &lgp->args.layout;
        lgp->res.seq_res.sr_slot = NULL;
        nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
                return;
        status = nfs4_wait_for_completion_rpc_task(task);
        if (status == 0)
                status = task->tk_status;
@ -6272,7 +6328,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
        status = pnfs_layout_process(lgp);
        rpc_put_task(task);
        dprintk("<-- %s status=%d\n", __func__, status);
        return status;
        return;
}

static void
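With the conversion to a void, fire-and-forget call, cleanup of the page array and open context moves into the rpc_release callback (nfs4_layoutget_release) — which is also why the allocation-failure branch above invokes that same release function rather than freeing fields piecemeal. A toy sketch of single-owner release semantics, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the rpc_call_ops release hook: once the
 * task is handed off, the release callback -- not the caller -- owns
 * every resource attached to the request. */
struct request {
        void *pages;
        void (*release)(struct request *);
};

static void request_release(struct request *req)
{
        free(req->pages);       /* mirrors nfs4_free_pages() */
        free(req);              /* mirrors kfree(calldata) */
}

static void run_task(struct request *req)
{
        /* ... asynchronous work would happen here ... */
        req->release(req);      /* always runs, success or failure */
}

static void submit(size_t npages)
{
        struct request *req = calloc(1, sizeof(*req));

        if (!req)
                return;
        req->release = request_release;
        req->pages = calloc(npages, 4096);
        if (!req->pages) {
                /* on setup failure, take the same release path the
                 * task would have used: exactly one owner, one free */
                req->release(req);
                return;
        }
        run_task(req);          /* fire and forget: no status returned */
}

int main(void)
{
        submit(8);
        puts("submitted");
        return 0;
}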
@ -6304,12 +6360,8 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
                return;
        }
        spin_lock(&lo->plh_inode->i_lock);
        if (task->tk_status == 0) {
                if (lrp->res.lrs_present) {
                        pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
                } else
                        BUG_ON(!list_empty(&lo->plh_segs));
        }
        if (task->tk_status == 0 && lrp->res.lrs_present)
                pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
        lo->plh_block_lgets--;
        spin_unlock(&lo->plh_inode->i_lock);
        dprintk("<-- %s\n", __func__);

@ -23,14 +23,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *raw_data);

static struct file_system_type nfs4_fs_type = {
        .owner = THIS_MODULE,
        .name = "nfs4",
        .mount = nfs_fs_mount,
        .kill_sb = nfs_kill_super,
        .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};

static struct file_system_type nfs4_remote_fs_type = {
        .owner = THIS_MODULE,
        .name = "nfs4",
@ -344,14 +336,8 @@ static int __init init_nfs_v4(void)
        if (err)
                goto out1;

        err = register_filesystem(&nfs4_fs_type);
        if (err < 0)
                goto out2;

        register_nfs_version(&nfs_v4);
        return 0;
out2:
        nfs4_unregister_sysctl();
out1:
        nfs_idmap_quit();
out:
@ -361,7 +347,6 @@ out:
static void __exit exit_nfs_v4(void)
{
        unregister_nfs_version(&nfs_v4);
        unregister_filesystem(&nfs4_fs_type);
        nfs4_unregister_sysctl();
        nfs_idmap_quit();
}

@ -5045,22 +5045,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
                         struct nfs_getaclres *res)
{
        unsigned int savep;
        __be32 *bm_p;
        uint32_t attrlen,
                 bitmap[3] = {0};
        int status;
        size_t page_len = xdr->buf->page_len;
        unsigned int pg_offset;

        res->acl_len = 0;
        if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
                goto out;

        bm_p = xdr->p;
        res->acl_data_offset = be32_to_cpup(bm_p) + 2;
        res->acl_data_offset <<= 2;
        /* Check if the acl data starts beyond the allocated buffer */
        if (res->acl_data_offset > page_len)
                return -ERANGE;
        xdr_enter_page(xdr, xdr->buf->page_len);

        /* Calculate the offset of the page data */
        pg_offset = xdr->buf->head[0].iov_len;

        if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
                goto out;
@ -5074,23 +5071,20 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
                /* The bitmap (xdr len + bitmaps) and the attr xdr len words
                 * are stored with the acl data to handle the problem of
                 * variable length bitmaps.*/
                xdr->p = bm_p;
                res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset;

                /* We ignore &savep and don't do consistency checks on
                 * the attr length. Let userspace figure it out.... */
                attrlen += res->acl_data_offset;
                if (attrlen > page_len) {
                res->acl_len = attrlen;
                if (attrlen > (xdr->nwords << 2)) {
                        if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
                                /* getxattr interface called with a NULL buf */
                                res->acl_len = attrlen;
                                goto out;
                        }
                        dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
                                attrlen, page_len);
                        dprintk("NFS: acl reply: attrlen %u > page_len %u\n",
                                attrlen, xdr->nwords << 2);
                        return -EINVAL;
                }
                xdr_read_pages(xdr, attrlen);
                res->acl_len = attrlen;
        } else
                status = -EOPNOTSUPP;

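The new bound trusts the XDR stream itself: xdr->nwords counts the 32-bit words still available to decode, so nwords << 2 is the byte budget attrlen must fit within, replacing the comparison against the backing page buffer's length. A small standalone illustration of the words-to-bytes check, with invented names:

#include <stdint.h>
#include <stdio.h>

/* XDR counts in 32-bit words; the byte bound is words << 2.  A toy
 * check in the spirit of the decode_getacl() fix: validate against
 * what remains in the decode stream, not the backing buffer size. */
static int check_attr_fits(uint32_t attrlen, uint32_t nwords)
{
        uint32_t remaining = nwords << 2;       /* words -> bytes */

        if (attrlen > remaining) {
                fprintf(stderr, "attrlen %u > remaining %u\n",
                        attrlen, remaining);
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", check_attr_fits(120, 32));  /* 120 <= 128: ok */
        printf("%d\n", check_attr_fits(200, 32));  /* 200 > 128: fail */
        return 0;
}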
@ -570,17 +570,66 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
                return false;

        return pgio->pg_count + req->wb_bytes <=
                OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
                (unsigned long)pgio->pg_layout_private;
}

void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        pnfs_generic_pg_init_read(pgio, req);
        if (unlikely(pgio->pg_lseg == NULL))
                return; /* Not pNFS */

        pgio->pg_layout_private = (void *)
                OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
}

static bool aligned_on_raid_stripe(u64 offset, struct ore_layout *layout,
                                   unsigned long *stripe_end)
{
        u32 stripe_off;
        unsigned stripe_size;

        if (layout->raid_algorithm == PNFS_OSD_RAID_0)
                return true;

        stripe_size = layout->stripe_unit *
                (layout->group_width - layout->parity);

        div_u64_rem(offset, stripe_size, &stripe_off);
        if (!stripe_off)
                return true;

        *stripe_end = stripe_size - stripe_off;
        return false;
}

void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        unsigned long stripe_end = 0;

        pnfs_generic_pg_init_write(pgio, req);
        if (unlikely(pgio->pg_lseg == NULL))
                return; /* Not pNFS */

        if (req->wb_offset ||
            !aligned_on_raid_stripe(req->wb_index * PAGE_SIZE,
                                    &OBJIO_LSEG(pgio->pg_lseg)->layout,
                                    &stripe_end)) {
                pgio->pg_layout_private = (void *)stripe_end;
        } else {
                pgio->pg_layout_private = (void *)
                        OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
        }
}

static const struct nfs_pageio_ops objio_pg_read_ops = {
        .pg_init = pnfs_generic_pg_init_read,
        .pg_init = objio_init_read,
        .pg_test = objio_pg_test,
        .pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops objio_pg_write_ops = {
        .pg_init = pnfs_generic_pg_init_write,
        .pg_init = objio_init_write,
        .pg_test = objio_pg_test,
        .pg_doio = pnfs_generic_pg_writepages,
};
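aligned_on_raid_stripe() computes how far a write may extend: a full parity stripe spans stripe_unit * (group_width - parity) bytes, and an offset that lands mid-stripe may only run to that stripe's end. A userspace sketch of the same arithmetic with a worked example; the struct and names are illustrative, and div_u64_rem() is replaced by the % operator:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct layout {
        uint32_t stripe_unit;
        uint32_t group_width;
        uint32_t parity;
};

static bool aligned_on_stripe(uint64_t offset, const struct layout *l,
                              unsigned long *stripe_end)
{
        uint32_t stripe_size = l->stripe_unit * (l->group_width - l->parity);
        uint32_t stripe_off = offset % stripe_size;   /* div_u64_rem() */

        if (!stripe_off)
                return true;
        *stripe_end = stripe_size - stripe_off;       /* bytes left in stripe */
        return false;
}

int main(void)
{
        struct layout l = { .stripe_unit = 65536, .group_width = 5, .parity = 1 };
        unsigned long end = 0;

        /* stripe_size = 64 KiB * 4 = 256 KiB; offset 300 KiB sits 44 KiB
         * into a stripe, so 212 KiB remain before the next boundary. */
        if (!aligned_on_stripe(300 * 1024, &l, &end))
                printf("unaligned, %lu bytes to stripe end\n", end);
        return 0;
}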
@ -49,6 +49,7 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
        hdr->io_start = req_offset(hdr->req);
        hdr->good_bytes = desc->pg_count;
        hdr->dreq = desc->pg_dreq;
        hdr->layout_private = desc->pg_layout_private;
        hdr->release = release;
        hdr->completion_ops = desc->pg_completion_ops;
        if (hdr->completion_ops->init_hdr)

@ -268,6 +269,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
        desc->pg_error = 0;
        desc->pg_lseg = NULL;
        desc->pg_dreq = NULL;
        desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

@ -583,9 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs4_layoutget *lgp;
        struct pnfs_layout_segment *lseg = NULL;
        struct page **pages = NULL;
        int i;
        u32 max_resp_sz, max_pages;

        dprintk("--> %s\n", __func__);

@ -594,20 +591,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        if (lgp == NULL)
                return NULL;

        /* allocate pages for xdr post processing */
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
        max_pages = nfs_page_array_len(0, max_resp_sz);

        pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
        if (!pages)
                goto out_err_free;

        for (i = 0; i < max_pages; i++) {
                pages[i] = alloc_page(gfp_flags);
                if (!pages[i])
                        goto out_err_free;
        }

        lgp->args.minlength = PAGE_CACHE_SIZE;
        if (lgp->args.minlength > range->length)
                lgp->args.minlength = range->length;

@ -616,39 +599,19 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
        lgp->args.layout.pages = pages;
        lgp->args.layout.pglen = max_pages * PAGE_SIZE;
        lgp->lsegpp = &lseg;
        lgp->gfp_flags = gfp_flags;

        /* Synchronously retrieve layout information from server and
         * store in lseg.
         */
        nfs4_proc_layoutget(lgp);
        nfs4_proc_layoutget(lgp, gfp_flags);
        if (!lseg) {
                /* remember that LAYOUTGET failed and suspend trying */
                set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
        }

        /* free xdr pages */
        for (i = 0; i < max_pages; i++)
                __free_page(pages[i]);
        kfree(pages);

        return lseg;

out_err_free:
        /* free any allocated xdr pages, lgp as it's not used */
        if (pages) {
                for (i = 0; i < max_pages; i++) {
                        if (!pages[i])
                                break;
                        __free_page(pages[i]);
                }
                kfree(pages);
        }
        kfree(lgp);
        return NULL;
}

/*
@ -172,7 +172,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
                                   struct pnfs_devicelist *devlist);
extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
                                   struct pnfs_device *dev);
extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
extern void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);

/* pnfs.c */

@ -319,6 +319,34 @@ EXPORT_SYMBOL_GPL(nfs_sops);
static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
static int nfs4_validate_mount_data(void *options,
        struct nfs_parsed_mount_data *args, const char *dev_name);

struct file_system_type nfs4_fs_type = {
        .owner = THIS_MODULE,
        .name = "nfs4",
        .mount = nfs_fs_mount,
        .kill_sb = nfs_kill_super,
        .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
EXPORT_SYMBOL_GPL(nfs4_fs_type);

static int __init register_nfs4_fs(void)
{
        return register_filesystem(&nfs4_fs_type);
}

static void unregister_nfs4_fs(void)
{
        unregister_filesystem(&nfs4_fs_type);
}
#else
static int __init register_nfs4_fs(void)
{
        return 0;
}

static void unregister_nfs4_fs(void)
{
}
#endif

static struct shrinker acl_shrinker = {
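The #else branch supplies empty stubs so callers like register_nfs_fs() need no conditional compilation of their own. The generic shape of that pattern, as a self-contained sketch where FEATURE_X is a made-up symbol:

#include <stdio.h>

/* When FEATURE_X is compiled out, the register/unregister helpers
 * collapse to no-ops, so the generic init path below never needs an
 * #ifdef at the call site. */
#ifdef FEATURE_X
static int register_feature_x(void)
{
        puts("feature x registered");
        return 0;
}

static void unregister_feature_x(void)
{
        puts("feature x unregistered");
}
#else
static int register_feature_x(void)
{
        return 0;       /* nothing to do, report success */
}

static void unregister_feature_x(void)
{
}
#endif

int main(void)
{
        if (register_feature_x() < 0)
                return 1;
        unregister_feature_x();
        return 0;
}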
@ -337,12 +365,18 @@ int __init register_nfs_fs(void)
        if (ret < 0)
                goto error_0;

        ret = nfs_register_sysctl();
        ret = register_nfs4_fs();
        if (ret < 0)
                goto error_1;

        ret = nfs_register_sysctl();
        if (ret < 0)
                goto error_2;
        register_shrinker(&acl_shrinker);
        return 0;

error_2:
        unregister_nfs4_fs();
error_1:
        unregister_filesystem(&nfs_fs_type);
error_0:

@ -356,6 +390,7 @@ void __exit unregister_nfs_fs(void)
{
        unregister_shrinker(&acl_shrinker);
        nfs_unregister_sysctl();
        unregister_nfs4_fs();
        unregister_filesystem(&nfs_fs_type);
}

@ -2645,4 +2680,6 @@ MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
module_param(send_implementation_id, ushort, 0644);
MODULE_PARM_DESC(send_implementation_id,
                 "Send implementation ID with NFSv4.1 exchange_id");
MODULE_ALIAS("nfs4");

#endif /* CONFIG_NFS_V4 */

@ -1814,19 +1814,19 @@ int __init nfs_init_writepagecache(void)
        nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
                                                     nfs_wdata_cachep);
        if (nfs_wdata_mempool == NULL)
                return -ENOMEM;
                goto out_destroy_write_cache;

        nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
                                             sizeof(struct nfs_commit_data),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_cdata_cachep == NULL)
                return -ENOMEM;
                goto out_destroy_write_mempool;

        nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
                                                      nfs_wdata_cachep);
        if (nfs_commit_mempool == NULL)
                return -ENOMEM;
                goto out_destroy_commit_cache;

        /*
         * NFS congestion size, scale with available memory.

@ -1849,11 +1849,20 @@ int __init nfs_init_writepagecache(void)
                nfs_congestion_kb = 256*1024;

        return 0;

out_destroy_commit_cache:
        kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
        mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
        kmem_cache_destroy(nfs_wdata_cachep);
        return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
        mempool_destroy(nfs_commit_mempool);
        kmem_cache_destroy(nfs_cdata_cachep);
        mempool_destroy(nfs_wdata_mempool);
        kmem_cache_destroy(nfs_wdata_cachep);
}

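This hunk replaces three bare `return -ENOMEM;` statements with the kernel's usual goto-unwind ladder: each failure jumps to a label that tears down exactly what was built so far, in reverse order of construction. A compact userspace rendition, where the allocation targets are stand-ins:

#include <stdio.h>
#include <stdlib.h>

static void *cache_a, *pool_a, *cache_b;

/* Each failure jumps past its own resource to labels that free only
 * the earlier, already-successful allocations. */
static int init_caches(void)
{
        cache_a = malloc(64);
        if (!cache_a)
                goto err;

        pool_a = malloc(64);
        if (!pool_a)
                goto err_free_cache_a;

        cache_b = malloc(64);
        if (!cache_b)
                goto err_free_pool_a;

        return 0;

err_free_pool_a:
        free(pool_a);
err_free_cache_a:
        free(cache_a);
err:
        return -1;
}

int main(void)
{
        printf("init_caches: %d\n", init_caches());
        free(cache_b);
        free(pool_a);
        free(cache_a);
        return 0;
}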
@ -166,8 +166,6 @@ struct drm_display_mode {
        int crtc_vsync_start;
        int crtc_vsync_end;
        int crtc_vtotal;
        int crtc_hadjusted;
        int crtc_vadjusted;

        /* Driver private mode info */
        int private_size;
