This is the 6.11.9 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmc5+YAACgkQONu9yGCS
 aT7jsA/8C1l7ia/mz3HsfvHO5cCHj/8VUfVQeSh7PjQkEVMnFIm/eXgr3ED+oDBY
 pzO7CfZ6LHbEfYAVYmFwYdLrJvZfZMHmJynNVNJMdwJkcmEPVy5db2b/xQFVovGO
 6Xh45qSkZShDyzi4TkTiq9Tp6fXn0Fbs8i0aB5OFVgVhDvL8+ZH7d1nCGv+Y8ofX
 YevtPmh04Ryf/uL/eXeNUWfniRHdL3+JuCGTz+RgRjPXx3mt5pCmPoW9biqGF+qb
 gqFJaW4kxRuZz9QI00wzHRzQejNAkj3e42LjrVE1SMoFbeb2M6jw5A8VrekISCaX
 oasoLGDJuWKB4XKYW8IFwhfJPWNtUPttXrPtuMLx34yADEHzqw3GZe/B/+RdffOc
 pMcGcjOhAMNAw2j00uzzIpMVgQFoM1qvPG2RHMxWV3QD2cbzvxdGFNv31U2OUQtk
 qnLyQo9q61xSAueWFo6bz0qsRkKZEP1+n/eRpnMEU8CUpB5Ta2g3bMkSO1olbxBU
 3MlAHpY8VRkn4EKCJ7pY8CjWdIAYEAwECXj+6GXYjPQL0Ov24ZhojgvDFbu9MsPL
 /73Z6+uZcIu7TJ6r549D9rofpOkHRWGmd7yqaIezmdiDGxMskOPdzbPRKWqmS3Vl
 Prr5g51m8wuG/6swP9nKGprrTqpTSUO8ZM3G6DDG0uLsiZUK928=
 =hwOD
 -----END PGP SIGNATURE-----

Merge v6.11.9

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2024-11-17 15:09:55 +01:00
commit ecd929caff
72 changed files with 664 additions and 164 deletions

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 11
SUBLEVEL = 8
SUBLEVEL = 9
EXTRAVERSION =
NAME = Baby Opossum Posse

View File

@ -256,7 +256,7 @@
#define CSR_ESTAT_IS_WIDTH 14
#define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT)
#define LOONGARCH_CSR_ERA 0x6 /* ERA */
#define LOONGARCH_CSR_ERA 0x6 /* Exception return address */
#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */

View File

@ -161,10 +161,11 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
if (kvm_vcpu_is_blocking(vcpu)) {
/*
* HRTIMER_MODE_PINNED is suggested since vcpu may run in
* the same physical cpu in next time
* HRTIMER_MODE_PINNED_HARD is suggested since vcpu may run in
* the same physical cpu in next time, and the timer should run
* in hardirq context even in the PREEMPT_RT case.
*/
hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
}
}

View File

@ -1144,7 +1144,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.vpid = 0;
vcpu->arch.flush_gpa = INVALID_GPA;
hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
vcpu->arch.handle_exit = kvm_handle_exit;
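
For context, a minimal sketch (not from this commit; struct and names are invented) of the pattern both LoongArch KVM hunks above switch to: with PREEMPT_RT, a plain HRTIMER_MODE_ABS_PINNED callback is deferred to softirq context, while the _HARD variant keeps the callback in hardirq context so the vCPU wakeup is not delayed.

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    struct demo_vcpu {
        struct hrtimer swtimer;
    };

    /* Runs in hardirq context even on PREEMPT_RT because of the _HARD mode. */
    static enum hrtimer_restart demo_swtimer_wakeup(struct hrtimer *t)
    {
        return HRTIMER_NORESTART;
    }

    static void demo_arm_timer(struct demo_vcpu *v, ktime_t expire)
    {
        hrtimer_init(&v->swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
        v->swtimer.function = demo_swtimer_wakeup;
        /* Pinned: expires on the CPU that armed it, where the vCPU likely reruns. */
        hrtimer_start(&v->swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
    }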

View File

@ -282,6 +282,7 @@ int __init opal_event_init(void)
name, NULL);
if (rc) {
pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start);
kfree(name);
continue;
}
}

View File

@ -55,7 +55,7 @@ struct imsic {
/* IMSIC SW-file */
struct imsic_mrif *swfile;
phys_addr_t swfile_pa;
spinlock_t swfile_extirq_lock;
raw_spinlock_t swfile_extirq_lock;
};
#define imsic_vs_csr_read(__c) \
@ -622,7 +622,7 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
* interruptions between reading topei and updating pending status.
*/
spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);
raw_spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);
if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
@ -630,7 +630,7 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
else
kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
raw_spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
}
static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
@ -1051,7 +1051,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
}
imsic->swfile = page_to_virt(swfile_page);
imsic->swfile_pa = page_to_phys(swfile_page);
spin_lock_init(&imsic->swfile_extirq_lock);
raw_spin_lock_init(&imsic->swfile_extirq_lock);
/* Setup IO device */
kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
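
As a hedged illustration of why the lock-type change above matters (names invented, not the driver code): on PREEMPT_RT a spinlock_t becomes a sleeping lock, which is not allowed in the interrupt-delivery path this code runs in, whereas raw_spinlock_t always spins.

    #include <linux/spinlock.h>

    static raw_spinlock_t demo_extirq_lock;

    static void demo_init(void)
    {
        raw_spin_lock_init(&demo_extirq_lock);
    }

    static void demo_update(void)
    {
        unsigned long flags;

        /* The critical section must not sleep; raw_spin_* remains a true
         * spinning lock even with CONFIG_PREEMPT_RT. */
        raw_spin_lock_irqsave(&demo_extirq_lock, flags);
        /* ... update interrupt-pending state read by other contexts ... */
        raw_spin_unlock_irqrestore(&demo_extirq_lock, flags);
    }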

View File

@ -550,7 +550,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
static inline bool elv_support_iosched(struct request_queue *q)
{
if (!queue_is_mq(q) ||
(q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
(q->tag_set->flags & BLK_MQ_F_NO_SCHED))
return false;
return true;
}
@ -561,7 +561,7 @@ static inline bool elv_support_iosched(struct request_queue *q)
*/
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
return NULL;
if (q->nr_hw_queues != 1 &&

View File

@ -396,7 +396,7 @@ found:
q->cra_flags |= CRYPTO_ALG_DEAD;
alg = test->adult;
if (list_empty(&alg->cra_list))
if (crypto_is_dead(alg))
goto complete;
if (err == -ECANCELED)

View File

@ -947,7 +947,7 @@ struct ahash_alg mv_md5_alg = {
.base = {
.cra_name = "md5",
.cra_driver_name = "mv-md5",
.cra_priority = 300,
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@ -1018,7 +1018,7 @@ struct ahash_alg mv_sha1_alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name = "mv-sha1",
.cra_priority = 300,
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@ -1092,7 +1092,7 @@ struct ahash_alg mv_sha256_alg = {
.base = {
.cra_name = "sha256",
.cra_driver_name = "mv-sha256",
.cra_priority = 300,
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@ -1302,7 +1302,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "mv-hmac-md5",
.cra_priority = 300,
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@ -1373,7 +1373,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "mv-hmac-sha1",
.cra_priority = 300,
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@ -1444,7 +1444,7 @@ struct ahash_alg mv_ahmac_sha256_alg = {
.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "mv-hmac-sha256",
.cra_priority = 300,
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,

View File

@ -1170,7 +1170,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
size >>= 1;
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
}
mutex_unlock(&p->mutex);
@ -1241,7 +1241,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
atomic64_sub(size, &pdd->vram_usage);
err_unlock:
err_pdd:
@ -2346,7 +2346,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
bo_bucket->restored_offset = offset;
/* Update the VRAM usage count */
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
atomic64_add(bo_bucket->size, &pdd->vram_usage);
}
return 0;
}

View File

@ -766,7 +766,7 @@ struct kfd_process_device {
enum kfd_pdd_bound bound;
/* VRAM usage */
uint64_t vram_usage;
atomic64_t vram_usage;
struct attribute attr_vram;
char vram_filename[MAX_SYSFS_FILENAME_LEN];

View File

@ -306,7 +306,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
} else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
} else if (strncmp(attr->name, "sdma_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_sdma);
@ -1599,7 +1599,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
pdd->vram_usage = 0;
atomic64_set(&pdd->vram_usage, 0);
pdd->sdma_past_activity_counter = 0;
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);
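
A short sketch of the counter idiom these amdkfd hunks adopt (illustrative struct, not the driver's): atomic64_add/sub/read give lock-free, torn-access-safe accounting, where the previous plain u64 updated through WRITE_ONCE() could lose concurrent increments.

    #include <linux/atomic.h>
    #include <linux/types.h>

    struct demo_pdd {
        atomic64_t vram_usage;
    };

    static void demo_alloc(struct demo_pdd *pdd, u64 size)
    {
        atomic64_add(size, &pdd->vram_usage);   /* safe against concurrent updates */
    }

    static void demo_free(struct demo_pdd *pdd, u64 size)
    {
        atomic64_sub(size, &pdd->vram_usage);
    }

    static u64 demo_usage(struct demo_pdd *pdd)
    {
        return atomic64_read(&pdd->vram_usage); /* never a torn read */
    }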

View File

@ -404,6 +404,27 @@ static void svm_range_bo_release(struct kref *kref)
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
struct kfd_process_device *pdd;
struct kfd_process *p;
struct mm_struct *mm;
mm = svm_bo->eviction_fence->mm;
/*
* The forked child process holds a reference on the svm_bo device pages, so
* the svm_bo can be released after the parent process is gone.
*/
p = kfd_lookup_process_by_mm(mm);
if (p) {
pdd = kfd_get_process_device_data(svm_bo->node, p);
if (pdd)
atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
kfd_unref_process(p);
}
mmput(mm);
}
if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
/* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
@ -531,6 +552,7 @@ int
svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear)
{
struct kfd_process_device *pdd;
struct amdgpu_bo_param bp;
struct svm_range_bo *svm_bo;
struct amdgpu_bo_user *ubo;
@ -622,6 +644,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
list_add(&prange->svm_bo_list, &svm_bo->range_list);
spin_unlock(&svm_bo->list_lock);
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
return 0;
reserve_bo_failed:

View File

@ -62,7 +62,7 @@
#define VMWGFX_DRIVER_MINOR 20
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_NUM_DISPLAY_UNITS 8
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_MIN_INITIAL_WIDTH 1280
@ -82,7 +82,7 @@
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\

View File

@ -2197,7 +2197,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
void __user *user_rects;
const void __user *user_rects;
struct drm_vmw_rect *rects;
struct drm_rect *drm_rects;
unsigned rects_size;
@ -2209,6 +2209,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
VMWGFX_MIN_INITIAL_HEIGHT};
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
} else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
return -E2BIG;
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);

View File

@ -199,9 +199,6 @@ struct vmw_kms_dirty {
s32 unit_y2;
};
#define VMWGFX_NUM_DISPLAY_UNITS 8
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
#define vmw_framebuffer_to_vfbs(x) \

View File

@ -870,7 +870,7 @@ void xe_device_l2_flush(struct xe_device *xe)
spin_lock(&gt->global_invl_lock);
xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
xe_gt_err_once(gt, "Global invalidation timeout\n");
spin_unlock(&gt->global_invl_lock);

View File

@ -115,9 +115,15 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
&value, true);
if (ret)
xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
domain->id, str_wake_sleep(wake), ERR_PTR(ret),
domain->reg_ack.addr, value);
xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
domain->id, str_wake_sleep(wake), ERR_PTR(ret),
domain->reg_ack.addr, value);
if (value == ~0) {
xe_gt_err(gt,
"Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
domain->id, str_wake_sleep(wake));
ret = -EIO;
}
return ret;
}

View File

@ -897,6 +897,24 @@ retry_same_fence:
}
}
/*
* Occasionally it is seen that the G2H worker starts running after a delay of more than
* a second even after being queued and activated by the Linux workqueue subsystem. This
* leads to G2H timeout error. The root cause of issue lies with scheduling latency of
* Lunarlake Hybrid CPU. Issue dissappears if we disable Lunarlake atom cores from BIOS
* and this is beyond xe kmd.
*
* TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
*/
if (!ret) {
flush_work(&ct->g2h_worker);
if (g2h_fence.done) {
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
g2h_fence.seqno, action[0]);
ret = 1;
}
}
/*
* Ensure we serialize with completion side to prevent UAF with fence going out of scope on
* the stack, since we have no clue if it will fire after the timeout before we can erase
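
The shape of that workaround, reduced to a hedged sketch (waitqueue and flag names invented): if the wait times out, flush the handler work item and re-check the completion flag, so a late-scheduled worker is not misreported as a lost response.

    #include <linux/wait.h>
    #include <linux/workqueue.h>

    static int demo_wait_for_reply(wait_queue_head_t *wq,
                                   struct work_struct *worker,
                                   bool *done, long timeout)
    {
        long ret = wait_event_timeout(*wq, *done, timeout);

        if (!ret) {
            /* The worker may only have been scheduled late; run it to
             * completion and look again before declaring a timeout. */
            flush_work(worker);
            if (*done)
                ret = 1;
        }
        return ret ? 0 : -ETIMEDOUT;
    }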

View File

@ -1771,8 +1771,13 @@ void xe_guc_submit_stop(struct xe_guc *guc)
mutex_lock(&guc->submission_state.lock);
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
/* Prevent redundant attempts to stop parallel queues */
if (q->guc->id != index)
continue;
guc_exec_queue_stop(guc, q);
}
mutex_unlock(&guc->submission_state.lock);
@ -1810,8 +1815,13 @@ int xe_guc_submit_start(struct xe_guc *guc)
mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped);
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
/* Prevent redundant attempts to start parallel queues */
if (q->guc->id != index)
continue;
guc_exec_queue_start(q);
}
mutex_unlock(&guc->submission_state.lock);
wake_up_all(&guc->ct.wq);
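
Sketched in isolation (declarations added; do_once_per_queue() is a hypothetical stand-in for the per-queue action): a parallel queue is registered under several consecutive xarray indices that all point at the same object, so the canonical-id check visits each queue exactly once.

    unsigned long index;
    struct xe_exec_queue *q;

    xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
        if (q->guc->id != index)
            continue;               /* secondary slot of a parallel queue */
        do_once_per_queue(q);       /* hypothetical per-queue action */
    }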

View File

@ -161,7 +161,11 @@ query_engine_cycles(struct xe_device *xe,
cpu_clock);
xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
resp.width = 36;
if (GRAPHICS_VER(xe) >= 20)
resp.width = 64;
else
resp.width = 36;
/* Only write to the output fields of user query */
if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp))

View File

@ -54,8 +54,9 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
{
struct xe_user_fence *ufence;
u64 __user *ptr = u64_to_user_ptr(addr);
u64 __maybe_unused prefetch_val;
if (!access_ok(ptr, sizeof(*ptr)))
if (get_user(prefetch_val, ptr))
return ERR_PTR(-EFAULT);
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
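
A minimal sketch of the validation change above (helper name invented): access_ok() only range-checks the address, while get_user() actually performs a read and fails with -EFAULT on an unmapped or unreadable page, catching bad addresses up front.

    #include <linux/uaccess.h>

    static int demo_validate_user_u64(u64 __user *ptr)
    {
        u64 tmp;

        /* Faults now if the page is absent, instead of later at signal time. */
        if (get_user(tmp, ptr))
            return -EFAULT;
        return 0;
    }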

View File

@ -509,6 +509,7 @@
#define I2C_DEVICE_ID_GOODIX_01E8 0x01e8
#define I2C_DEVICE_ID_GOODIX_01E9 0x01e9
#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
#define I2C_DEVICE_ID_GOODIX_0D42 0x0d42
#define USB_VENDOR_ID_GOODTOUCH 0x1aad
#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f
@ -868,6 +869,7 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
#define USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER 0xc548
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704

View File

@ -473,6 +473,7 @@ static int lenovo_input_mapping(struct hid_device *hdev,
return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field,
usage, bit, max);
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max);
default:
return 0;
@ -583,6 +584,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
if (ret)
return ret;
@ -776,6 +778,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
return lenovo_event_cptkbd(hdev, field, usage, value);
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
return lenovo_event_tp10ubkbd(hdev, field, usage, value);
default:
return 0;
@ -1056,6 +1059,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
break;
}
@ -1286,6 +1290,7 @@ static int lenovo_probe(struct hid_device *hdev,
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_probe_tp10ubkbd(hdev);
break;
default:
@ -1372,6 +1377,7 @@ static void lenovo_remove(struct hid_device *hdev)
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
lenovo_remove_tp10ubkbd(hdev);
break;
}
@ -1421,6 +1427,8 @@ static const struct hid_device_id lenovo_devices[] = {
*/
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) },
{ }
};

View File

@ -2017,6 +2017,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_ELAN, 0x3148) },
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_ELAN, 0x32ae) },
/* Elitegroup panel */
{ .driver_data = MT_CLS_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
@ -2086,6 +2090,11 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
0x347d, 0x7853) },
/* HONOR MagicBook Art 14 touchpad */
{ .driver_data = MT_CLS_VTL,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
0x35cc, 0x0104) },
/* Ilitek dual touch panel */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
@ -2128,6 +2137,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) },
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER) },
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,

View File

@ -50,6 +50,7 @@
#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(3)
#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(4)
#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND BIT(5)
#define I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME BIT(6)
/* Command opcodes */
#define I2C_HID_OPCODE_RESET 0x01
@ -140,6 +141,8 @@ static const struct i2c_hid_quirks {
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET |
I2C_HID_QUIRK_BOGUS_IRQ },
{ I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_0D42,
I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME },
{ 0, 0 }
};
@ -981,6 +984,13 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid)
return -ENXIO;
}
/* On the Goodix 27c6:0d42, wait extra time before device wakeup.
* It's not clear why, but if we send the wakeup too early, the device
* will never trigger input interrupts.
*/
if (ihid->quirks & I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME)
msleep(1500);
/* Instead of resetting device, simply powers the device on. This
* solves "incomplete reports" on Raydium devices 2386:3118 and
* 2386:4B33 and fixes various SIS touchscreens no longer sending

View File

@ -331,6 +331,8 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
msg.msg_flags &= ~MSG_MORE;
tcp_rate_check_app_limited(sk);
if (!sendpage_ok(page[i]))
msg.msg_flags &= ~MSG_SPLICE_PAGES;
bvec_set_page(&bvec, page[i], bytes, offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

View File

@ -130,7 +130,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
/*
* Disable MMU-500's not-particularly-beneficial next-page
* prefetcher for the sake of errata #841119 and #826419.
* prefetcher for the sake of at least 5 known errata.
*/
for (i = 0; i < smmu->num_context_banks; ++i) {
reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
@ -138,7 +138,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
if (reg & ARM_MMU500_ACTLR_CPRE)
dev_warn_once(smmu->dev, "Failed to disable prefetcher [errata #841119 and #826419], check ACR.CACHE_LOCK\n");
dev_warn_once(smmu->dev, "Failed to disable prefetcher for errata workarounds, check SACR.CACHE_LOCK\n");
}
return 0;

View File

@ -37,7 +37,7 @@ static struct chip_props ocelot_props = {
.reg_off_ena_clr = 0x1c,
.reg_off_ena_set = 0x20,
.reg_off_ident = 0x38,
.reg_off_trigger = 0x5c,
.reg_off_trigger = 0x4,
.n_irq = 24,
};
@ -70,7 +70,7 @@ static struct chip_props jaguar2_props = {
.reg_off_ena_clr = 0x1c,
.reg_off_ena_set = 0x20,
.reg_off_ident = 0x38,
.reg_off_trigger = 0x5c,
.reg_off_trigger = 0x4,
.n_irq = 29,
};

View File

@ -337,6 +337,7 @@ static const struct of_device_id unimac_mdio_ids[] = {
{ .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,bcm6846-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
{ .compatible = "brcm,genet-mdio-v3", },

View File

@ -1426,6 +1426,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0112, 0)}, /* Fibocom FG132 */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */

View File

@ -1302,10 +1302,9 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
}
static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
blk_status_t status)
static void nvme_keep_alive_finish(struct request *rq,
blk_status_t status, struct nvme_ctrl *ctrl)
{
struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long flags;
bool startka = false;
unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
@ -1323,13 +1322,11 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
delay = 0;
}
blk_mq_free_request(rq);
if (status) {
dev_err(ctrl->device,
"failed nvme_keep_alive_end_io error=%d\n",
status);
return RQ_END_IO_NONE;
return;
}
ctrl->ka_last_check_time = jiffies;
@ -1341,7 +1338,6 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
return RQ_END_IO_NONE;
}
static void nvme_keep_alive_work(struct work_struct *work)
@ -1350,6 +1346,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
struct nvme_ctrl, ka_work);
bool comp_seen = ctrl->comp_seen;
struct request *rq;
blk_status_t status;
ctrl->ka_last_check_time = jiffies;
@ -1372,9 +1369,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
nvme_init_request(rq, &ctrl->ka_cmd);
rq->timeout = ctrl->kato * HZ;
rq->end_io = nvme_keep_alive_end_io;
rq->end_io_data = ctrl;
blk_execute_rq_nowait(rq, false);
status = blk_execute_rq(rq, false);
nvme_keep_alive_finish(rq, status, ctrl);
blk_mq_free_request(rq);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@ -2472,8 +2469,13 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
else
ctrl->ctrl_config = NVME_CC_CSS_NVM;
if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
ctrl->ctrl_config |= NVME_CC_CRIME;
/*
* Setting CRIME results in CSTS.RDY before the media is ready. This
* makes it possible for media related commands to return the error
* NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is
* restructured to handle retries, disable CC.CRIME.
*/
ctrl->ctrl_config &= ~NVME_CC_CRIME;
ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
@ -2508,10 +2510,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
* devices are known to get this wrong. Use the larger of the
* two values.
*/
if (ctrl->ctrl_config & NVME_CC_CRIME)
ready_timeout = NVME_CRTO_CRIMT(crto);
else
ready_timeout = NVME_CRTO_CRWMT(crto);
ready_timeout = NVME_CRTO_CRWMT(crto);
if (ready_timeout < timeout)
dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
@ -3793,7 +3792,8 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu)) {
if (ns->head->ns_id == nsid) {
if (!nvme_get_ns(ns))
continue;
@ -4840,7 +4840,8 @@ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu))
blk_mark_disk_dead(ns->disk);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
@ -4852,7 +4853,8 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu))
blk_mq_unfreeze_queue(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
@ -4865,7 +4867,8 @@ int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu)) {
timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
if (timeout <= 0)
break;
@ -4881,7 +4884,8 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu))
blk_mq_freeze_queue_wait(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
@ -4894,7 +4898,8 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu))
blk_freeze_queue_start(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
@ -4942,7 +4947,8 @@ void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu))
blk_sync_queue(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
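
All of these nvme core hunks move to the same pattern; as a compact, hedged sketch (the helper is invented, ctrl/ns mirror the driver's naming): list_for_each_entry_srcu() takes an explicit lockdep expression, so the traversal documents, and lets lockdep verify, that the SRCU read lock protecting the list is actually held.

    #include <linux/srcu.h>
    #include <linux/rculist.h>

    static void demo_for_each_ns(struct nvme_ctrl *ctrl)
    {
        struct nvme_ns *ns;
        int srcu_idx;

        srcu_idx = srcu_read_lock(&ctrl->srcu);
        list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
                                 srcu_read_lock_held(&ctrl->srcu))
            ;   /* act on ns under SRCU protection */
        srcu_read_unlock(&ctrl->srcu, srcu_idx);
    }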

View File

@ -577,6 +577,20 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
return ret;
}
static void nvme_partition_scan_work(struct work_struct *work)
{
struct nvme_ns_head *head =
container_of(work, struct nvme_ns_head, partition_scan_work);
if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
&head->disk->state)))
return;
mutex_lock(&head->disk->open_mutex);
bdev_disk_changed(head->disk, false);
mutex_unlock(&head->disk->open_mutex);
}
static void nvme_requeue_work(struct work_struct *work)
{
struct nvme_ns_head *head =
@ -603,6 +617,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
bio_list_init(&head->requeue_list);
spin_lock_init(&head->requeue_lock);
INIT_WORK(&head->requeue_work, nvme_requeue_work);
INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);
/*
* Add a multipath node if the subsystems supports multiple controllers.
@ -626,6 +641,16 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
return PTR_ERR(head->disk);
head->disk->fops = &nvme_ns_head_ops;
head->disk->private_data = head;
/*
* We need to suppress the partition scan from occurring within the
* controller's scan_work context. If a path error occurs here, the IO
* will wait until a path becomes available or all paths are torn down,
* but that action also occurs within scan_work, so it would deadlock.
* Defer the partition scan to a different context that does not block
* scan_work.
*/
set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);
sprintf(head->disk->disk_name, "nvme%dn%d",
ctrl->subsys->instance, head->instance);
return 0;
@ -652,6 +677,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
return;
}
nvme_add_ns_head_cdev(head);
kblockd_schedule_work(&head->partition_scan_work);
}
mutex_lock(&head->lock);
@ -972,6 +998,12 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
kblockd_schedule_work(&head->requeue_work);
if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
nvme_cdev_del(&head->cdev, &head->cdev_device);
/*
* requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
* to allow multipath to fail all I/O.
*/
synchronize_srcu(&head->srcu);
kblockd_schedule_work(&head->requeue_work);
del_gendisk(head->disk);
}
}
@ -983,6 +1015,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
flush_work(&head->partition_scan_work);
put_disk(head->disk);
}

View File

@ -494,6 +494,7 @@ struct nvme_ns_head {
struct bio_list requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
struct work_struct partition_scan_work;
struct mutex lock;
unsigned long flags;
#define NVME_NSHEAD_DISK_LIVE 0

View File

@ -2644,10 +2644,11 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
len = nvmf_get_address(ctrl, buf, size);
if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
return len;
mutex_lock(&queue->queue_lock);
if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
goto done;
ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
if (ret > 0) {
if (len > 0)
@ -2655,7 +2656,7 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
(len) ? "," : "", &src_addr);
}
done:
mutex_unlock(&queue->queue_lock);
return len;

View File

@ -265,6 +265,13 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
return;
/*
* It's possible that some requests were added after the admin queue
* was stopped/quiesced, so restart the queue now to flush those
* requests through to completion.
*/
nvme_unquiesce_admin_queue(&ctrl->ctrl);
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
nvme_remove_admin_tag_set(&ctrl->ctrl);
}
@ -297,6 +304,12 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}
ctrl->ctrl.queue_count = 1;
/*
* It's possible that some requests were added after the io queue
* was stopped/quiesced, so restart the queue now to flush those
* requests through to completion.
*/
nvme_unquiesce_io_queues(&ctrl->ctrl);
}
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)

View File

@ -535,10 +535,6 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
break;
case nvme_admin_identify:
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_CTRL:
req->execute = nvmet_passthru_execute_cmd;
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
case NVME_ID_CNS_CS_CTRL:
switch (req->cmd->identify.csi) {
case NVME_CSI_ZNS:
@ -547,7 +543,9 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
return NVME_SC_SUCCESS;
}
return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
case NVME_ID_CNS_CTRL:
case NVME_ID_CNS_NS:
case NVME_ID_CNS_NS_DESC_LIST:
req->execute = nvmet_passthru_execute_cmd;
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;

View File

@ -46,6 +46,7 @@ config PINCTRL_INTEL_PLATFORM
of Intel PCH pins and using them as GPIOs. Currently the following
Intel SoCs / platforms require this to be functional:
- Lunar Lake
- Panther Lake
config PINCTRL_ALDERLAKE
tristate "Intel Alder Lake pinctrl and GPIO driver"

View File

@ -987,8 +987,10 @@ static int aw9523_probe(struct i2c_client *client)
lockdep_set_subclass(&awi->i2c_lock, i2c_adapter_depth(client->adapter));
pdesc = devm_kzalloc(dev, sizeof(*pdesc), GFP_KERNEL);
if (!pdesc)
return -ENOMEM;
if (!pdesc) {
ret = -ENOMEM;
goto err_disable_vregs;
}
ret = aw9523_hw_init(awi);
if (ret)

View File

@ -1865,13 +1865,12 @@ static inline void ap_scan_domains(struct ap_card *ac)
}
/* if no queue device exists, create a new one */
if (!aq) {
aq = ap_queue_create(qid, ac->ap_dev.device_type);
aq = ap_queue_create(qid, ac);
if (!aq) {
AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
__func__, ac->id, dom);
continue;
}
aq->card = ac;
aq->config = !decfg;
aq->chkstop = chkstop;
aq->se_bstate = hwinfo.bs;

View File

@ -272,7 +272,7 @@ int ap_test_config_usage_domain(unsigned int domain);
int ap_test_config_ctrl_domain(unsigned int domain);
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac);
void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_init_state(struct ap_queue *aq);

View File

@ -22,6 +22,11 @@ static void __ap_flush_queue(struct ap_queue *aq);
* some AP queue helper functions
*/
static inline bool ap_q_supported_in_se(struct ap_queue *aq)
{
return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}
static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
@ -1104,18 +1109,19 @@ static void ap_queue_device_release(struct device *dev)
kfree(aq);
}
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac)
{
struct ap_queue *aq;
aq = kzalloc(sizeof(*aq), GFP_KERNEL);
if (!aq)
return NULL;
aq->card = ac;
aq->ap_dev.device.release = ap_queue_device_release;
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
// add optional SE secure binding attributes group
if (ap_sb_available() && is_prot_virt_guest())
aq->ap_dev.device_type = ac->ap_dev.device_type;
/* in SE environment add bind/associate attributes group */
if (ap_is_se_guest() && ap_q_supported_in_se(aq))
aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
aq->qid = qid;
spin_lock_init(&aq->lock);
@ -1196,10 +1202,16 @@ bool ap_queue_usable(struct ap_queue *aq)
}
/* SE guest's queues additionally need to be bound */
if (ap_q_needs_bind(aq) &&
!(aq->se_bstate == AP_BS_Q_USABLE ||
aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
rc = false;
if (ap_is_se_guest()) {
if (!ap_q_supported_in_se(aq)) {
rc = false;
goto unlock_and_out;
}
if (ap_q_needs_bind(aq) &&
!(aq->se_bstate == AP_BS_Q_USABLE ||
aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
rc = false;
}
unlock_and_out:
spin_unlock_bh(&aq->lock);

View File

@ -108,7 +108,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
u32 i;
ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
if (ret < 0) {
if (ret) {
IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
return -EIO;
}

View File

@ -24,6 +24,16 @@ MODULE_PARM_DESC(force_legacy,
"Force legacy mode for transitional virtio 1 devices");
#endif
bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return false;
return index == vp_dev->admin_vq.vq_index;
}
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
@ -234,10 +244,9 @@ out_info:
return vq;
}
static void vp_del_vq(struct virtqueue *vq)
static void vp_del_vq(struct virtqueue *vq, struct virtio_pci_vq_info *info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
/*
@ -258,13 +267,16 @@ static void vp_del_vq(struct virtqueue *vq)
void vp_del_vqs(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info;
struct virtqueue *vq, *n;
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
if (vp_dev->per_vq_vectors) {
int v = vp_dev->vqs[vq->index]->msix_vector;
info = vp_is_avq(vdev, vq->index) ? vp_dev->admin_vq.info :
vp_dev->vqs[vq->index];
if (vp_dev->per_vq_vectors) {
int v = info->msix_vector;
if (v != VIRTIO_MSI_NO_VECTOR &&
!vp_is_slow_path_vector(v)) {
int irq = pci_irq_vector(vp_dev->pci_dev, v);
@ -273,7 +285,7 @@ void vp_del_vqs(struct virtio_device *vdev)
free_irq(irq, vq);
}
}
vp_del_vq(vq);
vp_del_vq(vq, info);
}
vp_dev->per_vq_vectors = false;
@ -354,7 +366,7 @@ vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
vring_interrupt, 0,
vp_dev->msix_names[msix_vec], vq);
if (err) {
vp_del_vq(vq);
vp_del_vq(vq, *p_info);
return ERR_PTR(err);
}

View File

@ -178,6 +178,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
#define VIRTIO_ADMIN_CMD_BITMAP 0
#endif
bool vp_is_avq(struct virtio_device *vdev, unsigned int index);
void vp_modern_avq_done(struct virtqueue *vq);
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
struct virtio_admin_cmd *cmd);

View File

@ -43,16 +43,6 @@ static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
return 0;
}
static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return false;
return index == vp_dev->admin_vq.vq_index;
}
void vp_modern_avq_done(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
@ -245,7 +235,7 @@ static void vp_modern_avq_cleanup(struct virtio_device *vdev)
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return;
vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
vq = vp_dev->admin_vq.info->vq;
if (!vq)
return;

View File

@ -131,10 +131,9 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
}
}
spin_unlock(&dentry->d_lock);
} else {
if (dentry->d_inode)
ret = v9fs_fid_find_inode(dentry->d_inode, false, uid, any);
}
if (!ret && dentry->d_inode)
ret = v9fs_fid_find_inode(dentry->d_inode, false, uid, any);
return ret;
}

View File

@ -130,6 +130,7 @@ struct afs_call {
wait_queue_head_t waitq; /* processes awaiting completion */
struct work_struct async_work; /* async I/O processor */
struct work_struct work; /* actual work processor */
struct work_struct free_work; /* Deferred free processor */
struct rxrpc_call *rxcall; /* RxRPC call handle */
struct rxrpc_peer *peer; /* Remote endpoint */
struct key *key; /* security for this call */
@ -1333,6 +1334,7 @@ extern int __net_init afs_open_socket(struct afs_net *);
extern void __net_exit afs_close_socket(struct afs_net *);
extern void afs_charge_preallocation(struct work_struct *);
extern void afs_put_call(struct afs_call *);
void afs_deferred_put_call(struct afs_call *call);
void afs_make_call(struct afs_call *call, gfp_t gfp);
void afs_wait_for_call_to_complete(struct afs_call *call);
extern struct afs_call *afs_alloc_flat_call(struct afs_net *,

View File

@ -18,6 +18,7 @@
struct workqueue_struct *afs_async_calls;
static void afs_deferred_free_worker(struct work_struct *work);
static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_process_async_call(struct work_struct *);
@ -149,6 +150,7 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
refcount_set(&call->ref, 1);
INIT_WORK(&call->async_work, afs_process_async_call);
INIT_WORK(&call->free_work, afs_deferred_free_worker);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->state_lock);
call->iter = &call->def_iter;
@ -159,6 +161,36 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
return call;
}
static void afs_free_call(struct afs_call *call)
{
struct afs_net *net = call->net;
int o;
ASSERT(!work_pending(&call->async_work));
rxrpc_kernel_put_peer(call->peer);
if (call->rxcall) {
rxrpc_kernel_shutdown_call(net->socket, call->rxcall);
rxrpc_kernel_put_call(net->socket, call->rxcall);
call->rxcall = NULL;
}
if (call->type->destructor)
call->type->destructor(call);
afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call);
kfree(call->request);
o = atomic_read(&net->nr_outstanding_calls);
trace_afs_call(call->debug_id, afs_call_trace_free, 0, o,
__builtin_return_address(0));
kfree(call);
o = atomic_dec_return(&net->nr_outstanding_calls);
if (o == 0)
wake_up_var(&net->nr_outstanding_calls);
}
/*
* Dispose of a reference on a call.
*/
@ -173,32 +205,34 @@ void afs_put_call(struct afs_call *call)
o = atomic_read(&net->nr_outstanding_calls);
trace_afs_call(debug_id, afs_call_trace_put, r - 1, o,
__builtin_return_address(0));
if (zero)
afs_free_call(call);
}
if (zero) {
ASSERT(!work_pending(&call->async_work));
ASSERT(call->type->name != NULL);
static void afs_deferred_free_worker(struct work_struct *work)
{
struct afs_call *call = container_of(work, struct afs_call, free_work);
rxrpc_kernel_put_peer(call->peer);
afs_free_call(call);
}
if (call->rxcall) {
rxrpc_kernel_shutdown_call(net->socket, call->rxcall);
rxrpc_kernel_put_call(net->socket, call->rxcall);
call->rxcall = NULL;
}
if (call->type->destructor)
call->type->destructor(call);
/*
* Dispose of a reference on a call, deferring the cleanup to a workqueue
* to avoid lock recursion.
*/
void afs_deferred_put_call(struct afs_call *call)
{
struct afs_net *net = call->net;
unsigned int debug_id = call->debug_id;
bool zero;
int r, o;
afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call);
kfree(call->request);
trace_afs_call(call->debug_id, afs_call_trace_free, 0, o,
__builtin_return_address(0));
kfree(call);
o = atomic_dec_return(&net->nr_outstanding_calls);
if (o == 0)
wake_up_var(&net->nr_outstanding_calls);
}
zero = __refcount_dec_and_test(&call->ref, &r);
o = atomic_read(&net->nr_outstanding_calls);
trace_afs_call(debug_id, afs_call_trace_put, r - 1, o,
__builtin_return_address(0));
if (zero)
schedule_work(&call->free_work);
}
static struct afs_call *afs_get_call(struct afs_call *call,
@ -640,7 +674,8 @@ static void afs_wake_up_call_waiter(struct sock *sk, struct rxrpc_call *rxcall,
}
/*
* wake up an asynchronous call
* Wake up an asynchronous call. The caller is holding the call notify
* spinlock around this, so we can't call afs_put_call().
*/
static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
unsigned long call_user_ID)
@ -657,7 +692,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
__builtin_return_address(0));
if (!queue_work(afs_async_calls, &call->async_work))
afs_put_call(call);
afs_deferred_put_call(call);
}
}
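
The core of the afs change, as a hedged generic sketch (struct invented): when the final reference can drop while the caller holds a spinlock (here, rxrpc's call notify lock), the actual teardown is punted to a workqueue instead of being done inline.

    #include <linux/refcount.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct demo_call {
        refcount_t ref;
        struct work_struct free_work;   /* INIT_WORK()'d at allocation time */
    };

    static void demo_free_worker(struct work_struct *work)
    {
        struct demo_call *call = container_of(work, struct demo_call, free_work);

        kfree(call);    /* full cleanup runs in process context */
    }

    /* Safe to call with spinlocks held: never frees synchronously. */
    static void demo_deferred_put(struct demo_call *call)
    {
        if (refcount_dec_and_test(&call->ref))
            schedule_work(&call->free_work);
    }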

View File

@ -121,6 +121,7 @@ int netfs_start_io_write(struct inode *inode)
up_write(&inode->i_rwsem);
return -ERESTARTSYS;
}
downgrade_write(&inode->i_rwsem);
return 0;
}
EXPORT_SYMBOL(netfs_start_io_write);
@ -135,7 +136,7 @@ EXPORT_SYMBOL(netfs_start_io_write);
void netfs_end_io_write(struct inode *inode)
__releases(inode->i_rwsem)
{
up_write(&inode->i_rwsem);
up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_write);
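
Reduced to a hedged sketch (function names invented), the locking pattern this netfs fix establishes: the start path takes i_rwsem exclusively to drain readers, then downgrades to shared mode, so the matching end path must release the read side rather than the write side.

    #include <linux/rwsem.h>

    static int demo_start_io_write(struct rw_semaphore *sem)
    {
        if (down_write_killable(sem))
            return -ERESTARTSYS;
        downgrade_write(sem);   /* exclusive -> shared, atomically */
        return 0;
    }

    static void demo_end_io_write(struct rw_semaphore *sem)
    {
        up_read(sem);           /* pairs with the downgraded (shared) hold */
    }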

View File

@ -1128,9 +1128,12 @@ int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
trace_ocfs2_setattr(inode, dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
dentry->d_name.len, dentry->d_name.name,
attr->ia_valid, attr->ia_mode,
from_kuid(&init_user_ns, attr->ia_uid),
from_kgid(&init_user_ns, attr->ia_gid));
attr->ia_valid,
attr->ia_valid & ATTR_MODE ? attr->ia_mode : 0,
attr->ia_valid & ATTR_UID ?
from_kuid(&init_user_ns, attr->ia_uid) : 0,
attr->ia_valid & ATTR_GID ?
from_kgid(&init_user_ns, attr->ia_gid) : 0);
/* ensuring we don't even attempt to truncate a symlink */
if (S_ISLNK(inode->i_mode))

View File

@ -1054,6 +1054,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
*/
}
put_net(cifs_net_ns(server));
kfree(server->leaf_fullpath);
kfree(server);
@ -1649,8 +1650,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
/* srv_count can never go negative */
WARN_ON(server->srv_count < 0);
put_net(cifs_net_ns(server));
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@ -3077,13 +3076,22 @@ generic_ip_connect(struct TCP_Server_Info *server)
if (server->ssocket) {
socket = server->ssocket;
} else {
rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
struct net *net = cifs_net_ns(server);
struct sock *sk;
rc = __sock_create(net, sfamily, SOCK_STREAM,
IPPROTO_TCP, &server->ssocket, 1);
if (rc < 0) {
cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
return rc;
}
sk = server->ssocket->sk;
__netns_tracker_free(net, &sk->ns_tracker, false);
sk->sk_net_refcnt = 1;
get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
sock_inuse_add(net, 1);
/* BB other socket options to set KEEPALIVE, NODELAY? */
cifs_dbg(FYI, "Socket created\n");
socket = server->ssocket;

View File

@ -390,8 +390,12 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx)
static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
struct tls_context *ctx;
if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
return false;
ctx = tls_get_ctx(sk);
if (!ctx)
return false;
return !!tls_sw_ctx_tx(ctx);
@ -399,8 +403,12 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
struct tls_context *ctx;
if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
return false;
ctx = tls_get_ctx(sk);
if (!ctx)
return false;
return !!tls_sw_ctx_rx(ctx);

View File

@ -3136,13 +3136,17 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_link *link = filp->private_data;
const struct bpf_prog *prog = link->prog;
enum bpf_link_type type = link->type;
char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
seq_printf(m,
"link_type:\t%s\n"
"link_id:\t%u\n",
bpf_link_type_strs[link->type],
link->id);
if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) {
seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]);
} else {
WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type);
seq_printf(m, "link_type:\t<%u>\n", type);
}
seq_printf(m, "link_id:\t%u\n", link->id);
if (prog) {
bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
seq_printf(m,
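
Generalized as a hedged sketch (illustrative helper, not the bpf code): indexing a string table by an enum value that may outgrow the table needs a bounds-and-NULL check, with a fallback that prints the raw value.

    /* Returns NULL when `type` has no registered name. */
    static const char *demo_type_str(const char * const *tbl, size_t n,
                                     unsigned int type)
    {
        if (type < n && tbl[type])
            return tbl[type];
        return NULL;    /* caller prints "<%u>" with the raw number instead */
    }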

View File

@ -21718,7 +21718,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
/* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
@ -21944,6 +21944,6 @@ err_unlock:
mutex_unlock(&bpf_verifier_lock);
vfree(env->insn_aux_data);
err_free_env:
kfree(env);
kvfree(env);
return ret;
}

View File

@ -1285,7 +1285,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
/* Zero out spare memory. */
if (want_init_on_alloc(flags)) {
kasan_disable_current();
memset((void *)p + new_size, 0, ks - new_size);
memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
kasan_enable_current();
}

View File

@ -977,8 +977,10 @@ error:
struct p9_client *p9_client_create(const char *dev_name, char *options)
{
int err;
static atomic_t seqno = ATOMIC_INIT(0);
struct p9_client *clnt;
char *client_id;
char *cache_name;
clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
if (!clnt)
@ -1035,15 +1037,23 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
if (err)
goto close_trans;
cache_name = kasprintf(GFP_KERNEL,
"9p-fcall-cache-%u", atomic_inc_return(&seqno));
if (!cache_name) {
err = -ENOMEM;
goto close_trans;
}
/* P9_HDRSZ + 4 is the smallest packet header we can have that is
* followed by data accessed from userspace by read
*/
clnt->fcall_cache =
kmem_cache_create_usercopy("9p-fcall-cache", clnt->msize,
kmem_cache_create_usercopy(cache_name, clnt->msize,
0, 0, P9_HDRSZ + 4,
clnt->msize - (P9_HDRSZ + 4),
NULL);
kfree(cache_name);
return clnt;
close_trans:
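
The naming idiom above, sketched with invented names: each 9p client's fcall cache depends on its negotiated msize, and kmem cache names must be unique, so a global sequence number is folded into the name. Mirroring the change above, the kasprintf() buffer is freed right after creation, since the slab core keeps its own copy of the name.

    #include <linux/slab.h>
    #include <linux/atomic.h>

    static atomic_t demo_seqno = ATOMIC_INIT(0);

    static struct kmem_cache *demo_create_cache(size_t size)
    {
        struct kmem_cache *cache;
        char *name = kasprintf(GFP_KERNEL, "demo-cache-%u",
                               atomic_inc_return(&demo_seqno));

        if (!name)
            return NULL;
        cache = kmem_cache_create(name, size, 0, 0, NULL);
        kfree(name);
        return cache;
    }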

View File

@ -2248,7 +2248,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
rcu_read_unlock();
return ret;
}
rcu_read_unlock_bh();
rcu_read_unlock();
if (dst)
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
out_drop:

View File

@ -57,6 +57,25 @@ static inline int landlock_restrict_self(const int ruleset_fd,
#define ENV_TCP_CONNECT_NAME "LL_TCP_CONNECT"
#define ENV_DELIMITER ":"
static int str2num(const char *numstr, __u64 *num_dst)
{
char *endptr = NULL;
int err = 0;
__u64 num;
errno = 0;
num = strtoull(numstr, &endptr, 10);
if (errno != 0)
err = errno;
/* Was the string empty, or not entirely parsed successfully? */
else if ((*numstr == '\0') || (*endptr != '\0'))
err = EINVAL;
else
*num_dst = num;
return err;
}
static int parse_path(char *env_path, const char ***const path_list)
{
int i, num_paths = 0;
@ -157,7 +176,6 @@ static int populate_ruleset_net(const char *const env_var, const int ruleset_fd,
char *env_port_name, *env_port_name_next, *strport;
struct landlock_net_port_attr net_port = {
.allowed_access = allowed_access,
.port = 0,
};
env_port_name = getenv(env_var);
@ -168,7 +186,17 @@ static int populate_ruleset_net(const char *const env_var, const int ruleset_fd,
env_port_name_next = env_port_name;
while ((strport = strsep(&env_port_name_next, ENV_DELIMITER))) {
net_port.port = atoi(strport);
__u64 port;
if (strcmp(strport, "") == 0)
continue;
if (str2num(strport, &port)) {
fprintf(stderr, "Failed to parse port at \"%s\"\n",
strport);
goto out_free_name;
}
net_port.port = port;
if (landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
&net_port, 0)) {
fprintf(stderr,
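
A brief usage note on the str2num() helper added above (hypothetical inputs): unlike atoi(), which silently yields 0 for garbage and ignores trailing junk, the strtoull()-based helper reports both cases as errors.

    __u64 port;

    if (str2num("8080", &port) == 0)
        printf("parsed %llu\n", (unsigned long long)port);      /* 8080 */
    if (str2num("80x", &port) != 0)
        fprintf(stderr, "rejected: trailing garbage\n");        /* atoi("80x") == 80 */
    if (str2num("", &port) != 0)
        fprintf(stderr, "rejected: empty string\n");            /* atoi("") == 0 */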

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig SOUND
tristate "Sound card support"
depends on HAS_IOMEM || UML
depends on HAS_IOMEM || INDIRECT_IOMEM
help
If you have a sound card in your computer, i.e. if it can say more
than an occasional beep, say Y.

View File

@ -10258,6 +10258,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
SND_PCI_QUIRK(0x1028, 0x0c28, "Dell Inspiron 16 Plus 7630", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
SND_PCI_QUIRK(0x1028, 0x0c4d, "Dell", ALC287_FIXUP_CS35L41_I2C_4),
SND_PCI_QUIRK(0x1028, 0x0c94, "Dell Polaris 3 metal", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1028, 0x0c96, "Dell Polaris 2in1", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC289_FIXUP_DELL_CS35L41_SPI_2),
@ -10572,11 +10574,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x10a4, "ASUS TP3407SA", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x1154, "ASUS TP3607SH", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1204, "ASUS Strix G615JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x1214, "ASUS Strix G615LH_LM_LP", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
@ -10656,6 +10662,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1e63, "ASUS H7606W", ALC285_FIXUP_CS35L56_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1e83, "ASUS GA605W", ALC285_FIXUP_CS35L56_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1eb3, "ASUS Ally RCLA72", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x1ed3, "ASUS HN7306W", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
@ -10670,6 +10677,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3e30, "ASUS TP3607SA", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x3ee0, "ASUS Strix G815_JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x3ef0, "ASUS Strix G635LR_LW_LX", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x3f00, "ASUS Strix G815LH_LM_LP", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x3f10, "ASUS Strix G835LR_LW_LX", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x3f20, "ASUS Strix G615LR_LW", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x3f30, "ASUS Strix G815LR_LW", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
@ -10891,11 +10905,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3878, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x387d, "Yoga S780-16 pro Quad AAC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x387e, "Yoga S780-16 pro Quad YC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x387f, "Yoga S780-16 pro dual LX", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3880, "Yoga S780-16 pro dual YC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3881, "YB9 dual power mode2 YC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x3884, "Y780 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3891, "Lenovo Yoga Pro 7 14AHP9", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x38a5, "Y580P AMD dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
@ -10904,6 +10921,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38b5, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38b6, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38b7, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38b8, "Yoga S780-14.5 proX AMD YC Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38b9, "Yoga S780-14.5 proX AMD LX Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38ba, "Yoga S780-14.5 Air AMD quad YC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38bb, "Yoga S780-14.5 Air AMD quad AAC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38be, "Yoga S980-14.5 proX YC Dual", ALC287_FIXUP_TAS2781_I2C),
@ -10914,12 +10933,22 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38cb, "Y790 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38cd, "Y790 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38d2, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x38d3, "Yoga S990-16 Pro IMH YC Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38d4, "Yoga S990-16 Pro IMH VECO Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38d5, "Yoga S990-16 Pro IMH YC Quad", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38d6, "Yoga S990-16 Pro IMH VECO Quad", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38d7, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x38df, "Yoga Y990 Intel YC Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38e0, "Yoga Y990 Intel VECO Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38f8, "Yoga Book 9i", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38df, "Y990 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38fd, "ThinkBook plus Gen5 Hybrid", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
SND_PCI_QUIRK(0x17aa, 0x391f, "Yoga S990-16 pro Quad YC Quad", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3920, "Yoga S990-16 pro Quad VECO Quad", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
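For context: SND_PCI_QUIRK() keys each entry on the PCI subsystem vendor/device ID pair, and the driver applies the fixup of the first matching entry. A minimal standalone model of that lookup (a sketch, not the kernel implementation; the fixup constants here are stand-ins):

#include <stdio.h>

struct quirk {
	unsigned short subvendor, subdevice;
	const char *name;
	int fixup;
};

enum { FIXUP_TAS2781_I2C = 1, FIXUP_CS35L41_I2C_2 = 2 };

static const struct quirk table[] = {
	{ 0x17aa, 0x38b8, "Yoga S780-14.5 proX AMD YC Dual", FIXUP_TAS2781_I2C },
	{ 0x17aa, 0x38f9, "Thinkbook 16P Gen5", FIXUP_CS35L41_I2C_2 },
};

static const struct quirk *quirk_lookup(unsigned short sv, unsigned short sd)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].subvendor == sv && table[i].subdevice == sd)
			return &table[i];
	return NULL;
}

int main(void)
{
	const struct quirk *q = quirk_lookup(0x17aa, 0x38f9);
	printf("%s -> fixup %d\n", q ? q->name : "no match", q ? q->fixup : 0);
	return 0;
}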


@@ -325,6 +325,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "E1404FA"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
@@ -339,6 +346,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "M7600RE"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "M3502RA"),
}
},
{
.driver_data = &acp6x_card,
.matches = {


@@ -656,7 +656,7 @@ static int aw_dev_get_dsp_status(struct aw_device *aw_dev)
if (ret)
return ret;
if (!(reg_val & (~AW88399_WDT_CNT_MASK)))
ret = -EPERM;
return -EPERM;
return 0;
}


@@ -202,12 +202,14 @@
#define CDC_RX_RXn_RX_PATH_SEC3(rx, n) (0x042c + rx->rxn_reg_stride * n)
#define CDC_RX_RX0_RX_PATH_SEC4 (0x0430)
#define CDC_RX_RX0_RX_PATH_SEC7 (0x0434)
#define CDC_RX_RXn_RX_PATH_SEC7(rx, n) (0x0434 + rx->rxn_reg_stride * n)
#define CDC_RX_RXn_RX_PATH_SEC7(rx, n) \
(0x0434 + (rx->rxn_reg_stride * n) + ((n > 1) ? rx->rxn_reg_stride2 : 0))
#define CDC_RX_DSM_OUT_DELAY_SEL_MASK GENMASK(2, 0)
#define CDC_RX_DSM_OUT_DELAY_TWO_SAMPLE 0x2
#define CDC_RX_RX0_RX_PATH_MIX_SEC0 (0x0438)
#define CDC_RX_RX0_RX_PATH_MIX_SEC1 (0x043C)
#define CDC_RX_RXn_RX_PATH_DSM_CTL(rx, n) (0x0440 + rx->rxn_reg_stride * n)
#define CDC_RX_RXn_RX_PATH_DSM_CTL(rx, n) \
(0x0440 + (rx->rxn_reg_stride * n) + ((n > 1) ? rx->rxn_reg_stride2 : 0))
#define CDC_RX_RXn_DSM_CLK_EN_MASK BIT(0)
#define CDC_RX_RX0_RX_PATH_DSM_CTL (0x0440)
#define CDC_RX_RX0_RX_PATH_DSM_DATA1 (0x0444)
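To make the new arithmetic concrete: with the codec v2.0/2.1 values set later in this diff (rxn_reg_stride = 0x80, rxn_reg_stride2 = 0xc), the fixed macro places the n == 2 (AUX) registers 0xc higher than the old one did, which is what makes the INTERP_AUX special cases removed below unnecessary. A self-contained sketch with the strides as plain constants instead of struct rx_macro fields:

#include <stdio.h>

static unsigned int dsm_ctl(unsigned int stride, unsigned int stride2, unsigned int n)
{
	return 0x0440 + stride * n + (n > 1 ? stride2 : 0);
}

int main(void)
{
	unsigned int n;

	for (n = 0; n < 3; n++)	/* v2.0/2.1: prints 0x0440, 0x04c0, 0x054c */
		printf("v2.0/2.1 RX%u DSM_CTL = 0x%04x\n", n, dsm_ctl(0x80, 0xc, n));
	/* the unfixed macro yielded 0x0540 for n == 2 on these versions */
	for (n = 0; n < 3; n++)	/* v2.5+: stride2 is 0, addresses unchanged */
		printf("v2.5+    RX%u DSM_CTL = 0x%04x\n", n, dsm_ctl(0xc0, 0x0, n));
	return 0;
}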
@@ -645,6 +647,7 @@ struct rx_macro {
int rx_mclk_cnt;
enum lpass_codec_version codec_version;
int rxn_reg_stride;
int rxn_reg_stride2;
bool is_ear_mode_on;
bool hph_pwr_mode;
bool hph_hd2_mode;
@@ -1929,9 +1932,6 @@ static int rx_macro_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
CDC_RX_PATH_PGA_MUTE_MASK, 0x0);
}
if (j == INTERP_AUX)
dsm_reg = CDC_RX_RXn_RX_PATH_DSM_CTL(rx, 2);
int_mux_cfg0 = CDC_RX_INP_MUX_RX_INT0_CFG0 + j * 8;
int_mux_cfg1 = int_mux_cfg0 + 4;
int_mux_cfg0_val = snd_soc_component_read(component, int_mux_cfg0);
@@ -2702,9 +2702,6 @@ static int rx_macro_enable_interp_clk(struct snd_soc_component *component,
main_reg = CDC_RX_RXn_RX_PATH_CTL(rx, interp_idx);
dsm_reg = CDC_RX_RXn_RX_PATH_DSM_CTL(rx, interp_idx);
if (interp_idx == INTERP_AUX)
dsm_reg = CDC_RX_RXn_RX_PATH_DSM_CTL(rx, 2);
rx_cfg2_reg = CDC_RX_RXn_RX_PATH_CFG2(rx, interp_idx);
if (SND_SOC_DAPM_EVENT_ON(event)) {
@@ -3821,6 +3818,7 @@ static int rx_macro_probe(struct platform_device *pdev)
case LPASS_CODEC_VERSION_2_0:
case LPASS_CODEC_VERSION_2_1:
rx->rxn_reg_stride = 0x80;
rx->rxn_reg_stride2 = 0xc;
def_count = ARRAY_SIZE(rx_defaults) + ARRAY_SIZE(rx_pre_2_5_defaults);
reg_defaults = kmalloc_array(def_count, sizeof(struct reg_default), GFP_KERNEL);
if (!reg_defaults)
@@ -3834,6 +3832,7 @@ static int rx_macro_probe(struct platform_device *pdev)
case LPASS_CODEC_VERSION_2_7:
case LPASS_CODEC_VERSION_2_8:
rx->rxn_reg_stride = 0xc0;
rx->rxn_reg_stride2 = 0x0;
def_count = ARRAY_SIZE(rx_defaults) + ARRAY_SIZE(rx_2_5_defaults);
reg_defaults = kmalloc_array(def_count, sizeof(struct reg_default), GFP_KERNEL);
if (!reg_defaults)


@@ -253,7 +253,7 @@ static int rt722_sdca_read_prop(struct sdw_slave *slave)
}
/* set the timeout values */
prop->clk_stop_timeout = 200;
prop->clk_stop_timeout = 900;
/* wake-up event */
prop->wake_capable = 1;


@@ -28,6 +28,13 @@
#define MICFIL_OSR_DEFAULT 16
#define MICFIL_NUM_RATES 7
#define MICFIL_CLK_SRC_NUM 3
/* clock source ids */
#define MICFIL_AUDIO_PLL1 0
#define MICFIL_AUDIO_PLL2 1
#define MICFIL_CLK_EXT3 2
enum quality {
QUALITY_HIGH,
QUALITY_MEDIUM,
@@ -45,9 +52,12 @@ struct fsl_micfil {
struct clk *mclk;
struct clk *pll8k_clk;
struct clk *pll11k_clk;
struct clk *clk_src[MICFIL_CLK_SRC_NUM];
struct snd_dmaengine_dai_dma_data dma_params_rx;
struct sdma_peripheral_config sdmacfg;
struct snd_soc_card *card;
struct snd_pcm_hw_constraint_list constraint_rates;
unsigned int constraint_rates_list[MICFIL_NUM_RATES];
unsigned int dataline;
char name[32];
int irq[MICFIL_IRQ_LINES];
@@ -475,12 +485,34 @@ static int fsl_micfil_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct fsl_micfil *micfil = snd_soc_dai_get_drvdata(dai);
unsigned int rates[MICFIL_NUM_RATES] = {8000, 11025, 16000, 22050, 32000, 44100, 48000};
int i, j, k = 0;
u64 clk_rate;
if (!micfil) {
dev_err(dai->dev, "micfil dai priv_data not set\n");
return -EINVAL;
}
micfil->constraint_rates.list = micfil->constraint_rates_list;
micfil->constraint_rates.count = 0;
for (j = 0; j < MICFIL_NUM_RATES; j++) {
for (i = 0; i < MICFIL_CLK_SRC_NUM; i++) {
clk_rate = clk_get_rate(micfil->clk_src[i]);
if (clk_rate != 0 && do_div(clk_rate, rates[j]) == 0) {
micfil->constraint_rates_list[k++] = rates[j];
micfil->constraint_rates.count++;
break;
}
}
}
if (micfil->constraint_rates.count > 0)
snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&micfil->constraint_rates);
return 0;
}
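One subtlety in the loop above: do_div() divides its u64 argument in place and returns the remainder, so the condition keeps a rate only when some source clock is an exact integer multiple of it. A plain-C model of the filtering; the clock frequencies are assumptions (typical i.MX audio PLL roots), not values from the driver:

#include <stdio.h>

int main(void)
{
	const unsigned int rates[] = {8000, 11025, 16000, 22050, 32000, 44100, 48000};
	const unsigned long long clk_src[] = {393216000ULL, 361267200ULL, 24576000ULL};
	unsigned int allowed[7], count = 0, i, j, k;

	for (j = 0; j < 7; j++)
		for (i = 0; i < 3; i++)
			if (clk_src[i] && clk_src[i] % rates[j] == 0) {
				allowed[count++] = rates[j];
				break;
			}

	for (k = 0; k < count; k++)
		printf("allowed: %u Hz\n", allowed[k]);
	return 0;
}

With these inputs the 8 kHz family (8000/16000/32000/48000) divides the first PLL and the 11.025 kHz family (11025/22050/44100) divides the second, so all seven rates survive; drop a PLL and the corresponding family disappears from the constraint list.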
@@ -1175,6 +1207,12 @@ static int fsl_micfil_probe(struct platform_device *pdev)
fsl_asoc_get_pll_clocks(&pdev->dev, &micfil->pll8k_clk,
&micfil->pll11k_clk);
micfil->clk_src[MICFIL_AUDIO_PLL1] = micfil->pll8k_clk;
micfil->clk_src[MICFIL_AUDIO_PLL2] = micfil->pll11k_clk;
micfil->clk_src[MICFIL_CLK_EXT3] = devm_clk_get(&pdev->dev, "clkext3");
if (IS_ERR(micfil->clk_src[MICFIL_CLK_EXT3]))
micfil->clk_src[MICFIL_CLK_EXT3] = NULL;
/* init regmap */
regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))


@@ -28,6 +28,7 @@
#include "avs.h"
#include "cldma.h"
#include "messages.h"
#include "pcm.h"
static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
module_param(pgctl_mask, uint, 0444);
@@ -247,7 +248,7 @@ static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
{
if (stream->substream) {
snd_pcm_period_elapsed(stream->substream);
avs_period_elapsed(stream->substream);
} else if (stream->cstream) {
u64 buffer_size = stream->cstream->runtime->buffer_size;


@@ -16,6 +16,7 @@
#include <sound/soc-component.h>
#include "avs.h"
#include "path.h"
#include "pcm.h"
#include "topology.h"
#include "../../codecs/hda.h"
@@ -30,6 +31,7 @@ struct avs_dma_data {
struct hdac_ext_stream *host_stream;
};
struct work_struct period_elapsed_work;
struct snd_pcm_substream *substream;
};
@@ -56,6 +58,22 @@ avs_dai_find_path_template(struct snd_soc_dai *dai, bool is_fe, int direction)
return dw->priv;
}
static void avs_period_elapsed_work(struct work_struct *work)
{
struct avs_dma_data *data = container_of(work, struct avs_dma_data, period_elapsed_work);
snd_pcm_period_elapsed(data->substream);
}
void avs_period_elapsed(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
struct snd_soc_dai *dai = snd_soc_rtd_to_cpu(rtd, 0);
struct avs_dma_data *data = snd_soc_dai_get_dma_data(dai, substream);
schedule_work(&data->period_elapsed_work);
}
static int avs_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
@@ -77,6 +95,7 @@ static int avs_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_d
data->substream = substream;
data->template = template;
data->adev = adev;
INIT_WORK(&data->period_elapsed_work, avs_period_elapsed_work);
snd_soc_dai_set_dma_data(dai, substream, data);
if (rtd->dai_link->ignore_suspend)
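The pcm.c hunks above route period notifications through a work item instead of calling snd_pcm_period_elapsed() straight from the interrupt path, presumably so the PCM stream locking it involves never happens in that context. A minimal standalone module showing the same defer-to-work pattern (an illustration under that assumption, not AVS code):

#include <linux/module.h>
#include <linux/workqueue.h>

static void deferred_fn(struct work_struct *work)
{
	/* Runs later in process context, where sleeping is allowed */
	pr_info("deferred work ran\n");
}
static DECLARE_WORK(deferred_work, deferred_fn);

/* Stand-in for the interrupt-context call site: it only queues work */
static void fake_irq_hook(void)
{
	schedule_work(&deferred_work);
}

static int __init demo_init(void)
{
	fake_irq_hook();
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&deferred_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");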

sound/soc/intel/avs/pcm.h (new file)

@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2024 Intel Corporation
*
* Authors: Cezary Rojewski <cezary.rojewski@intel.com>
* Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
*/
#ifndef __SOUND_SOC_INTEL_AVS_PCM_H
#define __SOUND_SOC_INTEL_AVS_PCM_H
#include <sound/pcm.h>
void avs_period_elapsed(struct snd_pcm_substream *substream);
#endif


@@ -166,6 +166,15 @@ static const struct snd_soc_acpi_adr_device rt1316_3_group1_adr[] = {
}
};
static const struct snd_soc_acpi_adr_device rt1318_1_adr[] = {
{
.adr = 0x000133025D131801ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt1318-1"
}
};
static const struct snd_soc_acpi_adr_device rt1318_1_group1_adr[] = {
{
.adr = 0x000130025D131801ull,
@@ -184,6 +193,15 @@ static const struct snd_soc_acpi_adr_device rt1318_2_group1_adr[] = {
}
};
static const struct snd_soc_acpi_adr_device rt713_0_adr[] = {
{
.adr = 0x000031025D071301ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt713"
}
};
static const struct snd_soc_acpi_adr_device rt714_0_adr[] = {
{
.adr = 0x000030025D071401ull,
@@ -286,6 +304,20 @@ static const struct snd_soc_acpi_link_adr lnl_sdw_rt1318_l12_rt714_l0[] = {
{}
};
static const struct snd_soc_acpi_link_adr lnl_sdw_rt713_l0_rt1318_l1[] = {
{
.mask = BIT(0),
.num_adr = ARRAY_SIZE(rt713_0_adr),
.adr_d = rt713_0_adr,
},
{
.mask = BIT(1),
.num_adr = ARRAY_SIZE(rt1318_1_adr),
.adr_d = rt1318_1_adr,
},
{}
};
/* this table is used when there is no I2S codec present */
struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[] = {
/* mockup tests need to be first */
@@ -343,6 +375,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[] = {
.drv_name = "sof_sdw",
.sof_tplg_filename = "sof-lnl-rt1318-l12-rt714-l0.tplg"
},
{
.link_mask = BIT(0) | BIT(1),
.links = lnl_sdw_rt713_l0_rt1318_l1,
.drv_name = "sof_sdw",
.sof_tplg_filename = "sof-lnl-rt713-l0-rt1318-l1.tplg"
},
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_lnl_sdw_machines);
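The 64-bit .adr values in these tables follow the SoundWire DisCo _ADR layout. A short decoding sketch; the field positions are my reading of that convention and should be treated as an assumption:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed layout: [51:48] link, [47:44] SDW version, [43:40] unique ID,
	 * [39:24] manufacturer, [23:8] part, [7:0] class */
	uint64_t adr = 0x000133025D131801ULL;	/* the rt1318_1_adr entry above */

	printf("link %u version %u unique %u mfg 0x%04x part 0x%04x class 0x%02x\n",
	       (unsigned)((adr >> 48) & 0xf), (unsigned)((adr >> 44) & 0xf),
	       (unsigned)((adr >> 40) & 0xf), (unsigned)((adr >> 24) & 0xffff),
	       (unsigned)((adr >> 8) & 0xffff), (unsigned)(adr & 0xff));
	return 0;
}

Under that reading the entry decodes to link 1, unique ID 3, manufacturer 0x025d (Realtek), part 0x1318, class 0x01, matching the "rt1318-1" name prefix.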


@@ -420,6 +420,15 @@ verify_umulti_link_info(int fd, bool retprobe, __u64 *offsets,
if (!ASSERT_NEQ(err, -1, "readlink"))
return -1;
memset(&info, 0, sizeof(info));
err = bpf_link_get_info_by_fd(fd, &info, &len);
if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
return -1;
ASSERT_EQ(info.uprobe_multi.count, 3, "info.uprobe_multi.count");
ASSERT_EQ(info.uprobe_multi.path_size, strlen(path) + 1,
"info.uprobe_multi.path_size");
for (bit = 0; bit < 8; bit++) {
memset(&info, 0, sizeof(info));
info.uprobe_multi.path = ptr_to_u64(path_buf);
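The lines added above assert the size-query behavior of bpf_link_get_info_by_fd(): called with a zeroed bpf_link_info, the kernel reports uprobe_multi.count and path_size so user space can size buffers before asking for the data. A sketch of that two-call pattern (the helper is hypothetical; only the libbpf call and the struct fields come from the test):

#include <string.h>
#include <bpf/bpf.h>

/* Hypothetical helper: fetch the binary path recorded for a uprobe_multi link */
int get_umulti_path(int link_fd, char *buf, __u32 buf_sz)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);

	/* First call, zeroed struct: kernel fills in count and path_size */
	memset(&info, 0, sizeof(info));
	if (bpf_link_get_info_by_fd(link_fd, &info, &len))
		return -1;
	if (info.uprobe_multi.path_size > buf_sz)
		return -1;

	/* Second call, buffer attached: kernel copies the path out */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.path = (__u64)(unsigned long)buf;
	info.uprobe_multi.path_size = buf_sz;
	len = sizeof(info);
	return bpf_link_get_info_by_fd(link_fd, &info, &len);
}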


@@ -656,4 +656,71 @@ __naked void two_old_ids_one_cur_id(void)
: __clobber_all);
}
SEC("socket")
/* Note the flag, see verifier.c:opt_subreg_zext_lo32_rnd_hi32() */
__flag(BPF_F_TEST_RND_HI32)
__success
/* This test was added because of a bug in verifier.c:sync_linked_regs(),
* which, upon range propagation, destroyed subreg_def marks for registers.
* The subreg_def mark is used to decide whether zero extension instructions
* are needed when a register is read. When BPF_F_TEST_RND_HI32 is set, it
* also causes generation of statements to randomize upper halves of
* read registers.
*
* The test is written in a way to return an upper half of a register
* that is affected by range propagation and must have it's subreg_def
* preserved. This gives a return value of 0 and leads to undefined
* return value if subreg_def mark is not preserved.
*/
__retval(0)
/* Check that verifier believes r1/r0 are zero at exit */
__log_level(2)
__msg("4: (77) r1 >>= 32 ; R1_w=0")
__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
__msg("6: (95) exit")
__msg("from 3 to 4")
__msg("4: (77) r1 >>= 32 ; R1_w=0")
__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
__msg("6: (95) exit")
/* Verify that statements to randomize the upper half of r1 have not been
* generated.
*/
__xlated("call unknown")
__xlated("r0 &= 2147483647")
__xlated("w1 = w0")
/* This is how disasm.c prints BPF_ZEXT_REG at the moment; x86 and arm64
* are the only CI archs that do not need zero extension for subregs.
*/
#if !defined(__TARGET_ARCH_x86) && !defined(__TARGET_ARCH_arm64)
__xlated("w1 = w1")
#endif
__xlated("if w0 < 0xa goto pc+0")
__xlated("r1 >>= 32")
__xlated("r0 = r1")
__xlated("exit")
__naked void linked_regs_and_subreg_def(void)
{
asm volatile (
"call %[bpf_ktime_get_ns];"
/* make sure r0 is in 32-bit range, otherwise w1 = w0 won't
* assign the same IDs to the registers.
*/
"r0 &= 0x7fffffff;"
/* link w1 and w0 via ID */
"w1 = w0;"
/* 'if' statement propagates range info from w0 to w1,
* but should not affect w1->subreg_def property.
*/
"if w0 < 10 goto +0;"
/* r1 is read here; on archs that require subreg zero
* extension this would cause zext patch generation.
*/
"r1 >>= 32;"
"r0 = r1;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";