pstore: Allow prz to control need for locking
In preparation for not locking at all for certain buffers, depending on whether there is contention, make locking optional based on how the prz is initialized.

Signed-off-by: Joel Fernandes <joelaf@google.com>
[kees: moved locking flag into prz instead of via caller arguments]
Signed-off-by: Kees Cook <keescook@chromium.org>
commit 663deb4788
parent d8991f51e5
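To make the new flags argument concrete, the sketch below shows how a caller could ask for a lockless zone once this patch is applied. It is illustrative only, not part of the commit: example_create_zone(), example_prz, and the sig/memtype values of 0 are placeholders, while persistent_ram_new(), PRZ_FLAG_NO_LOCK, IS_ERR(), and PTR_ERR() come from the patched API shown in the diff below. The ramoops callers in the first hunks keep the old behaviour by passing 0 for flags.

#include <linux/err.h>
#include <linux/pstore_ram.h>

/* Illustrative only: create one zone with buffer locking disabled. */
static struct persistent_ram_zone *example_prz;

static int example_create_zone(phys_addr_t paddr, size_t sz,
                               struct persistent_ram_ecc_info *ecc_info)
{
        /* sig == 0 and memtype == 0 are placeholder values; the last
         * argument is the new flags word introduced by this patch. */
        example_prz = persistent_ram_new(paddr, sz, 0, ecc_info,
                                         0, PRZ_FLAG_NO_LOCK);
        if (IS_ERR(example_prz))
                return PTR_ERR(example_prz);
        return 0;
}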
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -432,7 +432,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
 	for (i = 0; i < cxt->max_dump_cnt; i++) {
 		cxt->przs[i] = persistent_ram_new(*paddr, cxt->record_size, 0,
 						  &cxt->ecc_info,
-						  cxt->memtype);
+						  cxt->memtype, 0);
 		if (IS_ERR(cxt->przs[i])) {
 			err = PTR_ERR(cxt->przs[i]);
 			dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
@@ -469,7 +469,8 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
 		return -ENOMEM;
 	}
 
-	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
+	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
+				  cxt->memtype, 0);
 	if (IS_ERR(*prz)) {
 		int err = PTR_ERR(*prz);
 
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -53,9 +53,10 @@ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 {
 	int old;
 	int new;
-	unsigned long flags;
+	unsigned long flags = 0;
 
-	raw_spin_lock_irqsave(&prz->buffer_lock, flags);
+	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+		raw_spin_lock_irqsave(&prz->buffer_lock, flags);
 
 	old = atomic_read(&prz->buffer->start);
 	new = old + a;
@@ -63,7 +64,8 @@ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 		new -= prz->buffer_size;
 	atomic_set(&prz->buffer->start, new);
 
-	raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
+	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
 
 	return old;
 }
@@ -73,9 +75,10 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
 {
 	size_t old;
 	size_t new;
-	unsigned long flags;
+	unsigned long flags = 0;
 
-	raw_spin_lock_irqsave(&prz->buffer_lock, flags);
+	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+		raw_spin_lock_irqsave(&prz->buffer_lock, flags);
 
 	old = atomic_read(&prz->buffer->size);
 	if (old == prz->buffer_size)
@@ -87,7 +90,8 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
 	atomic_set(&prz->buffer->size, new);
 
 exit:
-	raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
+	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
 }
 
 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
@@ -463,7 +467,8 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
 }
 
 static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
-				    struct persistent_ram_ecc_info *ecc_info)
+				    struct persistent_ram_ecc_info *ecc_info,
+				    unsigned long flags)
 {
 	int ret;
 
@@ -492,6 +497,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
 	prz->buffer->sig = sig;
 	persistent_ram_zap(prz);
 	prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
+	prz->flags = flags;
 
 	return 0;
 }
@@ -516,7 +522,7 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 		u32 sig, struct persistent_ram_ecc_info *ecc_info,
-		unsigned int memtype)
+		unsigned int memtype, u32 flags)
 {
 	struct persistent_ram_zone *prz;
 	int ret = -ENOMEM;
@@ -531,7 +537,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 	if (ret)
 		goto err;
 
-	ret = persistent_ram_post_init(prz, sig, ecc_info);
+	ret = persistent_ram_post_init(prz, sig, ecc_info, flags);
 	if (ret)
 		goto err;
 
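The flag-gated locking that buffer_start_add() and buffer_size_add() now use can be modelled in stand-alone C, which may make the pattern easier to follow: a per-zone flag, checked at run time, decides whether the lock is taken around the wrap-around update. The sketch below compiles with -pthread; struct zone, ZONE_NO_LOCK, and zone_start_add() are names invented for the illustration, and a pthread mutex stands in for the raw spinlock. In the hunks above, flags is also initialized to 0, presumably so the IRQ-flags variable always holds a defined value when the lock and unlock calls are skipped.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ZONE_NO_LOCK (1u << 0)          /* models PRZ_FLAG_NO_LOCK */

struct zone {
        unsigned int flags;             /* models prz->flags */
        pthread_mutex_t lock;           /* stands in for prz->buffer_lock */
        atomic_size_t start;            /* models prz->buffer->start */
        size_t size;                    /* models prz->buffer_size */
};

/* Advance the start offset by 'a', wrapping at 'size', and return the old
 * offset; take the lock only when the zone was created without
 * ZONE_NO_LOCK, mirroring the patched buffer_start_add(). */
static size_t zone_start_add(struct zone *z, size_t a)
{
        size_t old, new;

        if (!(z->flags & ZONE_NO_LOCK))
                pthread_mutex_lock(&z->lock);

        old = atomic_load(&z->start);
        new = old + a;
        while (new >= z->size)
                new -= z->size;
        atomic_store(&z->start, new);

        if (!(z->flags & ZONE_NO_LOCK))
                pthread_mutex_unlock(&z->lock);

        return old;
}

int main(void)
{
        struct zone z = { .flags = 0, .size = 16 };

        pthread_mutex_init(&z.lock, NULL);
        atomic_init(&z.start, 0);

        printf("old start: %zu\n", zone_start_add(&z, 20));        /* 0 */
        printf("new start: %zu\n", (size_t)atomic_load(&z.start)); /* 4 */
        return 0;
}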
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -24,6 +24,13 @@
 #include <linux/list.h>
 #include <linux/types.h>
 
+/*
+ * Choose whether access to the RAM zone requires locking or not. If a zone
+ * can be written to from different CPUs like with ftrace for example, then
+ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
+ */
+#define PRZ_FLAG_NO_LOCK	BIT(0)
+
 struct persistent_ram_buffer;
 struct rs_control;
 
@@ -40,6 +47,7 @@ struct persistent_ram_zone {
 	void *vaddr;
 	struct persistent_ram_buffer *buffer;
 	size_t buffer_size;
+	u32 flags;
 	raw_spinlock_t buffer_lock;
 
 	/* ECC correction */
@@ -56,7 +64,7 @@ struct persistent_ram_zone {
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 		u32 sig, struct persistent_ram_ecc_info *ecc_info,
-		unsigned int memtype);
+		unsigned int memtype, u32 flags);
 void persistent_ram_free(struct persistent_ram_zone *prz);
 void persistent_ram_zap(struct persistent_ram_zone *prz);
 