Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security

Pull security subsystem updates from James Morris:

 - EVM gains support for loading an x509 cert from the kernel
   (EVM_LOAD_X509) into the EVM trusted kernel keyring.

 - Smack implements 'file receive' process-based permission checking for
   sockets, rather than just depending on inode checks.

 - Misc enhancements for TPM & TPM2.

 - Cleanups and bugfixes for SELinux, Keys, and IMA.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security: (41 commits)
  selinux: Inode label revalidation performance fix
  KEYS: refcount bug fix
  ima: ima_write_policy() limit locking
  IMA: policy can be updated zero times
  selinux: rate-limit netlink message warnings in selinux_nlmsg_perm()
  selinux: export validatetrans decisions
  gfs2: Invalidate security labels of inodes when they go invalid
  selinux: Revalidate invalid inode security labels
  security: Add hook to invalidate inode security labels
  selinux: Add accessor functions for inode->i_security
  security: Make inode argument of inode_getsecid non-const
  security: Make inode argument of inode_getsecurity non-const
  selinux: Remove unused variable in selinux_inode_init_security
  keys, trusted: seal with a TPM2 authorization policy
  keys, trusted: select hash algorithm for TPM2 chips
  keys, trusted: fix: *do not* allow duplicate key options
  tpm_ibmvtpm: properly handle interrupted packet receptions
  tpm_tis: Tighten IRQ auto-probing
  tpm_tis: Refactor the interrupt setup
  tpm_tis: Get rid of the duplicate IRQ probing code
  ...
commit 5807fcaa9b by Linus Torvalds, 2016-01-17 19:13:15 -08:00
53 changed files with 1282 additions and 375 deletions


@ -27,17 +27,26 @@ Usage:
keyctl print keyid
options:
keyhandle=   ascii hex value of sealing key; default 0x40000000 (SRK)
keyauth=     ascii hex auth for sealing key; default 0x00...
             (40 ascii zeros)
blobauth=    ascii hex auth for sealed data; default 0x00...
             (40 ascii zeros)
pcrinfo=     ascii hex of PCR_INFO or PCR_INFO_LONG (no default)
pcrlock=     pcr number to be extended to "lock" blob
migratable=  0|1 indicating permission to reseal to new PCR values;
             default 1 (resealing allowed)
hash= hash algorithm name as a string. For TPM 1.x the only
allowed value is sha1. For TPM 2.x the allowed values
are sha1, sha256, sha384, sha512 and sm3-256.
policydigest= digest for the authorization policy. Must be calculated
              with the same hash algorithm as specified by the 'hash='
              option.
policyhandle= handle to an authorization policy session that defines the
same policy and with the same hash algorithm as was used to
seal the key.
"keyctl print" returns an ascii hex copy of the sealed key, which is in standard
TPM_STORED_DATA format. The key length for new keys is always in bytes.
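
For TPM 2.x, the new options are supplied in the key payload at sealing time.
A minimal userspace sketch using the add_key(2) syscall (link with -lkeyutils;
the persistent key handle 0x81000001 and the digest placeholder are
illustrative assumptions, not defaults):

    #include <keyutils.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* Seal a new 32-byte key under a hypothetical TPM2 parent key,
             * binding it to an authorization policy digest. */
            const char *seal =
                    "new 32 keyhandle=0x81000001 hash=sha256 "
                    "policydigest=<ascii hex sha256 policy digest>";
            key_serial_t id = add_key("trusted", "kmk", seal, strlen(seal),
                                      KEY_SPEC_USER_KEYRING);

            if (id < 0) {
                    perror("add_key");
                    return 1;
            }
            /* Unsealing ("load ...") would instead pass policyhandle=<session>
             * for a session that satisfies the same policy with the same
             * hash algorithm. */
            printf("sealed trusted key: %d\n", id);
            return 0;
    }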


@ -321,6 +321,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
goto error_free_cert;
} else if (!prep->trusted) {
ret = x509_validate_trust(cert, get_system_trusted_keyring());
if (ret)
ret = x509_validate_trust(cert, get_ima_mok_keyring());
if (!ret)
prep->trusted = 1;
}


@ -31,6 +31,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
[HASH_ALGO_TGR_128] = "tgr128",
[HASH_ALGO_TGR_160] = "tgr160",
[HASH_ALGO_TGR_192] = "tgr192",
[HASH_ALGO_SM3_256] = "sm3-256",
};
EXPORT_SYMBOL_GPL(hash_algo_name);
@ -52,5 +53,6 @@ const int hash_digest_size[HASH_ALGO__LAST] = {
[HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE,
[HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE,
[HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE,
[HASH_ALGO_SM3_256] = SM3256_DIGEST_SIZE,
};
EXPORT_SYMBOL_GPL(hash_digest_size);


@ -310,10 +310,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
{
int duration_idx = TPM_UNDEFINED;
int duration = 0;
u8 category = (ordinal >> 24) & 0xFF;
if ((category == TPM_PROTECTED_COMMAND && ordinal < TPM_MAX_ORDINAL) ||
(category == TPM_CONNECTION_COMMAND && ordinal < TSC_MAX_ORDINAL))
/*
* We only have a duration table for protected commands, where the upper
* 16 bits are 0. For the few other ordinals the fallback will be used.
*/
if (ordinal < TPM_MAX_ORDINAL)
duration_idx = tpm_ordinal_duration[ordinal];
if (duration_idx != TPM_UNDEFINED)
@ -501,6 +503,21 @@ int tpm_get_timeouts(struct tpm_chip *chip)
struct duration_t *duration_cap;
ssize_t rc;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
/* Fixed timeouts for TPM2 */
chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
chip->vendor.duration[TPM_SHORT] =
msecs_to_jiffies(TPM2_DURATION_SHORT);
chip->vendor.duration[TPM_MEDIUM] =
msecs_to_jiffies(TPM2_DURATION_MEDIUM);
chip->vendor.duration[TPM_LONG] =
msecs_to_jiffies(TPM2_DURATION_LONG);
return 0;
}
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);


@ -83,16 +83,20 @@ enum tpm2_structures {
};
enum tpm2_return_codes {
TPM2_RC_INITIALIZE = 0x0100,
TPM2_RC_TESTING = 0x090A,
TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
TPM2_RC_DISABLED = 0x0120,
TPM2_RC_TESTING = 0x090A, /* RC_WARN */
};
enum tpm2_algorithms {
TPM2_ALG_SHA1 = 0x0004,
TPM2_ALG_KEYEDHASH = 0x0008,
TPM2_ALG_SHA256 = 0x000B,
TPM2_ALG_NULL = 0x0010
TPM2_ALG_SHA384 = 0x000C,
TPM2_ALG_SHA512 = 0x000D,
TPM2_ALG_NULL = 0x0010,
TPM2_ALG_SM3_256 = 0x0012,
};
enum tpm2_command_codes {
@ -138,7 +142,6 @@ struct tpm_vendor_specific {
unsigned long base; /* TPM base address */
int irq;
int probed_irq;
int region_size;
int have_region;


@ -16,6 +16,7 @@
*/
#include "tpm.h"
#include <crypto/hash_info.h>
#include <keys/trusted-type.h>
enum tpm2_object_attributes {
@ -104,6 +105,19 @@ struct tpm2_cmd {
union tpm2_cmd_params params;
} __packed;
struct tpm2_hash {
unsigned int crypto_id;
unsigned int tpm_id;
};
static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM2_ALG_SHA1},
{HASH_ALGO_SHA256, TPM2_ALG_SHA256},
{HASH_ALGO_SHA384, TPM2_ALG_SHA384},
{HASH_ALGO_SHA512, TPM2_ALG_SHA512},
{HASH_ALGO_SM3_256, TPM2_ALG_SM3_256},
};
/*
* Array with one entry per ordinal defining the maximum amount
* of time the chip could take to return the result. The values
@ -429,8 +443,20 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
{
unsigned int blob_len;
struct tpm_buf buf;
u32 hash;
int i;
int rc;
for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
if (options->hash == tpm2_hash_map[i].crypto_id) {
hash = tpm2_hash_map[i].tpm_id;
break;
}
}
if (i == ARRAY_SIZE(tpm2_hash_map))
return -EINVAL;
rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
if (rc)
return rc;
@ -452,12 +478,26 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
tpm_buf_append_u8(&buf, payload->migratable);
/* public */
tpm_buf_append_u16(&buf, 14);
if (options->policydigest)
tpm_buf_append_u16(&buf, 14 + options->digest_len);
else
tpm_buf_append_u16(&buf, 14);
tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
tpm_buf_append_u16(&buf, TPM2_ALG_SHA256);
tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
tpm_buf_append_u16(&buf, 0); /* policy digest size */
tpm_buf_append_u16(&buf, hash);
/* policy */
if (options->policydigest) {
tpm_buf_append_u32(&buf, 0);
tpm_buf_append_u16(&buf, options->digest_len);
tpm_buf_append(&buf, options->policydigest,
options->digest_len);
} else {
tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
tpm_buf_append_u16(&buf, 0);
}
/* public parameters */
tpm_buf_append_u16(&buf, TPM2_ALG_NULL);
tpm_buf_append_u16(&buf, 0);
@ -488,8 +528,12 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
out:
tpm_buf_destroy(&buf);
if (rc > 0)
rc = -EPERM;
if (rc > 0) {
if ((rc & TPM2_RC_HASH) == TPM2_RC_HASH)
rc = -EINVAL;
else
rc = -EPERM;
}
return rc;
}
@ -583,7 +627,9 @@ static int tpm2_unseal(struct tpm_chip *chip,
return rc;
tpm_buf_append_u32(&buf, blob_handle);
tpm2_buf_append_auth(&buf, TPM2_RS_PW,
tpm2_buf_append_auth(&buf,
options->policyhandle ?
options->policyhandle : TPM2_RS_PW,
NULL /* nonce */, 0,
0 /* session_attributes */,
options->blobauth /* hmac */,


@ -284,17 +284,9 @@ static int crb_acpi_add(struct acpi_device *device)
chip->vendor.priv = priv;
/* Default timeouts and durations */
chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
chip->vendor.duration[TPM_SHORT] =
msecs_to_jiffies(TPM2_DURATION_SHORT);
chip->vendor.duration[TPM_MEDIUM] =
msecs_to_jiffies(TPM2_DURATION_MEDIUM);
chip->vendor.duration[TPM_LONG] =
msecs_to_jiffies(TPM2_DURATION_LONG);
rc = tpm_get_timeouts(chip);
if (rc)
return rc;
chip->acpi_dev_handle = device->handle;


@ -90,7 +90,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return 0;
}
sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
if (sig)
return -EINTR;
@ -125,7 +125,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
struct ibmvtpm_dev *ibmvtpm;
struct ibmvtpm_crq crq;
__be64 *word = (__be64 *)&crq;
int rc;
int rc, sig;
ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
@ -141,18 +141,35 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
return -EIO;
}
if (ibmvtpm->tpm_processing_cmd) {
dev_info(ibmvtpm->dev,
"Need to wait for TPM to finish\n");
/* wait for previous command to finish */
sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
if (sig)
return -EINTR;
}
spin_lock(&ibmvtpm->rtce_lock);
ibmvtpm->res_len = 0;
memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
crq.valid = (u8)IBMVTPM_VALID_CMD;
crq.msg = (u8)VTPM_TPM_COMMAND;
crq.len = cpu_to_be16(count);
crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
/*
* set the processing flag before the Hcall, since we may get the
* result (interrupt) before even being able to check rc.
*/
ibmvtpm->tpm_processing_cmd = true;
rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
be64_to_cpu(word[1]));
if (rc != H_SUCCESS) {
dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
rc = 0;
ibmvtpm->tpm_processing_cmd = false;
} else
rc = count;
@ -515,6 +532,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
case VTPM_TPM_COMMAND_RES:
/* len of the data in rtce buffer */
ibmvtpm->res_len = be16_to_cpu(crq->len);
ibmvtpm->tpm_processing_cmd = false;
wake_up_interruptible(&ibmvtpm->wq);
return;
default:


@ -45,6 +45,7 @@ struct ibmvtpm_dev {
wait_queue_head_t wq;
u16 res_len;
u32 vtpm_version;
bool tpm_processing_cmd;
};
#define CRQ_RES_BUF_SIZE PAGE_SIZE


@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip)
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
free_irq(chip->vendor.irq, chip);
devm_free_irq(chip->pdev, chip->vendor.irq, chip);
chip->vendor.irq = 0;
}
@ -461,11 +461,8 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
chip->vendor.irq = irq;
if (!priv->irq_tested)
msleep(1);
if (!priv->irq_tested) {
if (!priv->irq_tested)
disable_interrupts(chip);
dev_err(chip->pdev,
FW_BUG "TPM interrupt not working, polling instead\n");
}
priv->irq_tested = true;
return rc;
}
@ -570,26 +567,6 @@ static const struct tpm_class_ops tpm_tis = {
.req_canceled = tpm_tis_req_canceled,
};
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
struct tpm_chip *chip = dev_id;
u32 interrupt;
interrupt = ioread32(chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
if (interrupt == 0)
return IRQ_NONE;
chip->vendor.probed_irq = irq;
/* Clear interrupts handled with TPM_EOI */
iowrite32(interrupt,
chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
return IRQ_HANDLED;
}
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
struct tpm_chip *chip = dev_id;
@ -622,6 +599,84 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
return IRQ_HANDLED;
}
/* Register the IRQ and issue a command that will cause an interrupt. If an
* irq is seen then leave the chip setup for IRQ operation, otherwise reverse
* everything and leave in polling mode. Returns 0 on success.
*/
static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
int flags, int irq)
{
struct priv_data *priv = chip->vendor.priv;
u8 original_int_vec;
if (devm_request_irq(chip->pdev, irq, tis_int_handler, flags,
chip->devname, chip) != 0) {
dev_info(chip->pdev, "Unable to request irq: %d for probe\n",
irq);
return -1;
}
chip->vendor.irq = irq;
original_int_vec = ioread8(chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
iowrite8(irq,
chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality));
/* Clear all existing */
iowrite32(ioread32(chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality)),
chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
/* Turn on */
iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
priv->irq_tested = false;
/* Generate an interrupt by having the core call through to
* tpm_tis_send
*/
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_gen_interrupt(chip);
else
tpm_gen_interrupt(chip);
/* tpm_tis_send will either confirm the interrupt is working or it
* will call disable_irq which undoes all of the above.
*/
if (!chip->vendor.irq) {
iowrite8(original_int_vec,
chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
return 1;
}
return 0;
}
/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
* do not have ACPI/etc. We typically expect the interrupt to be declared if
* present.
*/
static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask)
{
u8 original_int_vec;
int i;
original_int_vec = ioread8(chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
if (!original_int_vec) {
if (IS_ENABLED(CONFIG_X86))
for (i = 3; i <= 15; i++)
if (!tpm_tis_probe_irq_single(chip, intmask, 0,
i))
return;
} else if (!tpm_tis_probe_irq_single(chip, intmask, 0,
original_int_vec))
return;
}
static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
@ -644,8 +699,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
acpi_handle acpi_dev_handle)
{
u32 vendor, intfcaps, intmask;
int rc, i, irq_s, irq_e, probe;
int irq_r = -1;
int rc, probe;
struct tpm_chip *chip;
struct priv_data *priv;
@ -677,6 +731,15 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
goto out_err;
}
/* Take control of the TPM's interrupt hardware and shut it off */
intmask = ioread32(chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
intmask &= ~TPM_GLOBAL_INT_ENABLE;
iowrite32(intmask,
chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
if (request_locality(chip, 0) != 0) {
rc = -ENODEV;
goto out_err;
@ -731,126 +794,31 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");
/* Very early on issue a command to the TPM in polling mode to make
* sure it works. May as well use that command to set the proper
* timeouts for the driver.
*/
if (tpm_get_timeouts(chip)) {
dev_err(dev, "Could not get TPM timeouts and durations\n");
rc = -ENODEV;
goto out_err;
}
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);
intmask =
ioread32(chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
intmask |= TPM_INTF_CMD_READY_INT
| TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
| TPM_INTF_STS_VALID_INT;
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
if (interrupts)
chip->vendor.irq = tpm_info->irq;
if (interrupts && !chip->vendor.irq) {
irq_s =
ioread8(chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
irq_r = irq_s;
if (irq_s) {
irq_e = irq_s;
} else {
irq_s = 3;
irq_e = 15;
}
for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
iowrite8(i, chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
if (devm_request_irq
(dev, i, tis_int_probe, IRQF_SHARED,
chip->devname, chip) != 0) {
dev_info(chip->pdev,
"Unable to request irq: %d for probe\n",
i);
continue;
}
/* Clear all existing */
iowrite32(ioread32
(chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality)),
chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
/* Turn on */
iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
chip->vendor.probed_irq = 0;
/* Generate Interrupts */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_gen_interrupt(chip);
else
tpm_gen_interrupt(chip);
chip->vendor.irq = chip->vendor.probed_irq;
/* free_irq will call into tis_int_probe;
clear all irqs we haven't seen while doing
tpm_gen_interrupt */
iowrite32(ioread32
(chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality)),
chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
/* Turn off */
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
devm_free_irq(dev, i, chip);
}
if (interrupts) {
if (tpm_info->irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
tpm_info->irq);
if (!chip->vendor.irq)
dev_err(chip->pdev, FW_BUG
"TPM interrupt not working, polling instead\n");
} else
tpm_tis_probe_irq(chip, intmask);
}
if (chip->vendor.irq) {
iowrite8(chip->vendor.irq,
chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
if (devm_request_irq
(dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED,
chip->devname, chip) != 0) {
dev_info(chip->pdev,
"Unable to request irq: %d for use\n",
chip->vendor.irq);
chip->vendor.irq = 0;
} else {
/* Clear all existing */
iowrite32(ioread32
(chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality)),
chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
/* Turn on */
iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
}
} else if (irq_r != -1)
iowrite8(irq_r, chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
chip->vendor.duration[TPM_SHORT] =
msecs_to_jiffies(TPM2_DURATION_SHORT);
chip->vendor.duration[TPM_MEDIUM] =
msecs_to_jiffies(TPM2_DURATION_MEDIUM);
chip->vendor.duration[TPM_LONG] =
msecs_to_jiffies(TPM2_DURATION_LONG);
rc = tpm2_do_selftest(chip);
if (rc == TPM2_RC_INITIALIZE) {
dev_warn(dev, "Firmware has not started TPM\n");
@ -866,12 +834,6 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
goto out_err;
}
} else {
if (tpm_get_timeouts(chip)) {
dev_err(dev, "Could not get TPM timeouts and durations\n");
rc = -ENODEV;
goto out_err;
}
if (tpm_do_selftest(chip)) {
dev_err(dev, "TPM self test failed\n");
rc = -ENODEV;


@ -13,6 +13,7 @@
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
#include "gfs2.h"
#include "incore.h"
@ -262,6 +263,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
if (ip) {
set_bit(GIF_INVALID, &ip->i_flags);
forget_all_cached_acls(&ip->i_inode);
security_inode_invalidate_secctx(&ip->i_inode);
gfs2_dir_hash_inval(ip);
}
}


@ -34,6 +34,9 @@
#define TGR160_DIGEST_SIZE 20
#define TGR192_DIGEST_SIZE 24
/* not defined in include/crypto/ */
#define SM3256_DIGEST_SIZE 32
extern const char *const hash_algo_name[HASH_ALGO__LAST];
extern const int hash_digest_size[HASH_ALGO__LAST];


@ -35,4 +35,28 @@ extern int system_verify_data(const void *data, unsigned long len,
enum key_being_used_for usage);
#endif
#ifdef CONFIG_IMA_MOK_KEYRING
extern struct key *ima_mok_keyring;
extern struct key *ima_blacklist_keyring;
static inline struct key *get_ima_mok_keyring(void)
{
return ima_mok_keyring;
}
static inline struct key *get_ima_blacklist_keyring(void)
{
return ima_blacklist_keyring;
}
#else
static inline struct key *get_ima_mok_keyring(void)
{
return NULL;
}
static inline struct key *get_ima_blacklist_keyring(void)
{
return NULL;
}
#endif /* CONFIG_IMA_MOK_KEYRING */
#endif /* _KEYS_SYSTEM_KEYRING_H */


@ -18,6 +18,7 @@
#define MAX_KEY_SIZE 128
#define MAX_BLOB_SIZE 512
#define MAX_PCRINFO_SIZE 64
#define MAX_DIGEST_SIZE 64
struct trusted_key_payload {
struct rcu_head rcu;
@ -36,6 +37,10 @@ struct trusted_key_options {
uint32_t pcrinfo_len;
unsigned char pcrinfo[MAX_PCRINFO_SIZE];
int pcrlock;
uint32_t hash;
uint32_t digest_len;
unsigned char policydigest[MAX_DIGEST_SIZE];
uint32_t policyhandle;
};
extern struct key_type key_type_trusted;


@ -238,7 +238,7 @@ extern void __audit_getname(struct filename *name);
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags);
extern void __audit_file(const struct file *);
extern void __audit_inode_child(const struct inode *parent,
extern void __audit_inode_child(struct inode *parent,
const struct dentry *dentry,
const unsigned char type);
extern void __audit_seccomp(unsigned long syscall, long signr, int code);
@ -303,7 +303,7 @@ static inline void audit_inode_parent_hidden(struct filename *name,
__audit_inode(name, dentry,
AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN);
}
static inline void audit_inode_child(const struct inode *parent,
static inline void audit_inode_child(struct inode *parent,
const struct dentry *dentry,
const unsigned char type) {
if (unlikely(!audit_dummy_context()))
@ -463,7 +463,7 @@ static inline void __audit_inode(struct filename *name,
const struct dentry *dentry,
unsigned int flags)
{ }
static inline void __audit_inode_child(const struct inode *parent,
static inline void __audit_inode_child(struct inode *parent,
const struct dentry *dentry,
const unsigned char type)
{ }
@ -477,7 +477,7 @@ static inline void audit_file(struct file *file)
static inline void audit_inode_parent_hidden(struct filename *name,
const struct dentry *dentry)
{ }
static inline void audit_inode_child(const struct inode *parent,
static inline void audit_inode_child(struct inode *parent,
const struct dentry *dentry,
const unsigned char type)
{ }


@ -145,24 +145,24 @@ static inline kernel_cap_t cap_invert(const kernel_cap_t c)
return dest;
}
static inline int cap_isclear(const kernel_cap_t a)
static inline bool cap_isclear(const kernel_cap_t a)
{
unsigned __capi;
CAP_FOR_EACH_U32(__capi) {
if (a.cap[__capi] != 0)
return 0;
return false;
}
return 1;
return true;
}
/*
* Check if "a" is a subset of "set".
* return 1 if ALL of the capabilities in "a" are also in "set"
* cap_issubset(0101, 1111) will return 1
* return 0 if ANY of the capabilities in "a" are not in "set"
* cap_issubset(1111, 0101) will return 0
* return true if ALL of the capabilities in "a" are also in "set"
* cap_issubset(0101, 1111) will return true
* return false if ANY of the capabilities in "a" are not in "set"
* cap_issubset(1111, 0101) will return false
*/
static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
static inline bool cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
kernel_cap_t dest;
dest = cap_drop(a, set);
@ -171,12 +171,6 @@ static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
/* Used to decide between falling back on the old suser() or fsuser(). */
static inline int cap_is_fs_cap(int cap)
{
const kernel_cap_t __cap_fs_set = CAP_FS_SET;
return !!(CAP_TO_MASK(cap) & __cap_fs_set.cap[CAP_TO_INDEX(cap)]);
}
static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
const kernel_cap_t __cap_fs_set = CAP_FS_SET;


@ -14,6 +14,7 @@
struct integrity_iint_cache;
#ifdef CONFIG_EVM
extern int evm_set_key(void *key, size_t keylen);
extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
const char *xattr_name,
void *xattr_value,
@ -42,6 +43,12 @@ static inline int posix_xattr_acl(const char *xattrname)
}
#endif
#else
static inline int evm_set_key(void *key, size_t keylen)
{
return -EOPNOTSUPP;
}
#ifdef CONFIG_INTEGRITY
static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
const char *xattr_name,


@ -177,6 +177,7 @@ struct key {
#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
#define KEY_FLAG_BUILTIN 10 /* set if key is builtin */
#define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */
#define KEY_FLAG_KEEP 12 /* set if key should not be removed */
/* the key type and key description string
* - the desc is used to match a key against search criteria


@ -1261,6 +1261,10 @@
* audit_rule_init.
* @rule contains the allocated rule
*
* @inode_invalidate_secctx:
* Notify the security module that it must revalidate the security context
* of an inode.
*
* @inode_notifysecctx:
* Notify the security module of what the security context of an inode
* should be. Initializes the incore security context managed by the
@ -1413,14 +1417,14 @@ union security_list_options {
int (*inode_removexattr)(struct dentry *dentry, const char *name);
int (*inode_need_killpriv)(struct dentry *dentry);
int (*inode_killpriv)(struct dentry *dentry);
int (*inode_getsecurity)(const struct inode *inode, const char *name,
int (*inode_getsecurity)(struct inode *inode, const char *name,
void **buffer, bool alloc);
int (*inode_setsecurity)(struct inode *inode, const char *name,
const void *value, size_t size,
int flags);
int (*inode_listsecurity)(struct inode *inode, char *buffer,
size_t buffer_size);
void (*inode_getsecid)(const struct inode *inode, u32 *secid);
void (*inode_getsecid)(struct inode *inode, u32 *secid);
int (*file_permission)(struct file *file, int mask);
int (*file_alloc_security)(struct file *file);
@ -1516,6 +1520,7 @@ union security_list_options {
int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid);
void (*release_secctx)(char *secdata, u32 seclen);
void (*inode_invalidate_secctx)(struct inode *inode);
int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
@ -1757,6 +1762,7 @@ struct security_hook_heads {
struct list_head secid_to_secctx;
struct list_head secctx_to_secid;
struct list_head release_secctx;
struct list_head inode_invalidate_secctx;
struct list_head inode_notifysecctx;
struct list_head inode_setsecctx;
struct list_head inode_getsecctx;


@ -270,10 +270,10 @@ int security_inode_listxattr(struct dentry *dentry);
int security_inode_removexattr(struct dentry *dentry, const char *name);
int security_inode_need_killpriv(struct dentry *dentry);
int security_inode_killpriv(struct dentry *dentry);
int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc);
int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc);
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
void security_inode_getsecid(const struct inode *inode, u32 *secid);
void security_inode_getsecid(struct inode *inode, u32 *secid);
int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_free(struct file *file);
@ -353,6 +353,7 @@ int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
void security_release_secctx(char *secdata, u32 seclen);
void security_inode_invalidate_secctx(struct inode *inode);
int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
@ -719,7 +720,7 @@ static inline int security_inode_killpriv(struct dentry *dentry)
return cap_inode_killpriv(dentry);
}
static inline int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
{
return -EOPNOTSUPP;
}
@ -734,7 +735,7 @@ static inline int security_inode_listsecurity(struct inode *inode, char *buffer,
return 0;
}
static inline void security_inode_getsecid(const struct inode *inode, u32 *secid)
static inline void security_inode_getsecid(struct inode *inode, u32 *secid)
{
*secid = 0;
}
@ -1093,6 +1094,10 @@ static inline void security_release_secctx(char *secdata, u32 seclen)
{
}
static inline void security_inode_invalidate_secctx(struct inode *inode)
{
}
static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
return -EOPNOTSUPP;


@ -31,6 +31,7 @@ enum hash_algo {
HASH_ALGO_TGR_128,
HASH_ALGO_TGR_160,
HASH_ALGO_TGR_192,
HASH_ALGO_SM3_256,
HASH_ALGO__LAST
};


@ -1719,7 +1719,7 @@ static inline int audit_copy_fcaps(struct audit_names *name,
/* Copy inode data into an audit_names. */
void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
const struct inode *inode)
struct inode *inode)
{
name->ino = inode->i_ino;
name->dev = inode->i_sb->s_dev;


@ -207,7 +207,7 @@ extern u32 audit_ever_enabled;
extern void audit_copy_inode(struct audit_names *name,
const struct dentry *dentry,
const struct inode *inode);
struct inode *inode);
extern void audit_log_cap(struct audit_buffer *ab, char *prefix,
kernel_cap_t *cap);
extern void audit_log_name(struct audit_context *context,


@ -1754,7 +1754,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags)
{
struct audit_context *context = current->audit_context;
const struct inode *inode = d_backing_inode(dentry);
struct inode *inode = d_backing_inode(dentry);
struct audit_names *n;
bool parent = flags & AUDIT_INODE_PARENT;
@ -1848,12 +1848,12 @@ void __audit_file(const struct file *file)
* must be hooked prior, in order to capture the target inode during
* unsuccessful attempts.
*/
void __audit_inode_child(const struct inode *parent,
void __audit_inode_child(struct inode *parent,
const struct dentry *dentry,
const unsigned char type)
{
struct audit_context *context = current->audit_context;
const struct inode *inode = d_backing_inode(dentry);
struct inode *inode = d_backing_inode(dentry);
const char *dname = dentry->d_name.name;
struct audit_names *n, *found_parent = NULL, *found_child = NULL;


@ -41,6 +41,17 @@ config INTEGRITY_ASYMMETRIC_KEYS
This option enables digital signature verification using
asymmetric keys.
config INTEGRITY_TRUSTED_KEYRING
bool "Require all keys on the integrity keyrings be signed"
depends on SYSTEM_TRUSTED_KEYRING
depends on INTEGRITY_ASYMMETRIC_KEYS
select KEYS_DEBUG_PROC_KEYS
default y
help
This option requires that all keys added to the .ima and
.evm keyrings be signed by a key on the system trusted
keyring.
config INTEGRITY_AUDIT
bool "Enables integrity auditing support "
depends on AUDIT


@ -24,15 +24,22 @@
static struct key *keyring[INTEGRITY_KEYRING_MAX];
static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
#ifndef CONFIG_INTEGRITY_TRUSTED_KEYRING
"_evm",
"_module",
#ifndef CONFIG_IMA_TRUSTED_KEYRING
"_ima",
#else
".evm",
".ima",
#endif
"_module",
};
#ifdef CONFIG_INTEGRITY_TRUSTED_KEYRING
static bool init_keyring __initdata = true;
#else
static bool init_keyring __initdata;
#endif
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
const char *digest, int digestlen)
{
@ -68,6 +75,9 @@ int __init integrity_init_keyring(const unsigned int id)
const struct cred *cred = current_cred();
int err = 0;
if (!init_keyring)
return 0;
keyring[id] = keyring_alloc(keyring_name[id], KUIDT_INIT(0),
KGIDT_INIT(0), cred,
((KEY_POS_ALL & ~KEY_POS_SETATTR) |


@ -17,6 +17,7 @@
#include <linux/key-type.h>
#include <crypto/public_key.h>
#include <keys/asymmetric-type.h>
#include <keys/system_keyring.h>
#include "integrity.h"
@ -32,9 +33,22 @@ static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
pr_debug("key search: \"%s\"\n", name);
key = get_ima_blacklist_keyring();
if (key) {
key_ref_t kref;
kref = keyring_search(make_key_ref(key, 1),
&key_type_asymmetric, name);
if (!IS_ERR(kref)) {
pr_err("Key '%s' is in ima_blacklist_keyring\n", name);
return ERR_PTR(-EKEYREJECTED);
}
}
if (keyring) {
/* search in specific keyring */
key_ref_t kref;
kref = keyring_search(make_key_ref(keyring, 1),
&key_type_asymmetric, name);
if (IS_ERR(kref))


@ -42,3 +42,20 @@ config EVM_EXTRA_SMACK_XATTRS
additional info to the calculation, requires existing EVM
labeled file systems to be relabeled.
config EVM_LOAD_X509
bool "Load an X509 certificate onto the '.evm' trusted keyring"
depends on EVM && INTEGRITY_TRUSTED_KEYRING
default n
help
Load an X509 certificate onto the '.evm' trusted keyring.
This option enables X509 certificate loading from the kernel
onto the '.evm' trusted keyring. A public key can be used to
verify EVM integrity starting from the 'init' process.
config EVM_X509_PATH
string "EVM X509 certificate path"
depends on EVM_LOAD_X509
default "/etc/keys/x509_evm.der"
help
This option defines the path to the X509 certificate that will be
loaded onto the '.evm' trusted keyring.


@ -21,6 +21,9 @@
#include "../integrity.h"
#define EVM_INIT_HMAC 0x0001
#define EVM_INIT_X509 0x0002
extern int evm_initialized;
extern char *evm_hmac;
extern char *evm_hash;


@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/xattr.h>
#include <linux/evm.h>
#include <keys/encrypted-type.h>
#include <crypto/hash.h>
#include "evm.h"
@ -32,6 +33,44 @@ struct crypto_shash *hash_tfm;
static DEFINE_MUTEX(mutex);
#define EVM_SET_KEY_BUSY 0
static unsigned long evm_set_key_flags;
/**
* evm_set_key() - set EVM HMAC key from the kernel
* @key: pointer to a buffer with the key data
* @size: length of the key data
*
* This function allows setting the EVM HMAC key from the kernel
* without using the "encrypted" key subsystem keys. It can be used
* by the crypto HW kernel module which has its own way of managing
* keys.
*
* key length should be between 32 and 128 bytes long
*/
int evm_set_key(void *key, size_t keylen)
{
int rc;
rc = -EBUSY;
if (test_and_set_bit(EVM_SET_KEY_BUSY, &evm_set_key_flags))
goto busy;
rc = -EINVAL;
if (keylen > MAX_KEY_SIZE)
goto inval;
memcpy(evmkey, key, keylen);
evm_initialized |= EVM_INIT_HMAC;
pr_info("key initialized\n");
return 0;
inval:
clear_bit(EVM_SET_KEY_BUSY, &evm_set_key_flags);
busy:
pr_err("key initialization failed\n");
return rc;
}
EXPORT_SYMBOL_GPL(evm_set_key);
static struct shash_desc *init_desc(char type)
{
long rc;
@ -40,6 +79,10 @@ static struct shash_desc *init_desc(char type)
struct shash_desc *desc;
if (type == EVM_XATTR_HMAC) {
if (!(evm_initialized & EVM_INIT_HMAC)) {
pr_err("HMAC key is not set\n");
return ERR_PTR(-ENOKEY);
}
tfm = &hmac_tfm;
algo = evm_hmac;
} else {
@ -240,7 +283,7 @@ int evm_init_key(void)
{
struct key *evm_key;
struct encrypted_key_payload *ekp;
int rc = 0;
int rc;
evm_key = request_key(&key_type_encrypted, EVMKEY, NULL);
if (IS_ERR(evm_key))
@ -248,12 +291,9 @@ int evm_init_key(void)
down_read(&evm_key->sem);
ekp = evm_key->payload.data[0];
if (ekp->decrypted_datalen > MAX_KEY_SIZE) {
rc = -EINVAL;
goto out;
}
memcpy(evmkey, ekp->decrypted_data, ekp->decrypted_datalen);
out:
rc = evm_set_key(ekp->decrypted_data, ekp->decrypted_datalen);
/* burn the original key contents */
memset(ekp->decrypted_data, 0, ekp->decrypted_datalen);
up_read(&evm_key->sem);
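
As the evm_set_key() kernel-doc above notes, a crypto-hardware module that
manages its own key material can provision the EVM HMAC key directly. A
minimal in-kernel sketch of such a caller; foo_hw_read_key() and the 32-byte
key size are assumptions for illustration, not part of this patch:

    #include <linux/evm.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static int foo_provision_evm_key(void)
    {
            u8 key[32];
            int rc;

            rc = foo_hw_read_key(key, sizeof(key)); /* hypothetical HW helper */
            if (rc)
                    return rc;

            /* Hand the key to EVM; EVM_INIT_HMAC is set on success. */
            rc = evm_set_key(key, sizeof(key));

            /* Burn the local copy, mirroring evm_init_key() above. */
            memzero_explicit(key, sizeof(key));
            return rc;
    }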


@ -358,6 +358,15 @@ int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
return evm_protect_xattr(dentry, xattr_name, NULL, 0);
}
static void evm_reset_status(struct inode *inode)
{
struct integrity_iint_cache *iint;
iint = integrity_iint_find(inode);
if (iint)
iint->evm_status = INTEGRITY_UNKNOWN;
}
/**
* evm_inode_post_setxattr - update 'security.evm' to reflect the changes
* @dentry: pointer to the affected dentry
@ -378,6 +387,8 @@ void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
&& !posix_xattr_acl(xattr_name)))
return;
evm_reset_status(dentry->d_inode);
evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
}
@ -396,6 +407,8 @@ void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
if (!evm_initialized || !evm_protected_xattr(xattr_name))
return;
evm_reset_status(dentry->d_inode);
evm_update_evmxattr(dentry, xattr_name, NULL, 0);
}
@ -472,21 +485,34 @@ out:
}
EXPORT_SYMBOL_GPL(evm_inode_init_security);
#ifdef CONFIG_EVM_LOAD_X509
void __init evm_load_x509(void)
{
int rc;
rc = integrity_load_x509(INTEGRITY_KEYRING_EVM, CONFIG_EVM_X509_PATH);
if (!rc)
evm_initialized |= EVM_INIT_X509;
}
#endif
static int __init init_evm(void)
{
int error;
evm_init_config();
error = integrity_init_keyring(INTEGRITY_KEYRING_EVM);
if (error)
return error;
error = evm_init_secfs();
if (error < 0) {
pr_info("Error registering secfs\n");
goto err;
return error;
}
return 0;
err:
return error;
}
/*


@ -62,9 +62,9 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
char temp[80];
int i, error;
int i;
if (!capable(CAP_SYS_ADMIN) || evm_initialized)
if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_INIT_HMAC))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@ -78,12 +78,8 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
if ((sscanf(temp, "%d", &i) != 1) || (i != 1))
return -EINVAL;
error = evm_init_key();
if (!error) {
evm_initialized = 1;
pr_info("initialized\n");
} else
pr_err("initialization failed\n");
evm_init_key();
return count;
}


@ -255,4 +255,5 @@ out:
void __init integrity_load_keys(void)
{
ima_load_x509();
evm_load_x509();
}


@ -107,6 +107,27 @@ config IMA_DEFAULT_HASH
default "sha512" if IMA_DEFAULT_HASH_SHA512
default "wp512" if IMA_DEFAULT_HASH_WP512
config IMA_WRITE_POLICY
bool "Enable multiple writes to the IMA policy"
depends on IMA
default n
help
IMA policy can now be updated multiple times. The new rules get
appended to the original policy. Keep in mind that the rules are
scanned in FIFO order, so be careful when you design and add new ones.
If unsure, say N.
config IMA_READ_POLICY
bool "Enable reading back the current IMA policy"
depends on IMA
default y if IMA_WRITE_POLICY
default n if !IMA_WRITE_POLICY
help
It is often useful to be able to read back the IMA policy. It is
even more important after introducing CONFIG_IMA_WRITE_POLICY.
This option allows the root user to see the current policy rules.
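
With IMA_WRITE_POLICY and IMA_READ_POLICY enabled, rules are appended to and
read back from the securityfs policy file. A minimal userspace sketch,
assuming securityfs is mounted at /sys/kernel/security and using an
illustrative example rule:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char rule[] = "measure func=BPRM_CHECK mask=MAY_EXEC\n";
            char buf[4096];
            ssize_t n;
            int fd;

            /* Append one rule; it is checked and applied on close(). */
            fd = open("/sys/kernel/security/ima/policy", O_WRONLY);
            if (fd < 0 || write(fd, rule, strlen(rule)) < 0)
                    perror("write policy");
            if (fd >= 0)
                    close(fd);

            /* Read back the currently active rules (root only). */
            fd = open("/sys/kernel/security/ima/policy", O_RDONLY);
            if (fd < 0) {
                    perror("read policy");
                    return 1;
            }
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, (size_t)n, stdout);
            close(fd);
            return 0;
    }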
config IMA_APPRAISE
bool "Appraise integrity measurements"
depends on IMA
@ -123,14 +144,35 @@ config IMA_APPRAISE
If unsure, say N.
config IMA_TRUSTED_KEYRING
bool "Require all keys on the .ima keyring be signed"
bool "Require all keys on the .ima keyring be signed (deprecated)"
depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
depends on INTEGRITY_ASYMMETRIC_KEYS
select INTEGRITY_TRUSTED_KEYRING
default y
help
This option requires that all keys added to the .ima
keyring be signed by a key on the system trusted keyring.
This option is deprecated in favor of INTEGRITY_TRUSTED_KEYRING
config IMA_MOK_KEYRING
bool "Create IMA machine owner keys (MOK) and blacklist keyrings"
depends on SYSTEM_TRUSTED_KEYRING
depends on IMA_TRUSTED_KEYRING
default n
help
This option creates the IMA MOK and blacklist keyrings. IMA MOK is an
intermediate keyring that sits between the .system and .ima keyrings,
effectively forming a simple CA hierarchy. To successfully import a
key into .ima_mok it must be signed by a key whose CA is in the .system
keyring. In turn, any key that needs to go into the .ima keyring must be
signed by a CA in either the .system or .ima_mok keyrings. The IMA MOK
keyring is empty at kernel boot.

The IMA blacklist keyring contains all revoked IMA keys. It is consulted
before any other keyring. If the search is successful, the requested
operation is rejected and an error is returned to the caller.
config IMA_LOAD_X509
bool "Load X509 certificate onto the '.ima' trusted keyring"
depends on IMA_TRUSTED_KEYRING


@ -8,3 +8,4 @@ obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
ima_policy.o ima_template.o ima_template_lib.o
ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
obj-$(CONFIG_IMA_MOK_KEYRING) += ima_mok.o


@ -166,6 +166,11 @@ void ima_update_policy(void);
void ima_update_policy_flag(void);
ssize_t ima_parse_add_rule(char *);
void ima_delete_rules(void);
int ima_check_policy(void);
void *ima_policy_start(struct seq_file *m, loff_t *pos);
void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos);
void ima_policy_stop(struct seq_file *m, void *v);
int ima_policy_show(struct seq_file *m, void *v);
/* Appraise integrity measurements */
#define IMA_APPRAISE_ENFORCE 0x01
@ -250,17 +255,12 @@ static inline int security_filter_rule_match(u32 secid, u32 field, u32 op,
{
return -EINVAL;
}
#endif /* CONFIG_IMA_LSM_RULES */
#ifdef CONFIG_IMA_TRUSTED_KEYRING
static inline int ima_init_keyring(const unsigned int id)
{
return integrity_init_keyring(id);
}
#else
static inline int ima_init_keyring(const unsigned int id)
{
return 0;
}
#endif /* CONFIG_IMA_TRUSTED_KEYRING */
#endif
#ifdef CONFIG_IMA_READ_POLICY
#define POLICY_FILE_FLAGS (S_IWUSR | S_IRUSR)
#else
#define POLICY_FILE_FLAGS S_IWUSR
#endif /* CONFIG_IMA_WRITE_POLICY */
#endif /* __LINUX_IMA_H */


@ -25,6 +25,8 @@
#include "ima.h"
static DEFINE_MUTEX(ima_write_mutex);
static int valid_policy = 1;
#define TMPBUFLEN 12
static ssize_t ima_show_htable_value(char __user *buf, size_t count,
@ -259,7 +261,7 @@ static const struct file_operations ima_ascii_measurements_ops = {
static ssize_t ima_write_policy(struct file *file, const char __user *buf,
size_t datalen, loff_t *ppos)
{
char *data = NULL;
char *data;
ssize_t result;
if (datalen >= PAGE_SIZE)
@ -279,13 +281,20 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf,
result = -EFAULT;
if (copy_from_user(data, buf, datalen))
goto out;
goto out_free;
result = mutex_lock_interruptible(&ima_write_mutex);
if (result < 0)
goto out_free;
result = ima_parse_add_rule(data);
mutex_unlock(&ima_write_mutex);
out_free:
kfree(data);
out:
if (result < 0)
valid_policy = 0;
kfree(data);
return result;
}
@ -302,14 +311,31 @@ enum ima_fs_flags {
static unsigned long ima_fs_flags;
#ifdef CONFIG_IMA_READ_POLICY
static const struct seq_operations ima_policy_seqops = {
.start = ima_policy_start,
.next = ima_policy_next,
.stop = ima_policy_stop,
.show = ima_policy_show,
};
#endif
/*
* ima_open_policy: sequentialize access to the policy file
*/
static int ima_open_policy(struct inode *inode, struct file *filp)
{
/* No point in being allowed to open it if you aren't going to write */
if (!(filp->f_flags & O_WRONLY))
if (!(filp->f_flags & O_WRONLY)) {
#ifndef CONFIG_IMA_READ_POLICY
return -EACCES;
#else
if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
return -EACCES;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return seq_open(filp, &ima_policy_seqops);
#endif
}
if (test_and_set_bit(IMA_FS_BUSY, &ima_fs_flags))
return -EBUSY;
return 0;
@ -326,6 +352,14 @@ static int ima_release_policy(struct inode *inode, struct file *file)
{
const char *cause = valid_policy ? "completed" : "failed";
if ((file->f_flags & O_ACCMODE) == O_RDONLY)
return 0;
if (valid_policy && ima_check_policy() < 0) {
cause = "failed";
valid_policy = 0;
}
pr_info("IMA: policy update %s\n", cause);
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
"policy_update", cause, !valid_policy, 0);
@ -336,15 +370,21 @@ static int ima_release_policy(struct inode *inode, struct file *file)
clear_bit(IMA_FS_BUSY, &ima_fs_flags);
return 0;
}
ima_update_policy();
#ifndef CONFIG_IMA_WRITE_POLICY
securityfs_remove(ima_policy);
ima_policy = NULL;
#else
clear_bit(IMA_FS_BUSY, &ima_fs_flags);
#endif
return 0;
}
static const struct file_operations ima_measure_policy_ops = {
.open = ima_open_policy,
.write = ima_write_policy,
.read = seq_read,
.release = ima_release_policy,
.llseek = generic_file_llseek,
};
@ -382,8 +422,7 @@ int __init ima_fs_init(void)
if (IS_ERR(violations))
goto out;
ima_policy = securityfs_create_file("policy",
S_IWUSR,
ima_policy = securityfs_create_file("policy", POLICY_FILE_FLAGS,
ima_dir, NULL,
&ima_measure_policy_ops);
if (IS_ERR(ima_policy))


@ -116,7 +116,7 @@ int __init ima_init(void)
if (!ima_used_chip)
pr_info("No TPM chip found, activating TPM-bypass!\n");
rc = ima_init_keyring(INTEGRITY_KEYRING_IMA);
rc = integrity_init_keyring(INTEGRITY_KEYRING_IMA);
if (rc)
return rc;


@ -0,0 +1,55 @@
/*
* Copyright (C) 2015 Juniper Networks, Inc.
*
* Author:
* Petko Manolov <petko.manolov@konsulko.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/init.h>
#include <keys/asymmetric-type.h>
struct key *ima_mok_keyring;
struct key *ima_blacklist_keyring;
/*
* Allocate the IMA MOK and blacklist keyrings
*/
__init int ima_mok_init(void)
{
pr_notice("Allocating IMA MOK and blacklist keyrings.\n");
ima_mok_keyring = keyring_alloc(".ima_mok",
KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
(KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ |
KEY_USR_WRITE | KEY_USR_SEARCH,
KEY_ALLOC_NOT_IN_QUOTA, NULL);
ima_blacklist_keyring = keyring_alloc(".ima_blacklist",
KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
(KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ |
KEY_USR_WRITE | KEY_USR_SEARCH,
KEY_ALLOC_NOT_IN_QUOTA, NULL);
if (IS_ERR(ima_mok_keyring) || IS_ERR(ima_blacklist_keyring))
panic("Can't allocate IMA MOK or blacklist keyrings.");
set_bit(KEY_FLAG_TRUSTED_ONLY, &ima_mok_keyring->flags);
set_bit(KEY_FLAG_TRUSTED_ONLY, &ima_blacklist_keyring->flags);
set_bit(KEY_FLAG_KEEP, &ima_blacklist_keyring->flags);
return 0;
}
device_initcall(ima_mok_init);


@ -16,7 +16,9 @@
#include <linux/magic.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/genhd.h>
#include <linux/seq_file.h>
#include "ima.h"
@ -38,6 +40,7 @@
#define AUDIT 0x0040
int ima_policy_flag;
static int temp_ima_appraise;
#define MAX_LSM_RULES 6
enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
@ -135,11 +138,11 @@ static struct ima_rule_entry default_appraise_rules[] = {
static LIST_HEAD(ima_default_rules);
static LIST_HEAD(ima_policy_rules);
static LIST_HEAD(ima_temp_rules);
static struct list_head *ima_rules;
static DEFINE_MUTEX(ima_rules_mutex);
static int ima_policy __initdata;
static int __init default_measure_policy_setup(char *str)
{
if (ima_policy)
@ -171,21 +174,18 @@ static int __init default_appraise_policy_setup(char *str)
__setup("ima_appraise_tcb", default_appraise_policy_setup);
/*
* Although the IMA policy does not change, the LSM policy can be
* reloaded, leaving the IMA LSM based rules referring to the old,
* stale LSM policy.
*
* Update the IMA LSM based rules to reflect the reloaded LSM policy.
* We assume the rules still exist; and BUG_ON() if they don't.
* The LSM policy can be reloaded, leaving the IMA LSM based rules referring
* to the old, stale LSM policy. Update the IMA LSM based rules to reflect
* the reloaded LSM policy. We assume the rules still exist; and BUG_ON() if
* they don't.
*/
static void ima_lsm_update_rules(void)
{
struct ima_rule_entry *entry, *tmp;
struct ima_rule_entry *entry;
int result;
int i;
mutex_lock(&ima_rules_mutex);
list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
list_for_each_entry(entry, &ima_policy_rules, list) {
for (i = 0; i < MAX_LSM_RULES; i++) {
if (!entry->lsm[i].rule)
continue;
@ -196,7 +196,6 @@ static void ima_lsm_update_rules(void)
BUG_ON(!entry->lsm[i].rule);
}
}
mutex_unlock(&ima_rules_mutex);
}
/**
@ -319,9 +318,9 @@ static int get_subaction(struct ima_rule_entry *rule, int func)
* Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
* conditions.
*
* (There is no need for locking when walking the policy list,
* as elements in the list are never deleted, nor does the list
* change.)
* Since the IMA policy may be updated multiple times we need to lock the
* list when walking it. Reads are many orders of magnitude more numerous
* than writes, so ima_match_policy() is a classic RCU candidate.
*/
int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
int flags)
@ -329,7 +328,8 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
struct ima_rule_entry *entry;
int action = 0, actmask = flags | (flags << 1);
list_for_each_entry(entry, ima_rules, list) {
rcu_read_lock();
list_for_each_entry_rcu(entry, ima_rules, list) {
if (!(entry->action & actmask))
continue;
@ -351,6 +351,7 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
if (!actmask)
break;
}
rcu_read_unlock();
return action;
}
@ -365,12 +366,12 @@ void ima_update_policy_flag(void)
{
struct ima_rule_entry *entry;
ima_policy_flag = 0;
list_for_each_entry(entry, ima_rules, list) {
if (entry->action & IMA_DO_MASK)
ima_policy_flag |= entry->action;
}
ima_appraise |= temp_ima_appraise;
if (!ima_appraise)
ima_policy_flag &= ~IMA_APPRAISE;
}
@ -415,16 +416,48 @@ void __init ima_init_policy(void)
ima_rules = &ima_default_rules;
}
/* Make sure we have a valid policy, at least containing some rules. */
int ima_check_policy()
{
if (list_empty(&ima_temp_rules))
return -EINVAL;
return 0;
}
/**
* ima_update_policy - update default_rules with new measure rules
*
* Called on file .release to update the default rules with a complete new
* policy. Once updated, the policy is locked, no additional rules can be
* added to the policy.
* policy. What we do here is to splice ima_policy_rules and ima_temp_rules so
* they make a queue. The policy may be updated multiple times and this is the
* RCU updater.
*
* Policy rules are never deleted so ima_policy_flag gets zeroed only once when
* we switch from the default policy to user defined.
*/
void ima_update_policy(void)
{
ima_rules = &ima_policy_rules;
struct list_head *first, *last, *policy;
/* append current policy with the new rules */
first = (&ima_temp_rules)->next;
last = (&ima_temp_rules)->prev;
policy = &ima_policy_rules;
synchronize_rcu();
last->next = policy;
rcu_assign_pointer(list_next_rcu(policy->prev), first);
first->prev = policy->prev;
policy->prev = last;
/* prepare for the next policy rules addition */
INIT_LIST_HEAD(&ima_temp_rules);
if (ima_rules != policy) {
ima_policy_flag = 0;
ima_rules = policy;
}
ima_update_policy_flag();
}
@ -436,8 +469,8 @@ enum {
Opt_obj_user, Opt_obj_role, Opt_obj_type,
Opt_subj_user, Opt_subj_role, Opt_subj_type,
Opt_func, Opt_mask, Opt_fsmagic,
Opt_uid, Opt_euid, Opt_fowner,
Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
Opt_fsuuid, Opt_uid, Opt_euid, Opt_fowner,
Opt_appraise_type, Opt_permit_directio
};
static match_table_t policy_tokens = {
@ -734,9 +767,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
if (!result && (entry->action == UNKNOWN))
result = -EINVAL;
else if (entry->func == MODULE_CHECK)
ima_appraise |= IMA_APPRAISE_MODULES;
temp_ima_appraise |= IMA_APPRAISE_MODULES;
else if (entry->func == FIRMWARE_CHECK)
ima_appraise |= IMA_APPRAISE_FIRMWARE;
temp_ima_appraise |= IMA_APPRAISE_FIRMWARE;
audit_log_format(ab, "res=%d", !result);
audit_log_end(ab);
return result;
@ -746,7 +779,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
* ima_parse_add_rule - add a rule to ima_policy_rules
* @rule - ima measurement policy rule
*
* Uses a mutex to protect the policy list from multiple concurrent writers.
* Avoid locking by allowing just one writer at a time in ima_write_policy()
* Returns the length of the rule parsed, an error code on failure
*/
ssize_t ima_parse_add_rule(char *rule)
@ -782,26 +815,230 @@ ssize_t ima_parse_add_rule(char *rule)
return result;
}
mutex_lock(&ima_rules_mutex);
list_add_tail(&entry->list, &ima_policy_rules);
mutex_unlock(&ima_rules_mutex);
list_add_tail(&entry->list, &ima_temp_rules);
return len;
}
/* ima_delete_rules called to cleanup invalid policy */
/**
* ima_delete_rules() called to cleanup invalid in-flight policy.
* We don't need locking as we operate on the temp list, which is
* different from the active one. There is also only one user of
* ima_delete_rules() at a time.
*/
void ima_delete_rules(void)
{
struct ima_rule_entry *entry, *tmp;
int i;
mutex_lock(&ima_rules_mutex);
list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
temp_ima_appraise = 0;
list_for_each_entry_safe(entry, tmp, &ima_temp_rules, list) {
for (i = 0; i < MAX_LSM_RULES; i++)
kfree(entry->lsm[i].args_p);
list_del(&entry->list);
kfree(entry);
}
mutex_unlock(&ima_rules_mutex);
}
#ifdef CONFIG_IMA_READ_POLICY
enum {
mask_exec = 0, mask_write, mask_read, mask_append
};
static char *mask_tokens[] = {
"MAY_EXEC",
"MAY_WRITE",
"MAY_READ",
"MAY_APPEND"
};
enum {
func_file = 0, func_mmap, func_bprm,
func_module, func_firmware, func_post
};
static char *func_tokens[] = {
"FILE_CHECK",
"MMAP_CHECK",
"BPRM_CHECK",
"MODULE_CHECK",
"FIRMWARE_CHECK",
"POST_SETATTR"
};
void *ima_policy_start(struct seq_file *m, loff_t *pos)
{
loff_t l = *pos;
struct ima_rule_entry *entry;
rcu_read_lock();
list_for_each_entry_rcu(entry, ima_rules, list) {
if (!l--) {
rcu_read_unlock();
return entry;
}
}
rcu_read_unlock();
return NULL;
}
void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ima_rule_entry *entry = v;
rcu_read_lock();
entry = list_entry_rcu(entry->list.next, struct ima_rule_entry, list);
rcu_read_unlock();
(*pos)++;
return (&entry->list == ima_rules) ? NULL : entry;
}
void ima_policy_stop(struct seq_file *m, void *v)
{
}
#define pt(token) policy_tokens[token + Opt_err].pattern
#define mt(token) mask_tokens[token]
#define ft(token) func_tokens[token]
int ima_policy_show(struct seq_file *m, void *v)
{
struct ima_rule_entry *entry = v;
int i = 0;
char tbuf[64] = {0,};
rcu_read_lock();
if (entry->action & MEASURE)
seq_puts(m, pt(Opt_measure));
if (entry->action & DONT_MEASURE)
seq_puts(m, pt(Opt_dont_measure));
if (entry->action & APPRAISE)
seq_puts(m, pt(Opt_appraise));
if (entry->action & DONT_APPRAISE)
seq_puts(m, pt(Opt_dont_appraise));
if (entry->action & AUDIT)
seq_puts(m, pt(Opt_audit));
seq_puts(m, " ");
if (entry->flags & IMA_FUNC) {
switch (entry->func) {
case FILE_CHECK:
seq_printf(m, pt(Opt_func), ft(func_file));
break;
case MMAP_CHECK:
seq_printf(m, pt(Opt_func), ft(func_mmap));
break;
case BPRM_CHECK:
seq_printf(m, pt(Opt_func), ft(func_bprm));
break;
case MODULE_CHECK:
seq_printf(m, pt(Opt_func), ft(func_module));
break;
case FIRMWARE_CHECK:
seq_printf(m, pt(Opt_func), ft(func_firmware));
break;
case POST_SETATTR:
seq_printf(m, pt(Opt_func), ft(func_post));
break;
default:
snprintf(tbuf, sizeof(tbuf), "%d", entry->func);
seq_printf(m, pt(Opt_func), tbuf);
break;
}
seq_puts(m, " ");
}
if (entry->flags & IMA_MASK) {
if (entry->mask & MAY_EXEC)
seq_printf(m, pt(Opt_mask), mt(mask_exec));
if (entry->mask & MAY_WRITE)
seq_printf(m, pt(Opt_mask), mt(mask_write));
if (entry->mask & MAY_READ)
seq_printf(m, pt(Opt_mask), mt(mask_read));
if (entry->mask & MAY_APPEND)
seq_printf(m, pt(Opt_mask), mt(mask_append));
seq_puts(m, " ");
}
if (entry->flags & IMA_FSMAGIC) {
snprintf(tbuf, sizeof(tbuf), "0x%lx", entry->fsmagic);
seq_printf(m, pt(Opt_fsmagic), tbuf);
seq_puts(m, " ");
}
if (entry->flags & IMA_FSUUID) {
seq_puts(m, "fsuuid=");
for (i = 0; i < ARRAY_SIZE(entry->fsuuid); ++i) {
switch (i) {
case 4:
case 6:
case 8:
case 10:
seq_puts(m, "-");
}
seq_printf(m, "%x", entry->fsuuid[i]);
}
seq_puts(m, " ");
}
if (entry->flags & IMA_UID) {
snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->uid));
seq_printf(m, pt(Opt_uid), tbuf);
seq_puts(m, " ");
}
if (entry->flags & IMA_EUID) {
snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->uid));
seq_printf(m, pt(Opt_euid), tbuf);
seq_puts(m, " ");
}
if (entry->flags & IMA_FOWNER) {
snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->fowner));
seq_printf(m, pt(Opt_fowner), tbuf);
seq_puts(m, " ");
}
for (i = 0; i < MAX_LSM_RULES; i++) {
if (entry->lsm[i].rule) {
switch (i) {
case LSM_OBJ_USER:
seq_printf(m, pt(Opt_obj_user),
(char *)entry->lsm[i].args_p);
break;
case LSM_OBJ_ROLE:
seq_printf(m, pt(Opt_obj_role),
(char *)entry->lsm[i].args_p);
break;
case LSM_OBJ_TYPE:
seq_printf(m, pt(Opt_obj_type),
(char *)entry->lsm[i].args_p);
break;
case LSM_SUBJ_USER:
seq_printf(m, pt(Opt_subj_user),
(char *)entry->lsm[i].args_p);
break;
case LSM_SUBJ_ROLE:
seq_printf(m, pt(Opt_subj_role),
(char *)entry->lsm[i].args_p);
break;
case LSM_SUBJ_TYPE:
seq_printf(m, pt(Opt_subj_type),
(char *)entry->lsm[i].args_p);
break;
}
}
}
if (entry->flags & IMA_DIGSIG_REQUIRED)
seq_puts(m, "appraise_type=imasig ");
if (entry->flags & IMA_PERMIT_DIRECTIO)
seq_puts(m, "permit_directio ");
rcu_read_unlock();
seq_puts(m, "\n");
return 0;
}
#endif /* CONFIG_IMA_READ_POLICY */
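For orientation, the seq_file handlers above back the read side of the policy interface that CONFIG_IMA_READ_POLICY exposes through securityfs. A minimal userspace sketch, assuming securityfs is mounted at its usual /sys/kernel/security location, that dumps the rules formatted by ima_policy_show():

/* Sketch: dump the in-kernel IMA policy (CONFIG_IMA_READ_POLICY).
 * Assumes securityfs is mounted at /sys/kernel/security; needs root. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/security/ima/policy", "r");
        char line[512];

        if (!f) {
                perror("ima/policy");   /* kernel built without CONFIG_IMA_READ_POLICY? */
                return 1;
        }
        /* One rule per line, e.g. "measure func=FILE_CHECK mask=MAY_READ uid=0" */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}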

View File

@ -125,8 +125,8 @@ int integrity_kernel_read(struct file *file, loff_t offset,
int __init integrity_read_file(const char *path, char **data);
#define INTEGRITY_KEYRING_EVM 0
#define INTEGRITY_KEYRING_MODULE 1
#define INTEGRITY_KEYRING_IMA 2
#define INTEGRITY_KEYRING_IMA 1
#define INTEGRITY_KEYRING_MODULE 2
#define INTEGRITY_KEYRING_MAX 3
#ifdef CONFIG_INTEGRITY_SIGNATURE
@ -149,7 +149,6 @@ static inline int integrity_init_keyring(const unsigned int id)
{
return 0;
}
#endif /* CONFIG_INTEGRITY_SIGNATURE */
#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
@ -171,6 +170,14 @@ static inline void ima_load_x509(void)
}
#endif
#ifdef CONFIG_EVM_LOAD_X509
void __init evm_load_x509(void);
#else
static inline void evm_load_x509(void)
{
}
#endif
#ifdef CONFIG_INTEGRITY_AUDIT
/* declarations */
void integrity_audit_msg(int audit_msgno, struct inode *inode,

View File

@ -54,6 +54,7 @@ config TRUSTED_KEYS
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_HASH_INFO
help
This option provides support for creating, sealing, and unsealing
keys in the kernel. Trusted keys are random number symmetric keys,

View File

@ -429,8 +429,11 @@ static int __key_instantiate_and_link(struct key *key,
awaken = 1;
/* and link it into the destination keyring */
if (keyring)
if (keyring) {
if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
set_bit(KEY_FLAG_KEEP, &key->flags);
__key_link(key, _edit);
}
/* disable the authorisation key */
if (authkey)

View File

@ -358,11 +358,14 @@ error:
* and any links to the key will be automatically garbage collected after a
* certain amount of time (/proc/sys/kernel/keys/gc_delay).
*
* Keys with KEY_FLAG_KEEP set should not be revoked.
*
* If successful, 0 is returned.
*/
long keyctl_revoke_key(key_serial_t id)
{
key_ref_t key_ref;
struct key *key;
long ret;
key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
@ -377,8 +380,12 @@ long keyctl_revoke_key(key_serial_t id)
}
}
key_revoke(key_ref_to_ptr(key_ref));
key = key_ref_to_ptr(key_ref);
ret = 0;
if (test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
key_revoke(key);
key_ref_put(key_ref);
error:
@ -392,11 +399,14 @@ error:
* The key and any links to the key will be automatically garbage collected
* immediately.
*
* Keys with KEY_FLAG_KEEP set should not be invalidated.
*
* If successful, 0 is returned.
*/
long keyctl_invalidate_key(key_serial_t id)
{
key_ref_t key_ref;
struct key *key;
long ret;
kenter("%d", id);
@ -420,8 +430,12 @@ long keyctl_invalidate_key(key_serial_t id)
}
invalidate:
key_invalidate(key_ref_to_ptr(key_ref));
key = key_ref_to_ptr(key_ref);
ret = 0;
if (test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
key_invalidate(key);
error_put:
key_ref_put(key_ref);
error:
@ -433,12 +447,13 @@ error:
* Clear the specified keyring, creating an empty process keyring if one of the
* special keyring IDs is used.
*
* The keyring must grant the caller Write permission for this to work. If
* successful, 0 will be returned.
* The keyring must grant the caller Write permission and not have
* KEY_FLAG_KEEP set for this to work. If successful, 0 will be returned.
*/
long keyctl_keyring_clear(key_serial_t ringid)
{
key_ref_t keyring_ref;
struct key *keyring;
long ret;
keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
@ -460,7 +475,11 @@ long keyctl_keyring_clear(key_serial_t ringid)
}
clear:
ret = keyring_clear(key_ref_to_ptr(keyring_ref));
keyring = key_ref_to_ptr(keyring_ref);
if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
ret = -EPERM;
else
ret = keyring_clear(keyring);
error_put:
key_ref_put(keyring_ref);
error:
@ -511,11 +530,14 @@ error:
* itself need not grant the caller anything. If the last link to a key is
* removed then that key will be scheduled for destruction.
*
* Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked.
*
* If successful, 0 will be returned.
*/
long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
{
key_ref_t keyring_ref, key_ref;
struct key *keyring, *key;
long ret;
keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
@ -530,7 +552,13 @@ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
goto error2;
}
ret = key_unlink(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
keyring = key_ref_to_ptr(keyring_ref);
key = key_ref_to_ptr(key_ref);
if (test_bit(KEY_FLAG_KEEP, &keyring->flags) &&
test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
ret = key_unlink(keyring, key);
key_ref_put(key_ref);
error2:
@ -1289,6 +1317,8 @@ error:
* the current time. The key and any links to the key will be automatically
* garbage collected after the timeout expires.
*
* Keys with KEY_FLAG_KEEP set should not be timed out.
*
* If successful, 0 is returned.
*/
long keyctl_set_timeout(key_serial_t id, unsigned timeout)
@ -1320,10 +1350,13 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout)
okay:
key = key_ref_to_ptr(key_ref);
key_set_timeout(key, timeout);
ret = 0;
if (test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
key_set_timeout(key, timeout);
key_put(key);
ret = 0;
error:
return ret;
}
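
To see the KEY_FLAG_KEEP checks above from the other side of the syscall boundary, a hedged userspace sketch using libkeyutils; the key id is a placeholder for a key that inherited the flag from a keep-flagged keyring:

/* Sketch: operations on a key carrying KEY_FLAG_KEEP now fail with EPERM.
 * Link with -lkeyutils; KEPT_KEY is a placeholder serial number. */
#include <errno.h>
#include <keyutils.h>
#include <stdio.h>

#define KEPT_KEY ((key_serial_t)0x12345678)     /* placeholder id */

int main(void)
{
        if (keyctl_revoke(KEPT_KEY) == -1 && errno == EPERM)
                printf("revoke refused: key is marked KEY_FLAG_KEEP\n");
        if (keyctl_set_timeout(KEPT_KEY, 60) == -1 && errno == EPERM)
                printf("timeout refused: key is marked KEY_FLAG_KEEP\n");
        return 0;
}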

View File

@ -11,6 +11,7 @@
* See Documentation/security/keys-trusted-encrypted.txt
*/
#include <crypto/hash_info.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/init.h>
@ -710,7 +711,10 @@ enum {
Opt_err = -1,
Opt_new, Opt_load, Opt_update,
Opt_keyhandle, Opt_keyauth, Opt_blobauth,
Opt_pcrinfo, Opt_pcrlock, Opt_migratable
Opt_pcrinfo, Opt_pcrlock, Opt_migratable,
Opt_hash,
Opt_policydigest,
Opt_policyhandle,
};
static const match_table_t key_tokens = {
@ -723,6 +727,9 @@ static const match_table_t key_tokens = {
{Opt_pcrinfo, "pcrinfo=%s"},
{Opt_pcrlock, "pcrlock=%s"},
{Opt_migratable, "migratable=%s"},
{Opt_hash, "hash=%s"},
{Opt_policydigest, "policydigest=%s"},
{Opt_policyhandle, "policyhandle=%s"},
{Opt_err, NULL}
};
@ -736,11 +743,23 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
int res;
unsigned long handle;
unsigned long lock;
unsigned long token_mask = 0;
int i;
int tpm2;
tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
if (tpm2 < 0)
return tpm2;
opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
opt->digest_len = hash_digest_size[opt->hash];
while ((p = strsep(&c, " \t"))) {
if (*p == '\0' || *p == ' ' || *p == '\t')
continue;
token = match_token(p, key_tokens, args);
if (test_and_set_bit(token, &token_mask))
return -EINVAL;
switch (token) {
case Opt_pcrinfo:
@ -787,6 +806,41 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
return -EINVAL;
opt->pcrlock = lock;
break;
case Opt_hash:
if (test_bit(Opt_policydigest, &token_mask))
return -EINVAL;
for (i = 0; i < HASH_ALGO__LAST; i++) {
if (!strcmp(args[0].from, hash_algo_name[i])) {
opt->hash = i;
opt->digest_len =
hash_digest_size[opt->hash];
break;
}
}
if (i == HASH_ALGO__LAST)
return -EINVAL;
if (!tpm2 && i != HASH_ALGO_SHA1) {
pr_info("trusted_key: TPM 1.x only supports SHA-1.\n");
return -EINVAL;
}
break;
case Opt_policydigest:
if (!tpm2 ||
strlen(args[0].from) != (2 * opt->digest_len))
return -EINVAL;
res = hex2bin(opt->policydigest, args[0].from,
opt->digest_len);
if (res < 0)
return -EINVAL;
break;
case Opt_policyhandle:
if (!tpm2)
return -EINVAL;
res = kstrtoul(args[0].from, 16, &handle);
if (res < 0)
return -EINVAL;
opt->policyhandle = handle;
break;
default:
return -EINVAL;
}
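
The option parser above can be exercised from userspace with add_key(2). A sketch assuming a TPM 2.0 chip; the key handle and payload values are illustrative, not defaults:

/* Sketch: seal a trusted key under TPM2, selecting the hash algorithm via the
 * new "hash=" option.  The payload mirrors the documented keyctl syntax; the
 * keyhandle is only an example persistent handle.  Link with -lkeyutils. */
#include <keyutils.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *payload = "new 32 keyhandle=0x81000001 hash=sha256";
        key_serial_t key;

        key = add_key("trusted", "kmk", payload, strlen(payload),
                      KEY_SPEC_USER_KEYRING);
        if (key == -1) {
                perror("add_key");      /* EINVAL now also covers duplicated options */
                return 1;
        }
        printf("trusted key id: %d\n", key);
        return 0;
}

Appending policydigest= (a hex digest matching the selected hash length) or policyhandle= to the payload exercises the TPM2 authorization-policy paths added in this series.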

View File

@ -697,7 +697,7 @@ int security_inode_killpriv(struct dentry *dentry)
return call_int_hook(inode_killpriv, 0, dentry);
}
int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
{
if (unlikely(IS_PRIVATE(inode)))
return -EOPNOTSUPP;
@ -721,7 +721,7 @@ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer
}
EXPORT_SYMBOL(security_inode_listsecurity);
void security_inode_getsecid(const struct inode *inode, u32 *secid)
void security_inode_getsecid(struct inode *inode, u32 *secid)
{
call_void_hook(inode_getsecid, inode, secid);
}
@ -1161,6 +1161,12 @@ void security_release_secctx(char *secdata, u32 seclen)
}
EXPORT_SYMBOL(security_release_secctx);
void security_inode_invalidate_secctx(struct inode *inode)
{
call_void_hook(inode_invalidate_secctx, inode);
}
EXPORT_SYMBOL(security_inode_invalidate_secctx);
int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
return call_int_hook(inode_notifysecctx, 0, inode, ctx, ctxlen);
@ -1763,6 +1769,8 @@ struct security_hook_heads security_hook_heads = {
LIST_HEAD_INIT(security_hook_heads.secctx_to_secid),
.release_secctx =
LIST_HEAD_INIT(security_hook_heads.release_secctx),
.inode_invalidate_secctx =
LIST_HEAD_INIT(security_hook_heads.inode_invalidate_secctx),
.inode_notifysecctx =
LIST_HEAD_INIT(security_hook_heads.inode_notifysecctx),
.inode_setsecctx =
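
The new inode_invalidate_secctx hook is meant to be driven by filesystems whose security xattrs can change behind the kernel's back (gfs2 in this series). A hedged in-kernel sketch; the function and the event that triggers it are hypothetical, only the hook call comes from this patchset:

/* Sketch: a cluster filesystem invalidating a cached security label when its
 * attribute cache goes stale.  my_fs_attrs_went_stale() is invented; SELinux
 * will lazily re-read the label on the next relevant access. */
#include <linux/fs.h>
#include <linux/security.h>

static void my_fs_attrs_went_stale(struct inode *inode)
{
        security_inode_invalidate_secctx(inode);
}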

View File

@ -242,6 +242,72 @@ static int inode_alloc_security(struct inode *inode)
return 0;
}
static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
/*
* Try reloading inode security labels that have been marked as invalid. The
* @may_sleep parameter indicates when sleeping and thus reloading labels is
 * allowed; when set to false, returns -ECHILD when the label is
* invalid. The @opt_dentry parameter should be set to a dentry of the inode;
* when no dentry is available, set it to NULL instead.
*/
static int __inode_security_revalidate(struct inode *inode,
struct dentry *opt_dentry,
bool may_sleep)
{
struct inode_security_struct *isec = inode->i_security;
might_sleep_if(may_sleep);
if (isec->initialized == LABEL_INVALID) {
if (!may_sleep)
return -ECHILD;
/*
* Try reloading the inode security label. This will fail if
* @opt_dentry is NULL and no dentry for this inode can be
* found; in that case, continue using the old label.
*/
inode_doinit_with_dentry(inode, opt_dentry);
}
return 0;
}
static struct inode_security_struct *inode_security_novalidate(struct inode *inode)
{
return inode->i_security;
}
static struct inode_security_struct *inode_security_rcu(struct inode *inode, bool rcu)
{
int error;
error = __inode_security_revalidate(inode, NULL, !rcu);
if (error)
return ERR_PTR(error);
return inode->i_security;
}
/*
* Get the security label of an inode.
*/
static struct inode_security_struct *inode_security(struct inode *inode)
{
__inode_security_revalidate(inode, NULL, true);
return inode->i_security;
}
/*
* Get the security label of a dentry's backing inode.
*/
static struct inode_security_struct *backing_inode_security(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
__inode_security_revalidate(inode, dentry, true);
return inode->i_security;
}
static void inode_free_rcu(struct rcu_head *head)
{
struct inode_security_struct *isec;
@ -345,8 +411,6 @@ static const char *labeling_behaviors[7] = {
"uses native labeling",
};
static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
static inline int inode_doinit(struct inode *inode)
{
return inode_doinit_with_dentry(inode, NULL);
@ -565,8 +629,8 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
opts->mnt_opts_flags[i++] = DEFCONTEXT_MNT;
}
if (sbsec->flags & ROOTCONTEXT_MNT) {
struct inode *root = d_backing_inode(sbsec->sb->s_root);
struct inode_security_struct *isec = root->i_security;
struct dentry *root = sbsec->sb->s_root;
struct inode_security_struct *isec = backing_inode_security(root);
rc = security_sid_to_context(isec->sid, &context, &len);
if (rc)
@ -621,8 +685,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
int rc = 0, i;
struct superblock_security_struct *sbsec = sb->s_security;
const char *name = sb->s_type->name;
struct inode *inode = d_backing_inode(sbsec->sb->s_root);
struct inode_security_struct *root_isec = inode->i_security;
struct dentry *root = sbsec->sb->s_root;
struct inode_security_struct *root_isec = backing_inode_security(root);
u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
u32 defcontext_sid = 0;
char **mount_options = opts->mnt_opts;
@ -802,7 +866,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
goto out;
root_isec->sid = rootcontext_sid;
root_isec->initialized = 1;
root_isec->initialized = LABEL_INITIALIZED;
}
if (defcontext_sid) {
@ -852,8 +916,8 @@ static int selinux_cmp_sb_context(const struct super_block *oldsb,
if ((oldflags & DEFCONTEXT_MNT) && old->def_sid != new->def_sid)
goto mismatch;
if (oldflags & ROOTCONTEXT_MNT) {
struct inode_security_struct *oldroot = d_backing_inode(oldsb->s_root)->i_security;
struct inode_security_struct *newroot = d_backing_inode(newsb->s_root)->i_security;
struct inode_security_struct *oldroot = backing_inode_security(oldsb->s_root);
struct inode_security_struct *newroot = backing_inode_security(newsb->s_root);
if (oldroot->sid != newroot->sid)
goto mismatch;
}
@ -903,17 +967,14 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
if (!set_fscontext)
newsbsec->sid = sid;
if (!set_rootcontext) {
struct inode *newinode = d_backing_inode(newsb->s_root);
struct inode_security_struct *newisec = newinode->i_security;
struct inode_security_struct *newisec = backing_inode_security(newsb->s_root);
newisec->sid = sid;
}
newsbsec->mntpoint_sid = sid;
}
if (set_rootcontext) {
const struct inode *oldinode = d_backing_inode(oldsb->s_root);
const struct inode_security_struct *oldisec = oldinode->i_security;
struct inode *newinode = d_backing_inode(newsb->s_root);
struct inode_security_struct *newisec = newinode->i_security;
const struct inode_security_struct *oldisec = backing_inode_security(oldsb->s_root);
struct inode_security_struct *newisec = backing_inode_security(newsb->s_root);
newisec->sid = oldisec->sid;
}
@ -1293,11 +1354,11 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
unsigned len = 0;
int rc = 0;
if (isec->initialized)
if (isec->initialized == LABEL_INITIALIZED)
goto out;
mutex_lock(&isec->lock);
if (isec->initialized)
if (isec->initialized == LABEL_INITIALIZED)
goto out_unlock;
sbsec = inode->i_sb->s_security;
@ -1469,7 +1530,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
break;
}
isec->initialized = 1;
isec->initialized = LABEL_INITIALIZED;
out_unlock:
mutex_unlock(&isec->lock);
@ -1640,6 +1701,7 @@ static inline int dentry_has_perm(const struct cred *cred,
ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
__inode_security_revalidate(inode, dentry, true);
return inode_has_perm(cred, inode, av, &ad);
}
@ -1655,6 +1717,7 @@ static inline int path_has_perm(const struct cred *cred,
ad.type = LSM_AUDIT_DATA_PATH;
ad.u.path = *path;
__inode_security_revalidate(inode, path->dentry, true);
return inode_has_perm(cred, inode, av, &ad);
}
@ -1712,13 +1775,13 @@ out:
/*
* Determine the label for an inode that might be unioned.
*/
static int selinux_determine_inode_label(const struct inode *dir,
static int selinux_determine_inode_label(struct inode *dir,
const struct qstr *name,
u16 tclass,
u32 *_new_isid)
{
const struct superblock_security_struct *sbsec = dir->i_sb->s_security;
const struct inode_security_struct *dsec = dir->i_security;
const struct inode_security_struct *dsec = inode_security(dir);
const struct task_security_struct *tsec = current_security();
if ((sbsec->flags & SE_SBINITIALIZED) &&
@ -1747,7 +1810,7 @@ static int may_create(struct inode *dir,
struct common_audit_data ad;
int rc;
dsec = dir->i_security;
dsec = inode_security(dir);
sbsec = dir->i_sb->s_security;
sid = tsec->sid;
@ -1800,8 +1863,8 @@ static int may_link(struct inode *dir,
u32 av;
int rc;
dsec = dir->i_security;
isec = d_backing_inode(dentry)->i_security;
dsec = inode_security(dir);
isec = backing_inode_security(dentry);
ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
@ -1844,10 +1907,10 @@ static inline int may_rename(struct inode *old_dir,
int old_is_dir, new_is_dir;
int rc;
old_dsec = old_dir->i_security;
old_isec = d_backing_inode(old_dentry)->i_security;
old_dsec = inode_security(old_dir);
old_isec = backing_inode_security(old_dentry);
old_is_dir = d_is_dir(old_dentry);
new_dsec = new_dir->i_security;
new_dsec = inode_security(new_dir);
ad.type = LSM_AUDIT_DATA_DENTRY;
@ -1875,7 +1938,7 @@ static inline int may_rename(struct inode *old_dir,
if (rc)
return rc;
if (d_is_positive(new_dentry)) {
new_isec = d_backing_inode(new_dentry)->i_security;
new_isec = backing_inode_security(new_dentry);
new_is_dir = d_is_dir(new_dentry);
rc = avc_has_perm(sid, new_isec->sid,
new_isec->sclass,
@ -2011,8 +2074,8 @@ static int selinux_binder_transfer_file(struct task_struct *from,
{
u32 sid = task_sid(to);
struct file_security_struct *fsec = file->f_security;
struct inode *inode = d_backing_inode(file->f_path.dentry);
struct inode_security_struct *isec = inode->i_security;
struct dentry *dentry = file->f_path.dentry;
struct inode_security_struct *isec = backing_inode_security(dentry);
struct common_audit_data ad;
int rc;
@ -2028,7 +2091,7 @@ static int selinux_binder_transfer_file(struct task_struct *from,
return rc;
}
if (unlikely(IS_PRIVATE(inode)))
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return avc_has_perm(sid, isec->sid, isec->sclass, file_to_av(file),
@ -2217,7 +2280,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
old_tsec = current_security();
new_tsec = bprm->cred->security;
isec = inode->i_security;
isec = inode_security(inode);
/* Default to the current task SID. */
new_tsec->sid = old_tsec->sid;
@ -2639,7 +2702,7 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
break;
case ROOTCONTEXT_MNT: {
struct inode_security_struct *root_isec;
root_isec = d_backing_inode(sb->s_root)->i_security;
root_isec = backing_inode_security(sb->s_root);
if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
goto out_bad_option;
@ -2753,13 +2816,11 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
void **value, size_t *len)
{
const struct task_security_struct *tsec = current_security();
struct inode_security_struct *dsec;
struct superblock_security_struct *sbsec;
u32 sid, newsid, clen;
int rc;
char *context;
dsec = dir->i_security;
sbsec = dir->i_sb->s_security;
sid = tsec->sid;
@ -2777,7 +2838,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
struct inode_security_struct *isec = inode->i_security;
isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
isec->initialized = 1;
isec->initialized = LABEL_INITIALIZED;
}
if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT))
@ -2858,7 +2919,9 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
sid = cred_sid(cred);
isec = inode->i_security;
isec = inode_security_rcu(inode, rcu);
if (IS_ERR(isec))
return PTR_ERR(isec);
return avc_has_perm_flags(sid, isec->sid, isec->sclass, FILE__READ, &ad,
rcu ? MAY_NOT_BLOCK : 0);
@ -2910,7 +2973,9 @@ static int selinux_inode_permission(struct inode *inode, int mask)
perms = file_mask_to_av(inode->i_mode, mask);
sid = cred_sid(cred);
isec = inode->i_security;
isec = inode_security_rcu(inode, flags & MAY_NOT_BLOCK);
if (IS_ERR(isec))
return PTR_ERR(isec);
rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0, &avd);
audited = avc_audit_required(perms, &avd, rc,
@ -2980,7 +3045,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct inode *inode = d_backing_inode(dentry);
struct inode_security_struct *isec = inode->i_security;
struct inode_security_struct *isec = backing_inode_security(dentry);
struct superblock_security_struct *sbsec;
struct common_audit_data ad;
u32 newsid, sid = current_sid();
@ -3057,7 +3122,7 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
int flags)
{
struct inode *inode = d_backing_inode(dentry);
struct inode_security_struct *isec = inode->i_security;
struct inode_security_struct *isec = backing_inode_security(dentry);
u32 newsid;
int rc;
@ -3076,7 +3141,7 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
isec->initialized = 1;
isec->initialized = LABEL_INITIALIZED;
return;
}
@ -3110,12 +3175,12 @@ static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
*
* Permission check is handled by selinux_inode_getxattr hook.
*/
static int selinux_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
static int selinux_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
{
u32 size;
int error;
char *context = NULL;
struct inode_security_struct *isec = inode->i_security;
struct inode_security_struct *isec = inode_security(inode);
if (strcmp(name, XATTR_SELINUX_SUFFIX))
return -EOPNOTSUPP;
@ -3154,7 +3219,7 @@ out_nofree:
static int selinux_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
struct inode_security_struct *isec = inode->i_security;
struct inode_security_struct *isec = inode_security(inode);
u32 newsid;
int rc;
@ -3170,7 +3235,7 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
isec->initialized = 1;
isec->initialized = LABEL_INITIALIZED;
return 0;
}
@ -3182,9 +3247,9 @@ static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t
return len;
}
static void selinux_inode_getsecid(const struct inode *inode, u32 *secid)
static void selinux_inode_getsecid(struct inode *inode, u32 *secid)
{
struct inode_security_struct *isec = inode->i_security;
struct inode_security_struct *isec = inode_security(inode);
*secid = isec->sid;
}
@ -3207,13 +3272,14 @@ static int selinux_file_permission(struct file *file, int mask)
{
struct inode *inode = file_inode(file);
struct file_security_struct *fsec = file->f_security;
struct inode_security_struct *isec = inode->i_security;
struct inode_security_struct *isec;
u32 sid = current_sid();
if (!mask)
/* No permission to check. Existence test. */
return 0;
isec = inode_security(inode);
if (sid == fsec->sid && fsec->isid == isec->sid &&
fsec->pseqno == avc_policy_seqno())
/* No change since file_open check. */
@ -3242,7 +3308,7 @@ static int ioctl_has_perm(const struct cred *cred, struct file *file,
struct common_audit_data ad;
struct file_security_struct *fsec = file->f_security;
struct inode *inode = file_inode(file);
struct inode_security_struct *isec = inode->i_security;
struct inode_security_struct *isec = inode_security(inode);
struct lsm_ioctlop_audit ioctl;
u32 ssid = cred_sid(cred);
int rc;
@ -3506,7 +3572,7 @@ static int selinux_file_open(struct file *file, const struct cred *cred)
struct inode_security_struct *isec;
fsec = file->f_security;
isec = file_inode(file)->i_security;
isec = inode_security(file_inode(file));
/*
* Save inode label and policy sequence number
* at open-time so that selinux_file_permission
@ -3624,7 +3690,7 @@ static int selinux_kernel_act_as(struct cred *new, u32 secid)
*/
static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
{
struct inode_security_struct *isec = inode->i_security;
struct inode_security_struct *isec = inode_security(inode);
struct task_security_struct *tsec = new->security;
u32 sid = current_sid();
int ret;
@ -3748,7 +3814,7 @@ static void selinux_task_to_inode(struct task_struct *p,
u32 sid = task_sid(p);
isec->sid = sid;
isec->initialized = 1;
isec->initialized = LABEL_INITIALIZED;
}
/* Returns error only if unable to parse addresses */
@ -4065,7 +4131,7 @@ static int selinux_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
const struct task_security_struct *tsec = current_security();
struct inode_security_struct *isec = SOCK_INODE(sock)->i_security;
struct inode_security_struct *isec = inode_security_novalidate(SOCK_INODE(sock));
struct sk_security_struct *sksec;
int err = 0;
@ -4079,7 +4145,7 @@ static int selinux_socket_post_create(struct socket *sock, int family,
return err;
}
isec->initialized = 1;
isec->initialized = LABEL_INITIALIZED;
if (sock->sk) {
sksec = sock->sk->sk_security;
@ -4265,12 +4331,12 @@ static int selinux_socket_accept(struct socket *sock, struct socket *newsock)
if (err)
return err;
newisec = SOCK_INODE(newsock)->i_security;
newisec = inode_security_novalidate(SOCK_INODE(newsock));
isec = SOCK_INODE(sock)->i_security;
isec = inode_security_novalidate(SOCK_INODE(sock));
newisec->sclass = isec->sclass;
newisec->sid = isec->sid;
newisec->initialized = 1;
newisec->initialized = LABEL_INITIALIZED;
return 0;
}
@ -4605,7 +4671,8 @@ static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
static void selinux_sock_graft(struct sock *sk, struct socket *parent)
{
struct inode_security_struct *isec = SOCK_INODE(parent)->i_security;
struct inode_security_struct *isec =
inode_security_novalidate(SOCK_INODE(parent));
struct sk_security_struct *sksec = sk->sk_security;
if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6 ||
@ -4785,11 +4852,12 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
if (err) {
if (err == -EINVAL) {
printk(KERN_WARNING
"SELinux: unrecognized netlink message:"
" protocol=%hu nlmsg_type=%hu sclass=%s\n",
pr_warn_ratelimited("SELinux: unrecognized netlink"
" message: protocol=%hu nlmsg_type=%hu sclass=%s"
" pig=%d comm=%s\n",
sk->sk_protocol, nlh->nlmsg_type,
secclass_map[sksec->sclass - 1].name);
secclass_map[sksec->sclass - 1].name,
task_pid_nr(current), current->comm);
if (!selinux_enforcing || security_get_allow_unknown())
err = 0;
}
@ -5762,6 +5830,15 @@ static void selinux_release_secctx(char *secdata, u32 seclen)
kfree(secdata);
}
static void selinux_inode_invalidate_secctx(struct inode *inode)
{
struct inode_security_struct *isec = inode->i_security;
mutex_lock(&isec->lock);
isec->initialized = LABEL_INVALID;
mutex_unlock(&isec->lock);
}
/*
* called with inode->i_mutex locked
*/
@ -5993,6 +6070,7 @@ static struct security_hook_list selinux_hooks[] = {
LSM_HOOK_INIT(secid_to_secctx, selinux_secid_to_secctx),
LSM_HOOK_INIT(secctx_to_secid, selinux_secctx_to_secid),
LSM_HOOK_INIT(release_secctx, selinux_release_secctx),
LSM_HOOK_INIT(inode_invalidate_secctx, selinux_inode_invalidate_secctx),
LSM_HOOK_INIT(inode_notifysecctx, selinux_inode_notifysecctx),
LSM_HOOK_INIT(inode_setsecctx, selinux_inode_setsecctx),
LSM_HOOK_INIT(inode_getsecctx, selinux_inode_getsecctx),

View File

@ -21,7 +21,7 @@ struct security_class_mapping secclass_map[] = {
{ "compute_av", "compute_create", "compute_member",
"check_context", "load_policy", "compute_relabel",
"compute_user", "setenforce", "setbool", "setsecparam",
"setcheckreqprot", "read_policy", NULL } },
"setcheckreqprot", "read_policy", "validate_trans", NULL } },
{ "process",
{ "fork", "transition", "sigchld", "sigkill",
"sigstop", "signull", "signal", "ptrace", "getsched", "setsched",

View File

@ -37,6 +37,12 @@ struct task_security_struct {
u32 sockcreate_sid; /* sockcreate SID */
};
enum label_initialized {
LABEL_MISSING, /* not initialized */
LABEL_INITIALIZED, /* initialized */
LABEL_INVALID /* invalid */
};
struct inode_security_struct {
struct inode *inode; /* back pointer to inode object */
union {

View File

@ -187,6 +187,9 @@ int security_node_sid(u16 domain, void *addr, u32 addrlen,
int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
u16 tclass);
int security_validate_transition_user(u32 oldsid, u32 newsid, u32 tasksid,
u16 tclass);
int security_bounded_transition(u32 oldsid, u32 newsid);
int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid);

View File

@ -116,6 +116,7 @@ enum sel_inos {
SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */
SEL_STATUS, /* export current status using mmap() */
SEL_POLICY, /* allow userspace to read the in kernel policy */
SEL_VALIDATE_TRANS, /* compute validatetrans decision */
SEL_INO_NEXT, /* The next inode number to use */
};
@ -632,6 +633,83 @@ static const struct file_operations sel_checkreqprot_ops = {
.llseek = generic_file_llseek,
};
static ssize_t sel_write_validatetrans(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
char *oldcon = NULL, *newcon = NULL, *taskcon = NULL;
char *req = NULL;
u32 osid, nsid, tsid;
u16 tclass;
int rc;
rc = task_has_security(current, SECURITY__VALIDATE_TRANS);
if (rc)
goto out;
rc = -ENOMEM;
if (count >= PAGE_SIZE)
goto out;
/* No partial writes. */
rc = -EINVAL;
if (*ppos != 0)
goto out;
rc = -ENOMEM;
req = kzalloc(count + 1, GFP_KERNEL);
if (!req)
goto out;
rc = -EFAULT;
if (copy_from_user(req, buf, count))
goto out;
rc = -ENOMEM;
oldcon = kzalloc(count + 1, GFP_KERNEL);
if (!oldcon)
goto out;
newcon = kzalloc(count + 1, GFP_KERNEL);
if (!newcon)
goto out;
taskcon = kzalloc(count + 1, GFP_KERNEL);
if (!taskcon)
goto out;
rc = -EINVAL;
if (sscanf(req, "%s %s %hu %s", oldcon, newcon, &tclass, taskcon) != 4)
goto out;
rc = security_context_str_to_sid(oldcon, &osid, GFP_KERNEL);
if (rc)
goto out;
rc = security_context_str_to_sid(newcon, &nsid, GFP_KERNEL);
if (rc)
goto out;
rc = security_context_str_to_sid(taskcon, &tsid, GFP_KERNEL);
if (rc)
goto out;
rc = security_validate_transition_user(osid, nsid, tsid, tclass);
if (!rc)
rc = count;
out:
kfree(req);
kfree(oldcon);
kfree(newcon);
kfree(taskcon);
return rc;
}
static const struct file_operations sel_transition_ops = {
.write = sel_write_validatetrans,
.llseek = generic_file_llseek,
};
/*
* Remaining nodes use transaction based IO methods like nfsd/nfsctl.c
*/
@ -1727,6 +1805,8 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
[SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
[SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO},
[SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUGO},
[SEL_VALIDATE_TRANS] = {"validatetrans", &sel_transition_ops,
S_IWUGO},
/* last one */ {""}
};
ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
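
The handler above parses a single write of the form "oldcontext newcontext tclass taskcontext" and answers with the byte count on success or EPERM when a constraint denies the transition. A hedged userspace sketch; the contexts and class number are placeholders:

/* Sketch: ask the kernel for a validatetrans decision via the new selinuxfs
 * node.  Contexts and the class index are placeholders; write() failing with
 * EPERM means a validatetrans constraint rejected the transition. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *req = "system_u:object_r:tmp_t:s0 "
                          "system_u:object_r:user_tmp_t:s0 "
                          "6 "          /* placeholder class index */
                          "unconfined_u:unconfined_r:unconfined_t:s0";
        int fd = open("/sys/fs/selinux/validatetrans", O_WRONLY);

        if (fd < 0) {
                perror("open validatetrans");
                return 1;
        }
        if (write(fd, req, strlen(req)) < 0)
                perror("transition rejected or request invalid");
        close(fd);
        return 0;
}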

View File

@ -778,8 +778,8 @@ out:
return -EPERM;
}
int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
u16 orig_tclass)
static int security_compute_validatetrans(u32 oldsid, u32 newsid, u32 tasksid,
u16 orig_tclass, bool user)
{
struct context *ocontext;
struct context *ncontext;
@ -794,11 +794,12 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
read_lock(&policy_rwlock);
tclass = unmap_class(orig_tclass);
if (!user)
tclass = unmap_class(orig_tclass);
else
tclass = orig_tclass;
if (!tclass || tclass > policydb.p_classes.nprim) {
printk(KERN_ERR "SELinux: %s: unrecognized class %d\n",
__func__, tclass);
rc = -EINVAL;
goto out;
}
@ -832,8 +833,13 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
while (constraint) {
if (!constraint_expr_eval(ocontext, ncontext, tcontext,
constraint->expr)) {
rc = security_validtrans_handle_fail(ocontext, ncontext,
tcontext, tclass);
if (user)
rc = -EPERM;
else
rc = security_validtrans_handle_fail(ocontext,
ncontext,
tcontext,
tclass);
goto out;
}
constraint = constraint->next;
@ -844,6 +850,20 @@ out:
return rc;
}
int security_validate_transition_user(u32 oldsid, u32 newsid, u32 tasksid,
u16 tclass)
{
return security_compute_validatetrans(oldsid, newsid, tasksid,
tclass, true);
}
int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
u16 orig_tclass)
{
return security_compute_validatetrans(oldsid, newsid, tasksid,
orig_tclass, false);
}
/*
* security_bounded_transition - check whether the given
* transition is directed to bounded, or not.

View File

@ -1465,7 +1465,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
*
* Returns the size of the attribute or an error code
*/
static int smack_inode_getsecurity(const struct inode *inode,
static int smack_inode_getsecurity(struct inode *inode,
const char *name, void **buffer,
bool alloc)
{
@ -1536,7 +1536,7 @@ static int smack_inode_listsecurity(struct inode *inode, char *buffer,
* @inode: inode to extract the info from
* @secid: where result will be saved
*/
static void smack_inode_getsecid(const struct inode *inode, u32 *secid)
static void smack_inode_getsecid(struct inode *inode, u32 *secid)
{
struct inode_smack *isp = inode->i_security;
@ -1858,12 +1858,34 @@ static int smack_file_receive(struct file *file)
int may = 0;
struct smk_audit_info ad;
struct inode *inode = file_inode(file);
struct socket *sock;
struct task_smack *tsp;
struct socket_smack *ssp;
if (unlikely(IS_PRIVATE(inode)))
return 0;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
if (S_ISSOCK(inode->i_mode)) {
sock = SOCKET_I(inode);
ssp = sock->sk->sk_security;
tsp = current_security();
/*
* If the receiving process can't write to the
* passed socket or if the passed socket can't
* write to the receiving process, don't accept
* the passed socket.
*/
rc = smk_access(tsp->smk_task, ssp->smk_out, MAY_WRITE, &ad);
rc = smk_bu_file(file, may, rc);
if (rc < 0)
return rc;
rc = smk_access(ssp->smk_in, tsp->smk_task, MAY_WRITE, &ad);
rc = smk_bu_file(file, may, rc);
return rc;
}
/*
* This code relies on bitmasks.
*/
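
For context, smack_file_receive() is reached when a descriptor arrives over a UNIX socket via SCM_RIGHTS, and the new branch above adds the socket-specific access checks. A sketch of the sending side, with error handling trimmed:

/* Sketch: pass a file descriptor over an AF_UNIX socket; the receiver's
 * call into security_file_receive() is where the Smack check above runs. */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_fd(int unix_sock, int fd_to_pass)
{
        char dummy = '*';
        struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
        union {
                struct cmsghdr align;
                char buf[CMSG_SPACE(sizeof(int))];
        } u;
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;   /* triggers file_receive on the peer */
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

        return sendmsg(unix_sock, &msg, 0);
}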
@ -3756,7 +3778,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
if (sip == NULL)
return 0;
switch (sip->sin_family) {
switch (sock->sk->sk_family) {
case AF_INET:
rc = smack_netlabel_send(sock->sk, sip);
break;