linux/security/integrity/ima/ima_kexec.c
Arun KS ca79b0c211 mm: convert totalram_pages and totalhigh_pages variables to atomic
totalram_pages and totalhigh_pages are made static inline functions.

The main motivation was that the managed_page_count_lock handling was
complicating things.  It was discussed at length here,
https://lore.kernel.org/patchwork/patch/995739/#1181785 so it seemed
better to remove the lock and convert the variables to atomic, preventing
potential store-to-read tearing as a bonus.  (A sketch of the resulting
accessor pattern follows the trailer lines below.)

[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/1542090790-21750-4-git-send-email-arunks@codeaurora.org
Signed-off-by: Arun KS <arunks@codeaurora.org>
Suggested-by: Michal Hocko <mhocko@suse.com>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2018-12-28 12:11:47 -08:00
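
For context on the totalram_pages() call this file now makes: the commit replaces
the plain global counter with an atomic_long_t read through a static inline
accessor. Below is a minimal sketch of that pattern; the names mirror
include/linux/mm.h as of this commit but are reproduced from memory, not verbatim.

#include <linux/atomic.h>	/* atomic_long_t, atomic_long_read(), atomic_long_add() */

/* Sketch of the accessor pattern this commit introduces (not a verbatim copy). */
extern atomic_long_t _totalram_pages;

static inline unsigned long totalram_pages(void)
{
	/* lockless read; the atomic read avoids store-to-read tearing */
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

Callers such as ima_add_kexec_buffer() below switch from reading the
totalram_pages variable directly to calling totalram_pages().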

/*
 * Copyright (C) 2016 IBM Corporation
 *
 * Authors:
 * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
 * Mimi Zohar <zohar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/kexec.h>
#include "ima.h"

#ifdef CONFIG_IMA_KEXEC
static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
				     unsigned long segment_size)
{
	struct ima_queue_entry *qe;
	struct seq_file file;
	struct ima_kexec_hdr khdr;
	int ret = 0;

	/* segment size can't change between kexec load and execute */
	file.buf = vmalloc(segment_size);
	if (!file.buf) {
		ret = -ENOMEM;
		goto out;
	}

	file.size = segment_size;
	file.read_pos = 0;
	file.count = sizeof(khdr);	/* reserved space */

	memset(&khdr, 0, sizeof(khdr));
	khdr.version = 1;
	list_for_each_entry_rcu(qe, &ima_measurements, later) {
		if (file.count < file.size) {
			khdr.count++;
			ima_measurements_show(&file, qe);
		} else {
			ret = -EINVAL;
			break;
		}
	}

	if (ret < 0)
		goto out;

	/*
	 * fill in reserved space with some buffer details
	 * (eg. version, buffer size, number of measurements)
	 */
	khdr.buffer_size = file.count;
	if (ima_canonical_fmt) {
		khdr.version = cpu_to_le16(khdr.version);
		khdr.count = cpu_to_le64(khdr.count);
		khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
	}
	memcpy(file.buf, &khdr, sizeof(khdr));

	print_hex_dump(KERN_DEBUG, "ima dump: ", DUMP_PREFIX_NONE,
		       16, 1, file.buf,
		       file.count < 100 ? file.count : 100, true);

	*buffer_size = file.count;
	*buffer = file.buf;
out:
	if (ret == -EINVAL)
		vfree(file.buf);
	return ret;
}

/*
 * Called during kexec_file_load so that IMA can add a segment to the kexec
 * image for the measurement list for the next kernel.
 *
 * This function assumes that kexec_mutex is held.
 */
void ima_add_kexec_buffer(struct kimage *image)
{
	struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
				  .buf_min = 0, .buf_max = ULONG_MAX,
				  .top_down = true };
	unsigned long binary_runtime_size;

	/* use more understandable variable names than defined in kbuf */
	void *kexec_buffer = NULL;
	size_t kexec_buffer_size;
	size_t kexec_segment_size;
	int ret;

	/*
	 * Reserve an extra half page of memory for additional measurements
	 * added during the kexec load.
	 */
	binary_runtime_size = ima_get_binary_runtime_size();
	if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
		kexec_segment_size = ULONG_MAX;
	else
		kexec_segment_size = ALIGN(ima_get_binary_runtime_size() +
					   PAGE_SIZE / 2, PAGE_SIZE);
	if ((kexec_segment_size == ULONG_MAX) ||
	    ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages() / 2)) {
		pr_err("Binary measurement list too large.\n");
		return;
	}

	ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer,
				  kexec_segment_size);
	if (!kexec_buffer) {
		pr_err("Not enough memory for the kexec measurement buffer.\n");
		return;
	}

	kbuf.buffer = kexec_buffer;
	kbuf.bufsz = kexec_buffer_size;
	kbuf.memsz = kexec_segment_size;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		pr_err("Error passing over kexec measurement buffer.\n");
		return;
	}

	ret = arch_ima_add_kexec_buffer(image, kbuf.mem, kexec_segment_size);
	if (ret) {
		pr_err("Error passing over kexec measurement buffer.\n");
		return;
	}

	pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
		 kbuf.mem);
}
#endif /* IMA_KEXEC */

/*
 * Restore the measurement list from the previous kernel.
 */
void ima_load_kexec_buffer(void)
{
	void *kexec_buffer = NULL;
	size_t kexec_buffer_size = 0;
	int rc;

	rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size);
	switch (rc) {
	case 0:
		rc = ima_restore_measurement_list(kexec_buffer_size,
						  kexec_buffer);
		if (rc != 0)
			pr_err("Failed to restore the measurement list: %d\n",
			       rc);
		ima_free_kexec_buffer();
		break;
	case -ENOTSUPP:
		pr_debug("Restoring the measurement list not supported\n");
		break;
	case -ENOENT:
		pr_debug("No measurement list to restore\n");
		break;
	default:
		pr_debug("Error restoring the measurement list: %d\n", rc);
	}
}