Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Makefile                          |   4
-rw-r--r--  drivers/md/dm-cache-target.c                 |  24
-rw-r--r--  drivers/md/dm-clone-target.c                 |   5
-rw-r--r--  drivers/md/dm-core.h                         |   5
-rw-r--r--  drivers/md/dm-crypt.c                        |  38
-rw-r--r--  drivers/md/dm-delay.c                        |   4
-rw-r--r--  drivers/md/dm-dust.c                         |   4
-rw-r--r--  drivers/md/dm-ebs-target.c                   |   3
-rw-r--r--  drivers/md/dm-era-target.c                   |   4
-rw-r--r--  drivers/md/dm-flakey.c                       |   4
-rw-r--r--  drivers/md/dm-ima.c                          | 750
-rw-r--r--  drivers/md/dm-ima.h                          |  78
-rw-r--r--  drivers/md/dm-integrity.c                    |  24
-rw-r--r--  drivers/md/dm-ioctl.c                        |  24
-rw-r--r--  drivers/md/dm-linear.c                       |  10
-rw-r--r--  drivers/md/dm-log-userspace-base.c           |   3
-rw-r--r--  drivers/md/dm-log-writes.c                   |   4
-rw-r--r--  drivers/md/dm-log.c                          |  10
-rw-r--r--  drivers/md/dm-mpath.c                        |  40
-rw-r--r--  drivers/md/dm-ps-historical-service-time.c   |   3
-rw-r--r--  drivers/md/dm-ps-io-affinity.c               |   3
-rw-r--r--  drivers/md/dm-ps-queue-length.c              |   3
-rw-r--r--  drivers/md/dm-ps-round-robin.c               |   4
-rw-r--r--  drivers/md/dm-ps-service-time.c              |   3
-rw-r--r--  drivers/md/dm-raid.c                         |  39
-rw-r--r--  drivers/md/dm-raid1.c                        |  17
-rw-r--r--  drivers/md/dm-snap-persistent.c              |   4
-rw-r--r--  drivers/md/dm-snap-transient.c               |   4
-rw-r--r--  drivers/md/dm-snap.c                         |  13
-rw-r--r--  drivers/md/dm-stripe.c                       |  15
-rw-r--r--  drivers/md/dm-switch.c                       |   4
-rw-r--r--  drivers/md/dm-thin.c                         |   8
-rw-r--r--  drivers/md/dm-unstripe.c                     |   4
-rw-r--r--  drivers/md/dm-verity-target.c                |  43
-rw-r--r--  drivers/md/dm-writecache.c                   | 467
-rw-r--r--  drivers/md/dm-zoned-target.c                 |   3
-rw-r--r--  drivers/md/dm.c                              |  12
37 files changed, 1493 insertions(+), 194 deletions(-)
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index a74aaf8b1445..816945eeed7f 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -96,6 +96,10 @@ ifeq ($(CONFIG_BLK_DEV_ZONED),y)
dm-mod-objs += dm-zone.o
endif
+ifeq ($(CONFIG_IMA),y)
+dm-mod-objs += dm-ima.o
+endif
+
ifeq ($(CONFIG_DM_VERITY_FEC),y)
dm-verity-objs += dm-verity-fec.o
endif
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 8e4ced5a2516..bdd500447dea 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3122,6 +3122,30 @@ static void cache_status(struct dm_target *ti, status_type_t type,
DMEMIT(" %s", cache->ctr_args[i]);
if (cache->nr_ctr_args)
DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
+ break;
+
+ case STATUSTYPE_IMA:
+ DMEMIT_TARGET_NAME_VERSION(ti->type);
+ if (get_cache_mode(cache) == CM_FAIL)
+ DMEMIT(",metadata_mode=fail");
+ else if (get_cache_mode(cache) == CM_READ_ONLY)
+ DMEMIT(",metadata_mode=ro");
+ else
+ DMEMIT(",metadata_mode=rw");
+
+ format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
+ DMEMIT(",cache_metadata_device=%s", buf);
+ format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
+ DMEMIT(",cache_device=%s", buf);
+ format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
+ DMEMIT(",cache_origin_device=%s", buf);
+ DMEMIT(",writethrough=%c", writethrough_mode(cache) ? 'y' : 'n');
+ DMEMIT(",writeback=%c", writeback_mode(cache) ? 'y' : 'n');
+ DMEMIT(",passthrough=%c", passthrough_mode(cache) ? 'y' : 'n');
+ DMEMIT(",metadata2=%c", cache->features.metadata_version == 2 ? 'y' : 'n');
+ DMEMIT(",no_discard_passdown=%c", cache->features.discard_passdown ? 'n' : 'y');
+ DMEMIT(";");
+ break;
}
return;
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index a90bdf9b2ca6..84dbe08ad205 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -1499,6 +1499,11 @@ static void clone_status(struct dm_target *ti, status_type_t type,
for (i = 0; i < clone->nr_ctr_args; i++)
DMEMIT(" %s", clone->ctr_args[i]);
+ break;
+
+ case STATUSTYPE_IMA:
+ *result = '\0';
+ break;
}
return;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index edc1553c4eea..55dccdfbcb22 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -18,6 +18,7 @@
#include <trace/events/block.h>
#include "dm.h"
+#include "dm-ima.h"
#define DM_RESERVED_MAX_IOS 1024
@@ -119,6 +120,10 @@ struct mapped_device {
unsigned int nr_zones;
unsigned int *zwp_offset;
#endif
+
+#ifdef CONFIG_IMA
+ struct dm_ima_measurements ima;
+#endif
};
/*
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 50f4cbd600d5..916b7da16de2 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2223,11 +2223,11 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
(bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
/*
- * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
+ * in_hardirq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
* irqs_disabled(): the kernel may run some IO completion from the idle thread, but
* it is being executed with irqs disabled.
*/
- if (in_irq() || irqs_disabled()) {
+ if (in_hardirq() || irqs_disabled()) {
tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
tasklet_schedule(&io->tasklet);
return;
@@ -2661,7 +2661,12 @@ static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
struct crypt_config *cc = pool_data;
struct page *page;
- if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
+ /*
+ * Note, percpu_counter_read_positive() may over (and under) estimate
+ * the current usage by at most (batch - 1) * num_online_cpus() pages,
+ * but avoids potential spinlock contention of an exact result.
+ */
+ if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
likely(gfp_mask & __GFP_NORETRY))
return NULL;
@@ -3485,7 +3490,34 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
DMEMIT(" iv_large_sectors");
}
+ break;
+ case STATUSTYPE_IMA:
+ DMEMIT_TARGET_NAME_VERSION(ti->type);
+ DMEMIT(",allow_discards=%c", ti->num_discard_bios ? 'y' : 'n');
+ DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
+ DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
+ 'y' : 'n');
+ DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
+ 'y' : 'n');
+ DMEMIT(",no_write_workqueue=%c", test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags) ?
+ 'y' : 'n');
+ DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
+ 'y' : 'n');
+
+ if (cc->on_disk_tag_size)
+ DMEMIT(",integrity_tag_size=%u,cipher_auth=%s",
+ cc->on_disk_tag_size, cc->cipher_auth);
+ if (cc->sector_size != (1 << SECTOR_SHIFT))
+ DMEMIT(",sector_size=%d", cc->sector_size);
+ if (cc->cipher_string)
+ DMEMIT(",cipher_string=%s", cc->cipher_string);
+
+ DMEMIT(",key_size=%u", cc->key_size);
+ DMEMIT(",key_parts=%u", cc->key_parts);
+ DMEMIT(",key_extra_size=%u", cc->key_extra_size);
+ DMEMIT(",key_mac_size=%u", cc->key_mac_size);
+ DMEMIT(";");
break;
}
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 2628a832787b..59e51d285b0e 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -326,6 +326,10 @@ static void delay_status(struct dm_target *ti, status_type_t type,
DMEMIT_DELAY_CLASS(&dc->flush);
}
break;
+
+ case STATUSTYPE_IMA:
+ *result = '\0';
+ break;
}
}
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index cbe1058ee589..3163e2b1418e 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -527,6 +527,10 @@ static void dust_status(struct dm_target *ti, status_type_t type,
DMEMIT("%s %llu %u", dd->dev->name,
(unsigned long long)dd->start, dd->blksz);
break;
+
+ case STATUSTYPE_IMA:
+ *result = '\0';
+ break;
}
}
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 0c509dae0ff8..d25989660a76 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -401,6 +401,9 @@ static void ebs_status(struct dm_target *ti, status_type_t type,
snprintf(result, maxlen, ec->u_bs_set ? "%s %llu %u %u" : "%s %llu %u",
ec->dev->name, (unsigned long long) ec->start, ec->e_bs, ec->u_bs);
break;
+ case STATUSTYPE_IMA:
+ *result = '\0';
+ break;
}
}
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 3b748393fca5..2a78f6874143 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1644,6 +1644,10 @@ static void era_status(struct dm_target *ti, status_type_t type,
format_dev_t(buf, era->origin_dev->bdev->bd_dev);
DMEMIT("%s %u", buf, era->sectors_per_block);
break;
+
+ case STATUSTYPE_IMA:
+ *result = '\0';
+ break;
}
return;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 5877220c01ed..4b94ffe6f2d4 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -440,6 +440,10 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
fc->corrupt_bio_value, fc->corrupt_bio_flags);
break;
+
+ case STATUSTYPE_IMA:
+ result[0] = '\0';
+ break;
}
}
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
new file mode 100644
index 000000000000..3fd69ab12a8e
--- /dev/null
+++ b/drivers/md/dm-ima.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Microsoft Corporation
+ *
+ * Author: Tushar Sugandhi <tusharsu@linux.microsoft.com>
+ *
+ * File: dm-ima.c
+ * Enables IMA measurements for DM targets
+ */
+
+#include "dm-core.h"
+#include "dm-ima.h"
+
+#include <linux/ima.h>
+#include <crypto/hash.h>
+#include <linux/crypto.h>
+#include <crypto/hash_info.h>
+
+#define DM_MSG_PREFIX "ima"
+
+/*
+ * Internal function to prefix separator characters in input buffer with escape
+ * character, so that they don't interfere with the construction of key-value pairs,
+ * and clients can split the key1=val1,key2=val2,key3=val3; pairs properly.
+ */
+static void fix_separator_chars(char **buf)
+{
+ int l = strlen(*buf);
+ int i, j, sp = 0;
+
+ for (i = 0; i < l; i++)
+ if ((*buf)[i] == '\\' || (*buf)[i] == ';' || (*buf)[i] == '=' || (*buf)[i] == ',')
+ sp++;
+
+ if (!sp)
+ return;
+
+ for (i = l-1, j = i+sp; i >= 0; i--) {
+ (*buf)[j--] = (*buf)[i];
+ if ((*buf)[i] == '\\' || (*buf)[i] == ';' || (*buf)[i] == '=' || (*buf)[i] == ',')
+ (*buf)[j--] = '\\';
+ }
+}
+
+/*
+ * Internal function to allocate memory for IMA measurements.
+ */
+static void *dm_ima_alloc(size_t len, gfp_t flags, bool noio)
+{
+ unsigned int noio_flag;
+ void *ptr;
+
+ if (noio)
+ noio_flag = memalloc_noio_save();
+
+ ptr = kzalloc(len, flags);
+
+ if (noio)
+ memalloc_noio_restore(noio_flag);
+
+ return ptr;
+}
+
+/*
+ * Internal function to allocate and copy name and uuid for IMA measurements.
+ */
+static int dm_ima_alloc_and_copy_name_uuid(struct mapped_device *md, char **dev_name,
+ char **dev_uuid, bool noio)
+{
+ int r;
+ *dev_name = dm_ima_alloc(DM_NAME_LEN*2, GFP_KERNEL, noio);
+ if (!(*dev_name)) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ *dev_uuid = dm_ima_alloc(DM_UUID_LEN*2, GFP_KERNEL, noio);
+ if (!(*dev_uuid)) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ r = dm_copy_name_and_uuid(md, *dev_name, *dev_uuid);
+ if (r)
+ goto error;
+
+ fix_separator_chars(dev_name);
+ fix_separator_chars(dev_uuid);
+
+ return 0;
+error:
+ kfree(*dev_name);
+ kfree(*dev_uuid);
+ *dev_name = NULL;
+ *dev_uuid = NULL;
+ return r;
+}
+
+/*
+ * Internal function to allocate and copy device data for IMA measurements.
+ */
+static int dm_ima_alloc_and_copy_device_data(struct mapped_device *md, char **device_data,
+ unsigned int num_targets, bool noio)
+{
+ char *dev_name = NULL, *dev_uuid = NULL;
+ int r;
+
+ r = dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio);
+ if (r)
+ return r;
+
+ *device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ if (!(*device_data)) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ scnprintf(*device_data, DM_IMA_DEVICE_BUF_LEN,
+ "name=%s,uuid=%s,major=%d,minor=%d,minor_count=%d,num_targets=%u;",
+ dev_name, dev_uuid, md->disk->major, md->disk->first_minor,
+ md->disk->minors, num_targets);
+error:
+ kfree(dev_name);
+ kfree(dev_uuid);
+ return r;
+}
+
+/*
+ * Internal wrapper function to call IMA to measure DM data.
+ */
+static void dm_ima_measure_data(const char *event_name, const void *buf, size_t buf_len,
+ bool noio)
+{
+ unsigned int noio_flag;
+
+ if (noio)
+ noio_flag = memalloc_noio_save();
+
+ ima_measure_critical_data(DM_NAME, event_name, buf, buf_len, false);
+
+ if (noio)
+ memalloc_noio_restore(noio_flag);
+}
+
+/*
+ * Internal function to allocate and copy current device capacity for IMA measurements.
+ */
+static int dm_ima_alloc_and_copy_capacity_str(struct mapped_device *md, char **capacity_str,
+ bool noio)
+{
+ sector_t capacity;
+
+ capacity = get_capacity(md->disk);
+
+ *capacity_str = dm_ima_alloc(DM_IMA_DEVICE_CAPACITY_BUF_LEN, GFP_KERNEL, noio);
+ if (!(*capacity_str))
+ return -ENOMEM;
+
+ scnprintf(*capacity_str, DM_IMA_DEVICE_BUF_LEN, "current_device_capacity=%llu;",
+ capacity);
+
+ return 0;
+}
+
+/*
+ * Initialize/reset the dm ima related data structure variables.
+ */
+void dm_ima_reset_data(struct mapped_device *md)
+{
+ memset(&(md->ima), 0, sizeof(md->ima));
+ md->ima.dm_version_str_len = strlen(DM_IMA_VERSION_STR);
+}
+
+/*
+ * Build up the IMA data for each target, and finally measure.
+ */
+void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_flags)
+{
+ size_t device_data_buf_len, target_metadata_buf_len, target_data_buf_len, l = 0;
+ char *target_metadata_buf = NULL, *target_data_buf = NULL, *digest_buf = NULL;
+ char *ima_buf = NULL, *device_data_buf = NULL;
+ int digest_size, last_target_measured = -1, r;
+ status_type_t type = STATUSTYPE_IMA;
+ size_t cur_total_buf_len = 0;
+ unsigned int num_targets, i;
+ SHASH_DESC_ON_STACK(shash, NULL);
+ struct crypto_shash *tfm = NULL;
+ u8 *digest = NULL;
+ bool noio = false;
+ /*
+ * In below hash_alg_prefix_len assignment +1 is for the additional char (':'),
+ * when prefixing the hash value with the hash algorithm name. e.g. sha256:<hash_value>.
+ */
+ const size_t hash_alg_prefix_len = strlen(DM_IMA_TABLE_HASH_ALG) + 1;
+ char table_load_event_name[] = "dm_table_load";
+
+ ima_buf = dm_ima_alloc(DM_IMA_MEASUREMENT_BUF_LEN, GFP_KERNEL, noio);
+ if (!ima_buf)
+ return;
+
+ target_metadata_buf = dm_ima_alloc(DM_IMA_TARGET_METADATA_BUF_LEN, GFP_KERNEL, noio);
+ if (!target_metadata_buf)
+ goto error;
+
+ target_data_buf = dm_ima_alloc(DM_IMA_TARGET_DATA_BUF_LEN, GFP_KERNEL, noio);
+ if (!target_data_buf)
+ goto error;
+
+ num_targets = dm_table_get_num_targets(table);
+
+ if (dm_ima_alloc_and_copy_device_data(table->md, &device_data_buf, num_targets, noio))
+ goto error;
+
+ tfm = crypto_alloc_shash(DM_IMA_TABLE_HASH_ALG, 0, 0);
+ if (IS_ERR(tfm))
+ goto error;
+
+ shash->tfm = tfm;
+ digest_size = crypto_shash_digestsize(tfm);
+ digest = dm_ima_alloc(digest_size, GFP_KERNEL, noio);
+ if (!digest)
+ goto error;
+
+ r = crypto_shash_init(shash);
+ if (r)
+ goto error;
+
+ memcpy(ima_buf + l, DM_IMA_VERSION_STR, table->md->ima.dm_version_str_len);
+ l += table->md->ima.dm_version_str_len;
+
+ device_data_buf_len = strlen(device_data_buf);
+ memcpy(ima_buf + l, device_data_buf, device_data_buf_len);
+ l += device_data_buf_len;
+
+ for (i = 0; i < num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(table, i);
+
+ if (!ti)
+ goto error;
+
+ last_target_measured = 0;
+
+ /*
+ * First retrieve the target metadata.
+ */
+ scnprintf(target_metadata_buf, DM_IMA_TARGET_METADATA_BUF_LEN,
+ "target_index=%d,target_begin=%llu,target_len=%llu,",
+ i, ti->begin, ti->len);
+ target_metadata_buf_len = strlen(target_metadata_buf);
+
+ /*
+ * Then retrieve the actual target data.
+ */
+ if (ti->type->status)
+ ti->type->status(ti, type, status_flags, target_data_buf,
+ DM_IMA_TARGET_DATA_BUF_LEN);
+ else
+ target_data_buf[0] = '\0';
+
+ target_data_buf_len = strlen(target_data_buf);
+
+ /*
+ * Check if the total data can fit into the IMA buffer.
+ */
+ cur_total_buf_len = l + target_metadata_buf_len + target_data_buf_len;
+
+ /*
+ * IMA measurements for DM targets are best-effort.
+ * If the total data buffered so far, including the current target,
+ * is too large to fit into DM_IMA_MEASUREMENT_BUF_LEN, measure what
+ * we have in the current buffer, and continue measuring the remaining
+ * targets by prefixing the device metadata again.
+ */
+ if (unlikely(cur_total_buf_len >= DM_IMA_MEASUREMENT_BUF_LEN)) {
+ dm_ima_measure_data(table_load_event_name, ima_buf, l, noio);
+ r = crypto_shash_update(shash, (const u8 *)ima_buf, l);
+ if (r < 0)
+ goto error;
+
+ memset(ima_buf, 0, DM_IMA_MEASUREMENT_BUF_LEN);
+ l = 0;
+
+ /*
+ * Each new "dm_table_load" entry in IMA log should have device data
+ * prefix, so that multiple records from the same "dm_table_load" for
+ * a given device can be linked together.
+ */
+ memcpy(ima_buf + l, DM_IMA_VERSION_STR, table->md->ima.dm_version_str_len);
+ l += table->md->ima.dm_version_str_len;
+
+ memcpy(ima_buf + l, device_data_buf, device_data_buf_len);
+ l += device_data_buf_len;
+
+ /*
+ * If this iteration of the for loop turns out to be the last target
+ * in the table, dm_ima_measure_data("dm_table_load", ...) doesn't need
+ * to be called again, just the hash needs to be finalized.
+ * "last_target_measured" tracks this state.
+ */
+ last_target_measured = 1;
+ }
+
+ /*
+ * Fill-in all the target metadata, so that multiple targets for the same
+ * device can be linked together.
+ */
+ memcpy(ima_buf + l, target_metadata_buf, target_metadata_buf_len);
+ l += target_metadata_buf_len;
+
+ memcpy(ima_buf + l, target_data_buf, target_data_buf_len);
+ l += target_data_buf_len;
+ }
+
+ if (!last_target_measured) {
+ dm_ima_measure_data(table_load_event_name, ima_buf, l, noio);
+
+ r = crypto_shash_update(shash, (const u8 *)ima_buf, l);
+ if (r < 0)
+ goto error;
+ }
+
+ /*
+ * Finalize the table hash, and store it in table->md->ima.inactive_table.hash,
+ * so that the table data can be verified against the future device state change
+ * events, e.g. resume, rename, remove, table-clear etc.
+ */
+ r = crypto_shash_final(shash, digest);
+ if (r < 0)
+ goto error;
+
+ digest_buf = dm_ima_alloc((digest_size*2) + hash_alg_prefix_len + 1, GFP_KERNEL, noio);
+
+ if (!digest_buf)
+ goto error;
+
+ snprintf(digest_buf, hash_alg_prefix_len + 1, "%s:", DM_IMA_TABLE_HASH_ALG);
+
+ for (i = 0; i < digest_size; i++)
+ snprintf((digest_buf + hash_alg_prefix_len + (i*2)), 3, "%02x", digest[i]);
+
+ if (table->md->ima.active_table.hash != table->md->ima.inactive_table.hash)
+ kfree(table->md->ima.inactive_table.hash);
+
+ table->md->ima.inactive_table.hash = digest_buf;
+ table->md->ima.inactive_table.hash_len = strlen(digest_buf);
+ table->md->ima.inactive_table.num_targets = num_targets;
+
+ if (table->md->ima.active_table.device_metadata !=
+ table->md->ima.inactive_table.device_metadata)
+ kfree(table->md->ima.inactive_table.device_metadata);
+
+ table->md->ima.inactive_table.device_metadata = device_data_buf;
+ table->md->ima.inactive_table.device_metadata_len = device_data_buf_len;
+
+ goto exit;
+error:
+ kfree(digest_buf);
+ kfree(device_data_buf);
+exit:
+ kfree(digest);
+ if (tfm)
+ crypto_free_shash(tfm);
+ kfree(ima_buf);
+ kfree(target_metadata_buf);
+ kfree(target_data_buf);
+}
+
+/*
+ * Measure IMA data on device resume.
+ */
+void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
+{
+ char *device_table_data, *dev_name = NULL, *dev_uuid = NULL, *capacity_str = NULL;
+ char active[] = "active_table_hash=";
+ unsigned int active_len = strlen(active), capacity_len = 0;
+ unsigned int l = 0;
+ bool noio = true;
+ bool nodata = true;
+ int r;
+
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ if (!device_table_data)
+ return;
+
+ r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (r)
+ goto error;
+
+ memcpy(device_table_data + l, DM_IMA_VERSION_STR, md->ima.dm_version_str_len);
+ l += md->ima.dm_version_str_len;
+
+ if (swap) {
+ if (md->ima.active_table.hash != md->ima.inactive_table.hash)
+ kfree(md->ima.active_table.hash);
+
+ md->ima.active_table.hash = NULL;
+ md->ima.active_table.hash_len = 0;
+
+ if (md->ima.active_table.device_metadata !=
+ md->ima.inactive_table.device_metadata)
+ kfree(md->ima.active_table.device_metadata);
+
+ md->ima.active_table.device_metadata = NULL;
+ md->ima.active_table.device_metadata_len = 0;
+ md->ima.active_table.num_targets = 0;
+
+ if (md->ima.inactive_table.hash) {
+ md->ima.active_table.hash = md->ima.inactive_table.hash;
+ md->ima.active_table.hash_len = md->ima.inactive_table.hash_len;
+ md->ima.inactive_table.hash = NULL;
+ md->ima.inactive_table.hash_len = 0;
+ }
+
+ if (md->ima.inactive_table.device_metadata) {
+ md->ima.active_table.device_metadata =
+ md->ima.inactive_table.device_metadata;
+ md->ima.active_table.device_metadata_len =
+ md->ima.inactive_table.device_metadata_len;
+ md->ima.active_table.num_targets = md->ima.inactive_table.num_targets;
+ md->ima.inactive_table.device_metadata = NULL;
+ md->ima.inactive_table.device_metadata_len = 0;
+ md->ima.inactive_table.num_targets = 0;
+ }
+ }
+
+ if (md->ima.active_table.device_metadata) {
+ memcpy(device_table_data + l, md->ima.active_table.device_metadata,
+ md->ima.active_table.device_metadata_len);
+ l += md->ima.active_table.device_metadata_len;
+
+ nodata = false;
+ }
+
+ if (md->ima.active_table.hash) {
+ memcpy(device_table_data + l, active, active_len);
+ l += active_len;
+
+ memcpy(device_table_data + l, md->ima.active_table.hash,
+ md->ima.active_table.hash_len);
+ l += md->ima.active_table.hash_len;
+
+ memcpy(device_table_data + l, ";", 1);
+ l++;
+
+ nodata = false;
+ }
+
+ if (nodata) {
+ r = dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio);
+ if (r)
+ goto error;
+
+ scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+ "%sname=%s,uuid=%s;device_resume=no_data;",
+ DM_IMA_VERSION_STR, dev_name, dev_uuid);
+ l += strlen(device_table_data);
+
+ }
+
+ capacity_len = strlen(capacity_str);
+ memcpy(device_table_data + l, capacity_str, capacity_len);
+ l += capacity_len;
+
+ dm_ima_measure_data("dm_device_resume", device_table_data, l, noio);
+
+ kfree(dev_name);
+ kfree(dev_uuid);
+error:
+ kfree(capacity_str);
+ kfree(device_table_data);
+}
+
+/*
+ * Measure IMA data on remove.
+ */
+void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
+{
+ char *device_table_data, *dev_name = NULL, *dev_uuid = NULL, *capacity_str = NULL;
+ char active_table_str[] = "active_table_hash=";
+ char inactive_table_str[] = "inactive_table_hash=";
+ char device_active_str[] = "device_active_metadata=";
+ char device_inactive_str[] = "device_inactive_metadata=";
+ char remove_all_str[] = "remove_all=";
+ unsigned int active_table_len = strlen(active_table_str);
+ unsigned int inactive_table_len = strlen(inactive_table_str);
+ unsigned int device_active_len = strlen(device_active_str);
+ unsigned int device_inactive_len = strlen(device_inactive_str);
+ unsigned int remove_all_len = strlen(remove_all_str);
+ unsigned int capacity_len = 0;
+ unsigned int l = 0;
+ bool noio = true;
+ bool nodata = true;
+ int r;
+
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN*2, GFP_KERNEL, noio);
+ if (!device_table_data)
+ goto exit;
+
+ r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (r) {
+ kfree(device_table_data);
+ goto exit;
+ }
+
+ memcpy(device_table_data + l, DM_IMA_VERSION_STR, md->ima.dm_version_str_len);
+ l += md->ima.dm_version_str_len;
+
+ if (md->ima.active_table.device_metadata) {
+ memcpy(device_table_data + l, device_active_str, device_active_len);
+ l += device_active_len;
+
+ memcpy(device_table_data + l, md->ima.active_table.device_metadata,
+ md->ima.active_table.device_metadata_len);
+ l += md->ima.active_table.device_metadata_len;
+
+ nodata = false;
+ }
+
+ if (md->ima.inactive_table.device_metadata) {
+ memcpy(device_table_data + l, device_inactive_str, device_inactive_len);
+ l += device_inactive_len;
+
+ memcpy(device_table_data + l, md->ima.inactive_table.device_metadata,
+ md->ima.inactive_table.device_metadata_len);
+ l += md->ima.inactive_table.device_metadata_len;
+
+ nodata = false;
+ }
+
+ if (md->ima.active_table.hash) {
+ memcpy(device_table_data + l, active_table_str, active_table_len);
+ l += active_table_len;
+
+ memcpy(device_table_data + l, md->ima.active_table.hash,
+ md->ima.active_table.hash_len);
+ l += md->ima.active_table.hash_len;
+
+ memcpy(device_table_data + l, ",", 1);
+ l++;
+
+ nodata = false;
+ }
+
+ if (md->ima.inactive_table.hash) {
+ memcpy(device_table_data + l, inactive_table_str, inactive_table_len);
+ l += inactive_table_len;
+
+ memcpy(device_table_data + l, md->ima.inactive_table.hash,
+ md->ima.inactive_table.hash_len);
+ l += md->ima.inactive_table.hash_len;
+
+ memcpy(device_table_data + l, ",", 1);
+ l++;
+
+ nodata = false;
+ }
+ /*
+ * In case both active and inactive tables, and corresponding
+ * device metadata is cleared/missing - record the name and uuid
+ * in IMA measurements.
+ */
+ if (nodata) {
+ if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
+ goto error;
+
+ scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+ "%sname=%s,uuid=%s;device_remove=no_data;",
+ DM_IMA_VERSION_STR, dev_name, dev_uuid);
+ l += strlen(device_table_data);
+ }
+
+ memcpy(device_table_data + l, remove_all_str, remove_all_len);
+ l += remove_all_len;