Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-integrity.c          4
-rw-r--r--  drivers/md/dm-raid.c               7
-rw-r--r--  drivers/md/dm-verity-target.c    118
-rw-r--r--  drivers/md/dm-verity.h             4
-rw-r--r--  drivers/md/md-bitmap.c           570
-rw-r--r--  drivers/md/md-bitmap.h           268
-rw-r--r--  drivers/md/md-cluster.c           91
-rw-r--r--  drivers/md/md.c                  332
-rw-r--r--  drivers/md/md.h                   13
-rw-r--r--  drivers/md/raid1-10.c              9
-rw-r--r--  drivers/md/raid1.c                99
-rw-r--r--  drivers/md/raid10.c               75
-rw-r--r--  drivers/md/raid5-cache.c          14
-rw-r--r--  drivers/md/raid5.c               157
-rw-r--r--  drivers/md/raid5.h                 2
15 files changed, 994 insertions, 769 deletions
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 42c9dc2ee0c0..ee9f7cecd78e 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2183,6 +2183,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
unsigned int journal_section, journal_entry;
unsigned int journal_read_pos;
+ sector_t recalc_sector;
struct completion read_comp;
bool discard_retried = false;
bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
@@ -2323,6 +2324,7 @@ offload_to_thread:
goto lock_retry;
}
}
+ recalc_sector = le64_to_cpu(ic->sb->recalc_sector);
spin_unlock_irq(&ic->endio_wait.lock);
if (unlikely(journal_read_pos != NOT_FOUND)) {
@@ -2377,7 +2379,7 @@ offload_to_thread:
if (need_sync_io) {
wait_for_completion_io(&read_comp);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
- dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
+ dio->range.logical_sector + dio->range.n_sectors > recalc_sector)
goto skip_check;
if (ic->mode == 'B') {
if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
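The dm-integrity hunk above caches ic->sb->recalc_sector while endio_wait.lock is still held, so the later comparison uses a stable snapshot instead of re-reading the superblock field after the lock has been dropped. A minimal sketch of that snapshot-under-lock pattern follows; the structure and field names are illustrative, not the dm-integrity ones.

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_ctx {
        spinlock_t lock;
        u64 progress;   /* advanced by a recalculation thread under 'lock' */
};

static bool example_region_recalculated(struct example_ctx *c,
                                        u64 start, u64 len)
{
        u64 snapshot;

        spin_lock_irq(&c->lock);
        snapshot = c->progress;         /* read once, while it cannot move */
        spin_unlock_irq(&c->lock);

        /* later checks use the snapshot, not the live (racy) field */
        return start + len <= snapshot;
}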
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 32c8240c1873..1e0d3b9b75d6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3949,7 +3949,9 @@ static int __load_dirty_region_bitmap(struct raid_set *rs)
/* Try loading the bitmap unless "raid0", which does not have one */
if (!rs_is_raid0(rs) &&
!test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
- r = md_bitmap_load(&rs->md);
+ struct mddev *mddev = &rs->md;
+
+ r = mddev->bitmap_ops->load(mddev);
if (r)
DMERR("Failed to load bitmap");
}
@@ -4066,7 +4068,8 @@ static int raid_preresume(struct dm_target *ti)
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
- r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0);
+ r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors,
+ chunksize, false);
if (r)
DMERR("Failed to resize bitmap");
}
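Both dm-raid hunks replace direct md_bitmap_*() calls with indirect calls through mddev->bitmap_ops. The sketch below shows the caller-facing side of that indirection; only the two operations used above are listed, and the exact layout of the real bitmap_operations structure (and the md.h types it relies on) is an assumption here.

struct bitmap_operations {
        int (*load)(struct mddev *mddev);
        int (*resize)(struct mddev *mddev, sector_t blocks,
                      int chunksize, bool init);
        /* ... the real structure carries many more hooks ... */
};

/* callers stop naming md_bitmap_*() symbols directly: */
static int example_load_then_resize(struct mddev *mddev, sector_t blocks,
                                    int chunksize)
{
        int err = mddev->bitmap_ops->load(mddev);

        if (err)
                return err;

        return mddev->bitmap_ops->resize(mddev, blocks, chunksize, false);
}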
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index a95c1b9cc5b5..36e4ddfe2d15 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/jump_label.h>
+#include <linux/security.h>
#define DM_MSG_PREFIX "verity"
@@ -949,6 +950,41 @@ static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->dma_alignment = limits->logical_block_size - 1;
}
+#ifdef CONFIG_SECURITY
+
+static int verity_init_sig(struct dm_verity *v, const void *sig,
+ size_t sig_size)
+{
+ v->sig_size = sig_size;
+
+ if (sig) {
+ v->root_digest_sig = kmemdup(sig, v->sig_size, GFP_KERNEL);
+ if (!v->root_digest_sig)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void verity_free_sig(struct dm_verity *v)
+{
+ kfree(v->root_digest_sig);
+}
+
+#else
+
+static inline int verity_init_sig(struct dm_verity *v, const void *sig,
+ size_t sig_size)
+{
+ return 0;
+}
+
+static inline void verity_free_sig(struct dm_verity *v)
+{
+}
+
+#endif /* CONFIG_SECURITY */
+
static void verity_dtr(struct dm_target *ti)
{
struct dm_verity *v = ti->private;
@@ -968,6 +1004,7 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->initial_hashstate);
kfree(v->root_digest);
kfree(v->zero_digest);
+ verity_free_sig(v);
if (v->ahash_tfm) {
static_branch_dec(&ahash_enabled);
@@ -1437,6 +1474,13 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Root hash verification failed";
goto bad;
}
+
+ r = verity_init_sig(v, verify_args.sig, verify_args.sig_size);
+ if (r < 0) {
+ ti->error = "Cannot allocate root digest signature";
+ goto bad;
+ }
+
v->hash_per_block_bits =
__fls((1 << v->hash_dev_block_bits) / v->digest_size);
@@ -1578,8 +1622,79 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
return 0;
}
+#ifdef CONFIG_SECURITY
+
+#ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG
+
+static int verity_security_set_signature(struct block_device *bdev,
+ struct dm_verity *v)
+{
+ /*
+ * if the dm-verity target is unsigned, v->root_digest_sig will
+ * be NULL, and the hook call is still required to let LSMs mark
+ * the device as unsigned. This information is crucial for LSMs to
+ * block operations such as execution on unsigned files
+ */
+ return security_bdev_setintegrity(bdev,
+ LSM_INT_DMVERITY_SIG_VALID,
+ v->root_digest_sig,
+ v->sig_size);
+}
+
+#else
+
+static inline int verity_security_set_signature(struct block_device *bdev,
+ struct dm_verity *v)
+{
+ return 0;
+}
+
+#endif /* CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG */
+
+/*
+ * Expose verity target's root hash and signature data to LSMs before resume.
+ *
+ * Returns 0 on success, or -ENOMEM if the system is out of memory.
+ */
+static int verity_preresume(struct dm_target *ti)
+{
+ struct block_device *bdev;
+ struct dm_verity_digest root_digest;
+ struct dm_verity *v;
+ int r;
+
+ v = ti->private;
+ bdev = dm_disk(dm_table_get_md(ti->table))->part0;
+ root_digest.digest = v->root_digest;
+ root_digest.digest_len = v->digest_size;
+ if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm)
+ root_digest.alg = crypto_ahash_alg_name(v->ahash_tfm);
+ else
+ root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
+
+ r = security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, &root_digest,
+ sizeof(root_digest));
+ if (r)
+ return r;
+
+ r = verity_security_set_signature(bdev, v);
+ if (r)
+ goto bad;
+
+ return 0;
+
+bad:
+
+ security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, NULL, 0);
+
+ return r;
+}
+
+#endif /* CONFIG_SECURITY */
+
static struct target_type verity_target = {
.name = "verity",
+/* Note: the LSMs depend on the singleton and immutable features */
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
.version = {1, 10, 0},
.module = THIS_MODULE,
@@ -1590,6 +1705,9 @@ static struct target_type verity_target = {
.prepare_ioctl = verity_prepare_ioctl,
.iterate_devices = verity_iterate_devices,
.io_hints = verity_io_hints,
+#ifdef CONFIG_SECURITY
+ .preresume = verity_preresume,
+#endif /* CONFIG_SECURITY */
};
module_dm(verity);
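verity_init_sig()/verity_free_sig() and verity_security_set_signature() follow the usual kernel pattern of pairing real helpers under a config option with empty static inline stubs, so call sites such as verity_ctr(), verity_dtr() and verity_preresume() stay free of #ifdefs. A reduced sketch of the pattern, using hypothetical names (CONFIG_EXAMPLE_FEATURE, struct example_ctx):

#include <linux/slab.h>
#include <linux/types.h>

struct example_ctx {
        void *blob;
        size_t blob_len;
};

#ifdef CONFIG_EXAMPLE_FEATURE

static int example_feature_init(struct example_ctx *c, const void *blob,
                                size_t len)
{
        c->blob_len = len;
        if (blob) {
                c->blob = kmemdup(blob, len, GFP_KERNEL);
                if (!c->blob)
                        return -ENOMEM;
        }
        return 0;
}

static void example_feature_free(struct example_ctx *c)
{
        kfree(c->blob);
}

#else /* !CONFIG_EXAMPLE_FEATURE */

/* empty stubs keep the callers identical in both configurations */
static inline int example_feature_init(struct example_ctx *c, const void *blob,
                                       size_t len)
{
        return 0;
}

static inline void example_feature_free(struct example_ctx *c)
{
}

#endif /* CONFIG_EXAMPLE_FEATURE */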
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index aac3a1b1d94a..754e70bb5fe0 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -45,6 +45,10 @@ struct dm_verity {
u8 *salt; /* salt: its size is salt_size */
u8 *initial_hashstate; /* salted initial state, if shash_tfm is set */
u8 *zero_digest; /* digest for a zero block */
+#ifdef CONFIG_SECURITY
+ u8 *root_digest_sig; /* signature of the root digest */
+ unsigned int sig_size; /* root digest signature size */
+#endif /* CONFIG_SECURITY */
unsigned int salt_size;
sector_t data_start; /* data offset in 512-byte sectors */
sector_t hash_start; /* hash start in blocks */
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 08232d8dc815..29da10e6f703 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -32,11 +32,210 @@
#include "md.h"
#include "md-bitmap.h"
+#define BITMAP_MAJOR_LO 3
+/* version 4 insists the bitmap is in little-endian order
+ * with version 3, it is host-endian which is non-portable
+ * Version 5 is currently set only for clustered devices
+ */
+#define BITMAP_MAJOR_HI 4
+#define BITMAP_MAJOR_CLUSTERED 5
+#define BITMAP_MAJOR_HOSTENDIAN 3
+
+/*
+ * in-memory bitmap:
+ *
+ * Use 16 bit block counters to track pending writes to each "chunk".
+ * The 2 high order bits are special-purpose, the first is a flag indicating
+ * whether a resync is needed. The second is a flag indicating whether a
+ * resync is active.
+ * This means that the counter is actually 14 bits:
+ *
+ * +--------+--------+------------------------------------------------+
+ * | resync | resync | counter |
+ * | needed | active | |
+ * | (0-1) | (0-1) | (0-16383) |
+ * +--------+--------+------------------------------------------------+
+ *
+ * The "resync needed" bit is set when:
+ * a '1' bit is read from storage at startup.
+ * a write request fails on some drives
+ * a resync is aborted on a chunk with 'resync active' set
+ * It is cleared (and resync-active set) when a resync starts across all drives
+ * of the chunk.
+ *
+ *
+ * The "resync active" bit is set when:
+ * a resync is started on all drives, and resync_needed is set.
+ * resync_needed will be cleared (as long as resync_active wasn't already set).
+ * It is cleared when a resync completes.
+ *
+ * The counter counts pending write requests, plus the on-disk bit.
+ * When the counter is '1' and the resync bits are clear, the on-disk
+ * bit can be cleared as well, thus setting the counter to 0.
+ * When we set a bit, or in the counter (to start a write), if the fields is
+ * 0, we first set the disk bit and set the counter to 1.
+ *
+ * If the counter is 0, the on-disk bit is clear and the stripe is clean
+ * Anything that dirties the stripe pushes the counter to 2 (at least)
+ * and sets the on-disk bit (lazily).
+ * If a periodic sweep find the counter at 2, it is decremented to 1.
+ * If the sweep find the counter at 1, the on-disk bit is cleared and the
+ * counter goes to zero.
+ *
+ * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block
+ * counters as a fallback when "page" memory cannot be allocated:
+ *
+ * Normal case (page memory allocated):
+ *
+ * page pointer (32-bit)
+ *
+ * [ ] ------+
+ * |
+ * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters)
+ * c1 c2 c2048
+ *
+ * Hijacked case (page memory allocation failed):
+ *
+ * hijacked page pointer (32-bit)
+ *
+ * [ ][ ] (no page memory allocated)
+ * counter #1 (16-bit) counter #2 (16-bit)
+ *
+ */
+
+#define PAGE_BITS (PAGE_SIZE << 3)
+#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
+
+#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
+#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
+#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
+
+/* how many counters per page? */
+#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS)
+/* same, except a shift value for more efficient bitops */
+#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT)
+/* same, except a mask value for more efficient bitops */
+#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1)
+
+#define BITMAP_BLOCK_SHIFT 9
+
+/*
+ * bitmap structures:
+ */
+
+/* the in-memory bitmap is represented by bitmap_pages */
+struct bitmap_page {
+ /*
+ * map points to the actual memory page
+ */
+ char *map;
+ /*
+ * in emergencies (when map cannot be alloced), hijack the map
+ * pointer and use it as two counters itself
+ */
+ unsigned int hijacked:1;
+ /*
+ * If any counter in this page is '1' or '2' - and so could be
+ * cleared then that page is marked as 'pending'
+ */
+ unsigned int pending:1;
+ /*
+ * count of dirty bits on the page
+ */
+ unsigned int count:30;
+};
+
+/* the main bitmap structure - one per mddev */
+struct bitmap {
+
+ struct bitmap_counts {
+ spinlock_t lock;
+ struct bitmap_page *bp;
+ /* total number of pages in the bitmap */
+ unsigned long pages;
+ /* number of pages not yet allocated */
+ unsigned long missing_pages;
+ /* chunksize = 2^chunkshift (for bitops) */
+ unsigned long chunkshift;
+ /* total number of data chunks for the array */
+ unsigned long chunks;
+ } counts;
+
+ struct mddev *mddev; /* the md device that the bitmap is for */
+
+ __u64 events_cleared;
+ int need_sync;
+
+ struct bitmap_storage {
+ /* backing disk file */
+ struct file *file;
+ /* cached copy of the bitmap file superblock */
+ struct page *sb_page;
+ unsigned long sb_index;
+ /* list of cache pages for the file */
+ struct page **filemap;
+ /* attributes associated filemap pages */
+ unsigned long *filemap_attr;
+ /* number of pages in the file */
+ unsigned long file_pages;
+ /* total bytes in the bitmap */
+ unsigned long bytes;
+ } storage;
+
+ unsigned long flags;
+
+ int allclean;
+
+ atomic_t behind_writes;
+ /* highest actual value at runtime */
+ unsigned long behind_writes_used;
+
+ /*
+ * the bitmap daemon - periodically wakes up and sweeps the bitmap
+ * file, cleaning up bits and flushing out pages to disk as necessary
+ */
+ unsigned long daemon_lastrun; /* jiffies of last run */
+ /*
+ * when we lasted called end_sync to update bitmap with resync
+ * progress.
+ */
+ unsigned long last_end_sync;
+
+ /* pending writes to the bitmap file */
+ atomic_t pending_writes;
+ wait_queue_head_t write_wait;
+ wait_queue_head_t overflow_wait;
+ wait_queue_head_t behind_wait;
+
+ struct kernfs_node *sysfs_can_clear;
+ /* slot offset for clustered env */
+ int cluster_slot;
+};
+
+static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ int chunksize, bool init);
+
static inline char *bmname(struct bitmap *bitmap)
{
return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}
+static bool __bitmap_enabled(struct bitmap *bitmap)
+{
+ return bitmap->storage.filemap &&
+ !test_bit(BITMAP_STALE, &bitmap->flags);
+}
+
+static bool bitmap_enabled(struct mddev *mddev)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ return false;
+
+ return __bitmap_enabled(bitmap);
+}
+
/*
* check a page and, if necessary, allocate it (or hijack it if the alloc fails)
*
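The NEEDED/RESYNC/COUNTER macros above carve each 16-bit per-chunk counter into the two flag bits and the 14-bit pending-write count described in the block comment. The standalone demonstration below decomposes one such counter value; the mask definitions are assumptions consistent with the documented layout (the real ones live in md-bitmap.h, which this hunk does not show).

#include <stdint.h>
#include <stdio.h>

typedef uint16_t bitmap_counter_t;

#define NEEDED_MASK  ((bitmap_counter_t)(1U << 15))        /* resync needed */
#define RESYNC_MASK  ((bitmap_counter_t)(1U << 14))        /* resync active */
#define COUNTER_MAX  ((bitmap_counter_t)((1U << 14) - 1))  /* 0..16383      */

int main(void)
{
        /* resync active, three writes pending on this chunk */
        bitmap_counter_t bmc = RESYNC_MASK | 3;

        printf("needed=%d active=%d pending=%u\n",
               !!(bmc & NEEDED_MASK), !!(bmc & RESYNC_MASK),
               (unsigned int)(bmc & COUNTER_MAX));
        return 0;
}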
@@ -360,7 +559,7 @@ static int read_file_page(struct file *file, unsigned long index,
pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
- bh = alloc_page_buffers(page, blocksize, false);
+ bh = alloc_page_buffers(page, blocksize);
if (!bh) {
ret = -ENOMEM;
goto out;
@@ -472,9 +671,10 @@ static void md_bitmap_wait_writes(struct bitmap *bitmap)
/* update the event counter and sync the superblock to disk */
-void md_bitmap_update_sb(struct bitmap *bitmap)
+static void bitmap_update_sb(void *data)
{
bitmap_super_t *sb;
+ struct bitmap *bitmap = data;
if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
return;
@@ -510,10 +710,8 @@ void md_bitmap_update_sb(struct bitmap *bitmap)
write_sb_page(bitmap, bitmap->storage.sb_index,
bitmap->storage.sb_page, 1);
}
-EXPORT_SYMBOL(md_bitmap_update_sb);
-/* print out the bitmap file superblock */
-void md_bitmap_print_sb(struct bitmap *bitmap)
+static void bitmap_print_sb(struct bitmap *bitmap)
{
bitmap_super_t *sb;
@@ -760,7 +958,7 @@ out_no_sb:
bitmap->mddev->bitmap_info.space > sectors_reserved)
bitmap->mddev->bitmap_info.space = sectors_reserved;
} else {
- md_bitmap_print_sb(bitmap);
+ bitmap_print_sb(bitmap);
if (bitmap->cluster_slot < 0)
md_cluster_stop(bitmap->mddev);
}
@@ -893,7 +1091,7 @@ static void md_bitmap_file_unmap(struct bitmap_storage *store)
static void md_bitmap_file_kick(struct bitmap *bitmap)
{
if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
- md_bitmap_update_sb(bitmap);
+ bitmap_update_sb(bitmap);
if (bitmap->storage.file) {
pr_warn("%s: kicking failed bitmap file %pD4 from array!\n",
@@ -1028,13 +1226,13 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
/* this gets called when the md device is ready to unplug its underlying
* (slave) device queues -- before we let any writes go down, we need to
* sync the dirty pages of the bitmap file to disk */
-void md_bitmap_unplug(struct bitmap *bitmap)
+static void __bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
int dirty, need_write;
int writing = 0;
- if (!md_bitmap_enabled(bitmap))
+ if (!__bitmap_enabled(bitmap))
return;
/* look at each page to see if there are any set bits that need to be
@@ -1060,7 +1258,6 @@ void md_bitmap_unplug(struct bitmap *bitmap)
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
md_bitmap_file_kick(bitmap);
}
-EXPORT_SYMBOL(md_bitmap_unplug);
struct bitmap_unplug_work {
struct work_struct work;
@@ -1073,11 +1270,11 @@ static void md_bitmap_unplug_fn(struct work_struct *work)
struct bitmap_unplug_work *unplug_work =
container_of(work, struct bitmap_unplug_work, work);
- md_bitmap_unplug(unplug_work->bitmap);
+ __bitmap_unplug(unplug_work->bitmap);
complete(unplug_work->done);
}
-void md_bitmap_unplug_async(struct bitmap *bitmap)
+static void bitmap_unplug_async(struct bitmap *bitmap)
{
DECLARE_COMPLETION_ONSTACK(done);
struct bitmap_unplug_work unplug_work;
@@ -1089,7 +1286,19 @@ void md_bitmap_unplug_async(struct bitmap *bitmap)
queue_work(md_bitmap_wq, &unplug_work.work);
wait_for_completion(&done);
}
-EXPORT_SYMBOL(md_bitmap_unplug_async);
+
+static void bitmap_unplug(struct mddev *mddev, bool sync)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ return;
+
+ if (sync)
+ __bitmap_unplug(bitmap);
+ else
+ bitmap_unplug_async(bitmap);
+}
static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
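bitmap_unplug_async() above hands the flush to a workqueue and blocks on an on-stack completion until the work item has run. Here is that offload-and-wait idiom in isolation; everything except the workqueue/completion API names is illustrative.

#include <linux/completion.h>
#include <linux/workqueue.h>

struct offload_work {
        struct work_struct work;
        struct completion *done;
};

static void offload_fn(struct work_struct *work)
{
        struct offload_work *ow = container_of(work, struct offload_work, work);

        /* ... perform the real work in workqueue context ... */
        complete(ow->done);
}

static void run_and_wait(struct workqueue_struct *wq)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct offload_work ow;

        INIT_WORK_ONSTACK(&ow.work, offload_fn);
        ow.done = &done;
        queue_work(wq, &ow.work);
        wait_for_completion(&done);     /* 'ow' stays valid: we wait here */
        destroy_work_on_stack(&ow.work);
}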
@@ -1226,22 +1435,21 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
return ret;
}
-void md_bitmap_write_all(struct bitmap *bitmap)
+/* just flag bitmap pages as needing to be written. */
+static void bitmap_write_all(struct mddev *mddev)
{
- /* We don't actually write all bitmap blocks here,
- * just flag them as needing to be written
- */
int i;
+ struct bitmap *bitmap = mddev->bitmap;
if (!bitmap || !bitmap->storage.filemap)
return;
+
+ /* Only one copy, so nothing needed */
if (bitmap->storage.file)
- /* Only one copy, so nothing needed */
return;
for (i = 0; i < bitmap->storage.file_pages; i++)
- set_page_attr(bitmap, i,
- BITMAP_PAGE_NEEDWRITE);
+ set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
}
@@ -1290,7 +1498,7 @@ out:
* bitmap daemon -- periodically wakes up to clean bits and flush pages
* out to disk
*/
-void md_bitmap_daemon_work(struct mddev *mddev)
+static void bitmap_daemon_work(struct mddev *mddev)
{
struct bitmap *bitmap;
unsigned long j;
@@ -1461,8 +1669,11 @@ __acquires(bitmap->lock)
&(bitmap->bp[page].map[pageoff]);
}
-int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
+static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
+ unsigned long sectors, bool behind)
{
+ struct bitmap *bitmap = mddev->bitmap;
+
if (!bitmap)
return 0;
@@ -1523,13 +1734,15 @@ int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long s
}
return 0;
}
-EXPORT_SYMBOL(md_bitmap_startwrite);
-void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
- unsigned long sectors, int success, int behind)
+static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
+ unsigned long sectors, bool success, bool behind)
{
+ struct bitmap *bitmap = mddev->bitmap;
+
if (!bitmap)
return;
+
if (behind) {
if (atomic_dec_and_test(&bitmap->behind_writes))
wake_up(&bitmap->behind_wait);
@@ -1576,26 +1789,27 @@ void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
sectors = 0;
}
}
-EXPORT_SYMBOL(md_bitmap_endwrite);
-static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
- int degraded)
+static bool __bitmap_start_sync(struct bitmap *bitmap, sector_t offset,
+ sector_t *blocks, bool degraded)
{
bitmap_counter_t *bmc;
- int rv;
+ bool rv;
+
if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
*blocks = 1024;
- return 1; /* always resync if no bitmap */
+ return true; /* always resync if no bitmap */
}
spin_lock_irq(&bitmap->counts.lock);
+
+ rv = false;
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
- rv = 0;
if (bmc) {
/* locked */
- if (RESYNC(*bmc))
- rv = 1;
- else if (NEEDED(*bmc)) {
- rv = 1;
+ if (RESYNC(*bmc)) {
+ rv = true;
+ } else if (NEEDED(*bmc)) {
+ rv = true;
if (!degraded) { /* don't set/clear bits if degraded */
*bmc |= RESYNC_MASK;
*bmc &= ~NEEDED_MASK;
@@ -1603,11 +1817,12 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
}
}
spin_unlock_irq(&bitmap->counts.lock);
+
return rv;
}
-int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
- int degraded)
+static bool bitmap_start_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks, bool degraded)
{
/* bitmap_start_sync must always report on multiples of whole
* pages, otherwise resync (which is very PAGE_SIZE based) will
@@ -1616,21 +1831,22 @@ int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *block
* At least PAGE_SIZE>>9 blocks are covered.
* Return the 'or' of the result.
*/
- int rv = 0;
+ bool rv = false;
sector_t blocks1;
*blocks = 0;
while (*blocks < (PAGE_SIZE>>9)) {
- rv |= __bitmap_start_sync(bitmap, offset,
+ rv |= __bitmap_start_sync(mddev->bitmap, offset,
&blocks1, degraded);
offset += blocks1;
*blocks += blocks1;
}
+
return rv;
}
-EXPORT_SYMBOL(md_bitmap_start_sync);
-void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
+static void __bitmap_end_sync(struct bitmap *bitmap, sector_t offset,
+ sector_t *blocks, bool aborted)
{
bitmap_counter_t *bmc;
unsigned long flags;
@@ -1659,9 +1875,14 @@ void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks
unlock:
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
}
-EXPORT_SYMBOL(md_bitmap_end_sync);
-void md_bitmap_close_sync(struct bitmap *bitmap)
+static void bitmap_end_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks)
+{
+ __bitmap_end_sync(mddev->bitmap, offset, blocks, true);
+}
+
+static void bitmap_close_sync(struct mddev *mddev)
{
/* Sync has finished, and any bitmap chunks that weren't synced
* properly have been aborted. It remains to us to clear the
@@ -1669,19 +1890,23 @@ void md_bitmap_close_sync(struct bitmap *bitmap)
*/
sector_t sector = 0;
sector_t blocks;
+ struct bitmap *bitmap = mddev->bitmap;
+
if (!bitmap)
return;
+
while (sector < bitmap->mddev->resync_max_sectors) {
- md_bitmap_end_sync(bitmap, sector, &blocks, 0);
+ __bitmap_end_sync(bitmap, sector, &blocks, false);
sector += blocks;
}
}
-EXPORT_SYMBOL(md_bitmap_close_sync);
-void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
+static void bitmap_cond_end_sync(struct mddev *mddev, sector_t sector,
+ bool force)
{
sector_t s = 0;
sector_t blocks;
+ struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return;
@@ -1700,34 +1925,32 @@ void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
- md_bitmap_end_sync(bitmap, s, &blocks, 0);
+ __bitmap_end_sync(bitmap, s, &blocks, false);
s += blocks;
}
bitmap->last_end_sync = jiffies;
sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
}
-EXPORT_SYMBOL(md_bitmap_cond_end_sync);
-void md_bitmap_sync_with_cluster(struct mddev *mddev,
- sector_t old_lo, sector_t old_hi,
- sector_t new_lo, sector_t new_hi)
+static void bitmap_sync_with_cluster(struct mddev *mddev,
+ sector_t old_lo, sector_t old_hi,
+ sector_t new_lo, sector_t new_hi)
{
struct bitmap *bitmap = mddev->bitmap;
sector_t sector, blocks = 0;
for (sector = old_lo; sector < new_lo; ) {
- md_bitmap_end_sync(bitmap, sector, &blocks, 0);
+ __bitmap_end_sync(bitmap, sector, &blocks, false);
sector += blocks;
}
WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
for (sector = old_hi; sector < new_hi; ) {
- md_bitmap_start_sync(bitmap, sector, &blocks, 0);
+ bitmap_start_sync(mddev, sector, &blocks, false);
sector += blocks;
}
WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
}
-EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
@@ -1756,12 +1979,18 @@ static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, in
}
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
-void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
+static void bitmap_dirty_bits(struct mddev *mddev, unsigned long s,
+ unsigned long e)
{
unsigned long chunk;
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ return;
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
+
md_bitmap_set_memory_bits(bitmap, sec, 1);
md_bitmap_file_set_bit(bitmap, sec);
if (sec < bitmap->mddev->recovery_cp)
@@ -1773,10 +2002,7 @@ void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long
}
}
-/*
- * flush out any pending updates
- */
-void md_bitmap_flush(struct mddev *mddev)
+static void bitmap_flush(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
long sleep;
@@ -1789,23 +2015,21 @@ void md_bitmap_flush(struct mddev *mddev)
*/
sleep = mddev->bitmap_info.daemon_sleep * 2;
bitmap->daemon_lastrun -= sleep;
- md_bitmap_daemon_work(mddev);
+ bitmap_daemon_work(mddev);
bitmap->daemon_lastrun -= sleep;
- md_bitmap_daemon_work(mddev);
+ bitmap_daemon_work(mddev);
bitmap->daemon_lastrun -= sleep;
- md_bitmap_daemon_work(mddev);
+ bitmap_daemon_work(mddev);
if (mddev->bitmap_info.external)
md_super_wait(mddev);
- md_bitmap_update_sb(bitmap);
+ bitmap_update_sb(bitmap);
}
-/*
- * free memory that was allocated
- */
-void md_bitmap_free(struct bitmap *bitmap)
+static void md_bitmap_free(void *data)
{
unsigned long k, pages;
struct bitmap_page *bp;
+ struct bitmap *bitmap = data;
if (!bitmap) /* there was no bitmap */
return;
@@ -1836,9 +2060,8 @@ void md_bitmap_free(struct bitmap *bitmap)
kfree(bp);
kfree(bitmap);
}
-EXPORT_SYMBOL(md_bitmap_free);
-void md_bitmap_wait_behind_writes(struct mddev *mddev)
+static void bitmap_wait_behind_writes(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
@@ -1852,14 +2075,14 @@ void md_bitmap_wait_behind_writes(struct mddev *mddev)
}
}
-void md_bitmap_destroy(struct mddev *mddev)
+static void bitmap_destroy(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap) /* there was no bitmap */
return;
- md_bitmap_wait_behind_writes(mddev);
+ bitmap_wait_behind_writes(mddev);
if (!mddev->serialize_policy)
mddev_destroy_serial_pool(mddev, NULL);
@@ -1878,7 +2101,7 @@ void md_bitmap_destroy(struct mddev *mddev)
* if this returns an error, bitmap_destroy must be called to do clean up
* once mddev->bitmap is set
*/
-struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
+static struct bitmap *__bitmap_create(struct mddev *mddev, int slot)
{
struct bitmap *bitmap;
sector_t blocks = mddev->resync_max_sectors;
@@ -1948,7 +2171,8 @@ struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
goto error;
bitmap->daemon_lastrun = jiffies;
- err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
+ err = __bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize,
+ true);
if (err)
goto error;
@@ -1965,7 +2189,18 @@ struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
return ERR_PTR(err);
}
-int md_bitmap_load(struct mddev *mddev)
+static int bitmap_create(struct mddev *mddev, int slot)
+{
+ struct bitmap *bitmap = __bitmap_create(mddev, slot);
+
+ if (IS_ERR(bitmap))
+ return PTR_ERR(bitmap);
+
+ mddev->bitmap = bitmap;
+ return 0;
+}
+
+static int bitmap_load(struct mddev *mddev)
{
int err = 0;
sector_t start = 0;
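bitmap_create() above converts the ERR_PTR-encoded result of __bitmap_create() into a plain errno and only then publishes the pointer in mddev->bitmap. A sketch of that ERR_PTR/IS_ERR/PTR_ERR convention, using a hypothetical allocator and object type:

#include <linux/err.h>
#include <linux/slab.h>

struct example_obj {
        int dummy;
};

static struct example_obj *example_alloc(void)
{
        struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return ERR_PTR(-ENOMEM);        /* errno encoded in the pointer */
        return obj;
}

static int example_create(struct example_obj **out)
{
        struct example_obj *obj = example_alloc();

        if (IS_ERR(obj))
                return PTR_ERR(obj);            /* decode back to a plain errno */

        *out = obj;                             /* publish only on success */
        return 0;
}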
@@ -1989,10 +2224,10 @@ int md_bitmap_load(struct mddev *mddev)
*/
while (sector < mddev->resync_max_sectors) {
sector_t blocks;
- md_bitmap_start_sync(bitmap, sector, &blocks, 0);
+ bitmap_start_sync(mddev, sector, &blocks, false);
sector += blocks;
}
- md_bitmap_close_sync(bitmap);
+ bitmap_close_sync(mddev);
if (mddev->degraded == 0
|| bitmap->events_cleared == mddev->events)
@@ -2014,22 +2249,21 @@ int md_bitmap_load(struct mddev *mddev)
mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
md_wakeup_thread(mddev->thread);
- md_bitmap_update_sb(bitmap);
+ bitmap_update_sb(bitmap);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
err = -EIO;
out:
return err;
}
-EXPORT_SYMBOL_GPL(md_bitmap_load);
/* caller need to free returned bitmap with md_bitmap_free() */
-struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
+static void *bitmap_get_from_slot(struct mddev *mddev, int slot)
{
int rv = 0;
struct bitmap *bitmap;
- bitmap = md_bitmap_create(mddev, slot);
+ bitmap = __bitmap_create(mddev, slot);
if (IS_ERR(bitmap)) {
rv = PTR_ERR(bitmap);
return ERR_PTR(rv);
@@ -2043,20 +2277,19 @@ struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)