path: root/drivers/block
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/Kconfig                |  17
-rw-r--r--  drivers/block/aoe/aoechr.c           |   2
-rw-r--r--  drivers/block/brd.c                  |   1
-rw-r--r--  drivers/block/drbd/drbd_actlog.c     |  13
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c     |  13
-rw-r--r--  drivers/block/drbd/drbd_int.h        | 120
-rw-r--r--  drivers/block/drbd/drbd_main.c       |  72
-rw-r--r--  drivers/block/drbd/drbd_nl.c         |  25
-rw-r--r--  drivers/block/drbd/drbd_receiver.c   | 108
-rw-r--r--  drivers/block/drbd/drbd_req.c        |  30
-rw-r--r--  drivers/block/drbd/drbd_req.h        |  11
-rw-r--r--  drivers/block/drbd/drbd_state.c      |  31
-rw-r--r--  drivers/block/drbd/drbd_worker.c     | 114
-rw-r--r--  drivers/block/floppy.c               |   2
-rw-r--r--  drivers/block/loop.c                 |  18
-rw-r--r--  drivers/block/nbd.c                  |  26
-rw-r--r--  drivers/block/null_blk/Kconfig       |   2
-rw-r--r--  drivers/block/null_blk/main.c        | 136
-rw-r--r--  drivers/block/null_blk/null_blk.h    |   7
-rw-r--r--  drivers/block/pktcdvd.c              |  60
-rw-r--r--  drivers/block/rbd.c                  |  34
-rw-r--r--  drivers/block/rnbd/rnbd-clt-sysfs.c  |   2
-rw-r--r--  drivers/block/rnbd/rnbd-proto.h      |   2
-rw-r--r--  drivers/block/rnbd/rnbd-srv-sysfs.c  |   2
-rw-r--r--  drivers/block/ublk_drv.c             | 216
-rw-r--r--  drivers/block/virtio_blk.c           | 269
-rw-r--r--  drivers/block/xen-blkback/blkback.c  | 126
-rw-r--r--  drivers/block/xen-blkback/common.h   | 103
-rw-r--r--  drivers/block/zram/zram_drv.c        | 393
-rw-r--r--  drivers/block/zram/zram_drv.h        |   1
30 files changed, 1056 insertions, 900 deletions
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index f79f20430ef7..5b9d4aaebb81 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -385,6 +385,23 @@ config BLK_DEV_UBLK
can handle batching more effectively, but task_work_add() isn't exported
to modules, so ublk has to be built into the kernel.
+config BLKDEV_UBLK_LEGACY_OPCODES
+ bool "Support legacy command opcode"
+ depends on BLK_DEV_UBLK
+ default y
+ help
+ The ublk driver originally accepted plain command opcodes, which turned
+ out to be a bad choice. Traditional ioctl-style command opcodes encode
+ more information and define each command uniquely, so opcode conflicts
+ are avoided, the driver can reject malformed commands easily, and the
+ security subsystem can audit io_uring commands more readily.
+
+ Say Y if your application still uses the legacy command opcodes.
+
+ Say N if you don't need the legacy command opcodes. Saying N is
+ recommended once your application (the ublk server) has switched to
+ the ioctl command encoding.
+
source "drivers/block/rnbd/Kconfig"
endif # BLK_DEV
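
The help text above argues that ioctl-style opcodes carry more information than bare sequential numbers. A minimal userspace sketch of the difference, using the standard _IOWR()/_IOC_NR() macros from <linux/ioctl.h>; the DEMO_* names and struct demo_ctrl_cmd are hypothetical, not the actual ublk definitions:

#include <stdio.h>
#include <stdint.h>
#include <linux/ioctl.h>   /* _IOWR, _IOC_TYPE, _IOC_NR, _IOC_SIZE */

/* Hypothetical payload; the real ublk control commands use their own struct. */
struct demo_ctrl_cmd {
	uint64_t addr;
	uint32_t len;
};

/* Legacy style: a bare sequential number, nothing else encoded. */
#define DEMO_CMD_GET_INFO	0x05

/* ioctl style: direction, type ('u'), number and payload size packed in. */
#define DEMO_U_CMD_GET_INFO	_IOWR('u', 0x05, struct demo_ctrl_cmd)

int main(void)
{
	unsigned int op = DEMO_U_CMD_GET_INFO;

	printf("legacy opcode: 0x%x\n", DEMO_CMD_GET_INFO);
	printf("ioctl  opcode: 0x%x (type '%c', nr %u, size %u bytes)\n",
	       op, _IOC_TYPE(op), _IOC_NR(op), _IOC_SIZE(op));
	return 0;
}

With the encoded form a driver can reject any command whose _IOC_SIZE() or _IOC_TYPE() does not match what it expects, which a bare opcode cannot express.
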
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 7a368c90467d..4c666f72203f 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -290,7 +290,7 @@ aoechr_init(void)
}
init_completion(&emsgs_comp);
spin_lock_init(&emsgs_lock);
- aoe_class = class_create(THIS_MODULE, "aoe");
+ aoe_class = class_create("aoe");
if (IS_ERR(aoe_class)) {
unregister_chrdev(AOE_MAJOR, "aoechr");
return PTR_ERR(aoe_class);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 34177f1bd97d..bcad9b926b0c 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -404,7 +404,6 @@ static int brd_alloc(int i)
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
err = add_disk(disk);
if (err)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 429255876800..64b3a1c76f03 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -735,8 +735,9 @@ static bool update_rs_extent(struct drbd_device *device,
return false;
}
-void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
+void drbd_advance_rs_marks(struct drbd_peer_device *peer_device, unsigned long still_to_go)
{
+ struct drbd_device *device = peer_device->device;
unsigned long now = jiffies;
unsigned long last = device->rs_mark_time[device->rs_last_mark];
int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
@@ -819,7 +820,7 @@ static int update_sync_bits(struct drbd_device *device,
if (mode == SET_IN_SYNC) {
unsigned long still_to_go = drbd_bm_total_weight(device);
bool rs_is_done = (still_to_go <= device->rs_failed);
- drbd_advance_rs_marks(device, still_to_go);
+ drbd_advance_rs_marks(first_peer_device(device), still_to_go);
if (cleared || rs_is_done)
maybe_schedule_on_disk_bitmap_update(device, rs_is_done);
} else if (mode == RECORD_RS_FAILED)
@@ -843,10 +844,11 @@ static bool plausible_request_size(int size)
* called by worker on C_SYNC_TARGET and receiver on SyncSource.
*
*/
-int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
+int __drbd_change_sync(struct drbd_peer_device *peer_device, sector_t sector, int size,
enum update_sync_bits_mode mode)
{
/* Is called from worker and receiver context _only_ */
+ struct drbd_device *device = peer_device->device;
unsigned long sbnr, ebnr, lbnr;
unsigned long count = 0;
sector_t esector, nr_sectors;
@@ -1009,14 +1011,15 @@ retry:
* tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
* if there is still application IO going on in this area.
*/
-int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
+int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector)
{
+ struct drbd_device *device = peer_device->device;
unsigned int enr = BM_SECT_TO_EXT(sector);
const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
struct lc_element *e;
struct bm_extent *bm_ext;
int i;
- bool throttle = drbd_rs_should_slow_down(device, sector, true);
+ bool throttle = drbd_rs_should_slow_down(peer_device, sector, true);
/* If we need to throttle, a half-locked (only marked BME_NO_WRITES,
* not yet BME_LOCKED) extent needs to be kicked out explicitly if we
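
The drbd_actlog.c hunks above show the pattern repeated throughout this DRBD series: resync helpers that used to take a struct drbd_device * now take the struct drbd_peer_device * and derive the device from it, keeping per-peer state reachable in the callee. A minimal sketch of that shape, using hypothetical demo_* types rather than the real DRBD structures:

/* Illustrative demo_* types only; not the real DRBD structures. */
struct demo_device {
	unsigned long rs_total;
	unsigned long rs_left;
};

struct demo_peer_device {
	struct demo_device *device;
	int node_id;                    /* per-peer state stays reachable */
};

/*
 * Old shape: void demo_advance_rs_marks(struct demo_device *device, ...)
 * New shape: the peer device is passed and the device derived from it.
 */
static void demo_advance_rs_marks(struct demo_peer_device *peer_device,
				  unsigned long still_to_go)
{
	struct demo_device *device = peer_device->device;

	device->rs_left = still_to_go;
}
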
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 289876ffbc31..6ac8c54b44c7 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1216,7 +1216,9 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
* drbd_bm_read() - Read the whole bitmap from its on disk location.
* @device: DRBD device.
*/
-int drbd_bm_read(struct drbd_device *device) __must_hold(local)
+int drbd_bm_read(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
+
{
return bm_rw(device, BM_AIO_READ, 0);
}
@@ -1227,7 +1229,8 @@ int drbd_bm_read(struct drbd_device *device) __must_hold(local)
*
* Will only write pages that have changed since last IO.
*/
-int drbd_bm_write(struct drbd_device *device) __must_hold(local)
+int drbd_bm_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
{
return bm_rw(device, 0, 0);
}
@@ -1238,7 +1241,8 @@ int drbd_bm_write(struct drbd_device *device) __must_hold(local)
*
* Will write all pages.
*/
-int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
+int drbd_bm_write_all(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
{
return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0);
}
@@ -1264,7 +1268,8 @@ int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_ho
* verify is aborted due to a failed peer disk, while local IO continues, or
* pending resync acks are still being processed.
*/
-int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
+int drbd_bm_write_copy_pages(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
{
return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d89b7d03d4c8..a30a5ed811be 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -66,6 +66,7 @@ extern int drbd_proc_details;
struct drbd_device;
struct drbd_connection;
+struct drbd_peer_device;
/* Defines to control fault insertion */
enum {
@@ -126,8 +127,8 @@ struct bm_xfer_ctx {
unsigned bytes[2];
};
-extern void INFO_bm_xfer_stats(struct drbd_device *device,
- const char *direction, struct bm_xfer_ctx *c);
+extern void INFO_bm_xfer_stats(struct drbd_peer_device *peer_device,
+ const char *direction, struct bm_xfer_ctx *c);
static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
@@ -541,9 +542,10 @@ struct drbd_md_io {
struct bm_io_work {
struct drbd_work w;
+ struct drbd_peer_device *peer_device;
char *why;
enum bm_flag flags;
- int (*io_fn)(struct drbd_device *device);
+ int (*io_fn)(struct drbd_device *device, struct drbd_peer_device *peer_device);
void (*done)(struct drbd_device *device, int rv);
};
@@ -1041,7 +1043,7 @@ extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
-extern int drbd_send_bitmap(struct drbd_device *device);
+extern int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
@@ -1065,17 +1067,22 @@ extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
- int (*io_fn)(struct drbd_device *),
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
void (*done)(struct drbd_device *, int),
- char *why, enum bm_flag flags);
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device);
extern int drbd_bitmap_io(struct drbd_device *device,
- int (*io_fn)(struct drbd_device *),
- char *why, enum bm_flag flags);
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
- int (*io_fn)(struct drbd_device *),
- char *why, enum bm_flag flags);
-extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
-extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device);
+extern int drbd_bmio_set_n_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
+extern int drbd_bmio_clear_n_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
/* Meta data layout
*
@@ -1284,14 +1291,18 @@ extern void _drbd_bm_set_bits(struct drbd_device *device,
const unsigned long s, const unsigned long e);
extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
-extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_read(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
-extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
-extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
-extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_write_all(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
+extern int drbd_bm_write_copy_pages(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local);
extern size_t drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t drbd_bm_capacity(struct drbd_device *device);
@@ -1422,21 +1433,24 @@ void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
extern void suspend_other_sg(struct drbd_device *device);
-extern int drbd_resync_finished(struct drbd_device *device);
+extern int drbd_resync_finished(struct drbd_peer_device *peer_device);
/* maybe rather drbd_main.c ? */
extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev, sector_t sector, enum req_op op);
-extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
+extern void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device,
+ sector_t sector, int size);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
-extern void drbd_rs_controller_reset(struct drbd_device *device);
+extern void drbd_rs_controller_reset(struct drbd_peer_device *peer_device);
-static inline void ov_out_of_sync_print(struct drbd_device *device)
+static inline void ov_out_of_sync_print(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
+
if (device->ov_last_oos_size) {
- drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
+ drbd_err(peer_device, "Out of sync: start=%llu, size=%lu (sectors)\n",
(unsigned long long)device->ov_last_oos_start,
(unsigned long)device->ov_last_oos_size);
}
@@ -1475,7 +1489,7 @@ extern int drbd_ack_receiver(struct drbd_thread *thi);
extern void drbd_send_ping_wf(struct work_struct *ws);
extern void drbd_send_acks_wf(struct work_struct *ws);
extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
-extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
+extern bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_peer_request *peer_req);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
@@ -1531,22 +1545,22 @@ extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i
extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
-extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
+extern int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *device);
extern int drbd_rs_del_all(struct drbd_device *device);
-extern void drbd_rs_failed_io(struct drbd_device *device,
+extern void drbd_rs_failed_io(struct drbd_peer_device *peer_device,
sector_t sector, int size);
-extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
+extern void drbd_advance_rs_marks(struct drbd_peer_device *peer_device, unsigned long still_to_go);
enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
-extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
+extern int __drbd_change_sync(struct drbd_peer_device *peer_device, sector_t sector, int size,
enum update_sync_bits_mode mode);
-#define drbd_set_in_sync(device, sector, size) \
- __drbd_change_sync(device, sector, size, SET_IN_SYNC)
-#define drbd_set_out_of_sync(device, sector, size) \
- __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
-#define drbd_rs_failed_io(device, sector, size) \
- __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
+#define drbd_set_in_sync(peer_device, sector, size) \
+ __drbd_change_sync(peer_device, sector, size, SET_IN_SYNC)
+#define drbd_set_out_of_sync(peer_device, sector, size) \
+ __drbd_change_sync(peer_device, sector, size, SET_OUT_OF_SYNC)
+#define drbd_rs_failed_io(peer_device, sector, size) \
+ __drbd_change_sync(peer_device, sector, size, RECORD_RS_FAILED)
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_al_initialize(struct drbd_device *, void *);
@@ -1918,18 +1932,14 @@ static inline void inc_ap_pending(struct drbd_device *device)
atomic_inc(&device->ap_pending_cnt);
}
-#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
- if (atomic_read(&device->which) < 0) \
- drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
- func, line, \
- atomic_read(&device->which))
-
-#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
-static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
+#define dec_ap_pending(device) ((void)expect((device), __dec_ap_pending(device) >= 0))
+static inline int __dec_ap_pending(struct drbd_device *device)
{
- if (atomic_dec_and_test(&device->ap_pending_cnt))
+ int ap_pending_cnt = atomic_dec_return(&device->ap_pending_cnt);
+
+ if (ap_pending_cnt == 0)
wake_up(&device->misc_wait);
- ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
+ return ap_pending_cnt;
}
/* counts how many resync-related answers we still expect from the peer
@@ -1938,16 +1948,16 @@ static inline void _dec_ap_pending(struct drbd_device *device, const char *func,
* C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER)
* (or P_NEG_ACK with ID_SYNCER)
*/
-static inline void inc_rs_pending(struct drbd_device *device)
+static inline void inc_rs_pending(struct drbd_peer_device *peer_device)
{
- atomic_inc(&device->rs_pending_cnt);
+ atomic_inc(&peer_device->device->rs_pending_cnt);
}
-#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
-static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
+#define dec_rs_pending(peer_device) \
+ ((void)expect((peer_device), __dec_rs_pending(peer_device) >= 0))
+static inline int __dec_rs_pending(struct drbd_peer_device *peer_device)
{
- atomic_dec(&device->rs_pending_cnt);
- ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
+ return atomic_dec_return(&peer_device->device->rs_pending_cnt);
}
/* counts how many answers we still need to send to the peer.
@@ -1964,18 +1974,16 @@ static inline void inc_unacked(struct drbd_device *device)
atomic_inc(&device->unacked_cnt);
}
-#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
-static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
+#define dec_unacked(device) ((void)expect(device, __dec_unacked(device) >= 0))
+static inline int __dec_unacked(struct drbd_device *device)
{
- atomic_dec(&device->unacked_cnt);
- ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
+ return atomic_dec_return(&device->unacked_cnt);
}
-#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
-static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
+#define sub_unacked(device, n) ((void)expect(device, __sub_unacked(device) >= 0))
+static inline int __sub_unacked(struct drbd_device *device, int n)
{
- atomic_sub(n, &device->unacked_cnt);
- ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
+ return atomic_sub_return(n, &device->unacked_cnt);
}
static inline bool is_sync_target_state(enum drbd_conns connection_state)
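
The drbd_int.h hunk above drops the ERR_IF_CNT_IS_NEGATIVE() plumbing of __func__/__LINE__ in favour of inline helpers that return the post-decrement value, which a wrapping macro then checks. A rough standalone sketch of the same shape, where demo_expect() is a hypothetical stand-in for DRBD's expect():

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for DRBD's expect(): warn when a condition fails. */
#define demo_expect(cond) \
	((cond) ? 1 : (fprintf(stderr, "unexpected: %s\n", #cond), 0))

struct demo_device { atomic_int ap_pending_cnt; };

/* The helper returns the value after the decrement ... */
static inline int __demo_dec_ap_pending(struct demo_device *d)
{
	return atomic_fetch_sub(&d->ap_pending_cnt, 1) - 1;
}

/* ... and the macro checks that it never went negative. */
#define demo_dec_ap_pending(d) \
	((void)demo_expect(__demo_dec_ap_pending(d) >= 0))

int main(void)
{
	struct demo_device dev = { .ap_pending_cnt = 1 };

	demo_dec_ap_pending(&dev);   /* 1 -> 0, no warning          */
	demo_dec_ap_pending(&dev);   /* 0 -> -1, warning is printed */
	return 0;
}
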
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2c764f7ee4a7..83987e7a5ef2 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -231,9 +231,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
}
req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
+ struct drbd_peer_device *peer_device;
if (req->epoch != expect_epoch)
break;
- _req_mod(req, BARRIER_ACKED);
+ peer_device = conn_peer_device(connection, req->device->vnr);
+ _req_mod(req, BARRIER_ACKED, peer_device);
}
spin_unlock_irq(&connection->resource->req_lock);
@@ -256,10 +258,13 @@ bail:
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
+ struct drbd_peer_device *peer_device;
struct drbd_request *req, *r;
- list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
- _req_mod(req, what);
+ list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
+ peer_device = conn_peer_device(connection, req->device->vnr);
+ _req_mod(req, what, peer_device);
+ }
}
void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
@@ -297,7 +302,7 @@ void tl_abort_disk_io(struct drbd_device *device)
continue;
if (req->device != device)
continue;
- _req_mod(req, ABORT_DISK_IO);
+ _req_mod(req, ABORT_DISK_IO, NULL);
}
spin_unlock_irq(&connection->resource->req_lock);
}
@@ -1198,10 +1203,11 @@ static int fill_bitmap_rle_bits(struct drbd_device *device,
* code upon failure.
*/
static int
-send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
+send_bitmap_rle_or_plain(struct drbd_peer_device *peer_device, struct bm_xfer_ctx *c)
{
- struct drbd_socket *sock = &first_peer_device(device)->connection->data;
- unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
+ struct drbd_device *device = peer_device->device;
+ struct drbd_socket *sock = &peer_device->connection->data;
+ unsigned int header_size = drbd_header_size(peer_device->connection);
struct p_compressed_bm *p = sock->sbuf + header_size;
int len, err;
@@ -1212,7 +1218,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
if (len) {
dcbp_set_code(p, RLE_VLI_Bits);
- err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
+ err = __send_command(peer_device->connection, device->vnr, sock,
P_COMPRESSED_BITMAP, sizeof(*p) + len,
NULL, 0);
c->packets[0]++;
@@ -1233,7 +1239,8 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
len = num_words * sizeof(*p);
if (len)
drbd_bm_get_lel(device, c->word_offset, num_words, p);
- err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
+ err = __send_command(peer_device->connection, device->vnr, sock, P_BITMAP,
+ len, NULL, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
@@ -1245,7 +1252,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
}
if (!err) {
if (len == 0) {
- INFO_bm_xfer_stats(device, "send", c);
+ INFO_bm_xfer_stats(peer_device, "send", c);
return 0;
} else
return 1;
@@ -1254,7 +1261,8 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
}
/* See the comment at receive_bitmap() */
-static int _drbd_send_bitmap(struct drbd_device *device)
+static int _drbd_send_bitmap(struct drbd_device *device,
+ struct drbd_peer_device *peer_device)
{
struct bm_xfer_ctx c;
int err;
@@ -1266,7 +1274,7 @@ static int _drbd_send_bitmap(struct drbd_device *device)
if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
drbd_bm_set_all(device);
- if (drbd_bm_write(device)) {
+ if (drbd_bm_write(device, peer_device)) {
/* write_bm did fail! Leave full sync flag set in Meta P_DATA
* but otherwise process as per normal - need to tell other
* side that a full resync is required! */
@@ -1285,20 +1293,20 @@ static int _drbd_send_bitmap(struct drbd_device *device)
};
do {
- err = send_bitmap_rle_or_plain(device, &c);
+ err = send_bitmap_rle_or_plain(peer_device, &c);
} while (err > 0);
return err == 0;
}
-int drbd_send_bitmap(struct drbd_device *device)
+int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device)
{
- struct drbd_socket *sock = &first_peer_device(device)->connection->data;
+ struct drbd_socket *sock = &peer_device->connection->data;
int err = -1;
mutex_lock(&sock->mutex);
if (sock->socket)
- err = !_drbd_send_bitmap(device);
+ err = !_drbd_send_bitmap(device, peer_device);
mutex_unlock(&sock->mutex);
return err;
}
@@ -3406,7 +3414,9 @@ void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
*
* Sets all bits in the bitmap and writes the whole bitmap to stable storage.
*/
-int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
+int drbd_bmio_set_n_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
+
{
int rv = -EIO;
@@ -3414,7 +3424,7 @@ int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
drbd_md_sync(device);
drbd_bm_set_all(device);
- rv = drbd_bm_write(device);
+ rv = drbd_bm_write(device, peer_device);
if (!rv) {
drbd_md_clear_flag(device, MDF_FULL_SYNC);
@@ -3430,11 +3440,13 @@ int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
*
* Clears all bits in the bitmap and writes the whole bitmap to stable storage.
*/
-int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
+int drbd_bmio_clear_n_write(struct drbd_device *device,
+ struct drbd_peer_device *peer_device) __must_hold(local)
+
{
drbd_resume_al(device);
drbd_bm_clear_all(device);
- return drbd_bm_write(device);
+ return drbd_bm_write(device, peer_device);
}
static int w_bitmap_io(struct drbd_work *w, int unused)
@@ -3453,7 +3465,7 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
if (get_ldev(device)) {
drbd_bm_lock(device, work->why, work->flags);
- rv = work->io_fn(device);
+ rv = work->io_fn(device, work->peer_device);
drbd_bm_unlock(device);
put_ldev(device);
}
@@ -3488,11 +3500,12 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
* put_ldev().
*/
void drbd_queue_bitmap_io(struct drbd_device *device,
- int (*io_fn)(struct drbd_device *),
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
void (*done)(struct drbd_device *, int),
- char *why, enum bm_flag flags)
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device)
{
- D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
+ D_ASSERT(device, current == peer_device->connection->worker.task);
D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
@@ -3501,6 +3514,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
why, device->bm_io_work.why);
+ device->bm_io_work.peer_device = peer_device;
device->bm_io_work.io_fn = io_fn;
device->bm_io_work.done = done;
device->bm_io_work.why = why;
@@ -3512,7 +3526,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
* application IO does not conflict anyways. */
if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
- drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ drbd_queue_work(&peer_device->connection->sender_work,
&device->bm_io_work.w);
}
spin_unlock_irq(&device->resource->req_lock);
@@ -3528,8 +3542,10 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
* freezes application IO while the actual IO operation runs. This
* function MAY NOT be called from worker context.
*/
-int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
- char *why, enum bm_flag flags)
+int drbd_bitmap_io(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
+ char *why, enum bm_flag flags,
+ struct drbd_peer_device *peer_device)
{
/* Only suspend io, if some operation is supposed to be locked out */
const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
@@ -3541,7 +3557,7 @@ int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *
drbd_suspend_io(device);
drbd_bm_lock(device, why, flags);
- rv = io_fn(device);
+ rv = io_fn(device, peer_device);
drbd_bm_unlock(device);
if (do_suspend_io)
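
In the drbd_main.c hunks above, struct bm_io_work gains a peer_device member and the queued io_fn callback gains a matching parameter, so the worker can hand the peer context back to the bitmap I/O function. A compact sketch of that callback-plus-context shape, again with hypothetical demo_* names:

/* Hypothetical demo_* stand-ins for the DRBD structures involved. */
struct demo_device { int vnr; };
struct demo_peer_device { struct demo_device *device; };

struct demo_bm_io_work {
	int (*io_fn)(struct demo_device *, struct demo_peer_device *);
	struct demo_peer_device *peer_device;  /* stored when the work is queued */
	const char *why;
};

/* Worker side: the stored peer context is handed straight to the callback. */
static int demo_run_bm_io(struct demo_device *device, struct demo_bm_io_work *work)
{
	return work->io_fn(device, work->peer_device);
}
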
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 60757ac31701..1a5d3d72d91d 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1053,7 +1053,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
la_size_changed ? "size changed" : "md moved");
/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
- "size changed", BM_LOCKED_MASK);
+ "size changed", BM_LOCKED_MASK, NULL);
/* on-disk bitmap and activity log is authoritative again
* (unless there was an IO error meanwhile...) */
@@ -1615,7 +1615,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
drbd_send_sync_param(peer_device);
}
- kvfree_rcu(old_disk_conf);
+ kvfree_rcu_mightsleep(old_disk_conf);
kfree(old_plan);
mod_timer(&device->request_timer, jiffies + HZ);
goto success;
@@ -2027,13 +2027,15 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
drbd_info(device, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
- "set_n_write from attaching", BM_LOCKE