-rw-r--r--  fs/gfs2/bmap.c                     |   6
-rw-r--r--  fs/gfs2/glops.c                    |  38
-rw-r--r--  fs/gfs2/incore.h                   |  16
-rw-r--r--  fs/gfs2/lock_dlm.c                 |   8
-rw-r--r--  fs/gfs2/log.c                      | 516
-rw-r--r--  fs/gfs2/log.h                      |  20
-rw-r--r--  fs/gfs2/lops.c                     |  19
-rw-r--r--  fs/gfs2/lops.h                     |  23
-rw-r--r--  fs/gfs2/main.c                     |   4
-rw-r--r--  fs/gfs2/ops_fstype.c               |  71
-rw-r--r--  fs/gfs2/recovery.c                 |  14
-rw-r--r--  fs/gfs2/super.c                    |  70
-rw-r--r--  fs/gfs2/super.h                    |   8
-rw-r--r--  fs/gfs2/trans.c                    | 102
-rw-r--r--  fs/gfs2/trans.h                    |   3
-rw-r--r--  fs/gfs2/util.c                     |  59
-rw-r--r--  fs/gfs2/util.h                     |   3
-rw-r--r--  fs/gfs2/xattr.c                    |  48
-rw-r--r--  include/uapi/linux/gfs2_ondisk.h   |   5
19 files changed, 597 insertions(+), 436 deletions(-)
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index cf6ccdd00587..7a358ae05185 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1230,6 +1230,9 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length, gfs2_inplace_release(ip); + if (ip->i_qadata && ip->i_qadata->qa_qd_num) + gfs2_quota_unlock(ip); + if (length != written && (iomap->flags & IOMAP_F_NEW)) { /* Deallocate blocks that were just allocated. */ loff_t blockmask = i_blocksize(inode) - 1; @@ -1242,9 +1245,6 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length, } } - if (ip->i_qadata && ip->i_qadata->qa_qd_num) - gfs2_quota_unlock(ip); - if (unlikely(!written)) goto out_unlock; diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 3faa421568b0..8e32d569c8bf 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -86,16 +86,12 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_trans tr; + unsigned int revokes; int ret; - memset(&tr, 0, sizeof(tr)); - INIT_LIST_HEAD(&tr.tr_buf); - INIT_LIST_HEAD(&tr.tr_databuf); - INIT_LIST_HEAD(&tr.tr_ail1_list); - INIT_LIST_HEAD(&tr.tr_ail2_list); - tr.tr_revokes = atomic_read(&gl->gl_ail_count); + revokes = atomic_read(&gl->gl_ail_count); - if (!tr.tr_revokes) { + if (!revokes) { bool have_revokes; bool log_in_flight; @@ -122,20 +118,14 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl) return 0; } - /* A shortened, inline version of gfs2_trans_begin() - * tr->alloced is not set since the transaction structure is - * on the stack */ - tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes); - tr.tr_ip = _RET_IP_; - ret = gfs2_log_reserve(sdp, tr.tr_reserved); - if (ret < 0) - return ret; - WARN_ON_ONCE(current->journal_info); - current->journal_info = &tr; - - __gfs2_ail_flush(gl, 0, tr.tr_revokes); - + memset(&tr, 0, sizeof(tr)); + set_bit(TR_ONSTACK, &tr.tr_flags); + ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_); + if (ret) + goto flush; + __gfs2_ail_flush(gl, 0, revokes); gfs2_trans_end(sdp); + flush: gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_AIL_EMPTY_GL); @@ -146,19 +136,15 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; unsigned int revokes = atomic_read(&gl->gl_ail_count); - unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64); int ret; if (!revokes) return; - while (revokes > max_revokes) - max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64); - - ret = gfs2_trans_begin(sdp, 0, max_revokes); + ret = gfs2_trans_begin(sdp, 0, revokes); if (ret) return; - __gfs2_ail_flush(gl, fsync, max_revokes); + __gfs2_ail_flush(gl, fsync, revokes); gfs2_trans_end(sdp); gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_AIL_FLUSH); diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 92f5a920ce61..0957119f7744 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -470,7 +470,7 @@ struct gfs2_quota_data { enum { TR_TOUCHED = 1, TR_ATTACHED = 2, - TR_ALLOCED = 3, + TR_ONSTACK = 3, }; struct gfs2_trans { @@ -486,7 +486,6 @@ struct gfs2_trans { unsigned int tr_num_buf_rm; unsigned int tr_num_databuf_rm; unsigned int tr_num_revoke; - unsigned int tr_num_revoke_rm; struct list_head tr_list; struct list_head tr_databuf; @@ -511,6 +510,7 @@ struct gfs2_jdesc { unsigned int nr_extents; struct work_struct jd_work; struct inode *jd_inode; + struct bio *jd_log_bio; unsigned long jd_flags; #define JDF_RECOVERY 1 unsigned int jd_jid; 
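The glops.c and incore.h hunks above replace the old hand-rolled "shortened, inline version of gfs2_trans_begin" with a transaction that lives on the caller's stack: TR_ALLOCED is renamed to TR_ONSTACK, and __gfs2_trans_begin() now takes the transaction as an explicit argument. A minimal sketch of that calling pattern, condensed from the gfs2_ail_empty_gl() hunk above (the error path and the AIL flush itself are elided; the TR_ONSTACK comment states the intended meaning of the flag rather than code visible in this diff):

	/* Illustrative sketch of the on-stack transaction pattern used above. */
	static int example_onstack_transaction(struct gfs2_sbd *sdp,
					       unsigned int revokes)
	{
		struct gfs2_trans tr;
		int ret;

		memset(&tr, 0, sizeof(tr));
		/* TR_ONSTACK: tell the transaction teardown code that this
		 * gfs2_trans was not allocated and must not be freed. */
		set_bit(TR_ONSTACK, &tr.tr_flags);
		ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
		if (ret)
			return ret;
		/* ... queue revokes / dirty buffers under the reservation ... */
		gfs2_trans_end(sdp);
		return 0;
	}

Because the structure never leaves the stack, gfs2_ail_empty_gl() no longer needs to open-code the log reservation; it goes through the same __gfs2_trans_begin() path as every other transaction.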
@@ -565,6 +565,7 @@ struct gfs2_args { unsigned int ar_errors:2; /* errors=withdraw | panic */ unsigned int ar_nobarrier:1; /* do not send barriers */ unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */ + unsigned int ar_got_rgrplvb:1; /* Was the rgrplvb opt given? */ unsigned int ar_loccookie:1; /* use location based readdir cookies */ s32 ar_commit; /* Commit interval */ @@ -801,7 +802,6 @@ struct gfs2_sbd { struct gfs2_trans *sd_log_tr; unsigned int sd_log_blks_reserved; - int sd_log_committed_revoke; atomic_t sd_log_pinned; unsigned int sd_log_num_revoke; @@ -814,24 +814,22 @@ struct gfs2_sbd { atomic_t sd_log_thresh2; atomic_t sd_log_blks_free; atomic_t sd_log_blks_needed; + atomic_t sd_log_revokes_available; wait_queue_head_t sd_log_waitq; wait_queue_head_t sd_logd_waitq; u64 sd_log_sequence; - unsigned int sd_log_head; - unsigned int sd_log_tail; int sd_log_idle; struct rw_semaphore sd_log_flush_lock; atomic_t sd_log_in_flight; - struct bio *sd_log_bio; wait_queue_head_t sd_log_flush_wait; int sd_log_error; /* First log error */ wait_queue_head_t sd_withdraw_wait; - atomic_t sd_reserving_log; - wait_queue_head_t sd_reserving_log_wait; - + unsigned int sd_log_tail; + unsigned int sd_log_flush_tail; + unsigned int sd_log_head; unsigned int sd_log_flush_head; spinlock_t sd_ail_lock; diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 9f2b5609f225..153272f82984 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -284,7 +284,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct lm_lockstruct *ls = &sdp->sd_lockstruct; - int lvb_needs_unlock = 0; int error; if (gl->gl_lksb.sb_lkid == 0) { @@ -297,13 +296,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl) gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_update_request_times(gl); - /* don't want to skip dlm_unlock writing the lvb when lock is ex */ - - if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE)) - lvb_needs_unlock = 1; + /* don't want to skip dlm_unlock writing the lvb when lock has one */ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && - !lvb_needs_unlock) { + !gl->gl_lksb.sb_lvbptr) { gfs2_glock_free(gl); return; } diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 2e9314091c81..a6fbde9f609f 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -50,10 +50,12 @@ unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct) unsigned int blks; unsigned int first, second; + /* The initial struct gfs2_log_descriptor block */ blks = 1; first = sdp->sd_ldptrs; if (nstruct > first) { + /* Subsequent struct gfs2_meta_header blocks */ second = sdp->sd_inptrs; blks += DIV_ROUND_UP(nstruct - first, second); } @@ -240,6 +242,45 @@ static void gfs2_ail1_start(struct gfs2_sbd *sdp) return gfs2_ail1_flush(sdp, &wbc); } +static void gfs2_log_update_flush_tail(struct gfs2_sbd *sdp) +{ + unsigned int new_flush_tail = sdp->sd_log_head; + struct gfs2_trans *tr; + + if (!list_empty(&sdp->sd_ail1_list)) { + tr = list_last_entry(&sdp->sd_ail1_list, + struct gfs2_trans, tr_list); + new_flush_tail = tr->tr_first; + } + sdp->sd_log_flush_tail = new_flush_tail; +} + +static void gfs2_log_update_head(struct gfs2_sbd *sdp) +{ + unsigned int new_head = sdp->sd_log_flush_head; + + if (sdp->sd_log_flush_tail == sdp->sd_log_head) + sdp->sd_log_flush_tail = new_head; + sdp->sd_log_head = new_head; +} + +/** + * gfs2_ail_empty_tr - empty one of the ail lists of a transaction + */ + +static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr, + struct list_head 
*head) +{ + struct gfs2_bufdata *bd; + + while (!list_empty(head)) { + bd = list_first_entry(head, struct gfs2_bufdata, + bd_ail_st_list); + gfs2_assert(sdp, bd->bd_tr == tr); + gfs2_remove_from_ail(bd); + } +} + /** * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced * @sdp: the filesystem @@ -315,6 +356,7 @@ static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes) else oldest_tr = 0; } + gfs2_log_update_flush_tail(sdp); ret = list_empty(&sdp->sd_ail1_list); spin_unlock(&sdp->sd_ail_lock); @@ -348,47 +390,69 @@ static void gfs2_ail1_wait(struct gfs2_sbd *sdp) spin_unlock(&sdp->sd_ail_lock); } -/** - * gfs2_ail_empty_tr - empty one of the ail lists for a transaction - */ - -static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr, - struct list_head *head) +static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { - struct gfs2_bufdata *bd; - - while (!list_empty(head)) { - bd = list_first_entry(head, struct gfs2_bufdata, - bd_ail_st_list); - gfs2_assert(sdp, bd->bd_tr == tr); - gfs2_remove_from_ail(bd); - } + gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list); + list_del(&tr->tr_list); + gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list)); + gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list)); + gfs2_trans_free(sdp, tr); } static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail) { - struct gfs2_trans *tr, *safe; + struct list_head *ail2_list = &sdp->sd_ail2_list; unsigned int old_tail = sdp->sd_log_tail; - int wrap = (new_tail < old_tail); - int a, b, rm; + struct gfs2_trans *tr, *safe; spin_lock(&sdp->sd_ail_lock); + if (old_tail <= new_tail) { + list_for_each_entry_safe(tr, safe, ail2_list, tr_list) { + if (old_tail <= tr->tr_first && tr->tr_first < new_tail) + __ail2_empty(sdp, tr); + } + } else { + list_for_each_entry_safe(tr, safe, ail2_list, tr_list) { + if (old_tail <= tr->tr_first || tr->tr_first < new_tail) + __ail2_empty(sdp, tr); + } + } + spin_unlock(&sdp->sd_ail_lock); +} - list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) { - a = (old_tail <= tr->tr_first); - b = (tr->tr_first < new_tail); - rm = (wrap) ? (a || b) : (a && b); - if (!rm) - continue; +/** + * gfs2_log_is_empty - Check if the log is empty + * @sdp: The GFS2 superblock + */ - gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list); - list_del(&tr->tr_list); - gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list)); - gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list)); - gfs2_trans_free(sdp, tr); +bool gfs2_log_is_empty(struct gfs2_sbd *sdp) { + return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks; +} + +static bool __gfs2_log_try_reserve_revokes(struct gfs2_sbd *sdp, unsigned int revokes) +{ + unsigned int available; + + available = atomic_read(&sdp->sd_log_revokes_available); + while (available >= revokes) { + if (atomic_try_cmpxchg(&sdp->sd_log_revokes_available, + &available, available - revokes)) + return true; } + return false; +} - spin_unlock(&sdp->sd_ail_lock); +/** + * gfs2_log_release_revokes - Release a given number of revokes + * @sdp: The GFS2 superblock + * @revokes: The number of revokes to release + * + * sdp->sd_log_flush_lock must be held. 
+ */ +void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes) +{ + if (revokes) + atomic_add(revokes, &sdp->sd_log_revokes_available); } /** @@ -400,86 +464,141 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail) void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks) { - atomic_add(blks, &sdp->sd_log_blks_free); trace_gfs2_log_blocks(sdp, blks); gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks); - up_read(&sdp->sd_log_flush_lock); + if (atomic_read(&sdp->sd_log_blks_needed)) + wake_up(&sdp->sd_log_waitq); } /** - * gfs2_log_reserve - Make a log reservation + * __gfs2_log_try_reserve - Try to make a log reservation + * @sdp: The GFS2 superblock + * @blks: The number of blocks to reserve + * @taboo_blks: The number of blocks to leave free + * + * Try to do the same as __gfs2_log_reserve(), but fail if no more log + * space is immediately available. + */ +static bool __gfs2_log_try_reserve(struct gfs2_sbd *sdp, unsigned int blks, + unsigned int taboo_blks) +{ + unsigned wanted = blks + taboo_blks; + unsigned int free_blocks; + + free_blocks = atomic_read(&sdp->sd_log_blks_free); + while (free_blocks >= wanted) { + if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks, + free_blocks - blks)) { + trace_gfs2_log_blocks(sdp, -blks); + return true; + } + } + return false; +} + +/** + * __gfs2_log_reserve - Make a log reservation * @sdp: The GFS2 superblock * @blks: The number of blocks to reserve + * @taboo_blks: The number of blocks to leave free * - * Note that we never give out the last few blocks of the journal. Thats - * due to the fact that there is a small number of header blocks - * associated with each log flush. The exact number can't be known until - * flush time, so we ensure that we have just enough free blocks at all - * times to avoid running out during a log flush. + * @taboo_blks is set to 0 for logd, and to GFS2_LOG_FLUSH_MIN_BLOCKS + * for all other processes. This ensures that when the log is almost full, + * logd will still be able to call gfs2_log_flush one more time without + * blocking, which will advance the tail and make some more log space + * available. * * We no longer flush the log here, instead we wake up logd to do that * for us. To avoid the thundering herd and to ensure that we deal fairly * with queued waiters, we use an exclusive wait. This means that when we * get woken with enough journal space to get our reservation, we need to * wake the next waiter on the list. 
- * - * Returns: errno */ -int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks) +static void __gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks, + unsigned int taboo_blks) { - int ret = 0; - unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize); - unsigned wanted = blks + reserved_blks; - DEFINE_WAIT(wait); - int did_wait = 0; + unsigned wanted = blks + taboo_blks; unsigned int free_blocks; - if (gfs2_assert_warn(sdp, blks) || - gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks)) - return -EINVAL; atomic_add(blks, &sdp->sd_log_blks_needed); -retry: - free_blocks = atomic_read(&sdp->sd_log_blks_free); - if (unlikely(free_blocks <= wanted)) { - do { - prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait, - TASK_UNINTERRUPTIBLE); + for (;;) { + if (current != sdp->sd_logd_process) wake_up(&sdp->sd_logd_waitq); - did_wait = 1; - if (atomic_read(&sdp->sd_log_blks_free) <= wanted) - io_schedule(); - free_blocks = atomic_read(&sdp->sd_log_blks_free); - } while(free_blocks <= wanted); - finish_wait(&sdp->sd_log_waitq, &wait); - } - atomic_inc(&sdp->sd_reserving_log); - if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks, - free_blocks - blks) != free_blocks) { - if (atomic_dec_and_test(&sdp->sd_reserving_log)) - wake_up(&sdp->sd_reserving_log_wait); - goto retry; + io_wait_event(sdp->sd_log_waitq, + (free_blocks = atomic_read(&sdp->sd_log_blks_free), + free_blocks >= wanted)); + do { + if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, + &free_blocks, + free_blocks - blks)) + goto reserved; + } while (free_blocks >= wanted); } - atomic_sub(blks, &sdp->sd_log_blks_needed); - trace_gfs2_log_blocks(sdp, -blks); - /* - * If we waited, then so might others, wake them up _after_ we get - * our share of the log. - */ - if (unlikely(did_wait)) +reserved: + trace_gfs2_log_blocks(sdp, -blks); + if (atomic_sub_return(blks, &sdp->sd_log_blks_needed)) wake_up(&sdp->sd_log_waitq); +} + +/** + * gfs2_log_try_reserve - Try to make a log reservation + * @sdp: The GFS2 superblock + * @tr: The transaction + * @extra_revokes: The number of additional revokes reserved (output) + * + * This is similar to gfs2_log_reserve, but sdp->sd_log_flush_lock must be + * held for correct revoke accounting. + */ - down_read(&sdp->sd_log_flush_lock); - if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) { - gfs2_log_release(sdp, blks); - ret = -EROFS; +bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr, + unsigned int *extra_revokes) +{ + unsigned int blks = tr->tr_reserved; + unsigned int revokes = tr->tr_revokes; + unsigned int revoke_blks = 0; + + *extra_revokes = 0; + if (revokes && !__gfs2_log_try_reserve_revokes(sdp, revokes)) { + revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs); + *extra_revokes = revoke_blks * sdp->sd_inptrs - revokes; + blks += revoke_blks; } - if (atomic_dec_and_test(&sdp->sd_reserving_log)) - wake_up(&sdp->sd_reserving_log_wait); - return ret; + if (!blks) + return true; + if (__gfs2_log_try_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS)) + return true; + if (!revoke_blks) + gfs2_log_release_revokes(sdp, revokes); + return false; +} + +/** + * gfs2_log_reserve - Make a log reservation + * @sdp: The GFS2 superblock + * @tr: The transaction + * @extra_revokes: The number of additional revokes reserved (output) + * + * sdp->sd_log_flush_lock must not be held. 
+ */ + +void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr, + unsigned int *extra_revokes) +{ + unsigned int blks = tr->tr_reserved; + unsigned int revokes = tr->tr_revokes; + unsigned int revoke_blks = 0; + + *extra_revokes = 0; + if (revokes) { + revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs); + *extra_revokes = revoke_blks * sdp->sd_inptrs - revokes; + blks += revoke_blks; + } + __gfs2_log_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS); } /** @@ -507,24 +626,20 @@ static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer } /** - * calc_reserved - Calculate the number of blocks to reserve when - * refunding a transaction's unused buffers. + * calc_reserved - Calculate the number of blocks to keep reserved * @sdp: The GFS2 superblock * * This is complex. We need to reserve room for all our currently used - * metadata buffers (e.g. normal file I/O rewriting file time stamps) and - * all our journaled data buffers for journaled files (e.g. files in the + * metadata blocks (e.g. normal file I/O rewriting file time stamps) and + * all our journaled data blocks for journaled files (e.g. files in the * meta_fs like rindex, or files for which chattr +j was done.) - * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush - * will count it as free space (sd_log_blks_free) and corruption will follow. + * If we don't reserve enough space, corruption will follow. * - * We can have metadata bufs and jdata bufs in the same journal. So each - * type gets its own log header, for which we need to reserve a block. - * In fact, each type has the potential for needing more than one header - * in cases where we have more buffers than will fit on a journal page. + * We can have metadata blocks and jdata blocks in the same journal. Each + * type gets its own log descriptor, for which we need to reserve a block. + * In fact, each type has the potential for needing more than one log descriptor + * in cases where we have more blocks than will fit in a log descriptor. * Metadata journal entries take up half the space of journaled buffer entries. - * Thus, metadata entries have buf_limit (502) and journaled buffers have - * databuf_limit (251) before they cause a wrap around. * * Also, we need to reserve blocks for revoke journal entries and one for an * overall header for the lot. 
@@ -533,59 +648,29 @@ static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer */ static unsigned int calc_reserved(struct gfs2_sbd *sdp) { - unsigned int reserved = 0; - unsigned int mbuf; - unsigned int dbuf; + unsigned int reserved = GFS2_LOG_FLUSH_MIN_BLOCKS; + unsigned int blocks; struct gfs2_trans *tr = sdp->sd_log_tr; if (tr) { - mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm; - dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm; - reserved = mbuf + dbuf; - /* Account for header blocks */ - reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp)); - reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp)); + blocks = tr->tr_num_buf_new - tr->tr_num_buf_rm; + reserved += blocks + DIV_ROUND_UP(blocks, buf_limit(sdp)); + blocks = tr->tr_num_databuf_new - tr->tr_num_databuf_rm; + reserved += blocks + DIV_ROUND_UP(blocks, databuf_limit(sdp)); } - - if (sdp->sd_log_committed_revoke > 0) - reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke); - /* One for the overall header */ - if (reserved) - reserved++; return reserved; } -static unsigned int current_tail(struct gfs2_sbd *sdp) -{ - struct gfs2_trans *tr; - unsigned int tail; - - spin_lock(&sdp->sd_ail_lock); - - if (list_empty(&sdp->sd_ail1_list)) { - tail = sdp->sd_log_head; - } else { - tr = list_last_entry(&sdp->sd_ail1_list, struct gfs2_trans, - tr_list); - tail = tr->tr_first; - } - - spin_unlock(&sdp->sd_ail_lock); - - return tail; -} - -static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) +static void log_pull_tail(struct gfs2_sbd *sdp) { - unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail); + unsigned int new_tail = sdp->sd_log_flush_tail; + unsigned int dist; + if (new_tail == sdp->sd_log_tail) + return; + dist = log_distance(sdp, new_tail, sdp->sd_log_tail); ail2_empty(sdp, new_tail); - - atomic_add(dist, &sdp->sd_log_blks_free); - trace_gfs2_log_blocks(sdp, dist); - gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= - sdp->sd_jdesc->jd_blocks); - + gfs2_log_release(sdp, dist); sdp->sd_log_tail = new_tail; } @@ -698,7 +783,7 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl) } /** - * gfs2_write_revokes - Add as many revokes to the system transaction as we can + * gfs2_flush_revokes - Add as many revokes to the system transaction as we can * @sdp: The GFS2 superblock * * Our usual strategy is to defer writing revokes as much as we can in the hope @@ -709,38 +794,14 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl) * been written back. This will basically come at no cost now, and will save * us from having to keep track of those blocks on the AIL2 list later. 
*/ -void gfs2_write_revokes(struct gfs2_sbd *sdp) +void gfs2_flush_revokes(struct gfs2_sbd *sdp) { /* number of revokes we still have room for */ - int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64); + unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available); gfs2_log_lock(sdp); - while (sdp->sd_log_num_revoke > max_revokes) - max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64); - max_revokes -= sdp->sd_log_num_revoke; - if (!sdp->sd_log_num_revoke) { - atomic_dec(&sdp->sd_log_blks_free); - /* If no blocks have been reserved, we need to also - * reserve a block for the header */ - if (!sdp->sd_log_blks_reserved) { - atomic_dec(&sdp->sd_log_blks_free); - trace_gfs2_log_blocks(sdp, -2); - } else { - trace_gfs2_log_blocks(sdp, -1); - } - } gfs2_ail1_empty(sdp, max_revokes); gfs2_log_unlock(sdp); - - if (!sdp->sd_log_num_revoke) { - atomic_inc(&sdp->sd_log_blks_free); - if (!sdp->sd_log_blks_reserved) { - atomic_inc(&sdp->sd_log_blks_free); - trace_gfs2_log_blocks(sdp, 2); - } else { - trace_gfs2_log_blocks(sdp, 1); - } - } } /** @@ -769,7 +830,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, u64 dblock; if (gfs2_withdrawn(sdp)) - goto out; + return; page = mempool_alloc(gfs2_page_pool, GFP_NOIO); lh = page_address(page); @@ -822,10 +883,8 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, sb->s_blocksize - LH_V1_SIZE - 4); lh->lh_crc = cpu_to_be32(crc); - gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock); - gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags); -out: - log_flush_wait(sdp); + gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock); + gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags); } /** @@ -838,25 +897,24 @@ out: static void log_write_header(struct gfs2_sbd *sdp, u32 flags) { - unsigned int tail; int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC; enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); gfs2_assert_withdraw(sdp, (state != SFS_FROZEN)); - tail = current_tail(sdp); if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) { gfs2_ordered_wait(sdp); log_flush_wait(sdp); op_flags = REQ_SYNC | REQ_META | REQ_PRIO; } - sdp->sd_log_idle = (tail == sdp->sd_log_flush_head); - gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail, - sdp->sd_log_flush_head, flags, op_flags); + sdp->sd_log_idle = (sdp->sd_log_flush_tail == sdp->sd_log_flush_head); + gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, + sdp->sd_log_flush_tail, sdp->sd_log_flush_head, + flags, op_flags); gfs2_log_incr_head(sdp); - - if (sdp->sd_log_tail != tail) - log_pull_tail(sdp, tail); + log_flush_wait(sdp); + log_pull_tail(sdp); + gfs2_log_update_head(sdp); } /** @@ -956,10 +1014,15 @@ static void trans_drain(struct gfs2_trans *tr) void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) { struct gfs2_trans *tr = NULL; + unsigned int reserved_blocks = 0, used_blocks = 0; enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); + unsigned int first_log_head; + unsigned int reserved_revokes = 0; down_write(&sdp->sd_log_flush_lock); + trace_gfs2_log_flush(sdp, 1, flags); +repeat: /* * Do this check while holding the log_flush_lock to prevent new * buffers from being added to the ail via gfs2_pin() @@ -970,28 +1033,47 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) /* Log might have been flushed while we waited for the flush lock */ if (gl && 
!test_bit(GLF_LFLUSH, &gl->gl_flags)) goto out; - trace_gfs2_log_flush(sdp, 1, flags); - if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN) - clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); + first_log_head = sdp->sd_log_head; + sdp->sd_log_flush_head = first_log_head; - sdp->sd_log_flush_head = sdp->sd_log_head; tr = sdp->sd_log_tr; - if (tr) { - sdp->sd_log_tr = NULL; - tr->tr_first = sdp->sd_log_flush_head; - if (unlikely (state == SFS_FROZEN)) - if (gfs2_assert_withdraw_delayed(sdp, - !tr->tr_num_buf_new && !tr->tr_num_databuf_new)) - goto out_withdraw; + if (tr || sdp->sd_log_num_revoke) { + if (reserved_blocks) + gfs2_log_release(sdp, reserved_blocks); + reserved_blocks = sdp->sd_log_blks_reserved; + reserved_revokes = sdp->sd_log_num_revoke; + if (tr) { + sdp->sd_log_tr = NULL; + tr->tr_first = first_log_head; + if (unlikely (state == SFS_FROZEN)) { + if (gfs2_assert_withdraw_delayed(sdp, + !tr->tr_num_buf_new && !tr->tr_num_databuf_new)) + goto out_withdraw; + } + } + } else if (!reserved_blocks) { + unsigned int taboo_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS; + + reserved_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS; + if (current == sdp->sd_logd_process) + taboo_blocks = 0; + + if (!__gfs2_log_try_reserve(sdp, reserved_blocks, taboo_blocks)) { + up_write(&sdp->sd_log_flush_lock); + __gfs2_log_reserve(sdp, reserved_blocks, taboo_blocks); + down_write(&sdp->sd_log_flush_lock); + goto repeat; + } + BUG_ON(sdp->sd_log_num_revoke); } + if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN) + clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); + if (unlikely(state == SFS_FROZEN)) - if (gfs2_assert_withdraw_delayed(sdp, !sdp->sd_log_num_revoke)) + if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes)) goto out_withdraw; - if (gfs2_assert_withdraw_delayed(sdp, - sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke)) - goto out_withdraw; gfs2_ordered_write(sdp); if (gfs2_withdrawn(sdp)) @@ -999,16 +1081,13 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) lops_before_commit(sdp, tr); if (gfs2_withdrawn(sdp)) goto out_withdraw; - gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE); + gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE); if (gfs2_withdrawn(sdp)) goto out_withdraw; if (sdp->sd_log_head != sdp->sd_log_flush_head) { - log_flush_wait(sdp); log_write_header(sdp, flags); - } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){ - atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ - trace_gfs2_log_blocks(sdp, -1); + } else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) { log_write_header(sdp, flags); } if (gfs2_withdrawn(sdp)) @@ -1016,9 +1095,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) lops_after_commit(sdp, tr); gfs2_log_lock(sdp); - sdp->sd_log_head = sdp->sd_log_flush_head; sdp->sd_log_blks_reserved = 0; - sdp->sd_log_committed_revoke = 0; spin_lock(&sdp->sd_ail_lock); if (tr && !list_empty(&tr->tr_ail1_list)) { @@ -1033,10 +1110,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) empty_ail1_list(sdp); if (gfs2_withdrawn(sdp)) goto out_withdraw; - atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ - trace_gfs2_log_blocks(sdp, -1); log_write_header(sdp, flags); - sdp->sd_log_head = sdp->sd_log_flush_head; } if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN | GFS2_LOG_HEAD_FLUSH_FREEZE)) @@ -1046,12 +1120,22 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) } out_end: - trace_gfs2_log_flush(sdp, 0, flags); + used_blocks = 
log_distance(sdp, sdp->sd_log_flush_head, first_log_head); + reserved_revokes += atomic_read(&sdp->sd_log_revokes_available); + atomic_set(&sdp->s |
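The new reservation helpers above (__gfs2_log_try_reserve(), __gfs2_log_try_reserve_revokes(), and the retry loop in __gfs2_log_reserve()) all rely on the same lock-free pattern: carve a number of units out of an atomic free counter with atomic_try_cmpxchg(), retrying while the observed value still leaves enough room. A standalone userspace C11 analogue of that pattern (the names and the stdatomic calls here are illustrative, not part of the kernel patch):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Try to reserve 'want' units from 'free_units' without taking a lock,
	 * leaving at least 'taboo' units untouched (cf. taboo_blks above).
	 * Like atomic_try_cmpxchg(), the compare-exchange reloads 'old' with
	 * the current counter value on failure, so the limit is re-checked
	 * each time around the loop. */
	static bool try_reserve(atomic_uint *free_units, unsigned int want,
				unsigned int taboo)
	{
		unsigned int old = atomic_load(free_units);

		while (old >= want + taboo) {
			if (atomic_compare_exchange_weak(free_units, &old,
							 old - want))
				return true;	/* reservation succeeded */
		}
		return false;	/* not enough room; caller must wait or flush */
	}

When the fast path fails, the try-variants simply return false, while the blocking __gfs2_log_reserve() path above wakes logd and sleeps on sd_log_waitq until enough log blocks have been freed.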
