// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "compress.h"
#include "data_update.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "snapshot.h"
#include "subvolume.h"
#include "trace.h"
#include <linux/ioprio.h>

static const char * const bch2_data_update_type_strs[] = {
#define x(t, n, ...) [n] = #t,
	BCH_DATA_UPDATE_TYPES()
#undef x
	NULL
};
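
/* Release the per-device refs taken by bkey_get_dev_refs(): */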
static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		bch2_dev_put(bch2_dev_have_ref(c, ptr->dev));
}
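
/*
 * Take a ref on each device the key points to, keeping those devices alive
 * for the duration of the data update; on failure, drop the refs already
 * taken so the caller sees all-or-nothing:
 */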
static bool bkey_get_dev_refs(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr) {
		if (unlikely(!bch2_dev_tryget(c, ptr->dev))) {
			/* Unwind the refs we already took: */
			bkey_for_each_ptr(ptrs, ptr2) {
				if (ptr2 == ptr)
					break;
				bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
			}
			return false;
		}
	}
	return true;
}
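
/* Drop the nocow bucket locks held on every bucket the key points to: */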
static void bkey_nocow_unlock(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

		bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
	}
}
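
/*
 * Slow path, called when a trylock failed: without a moving_context we can't
 * block, so unlock the buckets locked so far and fail; with one, wait for
 * in-flight IOs to drain, then block on the remaining bucket locks:
 */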
static noinline_for_stack
bool __bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struct bkey_ptrs_c ptrs,
		       const struct bch_extent_ptr *start)
{
	if (!ctxt) {
		bkey_for_each_ptr(ptrs, ptr) {
			if (ptr == start)
				break;

			struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
			struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

			bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
		}
		return false;
	}

	__bkey_for_each_ptr(start, ptrs.end, ptr) {
		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

		bool locked;
		move_ctxt_wait_event(ctxt,
				     (locked = bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) ||
				     list_empty(&ctxt->ios));
		if (!locked)
			bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
	}
	return true;
}
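
/*
 * Take nocow locks on all buckets the key points to, via the trylock fast
 * path; fall back to __bkey_nocow_lock() on contention:
 */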
static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struct bkey_ptrs_c ptrs)
{
	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

		if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0))
			return __bkey_nocow_lock(c, ctxt, ptrs, ptr);
	}

	return true;
}
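
/*
 * Emit the io_move_finish tracepoint, dumping the data update, the new
 * replicas and the key being inserted:
 */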
noinline_for_stack
static void trace_io_move_finish2(struct data_update *u,
				  struct bkey_i *new,
				  struct bkey_i *insert)
{
	struct bch_fs *c = u->op.c;
	struct printbuf buf = PRINTBUF;

	prt_newline(&buf);

	bch2_data_update_to_text(&buf, u);
	prt_newline(&buf);

	prt_str_indented(&buf, "new replicas:\t");
	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
	prt_newline(&buf);

	prt_str_indented(&buf, "insert:\t");
	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
	prt_newline(&buf);

	trace_io_move_finish(c, buf.buf);
	printbuf_exit(&buf);
}
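
/*
 * Emit the io_move_fail tracepoint, recording which of the pointers we
 * intended to rewrite are still present in the key being inserted:
 */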
noinline_for_stack
static void trace_io_move_fail2(struct data_update *m,
				struct bkey_s_c new,
				struct bkey_s_c wrote,
				struct bkey_i *insert,
				const char *msg)
{
	struct bch_fs *c = m->op.c;
	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
	struct printbuf buf = PRINTBUF;
	unsigned rewrites_found = 0;

	if (!trace_io_move_fail_enabled())
		return;

	prt_str(&buf, msg);

	if (insert) {
		const union bch_extent_entry *entry;
		struct bch_extent_ptr *ptr;
		struct extent_ptr_decoded p;
		unsigned ptr_bit = 1;

		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			if ((ptr_bit & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached)
				rewrites_found |= ptr_bit;