/*
* Copyright (C) 2015 Facebook. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "free-space-tree.h"
#include "transaction.h"
static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
{
	u64 total_bitmap_size, num_bitmaps;
	size_t bitmap_item_size;
	u32 bytes_per_bitmap;

	/*
	 * Once tracking free space as extent items would take more disk
	 * space than tracking it as bitmap items, switch to bitmaps; the
	 * low threshold below governs switching back.
	 */
	bytes_per_bitmap = cache->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	num_bitmaps = div_u64(cache->key.offset + bytes_per_bitmap - 1,
			      bytes_per_bitmap);
	bitmap_item_size = sizeof(struct btrfs_item) +
			   BTRFS_FREE_SPACE_BITMAP_SIZE;
	total_bitmap_size = num_bitmaps * bitmap_item_size;
	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
					    sizeof(struct btrfs_item));

	/*
	 * Leave a gap between the high and low thresholds so we don't
	 * thrash back and forth between the two formats.
	 */
	cache->bitmap_low_thresh = cache->bitmap_high_thresh > 100 ?
				   cache->bitmap_high_thresh - 100 : 0;
}
static int add_new_free_space_info(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path)
{
struct btrfs_root *root = fs_info->free_space_root;
struct btrfs_free_space_info *info;
struct btrfs_key key;
struct extent_buffer *leaf;
int ret;
key.objectid = block_group->key.objectid;
key.type = BTRFS_FREE_SPACE_INFO_KEY;
key.offset = block_group->key.offset;
ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
if (ret)
goto out;
leaf = path->nodes[0];
info = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_free_space_info);
btrfs_set_free_space_extent_count(leaf, info, 0);
btrfs_set_free_space_flags(leaf, info, 0);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
out:
btrfs_release_path(path);
return ret;
}
struct btrfs_free_space_info *
search_free_space_info(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path, int cow)
{
struct btrfs_root *root = fs_info->free_space_root;
struct btrfs_key key;
int ret;
key.objectid = block_group->key.objectid;
key.type = BTRFS_FREE_SPACE_INFO_KEY;
key.offset = block_group->key.offset;
ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
if (ret < 0)
return ERR_PTR(ret);
if (ret != 0) {
btrfs_warn(fs_info, "missing free space info for %llu",
block_group->key.objectid);
ASSERT(0);
return ERR_PTR(-ENOENT);
}
return btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_free_space_info);
}
/*
 * Like btrfs_search_slot(), but position the path at the item with the
 * greatest key strictly less than the given key. The caller guarantees
 * that such a predecessor exists and that the key itself is not present;
 * either condition failing is treated as corruption.
 */
static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key, struct btrfs_path *p,
				  int ins_len, int cow)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
	if (ret < 0)
		return ret;

	/*
	 * ret == 0 means the exact key was found; slot 0 means there is
	 * no smaller item to step back to. Neither should happen here.
	 */
	if (ret == 0 || p->slots[0] == 0) {
		ASSERT(0);
		return -EIO;
	}

	p->slots[0]--;
	return 0;
}
/*
 * Bytes needed for a free space bitmap covering @size bytes at one bit
 * per @sectorsize sector.
 */
static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
{
	u32 nr_bits = (u32)div_u64(size, sectorsize);

	return DIV_ROUND_UP(nr_bits, BITS_PER_BYTE);
}
/*
 * Allocate a zeroed bitmap buffer of @bitmap_size bytes, or NULL on
 * failure. Caller frees with kvfree() or an equivalent that handles both
 * kmalloc'd and vmalloc'd memory.
 */
static u8 *alloc_bitmap(u32 bitmap_size)
{
	u8 *ret;

	/*
	 * Observed sizes range from under 4K up to 16K. vmalloc for every
	 * allocation would be too heavy, so attempt a contiguous
	 * allocation first and fall back only when it fails.
	 */
	if (bitmap_size <= PAGE_SIZE)
		return kzalloc(bitmap_size, GFP_NOFS);

	ret = kzalloc(bitmap_size, GFP_NOFS | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(bitmap_size,
				GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO,
				PAGE_KERNEL);
	return ret;
}
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path)
{
struct btrfs_r
|