f2fs学习四: f2fs文件系统挂载
一. 用戶空間 f2fs文件系統 Mount(掛載)
在掛載之前,我們需要使用mkfs.f2fs工具對塊設備進行格式化為f2fs文件系統(如果在Mount之前不格式化成f2fs文件系統,后續Mount掛載的時候會報錯,不識別f2fs文件系統)
mkfs.f2fs工具作用就是在塊設備上建立f2fs文件系統
1. # mkfs.f2fs -l label /dev/block_device
將塊設備掛載到/mnt/f2fs目錄,掛載的文件系統格式為f2fs文件系統
2. # mount -t f2fs /dev/block_device /mnt/f2fs
二. 用戶空間Mount 系統調用進入內核空間f2fs_mount
三. 內核空間f2fs文件系統Mount (f2fs_mount)
用戶空間執行Mount命令---->系統調用 ---->內核空間f2fs文件系統 f2fs_mount
1. f2fs文件系統 f2fs_fs_type 結構體含有 mount成員,當用戶在用戶空間執行mount操作時,會回調到這個mount位置,由f2fs_mount 執行接下去的Mount動作,
/* f2fs_fs_type: registers "f2fs" with the VFS. .mount (f2fs_mount) is the entry point reached from the mount(2) syscall path, .kill_sb tears the superblock down on unmount, and FS_REQUIRES_DEV marks the filesystem as backed by a block device. */
static struct file_system_type f2fs_fs_type = {.owner = THIS_MODULE,.name = "f2fs",.mount = f2fs_mount,.kill_sb = kill_f2fs_super,.fs_flags = FS_REQUIRES_DEV, };2. f2fs_mount是針對塊設備掛載成f2fs文件系統的函數
/* f2fs_mount: thin wrapper that delegates all work to the generic block-device mount helper mount_bdev(), passing f2fs_fill_super as the callback that populates a freshly created superblock. Returns the root dentry on success or an ERR_PTR. */
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,const char *dev_name, void *data) {return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super); }f2fs_fill_super:填充f2fs super block信息3.mount_bdev是針對塊設備掛載時使用的函數,執行塊設備掛載,這里塊設備指的時 /dev/block_device ,此外還有mount_nodev, mount_single等函數,分別用于不同的掛載情況
/* mount_bdev: generic VFS helper for mounting block-device-backed filesystems. It opens the device via blkdev_get_by_path(), refuses to proceed while the device is frozen (bd_fsfreeze_count > 0), then finds or creates a superblock with sget(). If the superblock already has a root (device already mounted) it only checks that the read-only flags are compatible and drops the extra device reference; otherwise it calls fill_super() to populate the new superblock. Errors unwind through the goto labels, releasing the device reference. Returns dget(s->s_root) or an ERR_PTR. */
struct dentry *mount_bdev(struct file_system_type *fs_type,int flags, const char *dev_name, void *data,int (*fill_super)(struct super_block *, void *, int)) {struct block_device *bdev;struct super_block *s;fmode_t mode = FMODE_READ | FMODE_EXCL;int error = 0;if (!(flags & SB_RDONLY))mode |= FMODE_WRITE;/* open the block device described by dev_name/mode/fs_type */ bdev = blkdev_get_by_path(dev_name, mode, fs_type);if (IS_ERR(bdev))return ERR_CAST(bdev);/** once the super is inserted into the list by sget, s_umount* will protect the lockfs code from trying to start a snapshot* while we are mounting*/mutex_lock(&bdev->bd_fsfreeze_mutex);if (bdev->bd_fsfreeze_count > 0) {mutex_unlock(&bdev->bd_fsfreeze_mutex);error = -EBUSY;goto error_bdev;}/* find or create a superblock */s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,bdev);mutex_unlock(&bdev->bd_fsfreeze_mutex);if (IS_ERR(s))goto error_s;if (s->s_root) {if ((flags ^ s->s_flags) & SB_RDONLY) {deactivate_locked_super(s);error = -EBUSY;goto error_bdev;}/** s_umount nests inside bd_mutex during* __invalidate_device(). blkdev_put() acquires* bd_mutex and can't be called under s_umount. Drop* s_umount temporarily. This is safe as we're* holding an active reference.*/up_write(&s->s_umount);blkdev_put(bdev, mode);down_write(&s->s_umount);} else {s->s_mode = mode;snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);sb_set_blocksize(s, block_size(bdev));/* fill the f2fs super block info */error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);if (error) {deactivate_locked_super(s);goto error;}s->s_flags |= SB_ACTIVE;bdev->bd_super = s;}return dget(s->s_root);error_s:error = PTR_ERR(s); error_bdev:blkdev_put(bdev, mode); error:return ERR_PTR(error); } EXPORT_SYMBOL(mount_bdev);4.?f2fs_fill_super是填充f2fs super block信息
Superblock區域
Superblock保存了F2FS的核心元數據的結構,包括磁盤大小,元區域的各個部分的起始地址等。
Superblock在元數據區域的物理結構
Superblock區域是由兩個struct f2fs_super_block結構組成,互為備份
(1)Superblock物理存放區域結構
struct f2fs_super_block是F2FS對Superblock的具體數據結構實現,它保存在磁盤的最開始的位置,F2FS進行掛載時從磁盤的前端直接讀取出來,然后轉換為struct f2fs_super_block結構。它的定義如下:
?
(2)Superblock內存管理結構:
f2fs_super_block在內存中的對應的結構是struct f2fs_sb_info,它除了包含了struct f2fs_super_block的信息以外,還包含了一些額外的功能,如鎖、SIT、NAT對應的內存管理結構等,簡單如下所述:
其中f2fs_sb_info是在init_sb_info函數中進行初始化的
/* struct f2fs_sb_info: in-memory f2fs superblock info ("sbi"). Holds a pointer back to the VFS super_block and to the on-disk raw f2fs_super_block, plus the runtime state the per-field comments describe: node/segment managers, write bios, checkpoint state and locks, inode/extent-cache lists, basic geometry (block/segment/section sizes), block accounting, GC state, and (under CONFIG_F2FS_STAT_FS) statistics. Initialized during mount (see init_sb_info / f2fs_fill_super in this article). */
struct f2fs_sb_info {struct super_block *sb; /* pointer to VFS super block */struct proc_dir_entry *s_proc; /* proc entry */struct f2fs_super_block *raw_super; /* raw super block pointer */struct rw_semaphore sb_lock; /* lock for raw super block */int valid_super_block; /* valid super block no */unsigned long s_flag; /* flags for sbi */struct mutex writepages; /* mutex for writepages() */#ifdef CONFIG_BLK_DEV_ZONEDunsigned int blocks_per_blkz; /* F2FS blocks per zone */unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */ #endif/* for node-related operations */struct f2fs_nm_info *nm_info; /* node manager */struct inode *node_inode; /* cache node blocks *//* for segment-related operations */struct f2fs_sm_info *sm_info; /* segment manager *//* for bio operations */struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios *//* keep migration IO order for LFS mode */struct rw_semaphore io_order_lock;mempool_t *write_io_dummy; /* Dummy pages *//* for checkpoint */struct f2fs_ckpt_cmd_control *ccc_info; /* for checkpoint cmd control */struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */int cur_cp_pack; /* remain current cp pack */spinlock_t cp_lock; /* for flag in ckpt */struct inode *meta_inode; /* cache meta blocks */struct mutex cp_mutex; /* checkpoint procedure lock */struct rw_semaphore cp_rwsem; /* blocking FS operations */struct rw_semaphore node_write; /* locking node writes */struct rw_semaphore node_change; /* locking node change */wait_queue_head_t cp_wait;unsigned long last_time[MAX_TIME]; /* to store time in jiffies */long interval_time[MAX_TIME]; /* to store thresholds */struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */spinlock_t fsync_node_lock; /* for node entry lock */struct list_head fsync_node_list; /* node list head */unsigned int fsync_seg_id; /* sequence id */unsigned int fsync_node_num; /* number of node entries *//* for orphan inode, use 0'th array */unsigned int max_orphans; /* max orphan inodes *//* 
for inode management */struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock *//* for extent tree cache */struct radix_tree_root extent_tree_root;/* cache extent cache entries */struct mutex extent_tree_lock; /* locking extent radix tree */struct list_head extent_list; /* lru list for shrinker */spinlock_t extent_lock; /* locking extent lru list */atomic_t total_ext_tree; /* extent tree count */struct list_head zombie_list; /* extent zombie tree list */atomic_t total_zombie_tree; /* extent zombie tree count */atomic_t total_ext_node; /* extent info count *//* basic filesystem units */unsigned int log_sectors_per_block; /* log2 sectors per block */unsigned int log_blocksize; /* log2 block size */unsigned int blocksize; /* block size */unsigned int root_ino_num; /* root inode number*/unsigned int node_ino_num; /* node inode number*/unsigned int meta_ino_num; /* meta inode number*/unsigned int log_blocks_per_seg; /* log2 blocks per segment */unsigned int blocks_per_seg; /* blocks per segment */unsigned int segs_per_sec; /* segments per section */unsigned int secs_per_zone; /* sections per zone */unsigned int total_sections; /* total section count */unsigned int total_node_count; /* total node block count */unsigned int total_valid_node_count; /* valid node block count */loff_t max_file_blocks; /* max block index of file */int dir_level; /* directory level */int readdir_ra; /* readahead inode in readdir */block_t user_block_count; /* # of user blocks */block_t total_valid_block_count; /* # of valid blocks */block_t discard_blks; /* discard command candidats */block_t last_valid_block_count; /* for recovery */block_t reserved_blocks; /* configurable reserved blocks */block_t current_reserved_blocks; /* current reserved blocks *//* Additional tracking for no checkpoint mode */block_t unusable_block_count; /* # of blocks saved by last cp */unsigned int nquota_files; /* # of quota sysfile *//* # 
of pages, see count_type */atomic_t nr_pages[NR_COUNT_TYPE];/* # of allocated blocks */struct percpu_counter alloc_valid_block_count;/* writeback control */atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads *//* valid inode count */struct percpu_counter total_valid_inode_count;struct f2fs_mount_info mount_opt; /* mount options *//* for cleaning operations */struct mutex gc_mutex; /* mutex for GC */struct f2fs_gc_kthread *gc_thread; /* GC thread */unsigned int cur_victim_sec; /* current victim section num */unsigned int gc_mode; /* current GC state */unsigned int next_victim_seg[2]; /* next segment in victim section *//* for skip statistic */unsigned int atomic_files; /* # of opened atomic file */unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */unsigned long long skipped_gc_rwsem; /* FG_GC only *//* threshold for gc trials on pinned files */u64 gc_pin_file_threshold;/* maximum # of trials to find a victim segment for SSR and GC */unsigned int max_victim_search;/* migration granularity of garbage collection, unit: segment */unsigned int migration_granularity;/** for stat information.* one is for the LFS mode, and the other is for the SSR mode.*/ #ifdef CONFIG_F2FS_STAT_FSstruct f2fs_stat_info *stat_info; /* FS status information */atomic_t meta_count[META_MAX]; /* # of meta blocks */unsigned int segment_count[2]; /* # of allocated segments */unsigned int block_count[2]; /* # of allocated blocks */atomic_t inplace_count; /* # of inplace update */atomic64_t total_hit_ext; /* # of lookup extent cache */atomic64_t read_hit_rbtree; /* # of hit rbtree extent node */atomic64_t read_hit_largest; /* # of hit largest extent node */atomic64_t read_hit_cached; /* # of hit cached extent node */atomic_t inline_xattr; /* # of inline_xattr inodes */atomic_t inline_inode; /* # of inline_data inodes */atomic_t inline_dir; /* # of inline_dentry inodes */atomic_t vw_cnt; /* # of volatile writes */atomic_t max_aw_cnt; /* max # of atomic writes */atomic_t 
max_vw_cnt; /* max # of volatile writes */int bg_gc; /* background gc calls */unsigned int io_skip_bggc; /* skip background gc for in-flight IO */unsigned int other_skip_bggc; /* skip background gc for other reasons */unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */ #endifspinlock_t stat_lock; /* lock for stat operations *//* For app/fs IO statistics */spinlock_t iostat_lock;unsigned long long write_iostat[NR_IO_TYPE];bool iostat_enable;/* For sysfs suppport */struct kobject s_kobj;struct completion s_kobj_unregister;/* For shrinker support */struct list_head s_list;int s_ndevs; /* number of devices */struct f2fs_dev_info *devs; /* for device list */unsigned int dirty_device; /* for checkpoint data flush */spinlock_t dev_lock; /* protect dirty_device */struct mutex umount_mutex;unsigned int shrinker_run_no;/* For write statistics */u64 sectors_written_start;u64 kbytes_written;/* Reference to checksum algorithm driver via cryptoapi */struct crypto_shash *s_chksum_driver;/* Precomputed FS UUID checksum for seeding other checksums */__u32 s_chksum_seed;struct f2fs_sec_stat_info sec_stat;struct f2fs_sec_fsck_info sec_fsck_stat;/* To gather information of fragmentation */unsigned int s_sec_part_best_extents;unsigned int s_sec_part_current_extents;unsigned int s_sec_part_score;unsigned int s_sec_defrag_writes_kb;unsigned int s_sec_num_apps;unsigned int s_sec_capacity_apps_kb;unsigned int s_sec_cond_fua_mode;#ifdef CONFIG_F2FS_SEC_BLOCK_OPERATIONS_DEBUGunsigned int s_sec_blkops_total;unsigned long long s_sec_blkops_max_elapsed;struct f2fs_sec_blkops_dbg s_sec_dbg_entries[F2FS_SEC_BLKOPS_ENTRIES];struct f2fs_sec_blkops_dbg s_sec_dbg_max_entry; #endif };(3) struct super_block是VFS層描述超級塊的結構體:
VFS描述文件系統使用超級塊和inode 的方式,所謂超級塊就是對所有文件系統的管理機構,每種文件系統都要把自己的信息掛到super_blocks這么一個全局鏈表上。
內核中是分成2個步驟完成:首先每個文件系統必須通過register_filesystem函數將自己的file_system_type掛接到file_systems這個全局變量上,
然后調用kern_mount函數把自己的文件相關操作函數集合表掛到super_blocks上。每種文件系統類型的讀超級塊的例程(get_sb)必須由自己實現。
?
- ?
文件系統由子目錄和文件構成。每個子目錄和文件只能由唯一的inode 描述。inode 是Linux管理文件系統的最基本單位,也是文件系統連接任何子目錄、文件的橋梁。
VFS inode的內容取自物理設備上的文件系統,由文件系統指定的操作函數(i_op 屬性指定)填寫。VFS inode只存在于內存中,可通過inode緩存訪問。
?
super_block
- 相關的數據結構為:
?
?
/* struct super_block: the VFS-layer superblock describing one mounted filesystem instance. Key fields (per the inline comments): s_list links it into the global super_blocks list, s_type points at the owning file_system_type, s_op is the filesystem's super_operations table, s_root is the root dentry, s_bdev the backing block device, s_fs_info the filesystem-private data (for f2fs this holds the f2fs_sb_info), and s_umount/s_active guard mount/unmount lifetime. */
struct super_block {struct list_head s_list; /* Keep this first */dev_t s_dev; /* search index; _not_ kdev_t */unsigned char s_blocksize_bits;unsigned long s_blocksize;loff_t s_maxbytes; /* Max file size */struct file_system_type *s_type;const struct super_operations *s_op;const struct dquot_operations *dq_op;const struct quotactl_ops *s_qcop;const struct export_operations *s_export_op;unsigned long s_flags;unsigned long s_iflags; /* internal SB_I_* flags */unsigned long s_magic;struct dentry *s_root;struct rw_semaphore s_umount;int s_count;atomic_t s_active; #ifdef CONFIG_SECURITYvoid *s_security; #endifconst struct xattr_handler **s_xattr; #ifdef CONFIG_FS_ENCRYPTIONconst struct fscrypt_operations *s_cop; #endifstruct hlist_bl_head s_roots; /* alternate root dentries for NFS */struct list_head s_mounts; /* list of mounts; _not_ for fs use */struct block_device *s_bdev;struct backing_dev_info *s_bdi;struct mtd_info *s_mtd;struct hlist_node s_instances;unsigned int s_quota_types; /* Bitmask of supported quota types */struct quota_info s_dquot; /* Diskquota specific options */struct sb_writers s_writers;char s_id[32]; /* Informational name */uuid_t s_uuid; /* UUID */void *s_fs_info; /* Filesystem private info */unsigned int s_max_links;fmode_t s_mode;/* Granularity of c/m/atime in ns.Cannot be worse than a second */u32 s_time_gran;/** The next field is for VFS *only*. No filesystems have any business* even looking at it. You had been warned.*/struct mutex s_vfs_rename_mutex; /* Kludge *//** Filesystem subtype. 
If non-empty the filesystem type field* in /proc/mounts will be "type.subtype"*/char *s_subtype;const struct dentry_operations *s_d_op; /* default d_op for dentries *//** Saved pool identifier for cleancache (-1 means none)*/int cleancache_poolid;struct shrinker s_shrink; /* per-sb shrinker handle *//* Number of inodes with nlink == 0 but still referenced */atomic_long_t s_remove_count;/* Pending fsnotify inode refs */atomic_long_t s_fsnotify_inode_refs;/* Being remounted read-only */int s_readonly_remount;/* AIO completions deferred from interrupt context */struct workqueue_struct *s_dio_done_wq;struct hlist_head s_pins;/** Owning user namespace and default context in which to* interpret filesystem uids, gids, quotas, device nodes,* xattrs and security labels.*/struct user_namespace *s_user_ns;/** Keep the lru lists last in the structure so they always sit on their* own individual cachelines.*/struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;struct list_lru s_inode_lru ____cacheline_aligned_in_smp;struct rcu_head rcu;struct work_struct destroy_work;struct mutex s_sync_lock; /* sync serialisation lock *//** Indicates how deep in a filesystem stack this SB is*/int s_stack_depth;/* s_inode_list_lock protects s_inodes */spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;struct list_head s_inodes; /* all inodes */spinlock_t s_inode_wblist_lock;struct list_head s_inodes_wb; /* writeback inodes */ } __randomize_layout;- super_block存在于兩個鏈表中,一個是系統所有super_block的鏈表, 一個是對于特定的文件系統的super_block鏈表.
所有的super_block都存在于super_blocks(VFS管理層)鏈表中:
?
- 對于特定的文件系統(文件系統層的具體文件系統), 該文件系統的所有的super_block 都存在于file_sytem_type中的fs_supers鏈表中.
而所有的文件系統,都存在于file_systems鏈表中.這是通過調用register_filesystem接口來注冊文件系統的.
int register_filesystem(struct file_system_type * fs)
?
總結:
f2fs_super_block只在文件系統初始化的時候使用,表示實際存在于磁盤中的數據。大部分情況下系統使用的都是superblock的另外一個結構f2fs_sb_info,簡稱sbi,這個結構在文件系統初始化時候,通過讀取f2fs_super_block的數據進行初始化,只存于內存當中。這個結構是F2FS文件系統使用最多的數據結構,因為它包含了SIT、NAT、SSA、Checkpoint等多個重要的元數據結構信息,因此幾乎F2FS中所有的動作都需要通過sbi進行處理。
(4) f2fs_fill_super函數具體實現:
/* f2fs_fill_super: populates a new VFS super_block for an f2fs mount (callback passed to mount_bdev). Visible flow: allocate the sbi, load the crc32 checksum driver, set the block size, read the raw on-disk super block (read_raw_super_block), parse mount options, wire up VFS ops/quota/encryption/xattr hooks, initialize sbi locks and bio write contexts, read the meta inode and a valid checkpoint, build the segment and node managers, read the node and root inodes, create the root dentry, register sysfs, then perform orphan-inode and fsync-data recovery before optionally starting the background GC thread. Every failure unwinds through the goto-label cleanup chain at the bottom in reverse acquisition order; on certain recovery failures it retries once from try_onemore (retry_cnt). Returns 0 on success or a negative errno. */
static int f2fs_fill_super(struct super_block *sb, void *data, int silent) {struct f2fs_sb_info *sbi;struct f2fs_super_block *raw_super;struct inode *root;int err;bool skip_recovery = false, need_fsck = false;char *options = NULL;char *orig_data = kstrdup(data, GFP_KERNEL);int recovery, i, valid_super_block;struct curseg_info *seg_i;int retry_cnt = 1;try_onemore:err = -EINVAL;raw_super = NULL;valid_super_block = -1;recovery = 0;/* allocate memory for f2fs-specific super block info */sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);if (!sbi)return -ENOMEM;sbi->sb = sb;/* Load the checksum driver */sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);if (IS_ERR(sbi->s_chksum_driver)) {f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");err = PTR_ERR(sbi->s_chksum_driver);sbi->s_chksum_driver = NULL;goto free_sbi;}/* set a block size */if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {f2fs_msg(sb, KERN_ERR, "unable to set blocksize");goto free_sbi;}err = read_raw_super_block(sbi, &raw_super, &valid_super_block,&recovery, retry_cnt);if (err)goto free_sbi;sb->s_fs_info = sbi;sbi->raw_super = raw_super;/* precompute checksum seed for metadata */if (f2fs_sb_has_inode_chksum(sbi))sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,sizeof(raw_super->uuid));/** The BLKZONED feature indicates that the drive was formatted with* zone alignment optimization. 
This is optional for host-aware* devices, but mandatory for host-managed zoned block devices.*/ #ifndef CONFIG_BLK_DEV_ZONEDif (f2fs_sb_has_blkzoned(sbi)) {f2fs_msg(sb, KERN_ERR,"Zoned block device support is not enabled");err = -EOPNOTSUPP;goto free_sb_buf;} #endifdefault_options(sbi, false);/* parse mount options */options = kstrdup((const char *)data, GFP_KERNEL);if (data && !options) {err = -ENOMEM;goto free_sb_buf;}err = parse_options(sb, options);if (err)goto free_options;sbi->max_file_blocks = max_file_blocks();sb->s_maxbytes = sbi->max_file_blocks <<le32_to_cpu(raw_super->log_blocksize);sb->s_max_links = F2FS_LINK_MAX;#ifdef CONFIG_QUOTAsb->dq_op = &f2fs_quota_operations;sb->s_qcop = &f2fs_quotactl_ops;sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;if (f2fs_sb_has_quota_ino(sbi)) {for (i = 0; i < MAXQUOTAS; i++) {if (f2fs_qf_ino(sbi->sb, i))sbi->nquota_files++;}} #endifsb->s_op = &f2fs_sops; #ifdef CONFIG_FS_ENCRYPTIONsb->s_cop = &f2fs_cryptops; #endifsb->s_xattr = f2fs_xattr_handlers;sb->s_export_op = &f2fs_export_ops;sb->s_magic = F2FS_SUPER_MAGIC;sb->s_time_gran = 1;sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));sb->s_iflags |= SB_I_CGROUPWB;/* init f2fs-specific super block info */sbi->valid_super_block = valid_super_block;mutex_init(&sbi->gc_mutex);mutex_init(&sbi->writepages);mutex_init(&sbi->cp_mutex);init_rwsem(&sbi->node_write);init_rwsem(&sbi->node_change);/* disallow all the data/node/meta page writes */set_sbi_flag(sbi, SBI_POR_DOING);spin_lock_init(&sbi->stat_lock);/* init iostat info */spin_lock_init(&sbi->iostat_lock);sbi->iostat_enable = false;for (i = 0; i < NR_PAGE_TYPE; i++) {int n = (i == META) ? 
1: NR_TEMP_TYPE;int j;sbi->write_io[i] =f2fs_kmalloc(sbi,array_size(n,sizeof(struct f2fs_bio_info)),GFP_KERNEL);if (!sbi->write_io[i]) {err = -ENOMEM;goto free_bio_info;}for (j = HOT; j < n; j++) {init_rwsem(&sbi->write_io[i][j].io_rwsem);sbi->write_io[i][j].sbi = sbi;sbi->write_io[i][j].bio = NULL;spin_lock_init(&sbi->write_io[i][j].io_lock);INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);}}init_rwsem(&sbi->cp_rwsem);init_waitqueue_head(&sbi->cp_wait);init_sb_info(sbi);err = init_percpu_info(sbi);if (err)goto free_bio_info;if (F2FS_IO_SIZE(sbi) > 1) {sbi->write_io_dummy =mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);if (!sbi->write_io_dummy) {err = -ENOMEM;goto free_percpu;}}/* get an inode for meta space */sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));if (IS_ERR(sbi->meta_inode)) {f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");err = PTR_ERR(sbi->meta_inode);goto free_io_dummy;}err = f2fs_get_valid_checkpoint(sbi);if (err) {f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");goto free_meta_inode;}if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;}/* Initialize device list */err = f2fs_scan_devices(sbi);if (err) {f2fs_msg(sb, KERN_ERR, "Failed to find devices");goto free_devices;}sbi->total_valid_node_count =le32_to_cpu(sbi->ckpt->valid_node_count);percpu_counter_set(&sbi->total_valid_inode_count,le32_to_cpu(sbi->ckpt->valid_inode_count));sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);sbi->total_valid_block_count =le64_to_cpu(sbi->ckpt->valid_block_count);sbi->last_valid_block_count = sbi->total_valid_block_count;sbi->reserved_blocks = 0;sbi->current_reserved_blocks = 0;limit_reserve_root(sbi);for (i = 0; i < NR_INODE_TYPE; i++) 
{INIT_LIST_HEAD(&sbi->inode_list[i]);spin_lock_init(&sbi->inode_lock[i]);}f2fs_init_extent_cache_info(sbi);f2fs_init_ino_entry_info(sbi);f2fs_init_fsync_node_info(sbi);/* setup checkpoint_cmd_control */err = f2fs_create_checkpoint_cmd_control(sbi);if (err) {f2fs_msg(sb, KERN_ERR,"Failed to initialize F2FS checkpoint_cmd_control");goto free_ccc;}/* setup f2fs internal modules */err = f2fs_build_segment_manager(sbi);if (err) {f2fs_msg(sb, KERN_ERR,"Failed to initialize F2FS segment manager");goto free_sm;}err = f2fs_build_node_manager(sbi);if (err) {f2fs_msg(sb, KERN_ERR,"Failed to initialize F2FS node manager");goto free_nm;}/* For write statistics */if (sb->s_bdev->bd_part)sbi->sectors_written_start =(u64)part_stat_read(sb->s_bdev->bd_part,sectors[STAT_WRITE]);/* Read accumulated write IO statistics if exists */seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);if (__exist_node_summaries(sbi))sbi->kbytes_written =le64_to_cpu(seg_i->journal->info.kbytes_written);f2fs_build_gc_manager(sbi);err = f2fs_build_stats(sbi);if (err)goto free_nm;/* get an inode for node space */sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));if (IS_ERR(sbi->node_inode)) {f2fs_msg(sb, KERN_ERR, "Failed to read node inode");err = PTR_ERR(sbi->node_inode);goto free_stats;}/* read root inode and dentry */root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));if (IS_ERR(root)) {f2fs_msg(sb, KERN_ERR, "Failed to read root inode");err = PTR_ERR(root);goto free_node_inode;}if (!S_ISDIR(root->i_mode) || !root->i_blocks ||!root->i_size || !root->i_nlink) {iput(root);err = -EINVAL;goto free_node_inode;}sb->s_root = d_make_root(root); /* allocate root dentry */if (!sb->s_root) {err = -ENOMEM;goto free_node_inode;}err = f2fs_register_sysfs(sbi);if (err)goto free_root_inode;#ifdef CONFIG_QUOTA/* Enable quota usage during mount */if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {err = f2fs_enable_quotas(sb);if (err)f2fs_msg(sb, KERN_ERR,"Cannot turn on quotas: error %d", err);} #endif/* if there are any orphan inodes, free 
them */err = f2fs_recover_orphan_inodes(sbi);if (err)goto free_meta;if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))goto reset_checkpoint;/* recover fsynced data */if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {/** mount should be failed, when device has readonly mode, and* previous checkpoint was not done by clean system shutdown.*/if (f2fs_hw_is_readonly(sbi)) {if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {err = -EROFS;f2fs_msg(sb, KERN_ERR,"Need to recover fsync data, but ""write access unavailable");goto free_meta;}f2fs_msg(sbi->sb, KERN_INFO, "write access ""unavailable, skipping recovery");goto reset_checkpoint;}if (need_fsck)set_sbi_flag(sbi, SBI_NEED_FSCK);if (skip_recovery)goto reset_checkpoint;err = f2fs_recover_fsync_data(sbi, false);if (err < 0) {if (err != -ENOMEM)skip_recovery = true;need_fsck = true;f2fs_msg(sb, KERN_ERR,"Cannot recover all fsync data errno=%d", err);goto free_meta;}} else {err = f2fs_recover_fsync_data(sbi, true);if (!f2fs_readonly(sb) && err > 0) {err = -EINVAL;f2fs_msg(sb, KERN_ERR,"Need to recover fsync data");goto free_meta;}} reset_checkpoint:/* f2fs_recover_fsync_data() cleared this already */clear_sbi_flag(sbi, SBI_POR_DOING);if (test_opt(sbi, DISABLE_CHECKPOINT)) {err = f2fs_disable_checkpoint(sbi);if (err)goto sync_free_meta;} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {f2fs_enable_checkpoint(sbi);}/** If filesystem is not mounted as read-only then* do start the gc_thread.*/if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {/* After POR, we can run background GC thread.*/err = f2fs_start_gc_thread(sbi);if (err)goto sync_free_meta;}kvfree(options);/* recover broken superblock */if (recovery) {err = f2fs_commit_super(sbi, true);f2fs_msg(sb, KERN_INFO,"Try to recover %dth superblock, ret: %d",sbi->valid_super_block ? 
1 : 2, err);}f2fs_join_shrinker(sbi);f2fs_tuning_parameters(sbi);f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx""Opts: %s", cur_cp_version(F2FS_CKPT(sbi)), orig_data);kfree(orig_data);f2fs_update_time(sbi, CP_TIME);f2fs_update_time(sbi, REQ_TIME);clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);return 0;sync_free_meta:/* safe to flush all the data */sync_filesystem(sbi->sb);retry_cnt = 0;free_meta: #ifdef CONFIG_QUOTAf2fs_truncate_quota_inode_pages(sb);if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))f2fs_quota_off_umount(sbi->sb); #endif/** Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()* failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()* followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which* falls into an infinite loop in f2fs_sync_meta_pages().*/truncate_inode_pages_final(META_MAPPING(sbi));/* evict some inodes being cached by GC */evict_inodes(sb);f2fs_unregister_sysfs(sbi); free_root_inode:dput(sb->s_root);sb->s_root = NULL; free_node_inode:f2fs_release_ino_entry(sbi, true);truncate_inode_pages_final(NODE_MAPPING(sbi));iput(sbi->node_inode);sbi->node_inode = NULL; free_stats:f2fs_destroy_stats(sbi); free_nm:f2fs_destroy_node_manager(sbi); free_sm:f2fs_destroy_segment_manager(sbi); free_ccc:f2fs_destroy_checkpoint_cmd_control(sbi, true); free_devices:destroy_device_list(sbi);kvfree(sbi->ckpt); free_meta_inode:make_bad_inode(sbi->meta_inode);iput(sbi->meta_inode);sbi->meta_inode = NULL; free_io_dummy:mempool_destroy(sbi->write_io_dummy); free_percpu:destroy_percpu_info(sbi); free_bio_info:for (i = 0; i < NR_PAGE_TYPE; i++)kvfree(sbi->write_io[i]); free_options: #ifdef CONFIG_QUOTAfor (i = 0; i < MAXQUOTAS; i++)kvfree(F2FS_OPTION(sbi).s_qf_names[i]); #endifkvfree(options); free_sb_buf:kvfree(raw_super); free_sbi:if (sbi->s_chksum_driver)crypto_free_shash(sbi->s_chksum_driver);kvfree(sbi);/* give only one another chance */if (retry_cnt > 0 && skip_recovery) 
{retry_cnt--;shrink_dcache_sb(sb);goto try_onemore;}kfree(orig_data);return err; }?
?
總結
以上是生活随笔為你收集整理的f2fs学习四: f2fs文件系统挂载的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: dcs world f15c教学_烟台T
- 下一篇: 键盘响应c语言,c 键盘响应