Category: Architecture Design and Optimization

2013-02-13 12:19:04

/*
 * The buffer path could be optimized by reducing in-memory copies:
 * most data in a DMU buffer has only one dirty record (dr), and in
 * that case we do not need to copy the data in dbuf_fix_old_data().
 * (A sketch of this idea follows the function listing below.)
 */
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context.  Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty.  They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too?  The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);
	/*
	 * added by shenyan: record the dnode's dirty context (open vs.
	 * syncing) if it has not been set yet.
	 */
	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_PUSHPAGE);
	}
	mutex_exit(&dn->dn_mtx);
	/* added by shenyan: end of dnode dirty-context marking. */
	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	/*
	 * At this point either dr == NULL or dr->dr_txg <= tx->tx_txg.
	 */
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
			/*
			 * If this buffer has already been written out,
			 * we now need to reset its state.
			 */
			dbuf_unoverride(dr);
			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
			    db->db_state != DB_NOFILL)
				arc_buf_thaw(db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context.  Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx.  This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_PUSHPAGE);
	list_link_init(&dr->dr_dirty_node);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				/*
				 * dbuf_fix_old_data() copied db->db.db_data
				 * into db->db_last_dirty->dt.dl.dr_data.
				 */
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block.  Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
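		/*
		 * Indirect blocks do not snapshot data; they track the
		 * dirty records of their children in dr_children and
		 * are rewritten from those at sync time.
		 */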
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;
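	/*
	 * dr is now linked on db->db_last_dirty, which is kept sorted
	 * by txg, newest first.
	 */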

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dnode_clear_range(dn, db->db_blkid, 1, tx);
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
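		/*
		 * Bonus and spill buffers have no parent indirect block,
		 * so their dirty records go directly onto the dnode's
		 * per-txg dirty list.
		 */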
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet.  We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
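		/*
		 * Hold the dnode's structure lock (at least as reader)
		 * so the block tree cannot change while we locate and
		 * dirty this block's parent below.
		 */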
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
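		/*
		 * Recursively dirty the parent indirect block; the
		 * returned record di becomes dr's parent.
		 */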
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/*  possible race with dbuf_undirty() */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}
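
Where might the optimization from the header comment hook in? The sketch below is only an illustration of that idea, not code from the ZFS tree: the wrapper name dbuf_fix_old_data_if_needed() is invented here, and it assumes, as the header comment does, that a dbuf with no earlier dirty record has nothing to preserve, so the fixup (and any copy) can be skipped outright. The level-0, non-bonus branch of dbuf_dirty() above would call this wrapper in place of dbuf_fix_old_data().

/*
 * Hypothetical wrapper illustrating the single-dr fast path
 * suggested in the header comment.  Caller holds db->db_mtx.
 */
static void
dbuf_fix_old_data_if_needed(dmu_buf_impl_t *db, uint64_t txg)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * Fast path: the record being created in this txg will be the
	 * only dirty record, so no older txg can still reference the
	 * buffer and there is nothing to copy.
	 */
	if (db->db_last_dirty == NULL)
		return;

	/*
	 * Otherwise take the stock path, which copies only when the
	 * previous dirty record still shares db->db_buf (or, for the
	 * bonus buffer, db->db.db_data).
	 */
	dbuf_fix_old_data(db, txg);
}

Since dbuf_dirty() links the new record into db->db_last_dirty only after the fixup is done, checking db_last_dirty == NULL at the call site is a safe way to detect the single-dr case.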