diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 1531bd0ee359c92469a0813c857c00deeace266b..ae18c032778e515805ab58c67e689381f71b89fd 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -967,9 +967,7 @@ xfs_ag_shrink_space(
	 * Disable perag reservations so it doesn't cause the allocation request
	 * to fail. We'll reestablish reservation before we return.
	 */
-	error = xfs_ag_resv_free(pag);
-	if (error)
-		return error;
+	xfs_ag_resv_free(pag);
 
	/* internal log shouldn't also show up in the free space btrees */
	error = xfs_alloc_vextent_exact_bno(&args,
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index 7fd1fea95552f163ab341f9bf193118ab0587b8c..7f3e4562e4ac97dca446027280809bff1f3ceb0a 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -126,14 +126,13 @@ xfs_ag_resv_needed(
 }
 
 /* Clean out a reservation */
-static int
+static void
 __xfs_ag_resv_free(
	struct xfs_perag		*pag,
	enum xfs_ag_resv_type		type)
 {
	struct xfs_ag_resv		*resv;
	xfs_extlen_t			oldresv;
-	int				error;
 
	trace_xfs_ag_resv_free(pag, type, 0);
 
@@ -149,30 +148,19 @@ __xfs_ag_resv_free(
		oldresv = resv->ar_orig_reserved;
	else
		oldresv = resv->ar_reserved;
-	error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
+	xfs_add_fdblocks(pag->pag_mount, oldresv);
	resv->ar_reserved = 0;
	resv->ar_asked = 0;
	resv->ar_orig_reserved = 0;
-
-	if (error)
-		trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
-				error, _RET_IP_);
-	return error;
 }
 
 /* Free a per-AG reservation. */
-int
+void
 xfs_ag_resv_free(
	struct xfs_perag		*pag)
 {
-	int				error;
-	int				err2;
-
-	error = __xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
-	err2 = __xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
-	if (err2 && !error)
-		error = err2;
-	return error;
+	__xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
+	__xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
 }
 
 static int
@@ -216,7 +204,7 @@ __xfs_ag_resv_init(
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_AG_RESV_FAIL))
		error = -ENOSPC;
	else
-		error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
+		error = xfs_dec_fdblocks(mp, hidden_space, true);
	if (error) {
		trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
				error, _RET_IP_);
diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h
index b74b210008ea7e973f296b3fca524887fe27bef3..ff20ed93de77243610125edd75b8d69e4f101e20 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.h
+++ b/fs/xfs/libxfs/xfs_ag_resv.h
@@ -6,7 +6,7 @@
 #ifndef __XFS_AG_RESV_H__
 #define __XFS_AG_RESV_H__
 
-int xfs_ag_resv_free(struct xfs_perag *pag);
+void xfs_ag_resv_free(struct xfs_perag *pag);
 int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);
 
 bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
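
The ag_resv side of the conversion works because giving blocks back can never fail: xfs_mod_fdblocks() could only return an error for a negative delta, so every error leg removed above was already dead code. A fragment for illustration (not part of the patch itself):

	/* before: a positive delta always succeeded, but the int return
	 * forced callers to carry error plumbing anyway */
	error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);

	/* after: the signature states what the code always did */
	xfs_add_fdblocks(pag->pag_mount, oldresv);

With __xfs_ag_resv_free() infallible, xfs_ag_resv_free() becomes void, the callers in xfs_ag_shrink_space(), growfs, and remount-ro drop their error handling, and the xfs_ag_resv_free_error tracepoint (removed from xfs_trace.h below) loses its only trigger.
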
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 415e96476232201c7c4314382eabaf120a128b8e..6c31154b26e5b6f4748be0bdc3a908d8a6d87549 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -79,7 +79,7 @@ xfs_prealloc_blocks(
 }
 
 /*
- * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
+ * The number of blocks per AG that we withhold from xfs_dec_fdblocks to
  * guarantee that we can refill the AGFL prior to allocating space in a nearly
  * full AG.  Although the space described by the free space btrees, the
  * blocks used by the freesp btrees themselves, and the blocks owned by the
@@ -89,7 +89,7 @@ xfs_prealloc_blocks(
  * until the fs goes down, we subtract this many AG blocks from the incore
  * fdblocks to ensure user allocation does not overcommit the space the
  * filesystem needs for the AGFLs.  The rmap btree uses a per-AG reservation to
- * withhold space from xfs_mod_fdblocks, so we do not account for that here.
+ * withhold space from xfs_dec_fdblocks, so we do not account for that here.
  */
 #define	XFS_ALLOCBT_AGFL_RESERVE	4
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 9ed22d82c000fd065d60c9821a52053db7b25915..c78a56b1ecca49a94ee169023e7d62021de46de1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1941,9 +1941,10 @@ xfs_bmap_add_extent_delay_real(
	}
 
	/* adjust for changes in reserved delayed indirect blocks */
-	if (da_new != da_old)
-		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
-				true);
+	if (da_new < da_old)
+		xfs_add_fdblocks(mp, da_old - da_new);
+	else if (da_new > da_old)
+		error = xfs_dec_fdblocks(mp, da_new - da_old, true);
 
	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
 done:
@@ -2621,8 +2622,8 @@ xfs_bmap_add_extent_hole_delay(
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
-		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
-				false);
+		xfs_add_fdblocks(ip->i_mount, oldlen - newlen);
+
		/*
		 * Nothing to do for disk quota accounting here.
		 */
@@ -4028,11 +4029,11 @@ xfs_bmapi_reserve_delalloc(
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);
 
-	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
+	error = xfs_dec_fdblocks(mp, alen, false);
	if (error)
		goto out_unreserve_quota;
 
-	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
+	error = xfs_dec_fdblocks(mp, indlen, false);
	if (error)
		goto out_unreserve_blocks;
 
@@ -4060,7 +4061,7 @@ xfs_bmapi_reserve_delalloc(
	return 0;
 
 out_unreserve_blocks:
-	xfs_mod_fdblocks(mp, alen, false);
+	xfs_add_fdblocks(mp, alen);
 out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_quota_unreserve_blkres(ip, alen);
@@ -4919,7 +4920,7 @@ xfs_bmap_del_extent_delay(
		uint64_t	rtexts = del->br_blockcount;
 
		do_div(rtexts, mp->m_sb.sb_rextsize);
-		xfs_mod_frextents(mp, rtexts);
+		xfs_add_frextents(mp, rtexts);
	}
 
	/*
@@ -5007,7 +5008,7 @@ xfs_bmap_del_extent_delay(
	if (!isrt)
		da_diff += del->br_blockcount;
	if (da_diff) {
-		xfs_mod_fdblocks(mp, da_diff, false);
+		xfs_add_fdblocks(mp, da_diff);
		xfs_mod_delalloc(mp, -da_diff);
	}
	return error;
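
The delalloc hunks make the direction of each adjustment explicit where the old code folded both cases into one signed xfs_mod_fdblocks() call. Shrinking the worst-case indirect reservation returns blocks and cannot fail; growing it takes blocks and may return -ENOSPC, with rsvd == true so it can dip into the reserve pool. A hedged sketch of the pattern the xfs_bmap_add_extent_delay_real() hunk open-codes, using a hypothetical helper name:

/* Hypothetical helper, for illustration only. */
static int
example_adjust_da_resv(
	struct xfs_mount	*mp,
	xfs_filblks_t		da_old,
	xfs_filblks_t		da_new)
{
	if (da_new < da_old) {
		/* e.g. da_old = 8, da_new = 5: give 3 blocks back */
		xfs_add_fdblocks(mp, da_old - da_new);
		return 0;
	}
	if (da_new > da_old)
		/* may dip into reserves, can still see -ENOSPC */
		return xfs_dec_fdblocks(mp, da_new - da_old, true);
	return 0;
}
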
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index 5799e9a94f1f665dba31af5b5908248311aa1fb1..5c6d7244078942fc3379120014ed95f34612484a 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -516,7 +516,7 @@ xchk_fscounters(
 
	/*
	 * If the filesystem is not frozen, the counter summation calls above
-	 * can race with xfs_mod_freecounter, which subtracts a requested space
+	 * can race with xfs_dec_freecounter, which subtracts a requested space
	 * reservation from the counter and undoes the subtraction if that made
	 * the counter go negative.  Therefore, it's possible to see negative
	 * values here, and we should only flag that as a corruption if we
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index c3f0e3cae87e56b248f47aefb5877219b042747b..6fd6ef8a9c8147b58a39ac8684691cea2dd6b2cd 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -213,10 +213,8 @@ xfs_growfs_data_private(
		struct xfs_perag	*pag;
 
		pag = xfs_perag_get(mp, id.agno);
-		error = xfs_ag_resv_free(pag);
+		xfs_ag_resv_free(pag);
		xfs_perag_put(pag);
-		if (error)
-			return error;
	}
 
	/*
	 * Reserve AG metadata blocks. ENOSPC here does not mean there
@@ -424,14 +422,14 @@ xfs_reserve_blocks(
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
-		if (lcounter  > 0) {		/* release unused blocks */
+		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
-			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
+			xfs_add_fdblocks(mp, fdblks_delta);
			spin_lock(&mp->m_sb_lock);
		}
 
@@ -467,9 +465,9 @@ xfs_reserve_blocks(
		 */
		fdblks_delta = min(free, delta);
		spin_unlock(&mp->m_sb_lock);
-		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
+		error = xfs_dec_fdblocks(mp, fdblks_delta, 0);
		if (!error)
-			xfs_mod_fdblocks(mp, fdblks_delta, 0);
+			xfs_add_fdblocks(mp, fdblks_delta);
		spin_lock(&mp->m_sb_lock);
	}
 out:
@@ -600,24 +598,13 @@ xfs_fs_reserve_ag_blocks(
 /*
  * Free space reserved for per-AG metadata.
  */
-int
+void
 xfs_fs_unreserve_ag_blocks(
	struct xfs_mount	*mp)
 {
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
-	int			error = 0;
-	int			err2;
 
-	for_each_perag(mp, agno, pag) {
-		err2 = xfs_ag_resv_free(pag);
-		if (err2 && !error)
-			error = err2;
-	}
-
-	if (error)
-		xfs_warn(mp,
-	"Error %d freeing per-AG metadata reserve pool.", error);
-
-	return error;
+	for_each_perag(mp, agno, pag)
+		xfs_ag_resv_free(pag);
 }
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index 2cffe51a31e8b24e9de9eeb5432fb2b838fda4e6..dba17c404e7d105e8cbc13cd58aaf0850fc49ab8 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -14,6 +14,6 @@ extern int xfs_reserve_blocks(xfs_mount_t *mp, uint64_t *inval,
 extern int xfs_fs_goingdown(xfs_mount_t *mp, uint32_t inflags);
 
 extern int xfs_fs_reserve_ag_blocks(struct xfs_mount *mp);
-extern int xfs_fs_unreserve_ag_blocks(struct xfs_mount *mp);
+extern void xfs_fs_unreserve_ag_blocks(struct xfs_mount *mp);
 
 #endif	/* __XFS_FSOPS_H__ */
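
xfs_reserve_blocks() keeps its probe idiom, now spelled directionally: take blocks with the fallible call, then immediately hand them back. The add is guaranteed to land in the just-enlarged reserve pool first, because xfs_add_freecounter() (below) refills m_resblks_avail before touching the global counter. A sketch of the idiom under a hypothetical name:

/* Hypothetical wrapper around the probe idiom, for illustration only. */
static uint64_t
example_claim_for_resv_pool(
	struct xfs_mount	*mp,
	uint64_t		want)
{
	if (xfs_dec_fdblocks(mp, want, false))
		return 0;		/* -ENOSPC: nothing claimed */
	xfs_add_fdblocks(mp, want);	/* refills the depleted pool first */
	return want;
}
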
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 78c72d0aa0a60be0b3f8116c314f697ded0beeb7..ece135ddfe982f629931ac17e34e97f4e0ae1190 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1142,16 +1142,44 @@ xfs_fs_writable(
	return true;
 }
 
-/* Adjust m_fdblocks or m_frextents. */
+void
+xfs_add_freecounter(
+	struct xfs_mount	*mp,
+	struct percpu_counter	*counter,
+	uint64_t		delta)
+{
+	bool			has_resv_pool = (counter == &mp->m_fdblocks);
+	uint64_t		res_used;
+
+	/*
+	 * If the reserve pool is depleted, put blocks back into it first.
+	 * Most of the time the pool is full.
+	 */
+	if (!has_resv_pool || mp->m_resblks == mp->m_resblks_avail) {
+		percpu_counter_add(counter, delta);
+		return;
+	}
+
+	spin_lock(&mp->m_sb_lock);
+	res_used = mp->m_resblks - mp->m_resblks_avail;
+	if (res_used > delta) {
+		mp->m_resblks_avail += delta;
+	} else {
+		delta -= res_used;
+		mp->m_resblks_avail = mp->m_resblks;
+		percpu_counter_add(counter, delta);
+	}
+	spin_unlock(&mp->m_sb_lock);
+}
+
 int
-xfs_mod_freecounter(
+xfs_dec_freecounter(
	struct xfs_mount	*mp,
	struct percpu_counter	*counter,
-	int64_t			delta,
+	uint64_t		delta,
	bool			rsvd)
 {
	int64_t			lcounter;
-	long long		res_used;
	uint64_t		set_aside = 0;
	s32			batch;
	bool			has_resv_pool;
@@ -1161,31 +1189,6 @@ xfs_mod_freecounter(
	if (rsvd)
		ASSERT(has_resv_pool);
 
-	if (delta > 0) {
-		/*
-		 * If the reserve pool is depleted, put blocks back into it
-		 * first. Most of the time the pool is full.
-		 */
-		if (likely(!has_resv_pool ||
-			   mp->m_resblks == mp->m_resblks_avail)) {
-			percpu_counter_add(counter, delta);
-			return 0;
-		}
-
-		spin_lock(&mp->m_sb_lock);
-		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
-
-		if (res_used > delta) {
-			mp->m_resblks_avail += delta;
-		} else {
-			delta -= res_used;
-			mp->m_resblks_avail = mp->m_resblks;
-			percpu_counter_add(counter, delta);
-		}
-		spin_unlock(&mp->m_sb_lock);
-		return 0;
-	}
-
	/*
	 * Taking blocks away, need to be more accurate the closer we
	 * are to zero.
@@ -1213,7 +1216,7 @@
	 */
	if (has_resv_pool)
		set_aside = xfs_fdblocks_unavailable(mp);
-	percpu_counter_add_batch(counter, delta, batch);
+	percpu_counter_add_batch(counter, -((int64_t)delta), batch);
	if (__percpu_counter_compare(counter, set_aside,
			XFS_FDBLOCKS_BATCH) >= 0) {
		/* we had space! */
@@ -1225,11 +1228,11 @@
	 * that took us to ENOSPC.
	 */
	spin_lock(&mp->m_sb_lock);
-	percpu_counter_add(counter, -delta);
+	percpu_counter_add(counter, delta);
	if (!has_resv_pool || !rsvd)
		goto fdblocks_enospc;
 
-	lcounter = (long long)mp->m_resblks_avail + delta;
+	lcounter = (long long)mp->m_resblks_avail - delta;
	if (lcounter >= 0) {
		mp->m_resblks_avail = lcounter;
		spin_unlock(&mp->m_sb_lock);
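
The arithmetic in xfs_add_freecounter() is easy to check outside the kernel. Below is a self-contained model (assumed names, plain integers standing in for the percpu counter, not kernel code):

#include <assert.h>
#include <stdint.h>

struct model {
	uint64_t	fdblocks;	/* stands in for the percpu counter */
	uint64_t	resblks;	/* mp->m_resblks */
	uint64_t	resblks_avail;	/* mp->m_resblks_avail */
};

static void
model_add(struct model *m, uint64_t delta)
{
	uint64_t	res_used = m->resblks - m->resblks_avail;

	if (res_used == 0) {		/* fast path: pool already full */
		m->fdblocks += delta;
	} else if (res_used > delta) {	/* everything refills the pool */
		m->resblks_avail += delta;
	} else {			/* top up the pool, rest goes global */
		m->resblks_avail = m->resblks;
		m->fdblocks += delta - res_used;
	}
}

int
main(void)
{
	struct model	m = { .fdblocks = 100, .resblks = 16, .resblks_avail = 12 };

	model_add(&m, 10);	/* 4 blocks refill the pool, 6 go global */
	assert(m.resblks_avail == 16);
	assert(m.fdblocks == 106);
	return 0;
}

On the decrement side only the sign convention changes: delta is now an unsigned count, so the hot path negates explicitly with -((int64_t)delta) instead of receiving a negative delta from the caller.
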
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index d19cca099bc3a701786c3e5e9871f086ac3f290c..25f3a52964dcd76ad795a2a3bd7cf5fc61e62c4c 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -532,19 +532,30 @@ xfs_fdblocks_unavailable(
	return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
 }
 
-int xfs_mod_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
-		int64_t delta, bool rsvd);
+int xfs_dec_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
+		uint64_t delta, bool rsvd);
+void xfs_add_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
+		uint64_t delta);
 
-static inline int
-xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool reserved)
+static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta,
+		bool reserved)
 {
-	return xfs_mod_freecounter(mp, &mp->m_fdblocks, delta, reserved);
+	return xfs_dec_freecounter(mp, &mp->m_fdblocks, delta, reserved);
 }
 
-static inline int
-xfs_mod_frextents(struct xfs_mount *mp, int64_t delta)
+static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta)
 {
-	return xfs_mod_freecounter(mp, &mp->m_frextents, delta, false);
+	xfs_add_freecounter(mp, &mp->m_fdblocks, delta);
+}
+
+static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta)
+{
+	return xfs_dec_freecounter(mp, &mp->m_frextents, delta, false);
+}
+
+static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta)
+{
+	xfs_add_freecounter(mp, &mp->m_frextents, delta);
 }
 
 extern int xfs_readsb(xfs_mount_t *, int);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index ca82472d2558b18f7b0776bcb0b02356bc2dfd53..46b2a220cce46724fae046592c8cae074ad584a0 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1889,11 +1889,7 @@ xfs_remount_ro(
	xfs_inodegc_stop(mp);
 
	/* Free the per-AG metadata reservation pool. */
-	error = xfs_fs_unreserve_ag_blocks(mp);
-	if (error) {
-		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-		return error;
-	}
+	xfs_fs_unreserve_ag_blocks(mp);
 
	/*
	 * Before we sync the metadata, we need to free up the reserve block
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index d562028281f5c7f7bea7facae3f2cf13bca1da66..576837e10fe70ac76c188149c9c96267a5a0c07b 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -2915,7 +2915,6 @@ DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent);
 DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical);
 DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed);
 
-DEFINE_AG_ERROR_EVENT(xfs_ag_resv_free_error);
 DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
 
 /* refcount tracepoint classes */
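
The xfs_mount.h wrappers encode the asymmetry in the type system: the xfs_dec_* helpers return int and must be unwound on failure, the xfs_add_* helpers return void. A hedged sketch (hypothetical function, made-up quantities) of the shape xfs_trans_reserve() takes below:

/* Hypothetical two-resource reservation, for illustration only. */
static int
example_reserve_blocks_and_rtx(
	struct xfs_mount	*mp,
	uint64_t		blocks,
	uint64_t		rtx)
{
	int			error;

	error = xfs_dec_fdblocks(mp, blocks, false);
	if (error)
		return error;
	error = xfs_dec_frextents(mp, rtx);
	if (error)
		xfs_add_fdblocks(mp, blocks);	/* unwind; cannot fail */
	return error;
}
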
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 3385813dc080bd7e62575993ff60ed7ee38a2584..a0e4bae90f77d820958070769d7c284299275300 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -163,7 +163,7 @@ xfs_trans_reserve(
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
-		error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
+		error = xfs_dec_fdblocks(mp, blocks, rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
@@ -210,7 +210,7 @@ xfs_trans_reserve(
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
-		error = xfs_mod_frextents(mp, -((int64_t)rtextents));
+		error = xfs_dec_frextents(mp, rtextents);
		if (error) {
			error = -ENOSPC;
			goto undo_log;
@@ -234,7 +234,7 @@ xfs_trans_reserve(
 
 undo_blocks:
	if (blocks > 0) {
-		xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
+		xfs_add_fdblocks(mp, blocks);
		tp->t_blk_res = 0;
	}
	return error;
@@ -600,12 +598,10 @@ xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
 {
	struct xfs_mount	*mp = tp->t_mountp;
-	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
-	int			error;
 
	/* calculate deltas */
	if (tp->t_blk_res > 0)
@@ -628,10 +626,8 @@ xfs_trans_unreserve_and_mod_sb(
	}
 
	/* apply the per-cpu counters */
-	if (blkdelta) {
-		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
-		ASSERT(!error);
-	}
+	if (blkdelta)
+		xfs_add_fdblocks(mp, blkdelta);
 
	if (idelta)
		percpu_counter_add_batch(&mp->m_icount, idelta,
@@ -640,10 +636,8 @@ xfs_trans_unreserve_and_mod_sb(
	if (ifreedelta)
		percpu_counter_add(&mp->m_ifree, ifreedelta);
 
-	if (rtxdelta) {
-		error = xfs_mod_frextents(mp, rtxdelta);
-		ASSERT(!error);
-	}
+	if (rtxdelta)
+		xfs_add_frextents(mp, rtxdelta);
 
	if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;
@@ -675,7 +669,6 @@ xfs_trans_unreserve_and_mod_sb(
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
-	return;
 }
 
 /* Add the given log item to the transaction's list of log items. */
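
At commit time the same asymmetry lets xfs_trans_unreserve_and_mod_sb() drop the rsvd flag and the ASSERT(!error) pairs: it only ever returns space, which cannot fail. A sketch of the whole reservation lifetime under hypothetical names:

/* Hypothetical reserve/consume/release cycle, for illustration only. */
static int
example_blk_res_lifetime(
	struct xfs_mount	*mp,
	uint64_t		resblks,
	uint64_t		used)
{
	int			error;

	error = xfs_dec_fdblocks(mp, resblks, false);	/* reserve */
	if (error)
		return error;				/* -ENOSPC */
	/* ... the transaction runs and consumes 'used' blocks ... */
	xfs_add_fdblocks(mp, resblks - used);		/* return the rest */
	return 0;
}
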