struct xfs_scrub_context *sc,
struct xfs_buf *bp)
{
+ struct xfs_mount *mp = sc->mp;
+ xfs_agnumber_t agno = sc->sm->sm_agno;
+ xfs_agblock_t agbno;
+ int error;
+
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
+
+ agbno = XFS_SB_BLOCK(mp);
+
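+ /* Set up this AG's headers and btree cursors for the xref checks below. */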
+ error = xfs_scrub_ag_init(sc, agno, &sc->sa);
+ if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+
+ /* scrub teardown will take care of sc->sa for us */
}
/* AGF */
+/* Tally freespace record lengths. */
+STATIC int
+xfs_scrub_agf_record_bno_lengths(
+ struct xfs_btree_cur *cur,
+ struct xfs_alloc_rec_incore *rec,
+ void *priv)
+{
+ xfs_extlen_t *blocks = priv;
+
+ (*blocks) += rec->ar_blockcount;
+ return 0;
+}
+
+/* Check agf_freeblks */
+static inline void
+xfs_scrub_agf_xref_freeblks(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_extlen_t blocks = 0;
+ int error;
+
+ if (!sc->sa.bno_cur)
+ return;
+
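+ /* Add up every bnobt record and compare the total against agf_freeblks. */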
+ error = xfs_alloc_query_all(sc->sa.bno_cur,
+ xfs_scrub_agf_record_bno_lengths, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ return;
+ if (blocks != be32_to_cpu(agf->agf_freeblks))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+}
+
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agf_xref(
struct xfs_scrub_context *sc)
{
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
+
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
+
+ agbno = XFS_AGF_BLOCK(mp);
+
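+ /* sc->sa already holds this AG's headers; only the btree cursors are needed here. */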
+ error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ if (error)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_agf_xref_freeblks(sc);
+
+ /* scrub teardown will take care of sc->sa for us */
}
/* Scrub the AGF. */
{
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
+
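+ /* Cross-reference: this block must not be marked free. */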
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
}
/* Scrub an AGFL block. */
xfs_scrub_agfl_xref(
struct xfs_scrub_context *sc)
{
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
+
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
+
+ agbno = XFS_AGFL_BLOCK(mp);
+
+ error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ if (error)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+
+ /*
+ * Scrub teardown will take care of sc->sa for us. Leave sc->sa
+ * active so that the agfl block xref can use it too.
+ */
}
/* Scrub the AGFL. */
xfs_scrub_agi_xref(
struct xfs_scrub_context *sc)
{
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
+
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
+
+ agbno = XFS_AGI_BLOCK(mp);
+
+ error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ if (error)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+
+ /* scrub teardown will take care of sc->sa for us */
}
/* Scrub the AGI. */
{
return xfs_scrub_allocbt(sc, XFS_BTNUM_CNT);
}
+
+/* xref check that the extent is not free */
+void
+xfs_scrub_xref_is_used_space(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ bool is_freesp;
+ int error;
+
+ if (!sc->sa.bno_cur)
+ return;
+
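+ /* Ask the bnobt whether any free space record overlaps this extent. */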
+ error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ return;
+ if (is_freesp)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
+}
struct xfs_btree_cur *cur,
struct xfs_bmbt_irec *irec)
{
+ struct xfs_mount *mp = info->sc->mp;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ int error;
+
if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
+
+ agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
+ agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
+ len = irec->br_blockcount;
+
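+ /* Mappings can be in any AG, so set up that AG's scrub context and release it below. */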
+ error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
+ if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
+ irec->br_startoff, &error))
+ return;
+
+ xfs_scrub_xref_is_used_space(info->sc, agbno, len);
+
+ xfs_scrub_ag_free(info->sc, &info->sc->sa);
}
/* Scrub a single extent record. */
xfs_daddr_t daddr)
{
xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_btnum_t btnum;
bool init_sa;
int error = 0;
if (!bs->cur)
return 0;
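+ /* Remember which btree we're checking; the bnobt case needs special handling below. */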
+ btnum = bs->cur->bc_btnum;
agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
+ agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);
init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
if (init_sa) {
return error;
}
+ xfs_scrub_xref_is_used_space(bs->sc, agbno, 1);
+ /*
+ * The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
+ * have to nullify it (to shut down further block owner checks) if
+ * self-xref encounters problems.
+ */
+ if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
+ bs->cur = NULL;
+
if (init_sa)
xfs_scrub_ag_free(bs->sc, &bs->sc->sa);
{
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
+
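+ /* Cross-reference: this extent must not be marked free. */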
+ xfs_scrub_xref_is_used_space(sc, agbno, len);
}
/* Is this chunk worth checking? */
xfs_ino_t ino,
struct xfs_dinode *dip)
{
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ int error;
+
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
+
+ agno = XFS_INO_TO_AGNO(sc->mp, ino);
+ agbno = XFS_INO_TO_AGBNO(sc->mp, ino);
+
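+ /* Set up the scrub context for the AG that holds this inode. */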
+ error = xfs_scrub_ag_init(sc, agno, &sc->sa);
+ if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+
+ xfs_scrub_ag_free(sc, &sc->sa);
}
/* Scrub an inode. */
{
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
+
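+ /* Cross-reference: the blocks covered by this record must not be free. */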
+ xfs_scrub_xref_is_used_space(sc, agbno, len);
}
/* Scrub a refcountbt record. */
struct xfs_scrub_context *sc,
struct xfs_rmap_irec *irec)
{
+ xfs_agblock_t agbno = irec->rm_startblock;
+ xfs_extlen_t len = irec->rm_blockcount;
+
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
+
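+ /* Space with a reverse mapping is allocated, so it must not also be free. */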
+ xfs_scrub_xref_is_used_space(sc, agbno, len);
}
/* Scrub an rmapbt record. */
}
#endif
+/* cross-referencing helpers */
+void xfs_scrub_xref_is_used_space(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len);
+
#endif /* __XFS_SCRUB_SCRUB_H__ */