Skip to content

Commit 13a2b85

Browse files
melanieplageman authored and Commitfest Bot committed
Remove XLOG_HEAP2_VISIBLE entirely
No remaining users emit XLOG_HEAP2_VISIBLE records, so remove the record type entirely. This includes deleting the xl_heap_visible struct and all functions responsible for emitting or replaying XLOG_HEAP2_VISIBLE records. Author: Melanie Plageman <[email protected]> Reviewed-by: Andrey Borodin <[email protected]>
1 parent a4f0551 commit 13a2b85

File tree

13 files changed

+54
-378
lines changed

13 files changed

+54
-378
lines changed

src/backend/access/common/bufmask.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,8 +56,8 @@ mask_page_hint_bits(Page page)
5656

5757
/*
5858
* During replay, if the page LSN has advanced past our XLOG record's LSN,
59-
* we don't mark the page all-visible. See heap_xlog_visible() for
60-
* details.
59+
* we don't mark the page all-visible. See heap_xlog_prune_and_freeze()
60+
* for more details.
6161
*/
6262
PageClearAllVisible(page);
6363
}

src/backend/access/heap/heapam.c

Lines changed: 5 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -2524,11 +2524,11 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
25242524
else if (all_frozen_set)
25252525
{
25262526
PageSetAllVisible(page);
2527-
visibilitymap_set_vmbits(BufferGetBlockNumber(buffer),
2528-
vmbuffer,
2529-
VISIBILITYMAP_ALL_VISIBLE |
2530-
VISIBILITYMAP_ALL_FROZEN,
2531-
relation->rd_locator);
2527+
visibilitymap_set(BufferGetBlockNumber(buffer),
2528+
vmbuffer,
2529+
VISIBILITYMAP_ALL_VISIBLE |
2530+
VISIBILITYMAP_ALL_FROZEN,
2531+
relation->rd_locator);
25322532
}
25332533

25342534
/*
@@ -8797,50 +8797,6 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
87978797
return nblocksfavorable;
87988798
}
87998799

8800-
/*
8801-
* Perform XLogInsert for a heap-visible operation. 'block' is the block
8802-
* being marked all-visible, and vm_buffer is the buffer containing the
8803-
* corresponding visibility map block. Both should have already been modified
8804-
* and dirtied.
8805-
*
8806-
* snapshotConflictHorizon comes from the largest xmin on the page being
8807-
* marked all-visible. REDO routine uses it to generate recovery conflicts.
8808-
*
8809-
* If checksums or wal_log_hints are enabled, we may also generate a full-page
8810-
* image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
8811-
* REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
8812-
* update the heap page's LSN.
8813-
*/
8814-
XLogRecPtr
8815-
log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer,
8816-
TransactionId snapshotConflictHorizon, uint8 vmflags)
8817-
{
8818-
xl_heap_visible xlrec;
8819-
XLogRecPtr recptr;
8820-
uint8 flags;
8821-
8822-
Assert(BufferIsValid(heap_buffer));
8823-
Assert(BufferIsValid(vm_buffer));
8824-
8825-
xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
8826-
xlrec.flags = vmflags;
8827-
if (RelationIsAccessibleInLogicalDecoding(rel))
8828-
xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL;
8829-
XLogBeginInsert();
8830-
XLogRegisterData(&xlrec, SizeOfHeapVisible);
8831-
8832-
XLogRegisterBuffer(0, vm_buffer, 0);
8833-
8834-
flags = REGBUF_STANDARD;
8835-
if (!XLogHintBitIsNeeded())
8836-
flags |= REGBUF_NO_IMAGE;
8837-
XLogRegisterBuffer(1, heap_buffer, flags);
8838-
8839-
recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
8840-
8841-
return recptr;
8842-
}
8843-
88448800
/*
88458801
* Perform XLogInsert for a heap-update operation. Caller must already
88468802
* have modified the buffer(s) and marked them dirty.

src/backend/access/heap/heapam_xlog.c

Lines changed: 8 additions & 147 deletions
Original file line numberDiff line numberDiff line change
@@ -251,7 +251,7 @@ heap_xlog_prune_freeze(XLogReaderState *record)
251251
if (PageIsNew(vmpage))
252252
PageInit(vmpage, BLCKSZ, 0);
253253

254-
visibilitymap_set_vmbits(blkno, vmbuffer, vmflags, rlocator);
254+
visibilitymap_set(blkno, vmbuffer, vmflags, rlocator);
255255

256256
Assert(BufferIsDirty(vmbuffer));
257257
PageSetLSN(vmpage, lsn);
@@ -264,142 +264,6 @@ heap_xlog_prune_freeze(XLogReaderState *record)
264264
XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
265265
}
266266

267-
/*
268-
* Replay XLOG_HEAP2_VISIBLE records.
269-
*
270-
* The critical integrity requirement here is that we must never end up with
271-
* a situation where the visibility map bit is set, and the page-level
272-
* PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
273-
* page modification would fail to clear the visibility map bit.
274-
*/
275-
static void
276-
heap_xlog_visible(XLogReaderState *record)
277-
{
278-
XLogRecPtr lsn = record->EndRecPtr;
279-
xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
280-
Buffer vmbuffer = InvalidBuffer;
281-
Buffer buffer;
282-
Page page;
283-
RelFileLocator rlocator;
284-
BlockNumber blkno;
285-
XLogRedoAction action;
286-
287-
Assert((xlrec->flags & VISIBILITYMAP_XLOG_VALID_BITS) == xlrec->flags);
288-
289-
XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);
290-
291-
/*
292-
* If there are any Hot Standby transactions running that have an xmin
293-
* horizon old enough that this page isn't all-visible for them, they
294-
* might incorrectly decide that an index-only scan can skip a heap fetch.
295-
*
296-
* NB: It might be better to throw some kind of "soft" conflict here that
297-
* forces any index-only scan that is in flight to perform heap fetches,
298-
* rather than killing the transaction outright.
299-
*/
300-
if (InHotStandby)
301-
ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
302-
xlrec->flags & VISIBILITYMAP_XLOG_CATALOG_REL,
303-
rlocator);
304-
305-
/*
306-
* Read the heap page, if it still exists. If the heap file has dropped or
307-
* truncated later in recovery, we don't need to update the page, but we'd
308-
* better still update the visibility map.
309-
*/
310-
action = XLogReadBufferForRedo(record, 1, &buffer);
311-
if (action == BLK_NEEDS_REDO)
312-
{
313-
/*
314-
* We don't bump the LSN of the heap page when setting the visibility
315-
* map bit (unless checksums or wal_hint_bits is enabled, in which
316-
* case we must). This exposes us to torn page hazards, but since
317-
* we're not inspecting the existing page contents in any way, we
318-
* don't care.
319-
*/
320-
page = BufferGetPage(buffer);
321-
322-
PageSetAllVisible(page);
323-
324-
if (XLogHintBitIsNeeded())
325-
PageSetLSN(page, lsn);
326-
327-
MarkBufferDirty(buffer);
328-
}
329-
else if (action == BLK_RESTORED)
330-
{
331-
/*
332-
* If heap block was backed up, we already restored it and there's
333-
* nothing more to do. (This can only happen with checksums or
334-
* wal_log_hints enabled.)
335-
*/
336-
}
337-
338-
if (BufferIsValid(buffer))
339-
{
340-
Size space = PageGetFreeSpace(BufferGetPage(buffer));
341-
342-
UnlockReleaseBuffer(buffer);
343-
344-
/*
345-
* Since FSM is not WAL-logged and only updated heuristically, it
346-
* easily becomes stale in standbys. If the standby is later promoted
347-
* and runs VACUUM, it will skip updating individual free space
348-
* figures for pages that became all-visible (or all-frozen, depending
349-
* on the vacuum mode,) which is troublesome when FreeSpaceMapVacuum
350-
* propagates too optimistic free space values to upper FSM layers;
351-
* later inserters try to use such pages only to find out that they
352-
* are unusable. This can cause long stalls when there are many such
353-
* pages.
354-
*
355-
* Forestall those problems by updating FSM's idea about a page that
356-
* is becoming all-visible or all-frozen.
357-
*
358-
* Do this regardless of a full-page image being applied, since the
359-
* FSM data is not in the page anyway.
360-
*/
361-
if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
362-
XLogRecordPageWithFreeSpace(rlocator, blkno, space);
363-
}
364-
365-
/*
366-
* Even if we skipped the heap page update due to the LSN interlock, it's
367-
* still safe to update the visibility map. Any WAL record that clears
368-
* the visibility map bit does so before checking the page LSN, so any
369-
* bits that need to be cleared will still be cleared.
370-
*/
371-
if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
372-
&vmbuffer) == BLK_NEEDS_REDO)
373-
{
374-
Page vmpage = BufferGetPage(vmbuffer);
375-
Relation reln;
376-
uint8 vmbits;
377-
378-
/* initialize the page if it was read as zeros */
379-
if (PageIsNew(vmpage))
380-
PageInit(vmpage, BLCKSZ, 0);
381-
382-
/* remove VISIBILITYMAP_XLOG_* */
383-
vmbits = xlrec->flags & VISIBILITYMAP_VALID_BITS;
384-
385-
/*
386-
* XLogReadBufferForRedoExtended locked the buffer. But
387-
* visibilitymap_set will handle locking itself.
388-
*/
389-
LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
390-
391-
reln = CreateFakeRelcacheEntry(rlocator);
392-
393-
visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
394-
xlrec->snapshotConflictHorizon, vmbits);
395-
396-
ReleaseBuffer(vmbuffer);
397-
FreeFakeRelcacheEntry(reln);
398-
}
399-
else if (BufferIsValid(vmbuffer))
400-
UnlockReleaseBuffer(vmbuffer);
401-
}
402-
403267
/*
404268
* Given an "infobits" field from an XLog record, set the correct bits in the
405269
* given infomask and infomask2 for the tuple touched by the record.
@@ -777,8 +641,8 @@ heap_xlog_multi_insert(XLogReaderState *record)
777641
*
778642
* During recovery, however, no concurrent writers exist. Therefore,
779643
* updating the VM without holding the heap page lock is safe enough. This
780-
* same approach is taken when replaying xl_heap_visible records (see
781-
* heap_xlog_visible()).
644+
* same approach is taken when replaying XLOG_HEAP2_PRUNE* records (see
645+
* heap_xlog_prune_and_freeze()).
782646
*/
783647
if ((xlrec->flags & XLH_INSERT_ALL_FROZEN_SET) &&
784648
XLogReadBufferForRedoExtended(record, 1, RBM_ZERO_ON_ERROR, false,
@@ -790,11 +654,11 @@ heap_xlog_multi_insert(XLogReaderState *record)
790654
if (PageIsNew(vmpage))
791655
PageInit(vmpage, BLCKSZ, 0);
792656

793-
visibilitymap_set_vmbits(blkno,
794-
vmbuffer,
795-
VISIBILITYMAP_ALL_VISIBLE |
796-
VISIBILITYMAP_ALL_FROZEN,
797-
rlocator);
657+
visibilitymap_set(blkno,
658+
vmbuffer,
659+
VISIBILITYMAP_ALL_VISIBLE |
660+
VISIBILITYMAP_ALL_FROZEN,
661+
rlocator);
798662

799663
Assert(BufferIsDirty(vmbuffer));
800664
PageSetLSN(vmpage, lsn);
@@ -1375,9 +1239,6 @@ heap2_redo(XLogReaderState *record)
13751239
case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP:
13761240
heap_xlog_prune_freeze(record);
13771241
break;
1378-
case XLOG_HEAP2_VISIBLE:
1379-
heap_xlog_visible(record);
1380-
break;
13811242
case XLOG_HEAP2_MULTI_INSERT:
13821243
heap_xlog_multi_insert(record);
13831244
break;

src/backend/access/heap/pruneheap.c

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1031,9 +1031,9 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
10311031
{
10321032
Assert(PageIsAllVisible(page));
10331033

1034-
old_vmbits = visibilitymap_set_vmbits(blockno,
1035-
vmbuffer, new_vmbits,
1036-
params->relation->rd_locator);
1034+
old_vmbits = visibilitymap_set(blockno,
1035+
vmbuffer, new_vmbits,
1036+
params->relation->rd_locator);
10371037
if (old_vmbits == new_vmbits)
10381038
{
10391039
LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
@@ -2308,14 +2308,18 @@ get_conflict_xid(bool do_prune, bool do_freeze, bool do_set_vm,
23082308
*
23092309
* This is used for several different page maintenance operations:
23102310
*
2311-
* - Page pruning, in VACUUM's 1st pass or on access: Some items are
2311+
* - Page pruning, in vacuum phase I or on-access: Some items are
23122312
* redirected, some marked dead, and some removed altogether.
23132313
*
2314-
* - Freezing: Items are marked as 'frozen'.
2314+
* - Freezing: During vacuum phase I, items are marked as 'frozen'
23152315
*
2316-
* - Vacuum, 2nd pass: Items that are already LP_DEAD are marked as unused.
2316+
* - Reaping: During vacuum phase III, items that are already LP_DEAD are
2317+
* marked as unused.
23172318
*
2318-
* They have enough commonalities that we use a single WAL record for them
2319+
* - VM updates: After vacuum phases I and III, the heap page may be marked
2320+
* all-visible and all-frozen.
2321+
*
2322+
* These changes all happen together, so we use a single WAL record for them
23192323
* all.
23202324
*
23212325
* If replaying the record requires a cleanup lock, pass cleanup_lock = true.

src/backend/access/heap/vacuumlazy.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1900,11 +1900,11 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
19001900
log_newpage_buffer(buf, true);
19011901

19021902
PageSetAllVisible(page);
1903-
visibilitymap_set_vmbits(blkno,
1904-
vmbuffer,
1905-
VISIBILITYMAP_ALL_VISIBLE |
1906-
VISIBILITYMAP_ALL_FROZEN,
1907-
vacrel->rel->rd_locator);
1903+
visibilitymap_set(blkno,
1904+
vmbuffer,
1905+
VISIBILITYMAP_ALL_VISIBLE |
1906+
VISIBILITYMAP_ALL_FROZEN,
1907+
vacrel->rel->rd_locator);
19081908

19091909
/*
19101910
* Emit WAL for setting PD_ALL_VISIBLE on the heap page and
@@ -2786,9 +2786,9 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
27862786
* set PD_ALL_VISIBLE.
27872787
*/
27882788
PageSetAllVisible(page);
2789-
visibilitymap_set_vmbits(blkno,
2790-
vmbuffer, vmflags,
2791-
vacrel->rel->rd_locator);
2789+
visibilitymap_set(blkno,
2790+
vmbuffer, vmflags,
2791+
vacrel->rel->rd_locator);
27922792
conflict_xid = visibility_cutoff_xid;
27932793
}
27942794

0 commit comments

Comments
 (0)