@@ -251,7 +251,7 @@ heap_xlog_prune_freeze(XLogReaderState *record)
 		if (PageIsNew(vmpage))
 			PageInit(vmpage, BLCKSZ, 0);
 
-		visibilitymap_set_vmbits(blkno, vmbuffer, vmflags, rlocator);
+		visibilitymap_set(blkno, vmbuffer, vmflags, rlocator);
 
 		Assert(BufferIsDirty(vmbuffer));
 		PageSetLSN(vmpage, lsn);
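For reference, here is the recovery-side pattern that both converted call sites in this commit share, pulled together in one place. This is a condensed sketch rather than code from the commit: the four-argument visibilitymap_set() signature is taken from the hunk above, while the wrapper name replay_set_vm_bits() is hypothetical, used only for illustration.

static void
replay_set_vm_bits(XLogReaderState *record, uint8 block_id, BlockNumber blkno,
				   uint8 vmflags, RelFileLocator rlocator, XLogRecPtr lsn)
{
	Buffer		vmbuffer = InvalidBuffer;

	/* Read the VM page, zero-filling it if it doesn't exist yet. */
	if (XLogReadBufferForRedoExtended(record, block_id, RBM_ZERO_ON_ERROR,
									  false, &vmbuffer) == BLK_NEEDS_REDO)
	{
		Page		vmpage = BufferGetPage(vmbuffer);

		/* Initialize the page if it was read as zeros. */
		if (PageIsNew(vmpage))
			PageInit(vmpage, BLCKSZ, 0);

		/* Recovery-only variant: no heap buffer is passed or locked. */
		visibilitymap_set(blkno, vmbuffer, vmflags, rlocator);

		Assert(BufferIsDirty(vmbuffer));
		PageSetLSN(vmpage, lsn);
	}

	if (BufferIsValid(vmbuffer))
		UnlockReleaseBuffer(vmbuffer);
}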
@@ -264,142 +264,6 @@ heap_xlog_prune_freeze(XLogReaderState *record)
 	XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
 }
 
-/*
- * Replay XLOG_HEAP2_VISIBLE records.
- *
- * The critical integrity requirement here is that we must never end up with
- * a situation where the visibility map bit is set, and the page-level
- * PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
- * page modification would fail to clear the visibility map bit.
- */
-static void
-heap_xlog_visible(XLogReaderState *record)
-{
-	XLogRecPtr	lsn = record->EndRecPtr;
-	xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
-	Buffer		vmbuffer = InvalidBuffer;
-	Buffer		buffer;
-	Page		page;
-	RelFileLocator rlocator;
-	BlockNumber blkno;
-	XLogRedoAction action;
-
-	Assert((xlrec->flags & VISIBILITYMAP_XLOG_VALID_BITS) == xlrec->flags);
-
-	XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);
-
-	/*
-	 * If there are any Hot Standby transactions running that have an xmin
-	 * horizon old enough that this page isn't all-visible for them, they
-	 * might incorrectly decide that an index-only scan can skip a heap fetch.
-	 *
-	 * NB: It might be better to throw some kind of "soft" conflict here that
-	 * forces any index-only scan that is in flight to perform heap fetches,
-	 * rather than killing the transaction outright.
-	 */
-	if (InHotStandby)
-		ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
-											xlrec->flags & VISIBILITYMAP_XLOG_CATALOG_REL,
-											rlocator);
-
-	/*
-	 * Read the heap page, if it still exists. If the heap file has been
-	 * dropped or truncated later in recovery, we don't need to update the
-	 * page, but we'd better still update the visibility map.
-	 */
-	action = XLogReadBufferForRedo(record, 1, &buffer);
-	if (action == BLK_NEEDS_REDO)
-	{
-		/*
-		 * We don't bump the LSN of the heap page when setting the visibility
-		 * map bit (unless checksums or wal_log_hints is enabled, in which
-		 * case we must). This exposes us to torn page hazards, but since
-		 * we're not inspecting the existing page contents in any way, we
-		 * don't care.
-		 */
-		page = BufferGetPage(buffer);
-
-		PageSetAllVisible(page);
-
-		if (XLogHintBitIsNeeded())
-			PageSetLSN(page, lsn);
-
-		MarkBufferDirty(buffer);
-	}
-	else if (action == BLK_RESTORED)
-	{
-		/*
-		 * If heap block was backed up, we already restored it and there's
-		 * nothing more to do. (This can only happen with checksums or
-		 * wal_log_hints enabled.)
-		 */
-	}
-
-	if (BufferIsValid(buffer))
-	{
-		Size		space = PageGetFreeSpace(BufferGetPage(buffer));
-
-		UnlockReleaseBuffer(buffer);
-
-		/*
-		 * Since FSM is not WAL-logged and only updated heuristically, it
-		 * easily becomes stale in standbys. If the standby is later promoted
-		 * and runs VACUUM, it will skip updating individual free space
-		 * figures for pages that became all-visible (or all-frozen,
-		 * depending on the vacuum mode), which is troublesome when
-		 * FreeSpaceMapVacuum propagates too optimistic free space values to
-		 * upper FSM layers; later inserters try to use such pages only to
-		 * find out that they are unusable. This can cause long stalls when
-		 * there are many such pages.
-		 *
-		 * Forestall those problems by updating FSM's idea about a page that
-		 * is becoming all-visible or all-frozen.
-		 *
-		 * Do this regardless of a full-page image being applied, since the
-		 * FSM data is not in the page anyway.
-		 */
-		if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
-			XLogRecordPageWithFreeSpace(rlocator, blkno, space);
-	}
-
-	/*
-	 * Even if we skipped the heap page update due to the LSN interlock, it's
-	 * still safe to update the visibility map. Any WAL record that clears
-	 * the visibility map bit does so before checking the page LSN, so any
-	 * bits that need to be cleared will still be cleared.
-	 */
-	if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
-									  &vmbuffer) == BLK_NEEDS_REDO)
-	{
-		Page		vmpage = BufferGetPage(vmbuffer);
-		Relation	reln;
-		uint8		vmbits;
-
-		/* initialize the page if it was read as zeros */
-		if (PageIsNew(vmpage))
-			PageInit(vmpage, BLCKSZ, 0);
-
-		/* remove VISIBILITYMAP_XLOG_* */
-		vmbits = xlrec->flags & VISIBILITYMAP_VALID_BITS;
-
-		/*
-		 * XLogReadBufferForRedoExtended locked the buffer. But
-		 * visibilitymap_set will handle locking itself.
-		 */
-		LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
-
-		reln = CreateFakeRelcacheEntry(rlocator);
-
-		visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
-						  xlrec->snapshotConflictHorizon, vmbits);
-
-		ReleaseBuffer(vmbuffer);
-		FreeFakeRelcacheEntry(reln);
-	}
-	else if (BufferIsValid(vmbuffer))
-		UnlockReleaseBuffer(vmbuffer);
-}
-
 /*
  * Given an "infobits" field from an XLog record, set the correct bits in the
  * given infomask and infomask2 for the tuple touched by the record.
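The header comment of the removed function spells out the invariant that still has to hold in the surviving replay paths: a visibility map bit may only be set while the heap page's PD_ALL_VISIBLE flag is also set, never the other way around. As a hypothetical debug aid (not part of this commit; the helper name is invented), the one-way implication could be written as:

/*
 * Illustration only: the invariant from the removed header comment.
 * If any VM bit is set, PD_ALL_VISIBLE must be set too; otherwise a
 * later page modification would fail to clear the VM bit. The reverse
 * (flag set, VM bits clear) is merely conservative and harmless.
 */
static inline void
assert_vm_heap_consistency(Page heappage, uint8 vmbits)
{
	Assert(vmbits == 0 || PageIsAllVisible(heappage));
}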
@@ -777,8 +641,8 @@ heap_xlog_multi_insert(XLogReaderState *record)
 	 *
 	 * During recovery, however, no concurrent writers exist. Therefore,
 	 * updating the VM without holding the heap page lock is safe enough. This
-	 * same approach is taken when replaying xl_heap_visible records (see
-	 * heap_xlog_visible()).
+	 * same approach is taken when replaying XLOG_HEAP2_PRUNE* records (see
+	 * heap_xlog_prune_freeze()).
 	 */
 	if ((xlrec->flags & XLH_INSERT_ALL_FROZEN_SET) &&
 		XLogReadBufferForRedoExtended(record, 1, RBM_ZERO_ON_ERROR, false,
@@ -790,11 +654,11 @@ heap_xlog_multi_insert(XLogReaderState *record)
 		if (PageIsNew(vmpage))
 			PageInit(vmpage, BLCKSZ, 0);
 
-		visibilitymap_set_vmbits(blkno,
-								 vmbuffer,
-								 VISIBILITYMAP_ALL_VISIBLE |
-								 VISIBILITYMAP_ALL_FROZEN,
-								 rlocator);
+		visibilitymap_set(blkno,
+						  vmbuffer,
+						  VISIBILITYMAP_ALL_VISIBLE |
+						  VISIBILITYMAP_ALL_FROZEN,
+						  rlocator);
 
 		Assert(BufferIsDirty(vmbuffer));
 		PageSetLSN(vmpage, lsn);
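Both VM updates above lean on the comment's reasoning that WAL replay has no concurrent writers. A guard making that assumption explicit might look like the following sketch; it is illustrative only, since the commit relies on the recovery context implicitly:

	/* Illustration: the no-lock VM update is only safe during replay. */
	Assert(InRecovery);
	visibilitymap_set(blkno, vmbuffer,
					  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN,
					  rlocator);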
@@ -1375,9 +1239,6 @@ heap2_redo(XLogReaderState *record)
 		case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP:
 			heap_xlog_prune_freeze(record);
 			break;
-		case XLOG_HEAP2_VISIBLE:
-			heap_xlog_visible(record);
-			break;
 		case XLOG_HEAP2_MULTI_INSERT:
 			heap_xlog_multi_insert(record);
 			break;