@@ -5754,17 +5754,23 @@ log_heap_freeze(Relation reln, Buffer buffer,
  * being marked all-visible, and vm_buffer is the buffer containing the
  * corresponding visibility map block. Both should have already been modified
  * and dirtied.
+ *
+ * If checksums are enabled, we also add the heap_buffer to the chain to
+ * protect it from being torn.
  */
 XLogRecPtr
-log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer,
+log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
 				 TransactionId cutoff_xid)
 {
 	xl_heap_visible xlrec;
 	XLogRecPtr	recptr;
-	XLogRecData rdata[2];
+	XLogRecData rdata[3];
+
+	Assert(BufferIsValid(heap_buffer));
+	Assert(BufferIsValid(vm_buffer));
 
 	xlrec.node = rnode;
-	xlrec.block = block;
+	xlrec.block = BufferGetBlockNumber(heap_buffer);
 	xlrec.cutoff_xid = cutoff_xid;
 
 	rdata[0].data = (char *) &xlrec;
@@ -5778,6 +5784,17 @@ log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer,
 	rdata[1].buffer_std = false;
 	rdata[1].next = NULL;
 
+	if (DataChecksumsEnabled())
+	{
+		rdata[1].next = &(rdata[2]);
+
+		rdata[2].data = NULL;
+		rdata[2].len = 0;
+		rdata[2].buffer = heap_buffer;
+		rdata[2].buffer_std = true;
+		rdata[2].next = NULL;
+	}
+
 	recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE, rdata);
 
 	return recptr;
@@ -6139,8 +6156,6 @@ static void
 heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 {
 	xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
-	Buffer		buffer;
-	Page		page;
 
 	/*
 	 * If there are any Hot Standby transactions running that have an xmin
@@ -6155,39 +6170,56 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 		ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, xlrec->node);
 
 	/*
-	 * Read the heap page, if it still exists. If the heap file has been
-	 * dropped or truncated later in recovery, we don't need to update the
-	 * page, but we'd better still update the visibility map.
+	 * If heap block was backed up, restore it. This can only happen with
+	 * checksums enabled.
 	 */
-	buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM, xlrec->block,
-									RBM_NORMAL);
-	if (BufferIsValid(buffer))
+	if (record->xl_info & XLR_BKP_BLOCK(1))
 	{
-		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
-
-		page = (Page) BufferGetPage(buffer);
+		Assert(DataChecksumsEnabled());
+		(void) RestoreBackupBlock(lsn, record, 1, false, false);
+	}
+	else
+	{
+		Buffer		buffer;
+		Page		page;
 
 		/*
-		 * We don't bump the LSN of the heap page when setting the visibility
-		 * map bit, because that would generate an unworkable volume of
-		 * full-page writes. This exposes us to torn page hazards, but since
-		 * we're not inspecting the existing page contents in any way, we
-		 * don't care.
-		 *
-		 * However, all operations that clear the visibility map bit *do* bump
-		 * the LSN, and those operations will only be replayed if the XLOG LSN
-		 * follows the page LSN. Thus, if the page LSN has advanced past our
-		 * XLOG record's LSN, we mustn't mark the page all-visible, because
-		 * the subsequent update won't be replayed to clear the flag.
+		 * Read the heap page, if it still exists. If the heap file has been
+		 * dropped or truncated later in recovery, we don't need to update the
+		 * page, but we'd better still update the visibility map.
 		 */
-		if (lsn > PageGetLSN(page))
+		buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM,
+										xlrec->block, RBM_NORMAL);
+		if (BufferIsValid(buffer))
 		{
-			PageSetAllVisible(page);
-			MarkBufferDirty(buffer);
-		}
+			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
-		/* Done with heap page. */
-		UnlockReleaseBuffer(buffer);
+			page = (Page) BufferGetPage(buffer);
+
+			/*
+			 * We don't bump the LSN of the heap page when setting the
+			 * visibility map bit (unless checksums are enabled, in which case
+			 * we must), because that would generate an unworkable volume of
+			 * full-page writes. This exposes us to torn page hazards, but
+			 * since we're not inspecting the existing page contents in any
+			 * way, we don't care.
+			 *
+			 * However, all operations that clear the visibility map bit *do*
+			 * bump the LSN, and those operations will only be replayed if the
+			 * XLOG LSN follows the page LSN. Thus, if the page LSN has
+			 * advanced past our XLOG record's LSN, we mustn't mark the page
+			 * all-visible, because the subsequent update won't be replayed to
+			 * clear the flag.
+			 */
+			if (lsn > PageGetLSN(page))
+			{
+				PageSetAllVisible(page);
+				MarkBufferDirty(buffer);
+			}
+
+			/* Done with heap page. */
+			UnlockReleaseBuffer(buffer);
+		}
 	}
 
 	/*
@@ -6218,7 +6250,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 	 * real harm is done; and the next VACUUM will fix it.
 	 */
 	if (lsn > PageGetLSN(BufferGetPage(vmbuffer)))
-		visibilitymap_set(reln, xlrec->block, lsn, vmbuffer,
+		visibilitymap_set(reln, xlrec->block, InvalidBuffer, lsn, vmbuffer,
 						  xlrec->cutoff_xid);
 
 	ReleaseBuffer(vmbuffer);