diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 568696333c25..bc9b115d279d 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -61,6 +61,7 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared); +static void log_heap_precheck(Relation reln, HeapTuple tp); #ifdef USE_ASSERT_CHECKING static void check_lock_if_inplace_updateable_rel(Relation relation, ItemPointer otid, @@ -9061,6 +9062,34 @@ log_heap_update(Relation reln, Buffer oldbuf, return recptr; } +/* + * Pre-check potential XLogRecord oversize. The XLogRecord will be created + * later, and its size will be checked. However, that check happens + * within a critical section, and in the event of failure, a core + * dump will be generated. + * That is undesirable, so to avoid it, we calculate the approximate + * xlog record size here and check it in advance. + * + * Size prediction is based on xlog update and xlog delete logic, and can + * be revised if it changes. For now, the buf size is limited by + * UINT16_MAX (Assert(regbuf->rdata_len <= UINT16_MAX) in xloginsert). + * + * Anyway, to accommodate some overhead, 1M is added to the predicted + * size (equivalently, subtracted from the limit). It seems like that's enough for now.
+ */ +static void +log_heap_precheck(Relation reln, HeapTuple tp) +{ +#define XLogRecordMaxOverhead ((uint32) (1024 * 1024)) + + if (tp && RelationIsLogicallyLogged(reln)) + { + uint32 data_len = tp->t_len - SizeofHeapTupleHeader; + + XLogPreCheckSize(data_len + XLogRecordMaxOverhead); + } +} + /* * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record * @@ -9178,6 +9207,7 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, *copy = true; tp = toast_flatten_tuple(tp, desc); } + log_heap_precheck(relation, tp); return tp; } @@ -9234,6 +9264,7 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, heap_freetuple(oldtup); } + log_heap_precheck(relation, key_tuple); return key_tuple; } diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index 58cb4b1b00c9..ffec1d7bcf2a 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -1062,6 +1062,26 @@ XLogCheckBufferNeedsBackup(Buffer buffer) return false; /* buffer does not need to be backed up */ } +/* + * Ensure that the XLogRecord is not too large. + * + * XLogReader machinery is only able to handle records up to a certain + * size (ignoring machine resource limitations), so make sure that we will + * not emit records larger than the sizes advertised to be supported. + */ +void +XLogPreCheckSize(uint32 data_len) +{ + if (data_len > XLogRecordMaxSize) + { + ereport(ERROR, + (errmsg_internal("oversized WAL record"), + errdetail_internal("WAL record would be %u bytes (of maximum " + "%u bytes).", + data_len, XLogRecordMaxSize))); + } +} + /* * Write a backup block if needed when we are setting a hint. Note that * this may be called for a variety of page types, not just heaps. 
diff --git a/src/include/access/xloginsert.h index d6a71415d4fd..7b40a824d9bd 100644 --- a/src/include/access/xloginsert.h +++ b/src/include/access/xloginsert.h @@ -54,6 +54,7 @@ extern void XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, extern void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len); extern void XLogResetInsertion(void); extern bool XLogCheckBufferNeedsBackup(Buffer buffer); +extern void XLogPreCheckSize(uint32 data_len); extern XLogRecPtr log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno, Page page, bool page_std);