Skip to content

Commit 041e8b9

Browse files
committed
Get rid of our dependency on type "long" for memory size calculations.
Consistently use "Size" (or size_t, or in some places int64 or double) as the type for variables holding memory allocation sizes.

In most places variables' data types were fine already, but we had an ancient habit of computing bytes from kilobytes-units GUCs with code like "work_mem * 1024L". That risks overflow on Win64, where they did not make "long" as wide as "size_t". We worked around that by restricting such GUCs' ranges, so you couldn't set work_mem et al higher than 2GB on Win64. This patch removes that restriction, after replacing such calculations with "work_mem * (Size) 1024" or variants of that.

It should be noted that this patch was constructed by searching outwards from the GUCs that have MAX_KILOBYTES as upper limit. So I can't positively guarantee there are no other places doing memory-size arithmetic in int or long variables. I do, however, feel pretty confident that increasing MAX_KILOBYTES on Win64 is safe now. Also, nothing in our code should be dealing in multiple-gigabyte allocations without authorization from a relevant GUC, so it seems pretty likely that this search caught everything that could be at risk of overflow.

Author: Vladlen Popolitov <v.popolitov@postgrespro.ru>
Co-authored-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://postgr.es/m/1a01f0-66ec2d80-3b-68487680@27595217
1 parent f8d8581 commit 041e8b9

File tree

20 files changed

+50
-46
lines changed

20 files changed

+50
-46
lines changed

src/backend/access/gin/ginfast.c

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -39,7 +39,7 @@
3939
int gin_pending_list_limit = 0;
4040

4141
#define GIN_PAGE_FREESIZE \
42-
( BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) )
42+
( (Size) BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) )
4343

4444
typedef struct KeyArray
4545
{
@@ -456,7 +456,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
456456
* ginInsertCleanup() should not be called inside our CRIT_SECTION.
457457
*/
458458
cleanupSize = GinGetPendingListCleanupSize(index);
459-
if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * 1024L)
459+
if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * (Size) 1024)
460460
needCleanup = true;
461461

462462
UnlockReleaseBuffer(metabuffer);
@@ -795,7 +795,7 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
795795
blknoFinish;
796796
bool cleanupFinish = false;
797797
bool fsm_vac = false;
798-
Size workMemory;
798+
int workMemory;
799799

800800
/*
801801
* We would like to prevent concurrent cleanup process. For that we will
@@ -901,7 +901,7 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
901901
*/
902902
if (GinPageGetOpaque(page)->rightlink == InvalidBlockNumber ||
903903
(GinPageHasFullRow(page) &&
904-
(accum.allocatedMemory >= workMemory * 1024L)))
904+
accum.allocatedMemory >= workMemory * (Size) 1024))
905905
{
906906
ItemPointerData *list;
907907
uint32 nlist;

src/backend/access/gin/ginget.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -125,7 +125,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
125125
CompactAttribute *attr;
126126

127127
/* Initialize empty bitmap result */
128-
scanEntry->matchBitmap = tbm_create(work_mem * 1024L, NULL);
128+
scanEntry->matchBitmap = tbm_create(work_mem * (Size) 1024, NULL);
129129

130130
/* Null query cannot partial-match anything */
131131
if (scanEntry->isPartialMatch &&

src/backend/access/gin/gininsert.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -288,7 +288,7 @@ ginBuildCallback(Relation index, ItemPointer tid, Datum *values,
288288
values[i], isnull[i], tid);
289289

290290
/* If we've maxed out our available memory, dump everything to the index */
291-
if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L)
291+
if (buildstate->accum.allocatedMemory >= maintenance_work_mem * (Size) 1024)
292292
{
293293
ItemPointerData *list;
294294
Datum key;

src/backend/access/hash/hash.c

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -120,7 +120,7 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
120120
double reltuples;
121121
double allvisfrac;
122122
uint32 num_buckets;
123-
long sort_threshold;
123+
Size sort_threshold;
124124
HashBuildState buildstate;
125125

126126
/*
@@ -155,13 +155,13 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
155155
* one page. Also, "initial index size" accounting does not include the
156156
* metapage, nor the first bitmap page.
157157
*/
158-
sort_threshold = (maintenance_work_mem * 1024L) / BLCKSZ;
158+
sort_threshold = (maintenance_work_mem * (Size) 1024) / BLCKSZ;
159159
if (index->rd_rel->relpersistence != RELPERSISTENCE_TEMP)
160160
sort_threshold = Min(sort_threshold, NBuffers);
161161
else
162162
sort_threshold = Min(sort_threshold, NLocBuffer);
163163

164-
if (num_buckets >= (uint32) sort_threshold)
164+
if (num_buckets >= sort_threshold)
165165
buildstate.spool = _h_spoolinit(heap, index, num_buckets);
166166
else
167167
buildstate.spool = NULL;

src/backend/access/heap/vacuumlazy.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -2070,7 +2070,7 @@ lazy_vacuum(LVRelState *vacrel)
20702070
*/
20712071
threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
20722072
bypass = (vacrel->lpdead_item_pages < threshold &&
2073-
(TidStoreMemoryUsage(vacrel->dead_items) < (32L * 1024L * 1024L)));
2073+
TidStoreMemoryUsage(vacrel->dead_items) < 32 * 1024 * 1024);
20742074
}
20752075

20762076
if (bypass)
@@ -3037,7 +3037,7 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
30373037
*/
30383038

30393039
dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
3040-
dead_items_info->max_bytes = vac_work_mem * 1024L;
3040+
dead_items_info->max_bytes = vac_work_mem * (Size) 1024;
30413041
dead_items_info->num_items = 0;
30423042
vacrel->dead_items_info = dead_items_info;
30433043

src/backend/access/nbtree/nbtpage.c

Lines changed: 5 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -2953,7 +2953,7 @@ _bt_lock_subtree_parent(Relation rel, Relation heaprel, BlockNumber child,
29532953
void
29542954
_bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
29552955
{
2956-
int64 maxbufsize;
2956+
Size maxbufsize;
29572957

29582958
/*
29592959
* Don't bother with optimization in cleanup-only case -- we don't expect
@@ -2969,12 +2969,13 @@ _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
29692969
* int overflow here.
29702970
*/
29712971
vstate->bufsize = 256;
2972-
maxbufsize = (work_mem * 1024L) / sizeof(BTPendingFSM);
2973-
maxbufsize = Min(maxbufsize, INT_MAX);
2972+
maxbufsize = (work_mem * (Size) 1024) / sizeof(BTPendingFSM);
29742973
maxbufsize = Min(maxbufsize, MaxAllocSize / sizeof(BTPendingFSM));
2974+
/* BTVacState.maxbufsize has type int */
2975+
maxbufsize = Min(maxbufsize, INT_MAX);
29752976
/* Stay sane with small work_mem */
29762977
maxbufsize = Max(maxbufsize, vstate->bufsize);
2977-
vstate->maxbufsize = maxbufsize;
2978+
vstate->maxbufsize = (int) maxbufsize;
29782979

29792980
/* Allocate buffer, indicate that there are currently 0 pending pages */
29802981
vstate->pendingpages = palloc(sizeof(BTPendingFSM) * vstate->bufsize);

src/backend/commands/vacuumparallel.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -375,7 +375,7 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
375375
(nindexes_mwm > 0) ?
376376
maintenance_work_mem / Min(parallel_workers, nindexes_mwm) :
377377
maintenance_work_mem;
378-
shared->dead_items_info.max_bytes = vac_work_mem * 1024L;
378+
shared->dead_items_info.max_bytes = vac_work_mem * (size_t) 1024;
379379

380380
/* Prepare DSA space for dead items */
381381
dead_items = TidStoreCreateShared(shared->dead_items_info.max_bytes,

src/backend/executor/execUtils.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -326,7 +326,7 @@ CreateWorkExprContext(EState *estate)
326326
Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
327327

328328
/* choose the maxBlockSize to be no larger than 1/16 of work_mem */
329-
while (16 * maxBlockSize > work_mem * 1024L)
329+
while (maxBlockSize > work_mem * (Size) 1024 / 16)
330330
maxBlockSize >>= 1;
331331

332332
if (maxBlockSize < ALLOCSET_DEFAULT_INITSIZE)

src/backend/executor/nodeBitmapIndexscan.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -91,7 +91,7 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node)
9191
else
9292
{
9393
/* XXX should we use less than work_mem for this? */
94-
tbm = tbm_create(work_mem * 1024L,
94+
tbm = tbm_create(work_mem * (Size) 1024,
9595
((BitmapIndexScan *) node->ss.ps.plan)->isshared ?
9696
node->ss.ps.state->es_query_dsa : NULL);
9797
}

src/backend/executor/nodeBitmapOr.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -143,7 +143,7 @@ MultiExecBitmapOr(BitmapOrState *node)
143143
if (result == NULL) /* first subplan */
144144
{
145145
/* XXX should we use less than work_mem for this? */
146-
result = tbm_create(work_mem * 1024L,
146+
result = tbm_create(work_mem * (Size) 1024,
147147
((BitmapOr *) node->ps.plan)->isshared ?
148148
node->ps.state->es_query_dsa : NULL);
149149
}

0 commit comments

Comments (0)