Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
8c0c8ce
docs: update shared_buffers guidance with pg_buffercache
sebastianwebber Feb 4, 2026
6df1d9c
docs: improve shared_buffers and effective_cache_size docs
sebastianwebber Feb 4, 2026
a01ea49
docs: enhance work_mem documentation with OOM warnings
sebastianwebber Feb 4, 2026
6bec0ef
docs: update maintenance_work_mem with version-specific info
sebastianwebber Feb 4, 2026
1f494b7
docs: update min_wal_size and max_wal_size guidance
sebastianwebber Feb 4, 2026
33ab67a
docs: improve checkpoint_completion_target with example
sebastianwebber Feb 4, 2026
3a743a5
feat: optimize wal_buffers for write-heavy workloads
sebastianwebber Feb 4, 2026
0044109
docs: enhance network parameters with security and pooling
sebastianwebber Feb 4, 2026
189251d
feat: optimize min/max_wal_size per workload profile
sebastianwebber Feb 4, 2026
11b1034
feat: optimize random_page_cost for DW analytical workloads
sebastianwebber Feb 4, 2026
281e49e
docs: improve effective_io_concurrency documentation
sebastianwebber Feb 4, 2026
4c202ef
docs: improve io_method documentation with usage guidance
sebastianwebber Feb 4, 2026
227b6d9
docs: improve io_workers documentation with sizing guidance
sebastianwebber Feb 4, 2026
314d816
docs: improve maintenance_io_concurrency documentation
sebastianwebber Feb 5, 2026
29b6c90
docs: improve io_combine_limit and io_max_combine_limit docs
sebastianwebber Feb 5, 2026
c51e32f
docs: improve io_max_concurrency documentation
sebastianwebber Feb 5, 2026
510dfe2
docs: improve file_copy_method documentation
sebastianwebber Feb 5, 2026
241f0a4
docs: improve max_worker_processes documentation
sebastianwebber Feb 5, 2026
582b6ef
docs: improve max_parallel_workers_per_gather documentation
sebastianwebber Feb 5, 2026
d49b238
docs: improve max_parallel_workers documentation
sebastianwebber Feb 5, 2026
0850b68
feat: make worker parameters dynamic based on CPU and profile
sebastianwebber Feb 5, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 55 additions & 3 deletions pkg/category/checkpoint.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package category
import (
"github.com/pgconfig/api/pkg/input"
"github.com/pgconfig/api/pkg/input/bytes"
"github.com/pgconfig/api/pkg/input/profile"
)

// CheckpointCfg is the checkpoint related category
Expand All @@ -19,11 +20,62 @@ type CheckpointCfg struct {
// wal_buffers is left on automatic tuning (-1) by default; see this commit and the
// comments in the check_wal_buffers function: https://github.com/postgres/postgres/commit/2594cf0e8c04406ffff19b1651c5a406d376657c#diff-0cf91b3df8a1bbd72140d10a0b4541b5R4915
// NewCheckpointCfg creates a new checkpoint configuration, tuning
// wal_buffers and the min/max WAL size according to the workload profile
// and available memory described by in.
func NewCheckpointCfg(in input.Input) *CheckpointCfg {
	// Estimate shared_buffers to decide whether wal_buffers needs a fixed
	// value. Mirrors the calculation in memory.go:
	// totalRAM * MaxMemoryProfilePercent * SharedBufferPerc (25%).
	maxMemoryProfilePercent := map[profile.Profile]float32{
		profile.Web:     1,
		profile.OLTP:    1,
		profile.DW:      1,
		profile.Mixed:   0.5,
		profile.Desktop: 0.2,
	}
	totalRAM := float32(in.TotalRAM) * maxMemoryProfilePercent[in.Profile]
	sharedBuffers := bytes.Byte(totalRAM * 0.25) // SharedBufferPerc = 0.25

	// wal_buffers: -1 keeps PostgreSQL's automatic tuning. DW (Data
	// Warehouse) workloads benefit from larger wal_buffers for write-heavy
	// operations; OLTP with large shared_buffers (>8GB) indicates high
	// concurrent write activity.
	walBuffers := bytes.Byte(-1)
	switch {
	case in.Profile == profile.DW:
		walBuffers = 64 * bytes.MB
	case in.Profile == profile.OLTP && sharedBuffers > 8*bytes.GB:
		walBuffers = 32 * bytes.MB
	}

	// WAL size tuning per profile; the recommendation is to hold roughly
	// one hour of WAL for most systems.
	minWALSize := bytes.Byte(2 * bytes.GB)
	maxWALSize := bytes.Byte(3 * bytes.GB)

	switch in.Profile {
	case profile.DW:
		// Data Warehouse: write-heavy with batch jobs.
		minWALSize = 4 * bytes.GB
		maxWALSize = 16 * bytes.GB
	case profile.OLTP:
		// OLTP: frequent transactions.
		minWALSize = 2 * bytes.GB
		maxWALSize = 8 * bytes.GB
	case profile.Web:
		// Web: moderate writes.
		minWALSize = 1 * bytes.GB
		maxWALSize = 4 * bytes.GB
	case profile.Mixed:
		// Mixed: balanced workload.
		minWALSize = 2 * bytes.GB
		maxWALSize = 6 * bytes.GB
	case profile.Desktop:
		// Desktop: low activity.
		minWALSize = 512 * bytes.MB
		maxWALSize = 2 * bytes.GB
	}

	return &CheckpointCfg{
		MinWALSize:                 minWALSize,
		MaxWALSize:                 maxWALSize,
		CheckpointCompletionTarget: 0.9,
		WALBuffers:                 walBuffers,
		CheckpointSegments:         16,
	}
}
Expand Down
44 changes: 40 additions & 4 deletions pkg/category/worker.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
package category

import "github.com/pgconfig/api/pkg/input"
import (
"github.com/pgconfig/api/pkg/input"
"github.com/pgconfig/api/pkg/input/profile"
)

// WorkerCfg is the main workers category
type WorkerCfg struct {
Expand All @@ -11,9 +14,42 @@ type WorkerCfg struct {

// NewWorkerCfg creates a new Worker Configuration
// NewWorkerCfg creates a new Worker Configuration, scaling the background
// and parallel worker limits with the machine's CPU count and the
// workload profile.
func NewWorkerCfg(in input.Input) *WorkerCfg {
	// max_worker_processes: at least 8 (PostgreSQL's default), scaling up
	// with the CPU count on larger machines.
	maxWorkerProcesses := max(8, in.TotalCPU)

	// max_parallel_workers: at least 8, or the CPU count (effectively
	// capped by max_worker_processes at runtime).
	maxParallelWorkers := max(8, in.TotalCPU)

	// max_parallel_workers_per_gather varies by profile:
	// OLTP/transactional workloads keep the default (2), while
	// DW/analytical workloads benefit from higher per-query parallelism.
	maxParallelWorkerPerGather := 2
	if in.Profile == profile.DW {
		// DW: use half of the CPU cores, capped by max_parallel_workers
		// and floored at the default of 2.
		maxParallelWorkerPerGather = min(in.TotalCPU/2, maxParallelWorkers)
		if maxParallelWorkerPerGather < 2 {
			maxParallelWorkerPerGather = 2
		}
	}

	return &WorkerCfg{
		MaxWorkerProcesses:         maxWorkerProcesses,
		MaxParallelWorkerPerGather: maxParallelWorkerPerGather,
		MaxParallelWorkers:         maxParallelWorkers,
	}
}

// max returns the larger of the two ints a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}

// min returns the smaller of the two ints a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
135 changes: 135 additions & 0 deletions pkg/category/worker_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
package category

import (
"testing"

"github.com/pgconfig/api/pkg/input"
"github.com/pgconfig/api/pkg/input/bytes"
"github.com/pgconfig/api/pkg/input/profile"
)

// TestNewWorkerCfg verifies that the worker-related settings scale with the
// CPU count and workload profile supplied in the input.
func TestNewWorkerCfg(t *testing.T) {
	cases := []struct {
		name       string
		prof       profile.Profile
		cpus       int
		wantProcs  int // expected max_worker_processes
		wantTotal  int // expected max_parallel_workers
		wantGather int // expected max_parallel_workers_per_gather
		desc       string
	}{
		{
			name:       "Desktop with 4 cores",
			prof:       profile.Desktop,
			cpus:       4,
			wantProcs:  8,
			wantTotal:  8,
			wantGather: 2,
			desc:       "Small system uses minimum of 8 for worker processes",
		},
		{
			name:       "Web with 8 cores",
			prof:       profile.Web,
			cpus:       8,
			wantProcs:  8,
			wantTotal:  8,
			wantGather: 2,
			desc:       "Web keeps default parallel workers per gather",
		},
		{
			name:       "OLTP with 16 cores",
			prof:       profile.OLTP,
			cpus:       16,
			wantProcs:  16,
			wantTotal:  16,
			wantGather: 2,
			desc:       "OLTP scales workers with CPU but keeps per-gather at 2",
		},
		{
			name:       "Mixed with 16 cores",
			prof:       profile.Mixed,
			cpus:       16,
			wantProcs:  16,
			wantTotal:  16,
			wantGather: 2,
			desc:       "Mixed workload uses default parallel workers per gather",
		},
		{
			name:       "DW with 8 cores",
			prof:       profile.DW,
			cpus:       8,
			wantProcs:  8,
			wantTotal:  8,
			wantGather: 4,
			desc:       "DW uses CPU/2 for parallel workers per gather",
		},
		{
			name:       "DW with 16 cores",
			prof:       profile.DW,
			cpus:       16,
			wantProcs:  16,
			wantTotal:  16,
			wantGather: 8,
			desc:       "DW scales parallel workers per gather with CPU",
		},
		{
			name:       "DW with 32 cores",
			prof:       profile.DW,
			cpus:       32,
			wantProcs:  32,
			wantTotal:  32,
			wantGather: 16,
			desc:       "DW with many cores gets high parallelism",
		},
		{
			name:       "DW with 2 cores ensures minimum",
			prof:       profile.DW,
			cpus:       2,
			wantProcs:  8,
			wantTotal:  8,
			wantGather: 2,
			desc:       "DW with few cores still gets minimum values",
		},
		{
			name:       "Large system with 64 cores",
			prof:       profile.OLTP,
			cpus:       64,
			wantProcs:  64,
			wantTotal:  64,
			wantGather: 2,
			desc:       "Large OLTP system scales workers but not per-gather",
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Only Profile and TotalCPU influence the worker settings; the
			// remaining fields are fixed, representative values.
			cfg := NewWorkerCfg(input.Input{
				OS:              "linux",
				Arch:            "amd64",
				Profile:         tc.prof,
				TotalCPU:        tc.cpus,
				TotalRAM:        16 * bytes.GB,
				MaxConnections:  100,
				DiskType:        "SSD",
				PostgresVersion: 16.0,
			})

			if got := cfg.MaxWorkerProcesses; got != tc.wantProcs {
				t.Errorf("%s: expected max_worker_processes = %d, got %d",
					tc.desc, tc.wantProcs, got)
			}

			if got := cfg.MaxParallelWorkers; got != tc.wantTotal {
				t.Errorf("%s: expected max_parallel_workers = %d, got %d",
					tc.desc, tc.wantTotal, got)
			}

			if got := cfg.MaxParallelWorkerPerGather; got != tc.wantGather {
				t.Errorf("%s: expected max_parallel_workers_per_gather = %d, got %d",
					tc.desc, tc.wantGather, got)
			}
		})
	}
}
Loading