Set the default vm_memory_high_watermark to 0.6 (#12161)
The default of 0.4 was very conservative even when it was
set years ago. Since then:
- we moved to CQv2, which has much more predictable memory usage than (non-lazy) CQv1 used to
- we removed CQ mirroring, which caused large sudden memory spikes in some situations
- we removed the option to store message payload in memory in quorum queues

For the past two years or so, we've been running all our internal tests and benchmarks
using the value of 0.8 with no OOM kills at all (note: we do this on
Kubernetes, where the Cluster Operator overrides the available memory,
leaving some additional headroom, but effectively we are still using more than
0.6 of the memory).
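
Operators who prefer the previous, more conservative threshold can still set it
explicitly in rabbitmq.conf; a minimal sketch using the
vm_memory_high_watermark.relative key from the example file updated below
(0.4 restores the pre-change default):

    ## Memory-based Flow Control threshold, overriding the new 0.6 default
    vm_memory_high_watermark.relative = 0.4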
mkuratczyk committed Aug 29, 2024
1 parent b6e8586 commit 8a03975
Showing 6 changed files with 7 additions and 7 deletions.
2 changes: 1 addition & 1 deletion deps/rabbit/BUILD.bazel
@@ -34,7 +34,7 @@ _APP_ENV = """[
{ssl_listeners, []},
{num_ssl_acceptors, 10},
{ssl_options, []},
-{vm_memory_high_watermark, 0.4},
+{vm_memory_high_watermark, 0.6},
{vm_memory_calculation_strategy, rss},
{disk_free_limit, 50000000}, %% 50MB
{backing_queue_module, rabbit_variable_queue},
2 changes: 1 addition & 1 deletion deps/rabbit/Makefile
@@ -14,7 +14,7 @@ define PROJECT_ENV
{ssl_listeners, []},
{num_ssl_acceptors, 10},
{ssl_options, []},
-{vm_memory_high_watermark, 0.4},
+{vm_memory_high_watermark, 0.6},
{vm_memory_calculation_strategy, rss},
{disk_free_limit, 50000000}, %% 50MB
{backing_queue_module, rabbit_variable_queue},
2 changes: 1 addition & 1 deletion deps/rabbit/docs/rabbitmq.conf.example
@@ -382,7 +382,7 @@

## Memory-based Flow Control threshold.
##
-# vm_memory_high_watermark.relative = 0.4
+# vm_memory_high_watermark.relative = 0.6

## Alternatively, we can set a limit (in bytes) of RAM used by the node.
##
2 changes: 1 addition & 1 deletion deps/rabbit/priv/schema/rabbit.schema
@@ -1103,7 +1103,7 @@ end}.

%% Memory-based Flow Control threshold.
%%
-%% {vm_memory_high_watermark, 0.4},
+%% {vm_memory_high_watermark, 0.6},

%% Alternatively, we can set a limit (in bytes) of RAM used by the node.
%%
2 changes: 1 addition & 1 deletion deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl
@@ -118,4 +118,4 @@ set_and_verify_vm_memory_high_watermark_absolute(MemLimit0) ->
_ ->
ct:fail("Expected memory high watermark to be ~tp but it was ~tp", [Interpreted, MemLimit])
end,
-vm_memory_monitor:set_vm_memory_high_watermark(0.4).
+vm_memory_monitor:set_vm_memory_high_watermark(0.6).
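
The suites here adjust the threshold at runtime through vm_memory_monitor. As a
minimal sketch (assuming a running node where the vm_memory_monitor process is
started, as in these tests, and assuming get_vm_memory_high_watermark/0 is
exported alongside the setter used above), the same can be done from an Erlang
shell on the node:

    %% inspect the currently effective threshold (assumed getter)
    vm_memory_monitor:get_vm_memory_high_watermark().
    %% move it to the new default of 0.6, as the test teardown now does
    vm_memory_monitor:set_vm_memory_high_watermark(0.6).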
4 changes: 2 additions & 2 deletions deps/rabbitmq_mqtt/test/shared_SUITE.erl
@@ -1417,7 +1417,7 @@ block(Config) ->
puback_timeout = publish_qos1_timeout(C, Topic, <<"Still blocked">>, 1000),

%% Unblock
-rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.4]),
+rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.6]),
ok = expect_publishes(C, Topic, [<<"Not blocked yet">>,
<<"Now blocked">>,
<<"Still blocked">>]),
@@ -1458,7 +1458,7 @@ block_only_publisher(Config) ->
?assertEqual(puback_timeout, publish_qos1_timeout(Con, Topic, <<"from Con 2">>, 500)),
?assertEqual(pong, emqtt:ping(Sub)),

-rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.4]),
+rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.6]),
%% Let it unblock
timer:sleep(100),

