
Commit 35528be

Merge pull request #15063 from rockwotj/memset
2 parents b58d8bb + b7846d7 commit 35528be


4 files changed (+86 / -22 lines)


src/v/wasm/allocator.cc

Lines changed: 5 additions & 1 deletion
@@ -20,6 +20,7 @@
 
 #include <sys/mman.h>
 
+#include <span>
 #include <stdexcept>
 #include <unistd.h>
 
@@ -31,6 +32,7 @@ heap_allocator::heap_allocator(config c) {
     for (size_t i = 0; i < c.num_heaps; ++i) {
         auto buffer = ss::allocate_aligned_buffer<uint8_t>(
           _max_size, page_size);
+        std::memset(buffer.get(), 0, _max_size);
         _memory_pool.emplace_back(std::move(buffer), _max_size);
     }
 }
@@ -48,7 +50,8 @@ std::optional<heap_memory> heap_allocator::allocate(request req) {
     return front;
 }
 
-void heap_allocator::deallocate(heap_memory m) {
+void heap_allocator::deallocate(heap_memory m, size_t used_amount) {
+    std::memset(m.data.get(), 0, used_amount);
     _memory_pool.push_back(std::move(m));
 }
 
@@ -97,6 +100,7 @@ stack_memory stack_allocator::allocate(size_t size) {
     if (_tracking_enabled) {
         _live_stacks.emplace(mem.bounds());
     }
+    std::memset(mem.bounds().bottom, 0, mem.size());
     return mem;
 }
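
Taken together, these memsets maintain one invariant: every heap or stack buffer the allocator hands out is zero-filled, and when a heap buffer comes back only the prefix the caller actually used is scrubbed again. A minimal standalone sketch of that idea (toy std::vector buffers, not the Redpanda/Seastar types) could look like this:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

// Sketch only: zero-on-construct plus zero-only-what-was-used-on-return.
class toy_pool {
public:
    toy_pool(size_t buffer_size, size_t count) {
        for (size_t i = 0; i < count; ++i) {
            // Zero-filled up front, mirroring the heap_allocator constructor.
            _pool.emplace_back(buffer_size, uint8_t{0});
        }
    }

    // Buffers leave the pool already zeroed.
    std::vector<uint8_t> take() {
        auto buf = std::move(_pool.back());
        _pool.pop_back();
        return buf;
    }

    // Only the prefix the caller dirtied is re-zeroed; the untouched tail is
    // still zero from construction or from the previous give().
    void give(std::vector<uint8_t> buf, size_t used_amount) {
        std::memset(buf.data(), 0, used_amount);
        _pool.push_back(std::move(buf));
    }

private:
    std::vector<std::vector<uint8_t>> _pool;
};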

src/v/wasm/allocator.h

Lines changed: 9 additions & 2 deletions
@@ -72,13 +72,19 @@ class heap_allocator {
 
     /**
      * Allocate heap memory by taking a memory instance from the pool.
+     *
+     * Memory returned from this method will be zero-filled.
      */
     std::optional<heap_memory> allocate(request);
 
     /**
      * Deallocate heap memory by returning a memory instance to the pool.
+     *
+     * used_amount specifies how much of the memory was actually used and
+     * therefore needs to be zeroed out. This way, if only a portion of a
+     * large memory space was used, we don't have to touch all the bytes.
      */
-    void deallocate(heap_memory);
+    void deallocate(heap_memory, size_t used_amount);
 
     /**
      * The maximum size of a heap_memory that can be allocated.
@@ -143,7 +149,8 @@ class stack_memory {
 // Stack memory must be page-aligned and a multiple of page size.
 // Some architectures require this alignment for stacks. Additionally, we
 // always allocate a guard page at the bottom, which is unprotected when memory
-// is "deallocated".
+// is "deallocated". Lastly, memory returned from this allocator is always
+// zero-filled as assumed by the Wasm VM.
 //
 // The allocator also supports the ability to query if the current stack being
 // used has been allocated from this allocator and returns the bounds. This is
src/v/wasm/tests/wasm_allocator_test.cc

Lines changed: 55 additions & 3 deletions
@@ -16,11 +16,14 @@
 #include <gtest/gtest.h>
 
 #include <limits>
+#include <span>
 #include <stdexcept>
 #include <unistd.h>
 
 namespace wasm {
 
+using ::testing::Optional;
+
 TEST(HeapAllocatorParamsTest, SizeIsAligned) {
     size_t page_size = ::getpagesize();
     heap_allocator allocator(heap_allocator::config{
@@ -89,13 +92,41 @@ TEST(HeapAllocatorTest, CanReturnMemoryToThePool) {
     EXPECT_FALSE(mem.has_value());
     mem = std::move(allocated.back());
     allocated.pop_back();
-    allocator.deallocate(std::move(*mem));
+    allocator.deallocate(std::move(*mem), /*used_amount=*/0);
     mem = allocator.allocate(req);
     EXPECT_TRUE(mem.has_value());
     mem = allocator.allocate(req);
     EXPECT_FALSE(mem.has_value());
 }
 
+MATCHER(HeapIsZeroed, "is zeroed") {
+    std::span<uint8_t> d = {arg.data.get(), arg.size};
+    for (auto e : d) {
+        if (e != 0) {
+            return false;
+        }
+    }
+    return true;
+}
+
+TEST(HeapAllocatorTest, MemoryIsZeroFilled) {
+    size_t page_size = ::getpagesize();
+    heap_allocator allocator(heap_allocator::config{
+      .heap_memory_size = page_size,
+      .num_heaps = 1,
+    });
+    heap_allocator::request req{.minimum = page_size, .maximum = page_size};
+    auto allocated = allocator.allocate(req);
+    ASSERT_TRUE(allocated.has_value());
+    EXPECT_THAT(allocated, Optional(HeapIsZeroed()));
+    std::fill_n(allocated->data.get(), 4, 1);
+    allocator.deallocate(*std::move(allocated), 4);
+
+    allocated = allocator.allocate(req);
+    ASSERT_TRUE(allocated.has_value());
+    EXPECT_THAT(allocated, Optional(HeapIsZeroed()));
+}
+
 TEST(StackAllocatorParamsTest, TrackingCanBeEnabled) {
     stack_allocator allocator(stack_allocator::config{
       .tracking_enabled = true,
@@ -117,8 +148,6 @@ TEST(StackAllocatorTest, CanAllocateOne) {
     EXPECT_EQ(stack.bounds().top - stack.bounds().bottom, page_size * 4);
 }
 
-using ::testing::Optional;
-
 TEST(StackAllocatorTest, CanLookupMemory) {
     stack_allocator allocator(stack_allocator::config{
       .tracking_enabled = true,
@@ -164,4 +193,27 @@ TEST(StackAllocatorTest, CanReturnMemoryToThePool) {
     EXPECT_EQ(stack.bounds(), bounds);
 }
 
+MATCHER(StackIsZeroed, "is zeroed") {
+    std::span<uint8_t> d = {arg.bounds().bottom, arg.size()};
+    for (auto e : d) {
+        if (e != 0) {
+            return false;
+        }
+    }
+    return true;
+}
+
+TEST(StackAllocatorTest, MemoryIsZeroFilled) {
+    stack_allocator allocator(stack_allocator::config{
+      .tracking_enabled = true,
+    });
+    size_t page_size = ::getpagesize();
+    auto stack = allocator.allocate(page_size * 4);
+    EXPECT_THAT(stack, StackIsZeroed());
+    std::fill_n(stack.bounds().bottom, 4, 1);
+    allocator.deallocate(std::move(stack));
+    stack = allocator.allocate(page_size * 4);
+    EXPECT_THAT(stack, StackIsZeroed());
+}
+
 } // namespace wasm
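
The two MATCHERs scan the buffer byte by byte; an equivalent check (a sketch, not part of the commit) could delegate the loop to the standard library:

#include <algorithm>
#include <cstdint>
#include <span>

// Returns true when every byte in a contiguous range is zero.
bool is_zeroed(std::span<const uint8_t> bytes) {
    return std::all_of(
      bytes.begin(), bytes.end(), [](uint8_t b) { return b == 0; });
}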

src/v/wasm/wasmtime.cc

Lines changed: 17 additions & 16 deletions
@@ -1311,14 +1311,11 @@ wasmtime_error_t* wasmtime_runtime::allocate_stack_memory(
     auto stack = _stack_allocator.local().allocate(size);
     struct vm_stack {
         stack_memory underlying;
-        ss::noncopyable_function<void(stack_memory)> reclaimer;
+        wasm::stack_allocator* allocator;
     };
     memory_ret->env = new vm_stack{
       .underlying = std::move(stack),
-      .reclaimer =
-        [this](stack_memory mem) {
-            _stack_allocator.local().deallocate(std::move(mem));
-        },
+      .allocator = &_stack_allocator.local(),
     };
     memory_ret->get_stack_memory = [](void* env, size_t* len_ret) -> uint8_t* {
         auto* mem = static_cast<vm_stack*>(env);
@@ -1327,7 +1324,7 @@ wasmtime_error_t* wasmtime_runtime::allocate_stack_memory(
     };
     memory_ret->finalizer = [](void* env) {
         auto* mem = static_cast<vm_stack*>(env);
-        mem->reclaimer(std::move(mem->underlying));
+        mem->allocator->deallocate(std::move(mem->underlying));
         // NOLINTNEXTLINE(cppcoreguidelines-owning-memory)
         delete mem;
     };
@@ -1368,37 +1365,41 @@ wasmtime_error_t* wasmtime_runtime::allocate_heap_memory(
     }
     struct linear_memory {
         heap_memory underlying;
-        ss::noncopyable_function<void(heap_memory)> reclaimer;
+        size_t used_memory;
+        wasm::heap_allocator* allocator;
     };
     memory_ret->env = new linear_memory{
       .underlying = *std::move(memory),
-      .reclaimer =
-        [this](heap_memory mem) {
-            _heap_allocator.local().deallocate(std::move(mem));
-        },
+      .used_memory = minimum,
+      .allocator = &_heap_allocator.local(),
     };
     memory_ret->finalizer = [](void* env) {
         auto* mem = static_cast<linear_memory*>(env);
-        mem->reclaimer(std::move(mem->underlying));
+        mem->allocator->deallocate(
+          std::move(mem->underlying), /*used_amount=*/mem->used_memory);
         // NOLINTNEXTLINE(cppcoreguidelines-owning-memory)
         delete mem;
     };
     memory_ret->get_memory =
       // NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
       [](void* env, size_t* byte_size, size_t* max_byte_size) {
           auto* mem = static_cast<linear_memory*>(env);
-          *byte_size = mem->underlying.size;
+          *byte_size = mem->used_memory;
           *max_byte_size = mem->underlying.size;
           return mem->underlying.data.get();
       };
     memory_ret->grow_memory =
       [](void* env, size_t new_size) -> wasmtime_error_t* {
          auto* mem = static_cast<linear_memory*>(env);
-         if (new_size == mem->underlying.size) {
+         if (new_size <= mem->underlying.size) {
+             mem->used_memory = std::max(new_size, mem->used_memory);
              return nullptr;
          }
-         return wasmtime_error_new(
-           "linear memories are fixed size and ungrowable");
+         auto msg = ss::format(
+           "unable to grow memory past {} to {}",
+           human::bytes(double(mem->underlying.size)),
+           human::bytes(double(new_size)));
+         return wasmtime_error_new(msg.c_str());
      };
     return nullptr;
 }
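
With this change the host hooks distinguish the bytes the guest has committed (used_memory, reported as byte_size) from the fixed reservation (underlying.size, reported as max_byte_size): grow requests within the reservation simply raise the high-water mark, anything larger fails, and the finalizer passes that mark to deallocate so only the grown-into prefix is re-zeroed. A standalone toy model of the grow policy (not the wasmtime or Redpanda API) might be:

#include <algorithm>
#include <cstddef>
#include <optional>
#include <string>

// Toy stand-in for the linear_memory bookkeeping above: a fixed reservation
// plus a high-water mark of how far the guest has grown into it.
struct toy_linear_memory {
    size_t reserved; // like underlying.size, fixed at allocation time
    size_t used;     // like used_memory, starts at the module's minimum
};

// Mirrors the grow_memory contract: nullopt on success, an error message on
// failure (the real hook returns a wasmtime_error_t* instead).
std::optional<std::string> grow(toy_linear_memory& mem, size_t new_size) {
    if (new_size <= mem.reserved) {
        // Growing within the reservation only moves the high-water mark.
        mem.used = std::max(new_size, mem.used);
        return std::nullopt;
    }
    return "unable to grow memory past " + std::to_string(mem.reserved)
           + " to " + std::to_string(new_size);
}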
