Skip to content

Commit 731dbba

Browse files
committed
Unwrap optional
1 parent 40d054b commit 731dbba

File tree

1 file changed

+23
-25
lines changed

1 file changed

+23
-25
lines changed

src/server/tiered_storage.cc

Lines changed: 23 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -85,8 +85,7 @@ tiering::DiskSegment FromCoolItem(const PrimeValue::CoolItem& item) {
8585
// TODO(vlad): Maybe split into different accessors?
8686
// Do NOT enforce rules depending on dynamic runtime values as this is called
8787
// when scheduling stash and just before succeeding and is expected to return the same results
88-
optional<pair<size_t /*size*/, CompactObj::ExternalRep>> EstimateSerializedSize(
89-
const PrimeValue& pv) {
88+
pair<size_t /*size*/, CompactObj::ExternalRep> DetermineSerializationParams(const PrimeValue& pv) {
9089
switch (pv.ObjType()) {
9190
case OBJ_STRING:
9291
if (pv.IsInline())
@@ -95,8 +94,8 @@ optional<pair<size_t /*size*/, CompactObj::ExternalRep>> EstimateSerializedSize(
9594
case OBJ_HASH:
9695
if (pv.Encoding() == kEncodingListPack) {
9796
auto* lp = static_cast<uint8_t*>(pv.RObjPtr());
98-
size_t bytes = lpBytes(lp);
99-
bytes += lpLength(lp) * 2 * 4;
97+
size_t bytes = 4 + lpBytes(lp); // encoded length and data bytes
98+
bytes += lpLength(lp) * 2 * 4; // 4 bytes for encoded key/value lengths
10099
return std::make_pair(bytes, CompactObj::ExternalRep::SERIALIZED_MAP);
101100
}
102101
return {};
@@ -106,7 +105,7 @@ optional<pair<size_t /*size*/, CompactObj::ExternalRep>> EstimateSerializedSize(
106105
}
107106

108107
size_t Serialize(CompactObj::ExternalRep rep, const PrimeValue& pv, io::MutableBytes buffer) {
109-
DCHECK_LE(EstimateSerializedSize(pv)->first, buffer.size());
108+
DCHECK_LE(DetermineSerializationParams(pv).first, buffer.size());
110109
switch (rep) {
111110
case CompactObj::ExternalRep::STRING: {
112111
auto sv = pv.GetRawString();
@@ -126,11 +125,11 @@ size_t Serialize(CompactObj::ExternalRep rep, const PrimeValue& pv, io::MutableB
126125
return 0;
127126
}
128127

129-
string SerializeString(const PrimeValue& pv) {
130-
auto estimate = EstimateSerializedSize(pv);
131-
string s(estimate->first, 0);
132-
size_t written =
133-
Serialize(estimate->second, pv, {reinterpret_cast<uint8_t*>(s.data()), s.size()});
128+
// TODO: Remove with proper no-copy serialization
129+
string SerializeToString(const PrimeValue& pv) {
130+
auto [size, type] = DetermineSerializationParams(pv);
131+
string s(size, 0);
132+
size_t written = Serialize(type, pv, {reinterpret_cast<uint8_t*>(s.data()), s.size()});
134133
s.resize(written);
135134
return s;
136135
}
@@ -224,7 +223,7 @@ class TieredStorage::ShardOpManager : public tiering::OpManager {
224223
stats->tiered_used_bytes += segment.length;
225224
stats_.total_stashes++;
226225

227-
CompactObj::ExternalRep rep = EstimateSerializedSize(*pv)->second;
226+
CompactObj::ExternalRep rep = DetermineSerializationParams(*pv).second;
228227
if (ts_->config_.experimental_cooling) {
229228
RetireColdEntries(pv->MallocUsed());
230229
ts_->CoolDown(key.first, key.second, segment, rep, pv);
@@ -458,27 +457,27 @@ std::optional<util::fb2::Future<bool>> TieredStorage::TryStash(DbIndex dbid, str
458457
return {};
459458
}
460459

461-
auto estimated = EstimateSerializedSize(*value);
462-
DCHECK(estimated);
460+
auto [est_size, rep] = DetermineSerializationParams(*value);
461+
DCHECK_GT(est_size, 0u);
463462

464463
tiering::OpManager::EntryId id;
465464
error_code ec;
466465

467466
value->SetStashPending(true);
468-
if (OccupiesWholePages(estimated->first)) { // large enough for own page
467+
if (OccupiesWholePages(est_size)) { // large enough for own page
469468
id = KeyRef(dbid, key);
470-
if (auto prepared = op_manager_->PrepareStash(estimated->first); prepared) {
469+
if (auto prepared = op_manager_->PrepareStash(est_size); prepared) {
471470
auto [offset, buf] = *prepared;
472-
size_t written = Serialize(estimated->second, *value, buf.bytes);
471+
size_t written = Serialize(rep, *value, buf.bytes);
473472
tiering::DiskSegment segment{offset, written};
474473
op_manager_->Stash(id, segment, buf);
475474
} else {
476475
ec = prepared.error();
477476
}
478-
} else if (auto bin = bins_->Stash(dbid, key, SerializeString(*value)); bin) {
477+
} else if (auto bin = bins_->Stash(dbid, key, SerializeToString(*value)); bin) {
479478
id = bin->first;
480479
// TODO(vlad): Write bin to prepared buffer instead of allocating one
481-
if (auto prepared = op_manager_->PrepareStash(estimated->first); prepared) {
480+
if (auto prepared = op_manager_->PrepareStash(est_size); prepared) {
482481
auto [offset, buf] = *prepared;
483482
memcpy(buf.bytes.data(), bin->second.data(), bin->second.size());
484483
tiering::DiskSegment segment{offset, bin->second.size()};
@@ -530,8 +529,9 @@ void TieredStorage::CancelStash(DbIndex dbid, std::string_view key, PrimeValue*
530529
if (auto node = stash_backpressure_.extract(make_pair(dbid, key)); !node.empty())
531530
std::move(node.mapped()).Resolve(false);
532531

533-
auto estimated = EstimateSerializedSize(*value);
534-
if (OccupiesWholePages(estimated->first)) {
532+
// TODO: Don't recompute size estimate, try-delete bin first
533+
size_t size = DetermineSerializationParams(*value).first;
534+
if (OccupiesWholePages(size)) {
535535
op_manager_->Delete(KeyRef(dbid, key));
536536
} else if (auto bin = bins_->Delete(dbid, key); bin) {
537537
op_manager_->Delete(*bin);
@@ -694,18 +694,16 @@ bool TieredStorage::ShouldStash(const PrimeValue& pv) const {
694694
return false;
695695

696696
// Estimate value size
697-
auto estimation = EstimateSerializedSize(pv);
698-
if (!estimation)
697+
auto [size, rep] = DetermineSerializationParams(pv);
698+
if (size < config_.min_value_size)
699699
return false;
700700

701701
// For now, hash offloading is conditional
702702
if (pv.ObjType() == OBJ_HASH && !config_.experimental_hash_offload)
703703
return false;
704704

705705
const auto& disk_stats = op_manager_->GetStats().disk_stats;
706-
return estimation->first >= config_.min_value_size &&
707-
disk_stats.allocated_bytes + tiering::kPageSize + estimation->first <
708-
disk_stats.max_file_size;
706+
return disk_stats.allocated_bytes + tiering::kPageSize + size < disk_stats.max_file_size;
709707
}
710708

711709
void TieredStorage::CoolDown(DbIndex db_ind, std::string_view str,

0 commit comments

Comments
 (0)