
Commit d986d0d

fixup! feat(mem): introduce KVM slots per GuestMemoryRegion
1 parent 213c931 commit d986d0d
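
This fixup extracts the dirty-page dumping loop into a new `GuestMemorySlot::dump_dirty` helper, switches `dump` and `dump_dirty` to relative writer seeks (dropping the `writer_offset` bookkeeping), and makes `GuestRegionMmapExt::mem_slot` infallible, asserting on out-of-range slot indices instead of returning an `Option`.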

File tree

1 file changed: +68 −64 lines changed


src/vmm/src/vstate/memory.rs

Lines changed: 68 additions & 64 deletions
@@ -117,6 +117,54 @@ impl From<&GuestMemorySlot<'_>> for kvm_userspace_memory_region {
 }
 
 impl<'a> GuestMemorySlot<'a> {
+    /// Dumps the dirty pages in this slot onto the writer.
+    ///
+    /// Assumes the writer is positioned at the start of this slot's data and
+    /// leaves it positioned at the slot's end, so consecutive slots can be
+    /// dumped using only relative seeks.
+    pub(crate) fn dump_dirty<T: WriteVolatile + std::io::Seek>(
+        &self,
+        writer: &mut T,
+        kvm_bitmap: &[u64],
+        page_size: usize,
+    ) -> Result<(), GuestMemoryError> {
+        let firecracker_bitmap = self.slice.bitmap();
+        let mut write_size = 0;
+        let mut skip_size = 0;
+        let mut dirty_batch_start = 0;
+
+        for (i, v) in kvm_bitmap.iter().enumerate() {
+            for j in 0..64 {
+                let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;
+                let page_offset = ((i * 64) + j) * page_size;
+                let is_firecracker_page_dirty = firecracker_bitmap.dirty_at(page_offset);
+
+                if is_kvm_page_dirty || is_firecracker_page_dirty {
+                    if write_size == 0 {
+                        // We are at the start of a new batch of dirty pages:
+                        // seek forward over the unmodified pages.
+                        writer
+                            .seek(SeekFrom::Current(skip_size.try_into().unwrap()))
+                            .unwrap();
+                        dirty_batch_start = page_offset;
+                        skip_size = 0;
+                    }
+                    write_size += page_size;
+                } else {
+                    if write_size > 0 {
+                        // We are at the end of a batch of dirty pages.
+                        writer.write_all_volatile(
+                            &self.slice.subslice(dirty_batch_start, write_size)?,
+                        )?;
+                        write_size = 0;
+                    }
+                    // Count the clean page so the next seek lands correctly.
+                    skip_size += page_size;
+                }
+            }
+        }
+
+        if write_size > 0 {
+            writer.write_all_volatile(&self.slice.subslice(dirty_batch_start, write_size)?)?;
+        } else if skip_size > 0 {
+            // Trailing clean pages: leave the writer at the end of the slot.
+            writer
+                .seek(SeekFrom::Current(skip_size.try_into().unwrap()))
+                .unwrap();
+        }
+
+        Ok(())
+    }
+
     /// Makes the slot host memory PROT_NONE (true) or PROT_READ|PROT_WRITE (false)
     pub(crate) fn protect(&self, protected: bool) -> Result<(), MemoryError> {
         let prot = if protected {
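
The batching above flushes each run of dirty pages with a single `write_all_volatile` and hops over each run of clean pages with a relative seek, so the caller never tracks an absolute offset. A self-contained sketch of the same idea in plain `std::io` terms (the toy `PAGE` constant, `bool` bitmap, and `Cursor` target are illustration-only stand-ins, not Firecracker's API):

use std::io::{Cursor, Seek, SeekFrom, Write};

const PAGE: usize = 16; // toy page size; the real code queries get_page_size()

/// Writes runs of dirty pages and seeks over runs of clean ones,
/// leaving `out` positioned at the end of the slot.
fn dump_dirty(mem: &[u8], dirty: &[bool], out: &mut (impl Write + Seek)) -> std::io::Result<()> {
    let (mut write_size, mut skip_size, mut batch_start) = (0usize, 0usize, 0usize);
    for (page, &is_dirty) in dirty.iter().enumerate() {
        let page_offset = page * PAGE;
        if is_dirty {
            if write_size == 0 {
                // Start of a dirty batch: hop over the clean run before it.
                out.seek(SeekFrom::Current(skip_size as i64))?;
                batch_start = page_offset;
                skip_size = 0;
            }
            write_size += PAGE;
        } else {
            if write_size > 0 {
                // End of a dirty batch: flush it with a single write.
                out.write_all(&mem[batch_start..batch_start + write_size])?;
                write_size = 0;
            }
            skip_size += PAGE;
        }
    }
    if write_size > 0 {
        out.write_all(&mem[batch_start..batch_start + write_size])?;
    } else if skip_size > 0 {
        out.seek(SeekFrom::Current(skip_size as i64))?; // trailing clean run
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    let mem = vec![0xAAu8; 4 * PAGE];
    let dirty = [true, false, false, true]; // pages 0 and 3 are dirty
    let mut out = Cursor::new(vec![0u8; 4 * PAGE]);
    dump_dirty(&mem, &dirty, &mut out)?;
    assert_eq!(out.position() as usize, 4 * PAGE); // ends at the slot boundary
    Ok(())
}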
@@ -201,20 +249,18 @@ impl GuestRegionMmapExt
         u32::try_from(u64_to_usize(self.len()) / self.slot_size).unwrap()
     }
 
-    pub(crate) fn mem_slot(&self, slot: u32) -> Option<GuestMemorySlot<'_>> {
-        if slot < self.slot_from || slot >= self.slot_from + self.slot_cnt() {
-            None
-        } else {
-            let offset = ((slot - self.slot_from) as u64) * (self.slot_size as u64);
-
-            Some(GuestMemorySlot {
-                slot,
-                guest_addr: self.start_addr().checked_add(offset)?,
-                slice: self
-                    .inner
-                    .get_slice(MemoryRegionAddress(offset), self.slot_size)
-                    .ok()?,
-            })
+    pub(crate) fn mem_slot(&self, slot: u32) -> GuestMemorySlot<'_> {
+        assert!(slot >= self.slot_from && slot < self.slot_from + self.slot_cnt());
+
+        let offset = ((slot - self.slot_from) as u64) * (self.slot_size as u64);
+
+        GuestMemorySlot {
+            slot,
+            guest_addr: self.start_addr().unchecked_add(offset),
+            slice: self
+                .inner
+                .get_slice(MemoryRegionAddress(offset), self.slot_size)
+                .expect("slot range should be valid"),
         }
     }
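
Since `mem_slot`'s callers (`slots()` below, and the dump paths) only derive slot indices from `slot_from..slot_from + slot_cnt()`, an out-of-range index is a programmer error; asserting is more honest than threading an `Option` that is always `Some`. The guest address arithmetic is just `(slot - slot_from) * slot_size` past the region start; a toy model of that mapping (hypothetical stand-in types, not the vm-memory API):

/// Illustration-only stand-in for GuestRegionMmapExt's slot bookkeeping.
struct Region {
    slot_from: u32,   // first KVM slot index owned by this region
    slot_cnt: u32,    // number of slot_size-sized slots in the region
    slot_size: u64,
    guest_start: u64, // guest-physical address where the region begins
}

impl Region {
    // Mirrors mem_slot's address math; panics on out-of-range input.
    fn slot_guest_addr(&self, slot: u32) -> u64 {
        assert!(slot >= self.slot_from && slot < self.slot_from + self.slot_cnt);
        self.guest_start + u64::from(slot - self.slot_from) * self.slot_size
    }
}

fn main() {
    let r = Region {
        slot_from: 3,
        slot_cnt: 2,
        slot_size: 128 << 20, // 128 MiB slots
        guest_start: 0x4000_0000,
    };
    // Slot 4 is the second slot, one slot_size past the region start.
    assert_eq!(r.slot_guest_addr(4), 0x4000_0000 + (128 << 20));
}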

@@ -229,8 +275,7 @@ impl GuestRegionMmapExt
             .enumerate()
             .map(|(i, b)| {
                 (
-                    self.mem_slot(self.slot_from + u32::try_from(i).unwrap())
-                        .unwrap(),
+                    self.mem_slot(self.slot_from + u32::try_from(i).unwrap()),
                     *b,
                 )
             })
@@ -597,11 +642,8 @@ impl GuestMemoryExtension for GuestMemoryMmap {
             .flat_map(|region| region.slots())
             .try_for_each(|(mem_slot, plugged)| {
                 if !plugged {
-                    writer
-                        .seek(SeekFrom::Current(
-                            i64::try_from(mem_slot.slice.len()).unwrap(),
-                        ))
-                        .unwrap();
+                    let ilen = i64::try_from(mem_slot.slice.len()).unwrap();
+                    writer.seek(SeekFrom::Current(ilen)).unwrap();
                 } else {
                     writer.write_all_volatile(&mem_slot.slice)?;
                 }
@@ -616,57 +658,19 @@ impl GuestMemoryExtension for GuestMemoryMmap {
         writer: &mut T,
         dirty_bitmap: &DirtyBitmap,
     ) -> Result<(), MemoryError> {
-        let mut writer_offset = 0;
         let page_size = get_page_size().map_err(MemoryError::PageSize)?;
 
         let write_result =
             self.iter()
                 .flat_map(|region| region.slots())
                 .try_for_each(|(mem_slot, plugged)| {
                     if !plugged {
-                        writer_offset += mem_slot.slice.len() as u64;
-                        return Ok(());
-                    }
-
-                    let kvm_bitmap = dirty_bitmap.get(&mem_slot.slot).unwrap();
-                    let firecracker_bitmap = mem_slot.slice.bitmap();
-                    let mut write_size = 0;
-                    let mut dirty_batch_start = 0;
-
-                    for (i, v) in kvm_bitmap.iter().enumerate() {
-                        for j in 0..64 {
-                            let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;
-                            let page_offset = ((i * 64) + j) * page_size;
-                            let is_firecracker_page_dirty =
-                                firecracker_bitmap.dirty_at(page_offset);
-
-                            if is_kvm_page_dirty || is_firecracker_page_dirty {
-                                // We are at the start of a new batch of dirty pages.
-                                if write_size == 0 {
-                                    // Seek forward over the unmodified pages.
-                                    writer
-                                        .seek(SeekFrom::Start(writer_offset + page_offset as u64))
-                                        .unwrap();
-                                    dirty_batch_start = page_offset;
-                                }
-                                write_size += page_size;
-                            } else if write_size > 0 {
-                                // We are at the end of a batch of dirty pages.
-                                writer.write_all_volatile(
-                                    &mem_slot.slice.subslice(dirty_batch_start, write_size)?,
-                                )?;
-
-                                write_size = 0;
-                            }
-                        }
-                    }
-
-                    if write_size > 0 {
-                        writer.write_all_volatile(
-                            &mem_slot.slice.subslice(dirty_batch_start, write_size)?,
-                        )?;
+                        let ilen = i64::try_from(mem_slot.slice.len()).unwrap();
+                        writer.seek(SeekFrom::Current(ilen)).unwrap();
+                    } else {
+                        let kvm_bitmap = dirty_bitmap.get(&mem_slot.slot).unwrap();
+                        mem_slot.dump_dirty(writer, kvm_bitmap, page_size)?;
                     }
-                    writer_offset += mem_slot.slice.len() as u64;
                     Ok(())
                 });
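
With the loop extracted, `dump` and `dump_dirty` walk the slots identically: an unplugged slot advances the writer by its length with a relative seek, while a plugged slot either writes its whole slice or delegates to `GuestMemorySlot::dump_dirty`. This contract only holds if `dump_dirty` always leaves the writer at the end of its slot, including when the slot's tail is clean, which is why the helper finishes with either a final write or a final seek.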
