use std::collections::BTreeMap;
use std::ffi::OsStr;
use std::ffi::OsString;
use std::fs::DirEntry;
use std::fs::File;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;

use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::info;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

use crate::arena::Arena;
use crate::arena::BlockId;
use crate::blockgroup::BlockGroupDescriptor;
use crate::blockgroup::GroupMetaData;
use crate::blockgroup::BLOCK_SIZE;
use crate::builder::Builder;
use crate::inode::Inode;
use crate::inode::InodeBlock;
use crate::inode::InodeBlocksCount;
use crate::inode::InodeNum;
use crate::inode::InodeType;
use crate::superblock::SuperBlock;
use crate::xattr::InlineXattrs;

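/// Fixed-size header of an ext2 directory entry; the entry name follows it on disk.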
#[repr(C)]
#[derive(Copy, Clone, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct DirEntryRaw {
    inode: u32,
    rec_len: u16,
    name_len: u8,
    file_type: u8,
}

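/// A directory entry header allocated in the arena, paired with the entry's name.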
struct DirEntryWithName<'a> {
    de: &'a mut DirEntryRaw,
    name: OsString,
}

impl std::fmt::Debug for DirEntryWithName<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DirEntry")
            .field("de", &self.de)
            .field("name", &self.name)
            .finish()
    }
}

impl<'a> DirEntryWithName<'a> {
    fn new(
        arena: &'a Arena<'a>,
        inode: InodeNum,
        typ: InodeType,
        name_str: &OsStr,
        dblock: &mut DirEntryBlock,
    ) -> Result<Self> {
        let cs = name_str.as_bytes();
        let name_len = cs.len();
        let aligned_name_len = name_len
            .checked_next_multiple_of(4)
            .expect("aligning the name length to 4 bytes must not overflow");

        let rec_len = 8 + aligned_name_len as u16;

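        // A directory entry occupies 8 bytes of header (`DirEntryRaw`) plus the name
        // padded to a 4-byte boundary. The last entry in a block must have a `rec_len`
        // that reaches the end of the block; that adjustment is done below once we know
        // whether this entry is the first one in its block.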
        let de = arena.allocate(dblock.block_id, dblock.offset)?;
        *de = DirEntryRaw {
            inode: inode.into(),
            rec_len,
            name_len: name_len as u8,
            file_type: typ.into_dir_entry_file_type(),
        };
        dblock.offset += std::mem::size_of::<DirEntryRaw>();

        let name_slice = arena.allocate_slice(dblock.block_id, dblock.offset, aligned_name_len)?;
        dblock.offset += aligned_name_len;
        name_slice[..cs.len()].copy_from_slice(cs);

        if dblock.entries.is_empty() {
            de.rec_len = BLOCK_SIZE as u16;
        } else {
            let last = dblock
                .entries
                .last_mut()
                .expect("entries must not be empty");
            let last_rec_len = last.de.rec_len;
            last.de.rec_len = (8 + last.name.as_os_str().as_bytes().len() as u16)
                .checked_next_multiple_of(4)
                .expect("rec_len calculation must not overflow");
            de.rec_len = last_rec_len - last.de.rec_len;
        }

        Ok(Self {
            de,
            name: name_str.into(),
        })
    }
}

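/// A filesystem block holding directory entries, together with the current write
/// offset within the block.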
#[derive(Debug)]
struct DirEntryBlock<'a> {
    block_id: BlockId,
    offset: usize,
    entries: Vec<DirEntryWithName<'a>>,
}

impl DirEntryBlock<'_> {
    fn has_enough_space(&self, name: &OsStr) -> bool {
        let dir_entry_size = std::mem::size_of::<DirEntryRaw>();
        let aligned_name_len = name
            .as_bytes()
            .len()
            .checked_next_multiple_of(4)
            .expect("length must be < 256 bytes so it must not overflow");
        self.offset + dir_entry_size + aligned_name_len <= BLOCK_SIZE
    }
}

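/// In-memory state of an ext2 filesystem image under construction.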
pub(crate) struct Ext2<'a> {
    sb: &'a mut SuperBlock,
    cur_block_group: usize,
    cur_inode_table: usize,

    group_metadata: Vec<GroupMetaData<'a>>,

    dir_entries: BTreeMap<InodeNum, Vec<DirEntryBlock<'a>>>,
}

impl<'a> Ext2<'a> {
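    /// Creates the filesystem skeleton: writes the superblock and per-group metadata,
    /// then populates the reserved root directory (inode 2) and `lost+found`.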
    pub(crate) fn new(builder: &Builder, arena: &'a Arena<'a>) -> Result<Self> {
        let sb = SuperBlock::new(arena, builder)?;
        let mut group_metadata = vec![];
        for i in 0..sb.num_groups() {
            group_metadata.push(GroupMetaData::new(arena, sb, i)?);
        }

        let mut ext2 = Ext2 {
            sb,
            cur_block_group: 0,
            cur_inode_table: 0,
            group_metadata,
            dir_entries: BTreeMap::new(),
        };

        let root_inode = InodeNum::new(2)?;
        let root_xattr = match &builder.root_dir {
            Some(dir) => Some(InlineXattrs::from_path(dir)?),
            None => None,
        };
        ext2.add_reserved_dir(arena, root_inode, root_inode, OsStr::new("/"), root_xattr)?;
        let lost_found_inode = ext2.allocate_inode()?;
        ext2.add_reserved_dir(
            arena,
            lost_found_inode,
            root_inode,
            OsStr::new("lost+found"),
            None,
        )?;

        Ok(ext2)
    }

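    /// Allocates a fresh inode number, updating the inode bitmap and the free-inode
    /// counters in the superblock and the current block group.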
    fn allocate_inode(&mut self) -> Result<InodeNum> {
        if self.sb.free_inodes_count == 0 {
            bail!(
                "no free inodes: ran out of s_inodes_count={}",
                self.sb.inodes_count
            );
        }

        if self.group_metadata[self.cur_inode_table]
            .group_desc
            .free_inodes_count
            == 0
        {
            self.cur_inode_table += 1;
        }

        let gm = &mut self.group_metadata[self.cur_inode_table];
        let alloc_inode = InodeNum::new(gm.first_free_inode)?;
        gm.inode_bitmap
            .set(
                (usize::from(alloc_inode) - 1) % self.sb.inodes_per_group as usize,
                true,
            )
            .context("failed to set inode bitmap")?;

        gm.first_free_inode += 1;
        gm.group_desc.free_inodes_count -= 1;
        self.sb.free_inodes_count -= 1;
        Ok(alloc_inode)
    }

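    /// Allocates a single block.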
    fn allocate_block(&mut self) -> Result<BlockId> {
        self.allocate_contiguous_blocks(1).map(|v| v[0][0])
    }

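    /// Allocates `n` blocks, returned as runs of consecutive block ids. A run never
    /// crosses a block-group boundary, so more than one run may be returned.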
    fn allocate_contiguous_blocks(&mut self, n: u16) -> Result<Vec<Vec<BlockId>>> {
        if n == 0 {
            bail!("n must be positive");
        }
        if self.sb.free_blocks_count < n as u32 {
            bail!(
                "no free blocks: ran out of free_blocks_count={} < {n}",
                self.sb.free_blocks_count
            );
        }

        let mut contig_blocks = vec![];
        let mut remaining = n;
        while remaining > 0 {
            let alloc_block_num = std::cmp::min(
                remaining,
                self.group_metadata[self.cur_block_group]
                    .group_desc
                    .free_blocks_count,
            ) as u32;

            let gm = &mut self.group_metadata[self.cur_block_group];
            let alloc_blocks = (gm.first_free_block..gm.first_free_block + alloc_block_num)
                .map(BlockId::from)
                .collect();
            gm.first_free_block += alloc_block_num;
            gm.group_desc.free_blocks_count -= alloc_block_num as u16;
            self.sb.free_blocks_count -= alloc_block_num;
            for &b in &alloc_blocks {
                let index = u32::from(b) as usize
                    - self.cur_block_group * self.sb.blocks_per_group as usize;
                gm.block_bitmap
                    .set(index, true)
                    .with_context(|| format!("failed to set block_bitmap at {index}"))?;
            }
            remaining -= alloc_block_num as u16;
            if self.group_metadata[self.cur_block_group]
                .group_desc
                .free_blocks_count
                == 0
            {
                self.cur_block_group += 1;
            }
            contig_blocks.push(alloc_blocks);
        }

        Ok(contig_blocks)
    }

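    /// Returns the index of the block group that the given inode belongs to.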
    fn group_num_for_inode(&self, inode: InodeNum) -> usize {
        inode.to_table_index() / self.sb.inodes_per_group as usize
    }

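    /// Looks up a mutable reference to an already-registered inode.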
    fn get_inode_mut(&mut self, num: InodeNum) -> Result<&mut &'a mut Inode> {
        let group_id = self.group_num_for_inode(num);
        self.group_metadata[group_id]
            .inode_table
            .get_mut(&num)
            .ok_or_else(|| anyhow!("{:?} not found", num))
    }

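    /// Appends a directory entry for `inode` named `name` to the directory `parent`,
    /// allocating a new directory-entry block when the current one is full.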
    fn allocate_dir_entry(
        &mut self,
        arena: &'a Arena<'a>,
        parent: InodeNum,
        inode: InodeNum,
        typ: InodeType,
        name: &OsStr,
    ) -> Result<()> {
        if name.is_empty() {
            bail!("directory name must not be empty");
        } else if name.len() > 255 {
            bail!("name length must not exceed 255: {:?}", name);
        }

        #[allow(clippy::map_entry)]
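        // `BTreeMap::entry` cannot be used here because filling the entry calls
        // `self.allocate_block()` and `self.get_inode_mut()`, which need `&mut self`
        // while the entry would keep `self.dir_entries` borrowed.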
        if !self.dir_entries.contains_key(&parent) {
            let block_id = self.allocate_block()?;
            let inode = self.get_inode_mut(parent)?;
            inode.block.set_direct_blocks(&[block_id])?;
            inode.blocks = InodeBlocksCount::from_bytes_len(BLOCK_SIZE as u32);
            self.dir_entries.insert(
                parent,
                vec![DirEntryBlock {
                    block_id,
                    offset: 0,
                    entries: Vec::new(),
                }],
            );
        }

        if !self
            .dir_entries
            .get(&parent)
            .ok_or_else(|| anyhow!("parent {:?} not found for {:?}", parent, inode))?
            .last()
            .expect("directory entries must not be empty")
            .has_enough_space(name)
        {
            let idx = self.dir_entries.get(&parent).unwrap().len();
            let block_id = self.allocate_block()?;
            let parent_inode = self.get_inode_mut(parent)?;
            parent_inode.block.set_block_id(idx, &block_id)?;
            parent_inode.blocks.add(BLOCK_SIZE as u32);
            parent_inode.size += BLOCK_SIZE as u32;
            self.dir_entries
                .get_mut(&parent)
                .unwrap()
                .push(DirEntryBlock {
                    block_id,
                    offset: 0,
                    entries: Vec::new(),
                });
        }

        if typ == InodeType::Directory {
            let parent = self.get_inode_mut(parent)?;
            parent.links_count += 1;
        }

        let parent_dir = self
            .dir_entries
            .get_mut(&parent)
            .ok_or_else(|| anyhow!("parent {:?} not found for {:?}", parent, inode))?
            .last_mut()
            .expect("directory entries must not be empty");

        let dir_entry = DirEntryWithName::new(arena, inode, typ, name, parent_dir)?;

        parent_dir.entries.push(dir_entry);

        Ok(())
    }

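    /// Registers an inode in its block group's inode table and marks it in the inode
    /// bitmap.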
    fn add_inode(&mut self, num: InodeNum, inode: &'a mut Inode) -> Result<()> {
        let typ = inode.typ().ok_or_else(|| anyhow!("unknown inode type"))?;
        let group_id = self.group_num_for_inode(num);
        let gm = &mut self.group_metadata[group_id];
        if gm.inode_table.contains_key(&num) {
            bail!("inode {:?} already exists", &num);
        }

        if typ == InodeType::Directory {
            gm.group_desc.used_dirs_count += 1;
        }

        gm.inode_table.insert(num, inode);
        let inode_index = num.to_table_index() % self.sb.inodes_per_group as usize;
        gm.inode_bitmap
            .set(inode_index, true)
            .with_context(|| format!("failed to set inode bitmap at {}", num.to_table_index()))?;

        Ok(())
    }

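    /// Adds a reserved directory such as the root directory or `lost+found`, creating
    /// its "." and ".." entries and, unless it is the root itself, the entry in its
    /// parent directory.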
    fn add_reserved_dir(
        &mut self,
        arena: &'a Arena<'a>,
        inode_num: InodeNum,
        parent_inode: InodeNum,
        name: &OsStr,
        xattr: Option<InlineXattrs>,
    ) -> Result<()> {
        let group_id = self.group_num_for_inode(inode_num);
        let inode = Inode::new(
            arena,
            &mut self.group_metadata[group_id],
            inode_num,
            InodeType::Directory,
            BLOCK_SIZE as u32,
            xattr,
        )?;
        self.add_inode(inode_num, inode)?;

        self.allocate_dir_entry(
            arena,
            inode_num,
            inode_num,
            InodeType::Directory,
            OsStr::new("."),
        )?;
        self.allocate_dir_entry(
            arena,
            inode_num,
            parent_inode,
            InodeType::Directory,
            OsStr::new(".."),
        )?;

        if inode_num != parent_inode {
            self.allocate_dir_entry(arena, parent_inode, inode_num, InodeType::Directory, name)?;
        }

        Ok(())
    }

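    /// Adds a directory at the host path `path`, building its inode from the host
    /// metadata and creating its "." and ".." entries.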
    fn add_dir(
        &mut self,
        arena: &'a Arena<'a>,
        inode_num: InodeNum,
        parent_inode: InodeNum,
        path: &Path,
    ) -> Result<()> {
        let group_id = self.group_num_for_inode(inode_num);

        let xattr = InlineXattrs::from_path(path)?;
        let inode = Inode::from_metadata(
            arena,
            &mut self.group_metadata[group_id],
            inode_num,
            &std::fs::metadata(path)?,
            BLOCK_SIZE as u32,
            0,
            InodeBlocksCount::from_bytes_len(0),
            InodeBlock::default(),
            Some(xattr),
        )?;

        self.add_inode(inode_num, inode)?;

        self.allocate_dir_entry(
            arena,
            inode_num,
            inode_num,
            InodeType::Directory,
            OsStr::new("."),
        )?;
        self.allocate_dir_entry(
            arena,
            inode_num,
            parent_inode,
            InodeType::Directory,
            OsStr::new(".."),
        )?;

        if inode_num != parent_inode {
            let name = path
                .file_name()
                .ok_or_else(|| anyhow!("failed to get directory name"))?;
            self.allocate_dir_entry(arena, parent_inode, inode_num, InodeType::Directory, name)?;
        }

        Ok(())
    }

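    /// Allocates `block_num` blocks and reserves the corresponding arena regions so
    /// that the part of `file` starting at `file_offset` is mmap'd there, returning
    /// the allocated block ids and the number of bytes covered.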
    fn register_mmap_file(
        &mut self,
        arena: &'a Arena<'a>,
        block_num: usize,
        file: &File,
        file_size: usize,
        mut file_offset: usize,
    ) -> Result<(Vec<BlockId>, usize)> {
        let contig_blocks = self.allocate_contiguous_blocks(block_num as u16)?;

        let mut remaining = std::cmp::min(file_size - file_offset, block_num * BLOCK_SIZE);
        let mut written = 0;
        for blocks in &contig_blocks {
            if remaining == 0 {
                panic!("remaining == 0. This is a bug");
            }
            let length = std::cmp::min(remaining, BLOCK_SIZE * blocks.len());
            let start_block = blocks[0];
            let mem_offset = u32::from(start_block) as usize * BLOCK_SIZE;
            arena
                .reserve_for_mmap(
                    mem_offset,
                    length,
                    file.try_clone().context("failed to clone file")?,
                    file_offset,
                )
                .context("mmap for direct_block is already occupied")?;
            remaining -= length;
            written += length;
            file_offset += length;
        }
        Ok((contig_blocks.concat(), written))
    }

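    /// Allocates data blocks for the next region of `file`, registers them for mmap,
    /// and records their block ids in the indirect block `indirect_table`. Returns the
    /// number of bytes covered.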
    fn fill_indirect_block(
        &mut self,
        arena: &'a Arena<'a>,
        indirect_table: BlockId,
        file: &File,
        file_size: usize,
        file_offset: usize,
    ) -> Result<usize> {
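        // Each entry in an indirect block is a 4-byte block id, so one indirect block
        // can address up to `BLOCK_SIZE / 4` data blocks.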
        let max_num_blocks = BLOCK_SIZE / 4;
        let max_data_len = max_num_blocks * BLOCK_SIZE;

        let length = std::cmp::min(file_size - file_offset, max_data_len);
        let block_num = length.div_ceil(BLOCK_SIZE);

        let (allocated_blocks, length) = self
            .register_mmap_file(arena, block_num, file, file_size, file_offset)
            .context("failed to reserve mmap regions on indirect block")?;

        let slice = arena.allocate_slice(indirect_table, 0, 4 * block_num)?;
        slice.copy_from_slice(allocated_blocks.as_bytes());

        Ok(length)
    }

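    /// Adds a regular file at `path` under `parent_inode`, filling direct blocks first
    /// and falling back to singly- and doubly-indirect blocks for larger files.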
    fn add_file(
        &mut self,
        arena: &'a Arena<'a>,
        parent_inode: InodeNum,
        path: &Path,
    ) -> Result<()> {
        let inode_num = self.allocate_inode()?;

        let name = path
            .file_name()
            .ok_or_else(|| anyhow!("failed to get file name"))?;
        let file = File::open(path)?;
        let file_size = file.metadata()?.len() as usize;
        let mut block = InodeBlock::default();

        let mut written = 0;
        let mut used_blocks = 0;

        if file_size > 0 {
            let block_num = std::cmp::min(
                file_size.div_ceil(BLOCK_SIZE),
                InodeBlock::NUM_DIRECT_BLOCKS,
            );
            let (allocated_blocks, len) = self
                .register_mmap_file(arena, block_num, &file, file_size, 0)
                .context("failed to reserve mmap regions on direct block")?;

            block.set_direct_blocks(&allocated_blocks)?;
            written += len;
            used_blocks += block_num;
        }

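        // Data that does not fit in the direct blocks is addressed through a
        // singly-indirect block.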
        if written < file_size {
            let indirect_table = self.allocate_block()?;
            block.set_indirect_block_table(&indirect_table)?;
            used_blocks += 1;

            let length =
                self.fill_indirect_block(arena, indirect_table, &file, file_size, written)?;
            written += length;
            used_blocks += length.div_ceil(BLOCK_SIZE);
        }

        if written < file_size {
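            // Each 4-byte entry of the doubly-indirect table points to another
            // singly-indirect table.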
            let d_indirect_table = self.allocate_block()?;
            block.set_double_indirect_block_table(&d_indirect_table)?;
            used_blocks += 1;

            let mut indirect_blocks: Vec<BlockId> = vec![];
            for _ in 0..BLOCK_SIZE / 4 {
                if written >= file_size {
                    break;
                }
                let indirect_table = self.allocate_block()?;
                indirect_blocks.push(indirect_table);
                used_blocks += 1;

                let length = self
                    .fill_indirect_block(arena, indirect_table, &file, file_size, written)
                    .context("failed to fill indirect block for doubly-indirect table")?;
                written += length;
                used_blocks += length.div_ceil(BLOCK_SIZE);
            }

            let d_table = arena.allocate_slice(d_indirect_table, 0, indirect_blocks.len() * 4)?;
            d_table.copy_from_slice(indirect_blocks.as_bytes());
        }

        if written != file_size {
            unimplemented!("Triple-indirect block is not supported");
        }

        let blocks = InodeBlocksCount::from_bytes_len((used_blocks * BLOCK_SIZE) as u32);
        let group_id = self.group_num_for_inode(inode_num);
        let size = file_size as u32;

        let xattr = InlineXattrs::from_path(path)?;
        let inode = Inode::from_metadata(
            arena,
            &mut self.group_metadata[group_id],
            inode_num,
            &std::fs::metadata(path)?,
            size,
            1,
            blocks,
            block,
            Some(xattr),
        )?;

        self.add_inode(inode_num, inode)?;
        self.allocate_dir_entry(arena, parent_inode, inode_num, InodeType::Regular, name)?;

        Ok(())
    }

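    /// Adds a symbolic link. A short target is stored inline in the inode; longer
    /// targets are handled by `add_long_symlink`.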
    fn add_symlink(
        &mut self,
        arena: &'a Arena<'a>,
        parent: InodeNum,
        entry: &DirEntry,
    ) -> Result<()> {
        let link = entry.path();
        let dst_path = std::fs::read_link(&link)?;
        let dst = dst_path
            .to_str()
            .context("failed to convert symlink destination to str")?;

        if dst.len() >= InodeBlock::max_inline_symlink_len() {
            return self.add_long_symlink(arena, parent, &link, dst);
        }

        let inode_num = self.allocate_inode()?;
        let mut block = InodeBlock::default();
        block.set_inline_symlink(dst)?;
        let group_id = self.group_num_for_inode(inode_num);
        let xattr = InlineXattrs::from_path(&link)?;
        let inode = Inode::from_metadata(
            arena,
            &mut self.group_metadata[group_id],
            inode_num,
            &std::fs::symlink_metadata(&link)?,
            dst.len() as u32,
            1,
            InodeBlocksCount::from_bytes_len(0),
            block,
            Some(xattr),
        )?;
        self.add_inode(inode_num, inode)?;

        let link_name = link.file_name().context("failed to get symlink name")?;
        self.allocate_dir_entry(arena, parent, inode_num, InodeType::Symlink, link_name)?;

        Ok(())
    }

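    /// Adds a symlink whose target does not fit inline in the inode; the target string
    /// is written into a dedicated data block instead.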
    fn add_long_symlink(
        &mut self,
        arena: &'a Arena<'a>,
        parent: InodeNum,
        link: &Path,
        dst: &str,
    ) -> Result<()> {
        let dst_len = dst.len();
        if dst_len > BLOCK_SIZE {
            bail!("symlink longer than block size: {:?}", dst);
        }

        let symlink_block = self.allocate_block()?;
        let buf = arena.allocate_slice(symlink_block, 0, dst_len)?;
        buf.copy_from_slice(dst.as_bytes());

        let inode_num = self.allocate_inode()?;
        let mut block = InodeBlock::default();
        block.set_direct_blocks(&[symlink_block])?;

        let group_id = self.group_num_for_inode(inode_num);
        let xattr = InlineXattrs::from_path(link)?;
        let inode = Inode::from_metadata(
            arena,
            &mut self.group_metadata[group_id],
            inode_num,
            &std::fs::symlink_metadata(link)?,
            dst_len as u32,
            1,
            InodeBlocksCount::from_bytes_len(BLOCK_SIZE as u32),
            block,
            Some(xattr),
        )?;
        self.add_inode(inode_num, inode)?;

        let link_name = link.file_name().context("failed to get symlink name")?;
        self.allocate_dir_entry(arena, parent, inode_num, InodeType::Symlink, link_name)?;

        Ok(())
    }

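    /// Copies the host directory tree rooted at `src_dir` into the filesystem, with
    /// `src_dir` itself becoming the root directory.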
    pub(crate) fn copy_dirtree<P: AsRef<Path>>(
        &mut self,
        arena: &'a Arena<'a>,
        src_dir: P,
    ) -> Result<()> {
        let root_inode_num = InodeNum::new(2).expect("2 is a valid inode number");
        let group_id = self.group_num_for_inode(root_inode_num);
        let gm = &mut self.group_metadata[group_id];
        let inode: &mut &mut Inode = gm
            .inode_table
            .get_mut(&root_inode_num)
            .expect("root dir is not stored");
        let metadata = src_dir
            .as_ref()
            .metadata()
            .with_context(|| format!("failed to get metadata of {:?}", src_dir.as_ref()))?;
        inode.update_metadata(&metadata);

        self.copy_dirtree_rec(arena, root_inode_num, src_dir)
    }

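    /// Recursively copies the contents of the host directory `src_dir` into the
    /// directory identified by `parent_inode`.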
    fn copy_dirtree_rec<P: AsRef<Path>>(
        &mut self,
        arena: &'a Arena<'a>,
        parent_inode: InodeNum,
        src_dir: P,
    ) -> Result<()> {
        for entry in std::fs::read_dir(&src_dir)? {
            let entry = entry?;
            let ftype = entry.file_type()?;
            if ftype.is_dir() {
                if parent_inode.0 == 2
                    && entry.path().file_name() == Some(OsStr::new("lost+found"))
                {
                    info!("ext2: Ignoring the existing /lost+found directory");
                    continue;
                }
                let inode = self.allocate_inode()?;
                self.add_dir(arena, inode, parent_inode, &entry.path())
                    .with_context(|| {
                        format!(
                            "failed to add directory {:?} as inode={:?}",
                            entry.path(),
                            inode
                        )
                    })?;
                self.copy_dirtree_rec(arena, inode, entry.path())?;
            } else if ftype.is_file() {
                self.add_file(arena, parent_inode, &entry.path())
                    .with_context(|| {
                        format!(
                            "failed to add file {:?} in inode={:?}",
                            entry.path(),
                            parent_inode
                        )
                    })?;
            } else if ftype.is_symlink() {
                self.add_symlink(arena, parent_inode, &entry)?;
            } else {
                bail!("unknown file type {:?} for {:?}", ftype, entry.file_name());
            }
        }

        Ok(())
    }

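    /// Writes backup copies of the superblock and the block group descriptor table
    /// into each block group after the first.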
    pub(crate) fn copy_backup_metadata(self, arena: &'a Arena<'a>) -> Result<()> {
        for i in 1..self.sb.num_groups() as usize {
            let super_block_id = BlockId::from(self.sb.blocks_per_group * i as u32);
            let bg_desc_block_id = BlockId::from(u32::from(super_block_id) + 1);
            self.sb.block_group_nr = i as u16;
            arena.write_to_mem(super_block_id, 0, self.sb)?;
            let mut offset = 0;
            for gm in &self.group_metadata {
                arena.write_to_mem(bg_desc_block_id, offset, gm.group_desc)?;
                offset += std::mem::size_of::<BlockGroupDescriptor>();
            }
        }
        Ok(())
    }
}