Am 12.06.2024 um 14:43 hat Amjad Alsharafi geschrieben: > Added several tests to verify the implementation of the vvfat driver. > > We needed a way to interact with it, so we created a basic `fat16.py` driver > that handles writing correct sectors for us. > > Added `vvfat` to the non-generic formats, as it's not a normal image format. > > Signed-off-by: Amjad Alsharafi <amjadsharaf...@gmail.com>
> + def truncate_file( > + self, > + entry: FatDirectoryEntry, > + new_size: int, > + allocate_non_continuous: bool = False, > + ): > + """ > + Truncate the file at the given path to the new size. > + """ > + if entry is None: > + return Exception("entry is None") > + if entry.attributes & 0x10 != 0: > + raise Exception(f"{entry.whole_name()} is a directory") > + > + def clusters_from_size(size: int): > + return ( > + size + self.boot_sector.cluster_bytes() - 1 > + ) // self.boot_sector.cluster_bytes() > + > + # First, allocate new FATs if we need to > + required_clusters = clusters_from_size(new_size) > + current_clusters = clusters_from_size(entry.size_bytes) > + > + affected_clusters = set() > + > + # Keep at least one cluster, easier to manage this way > + if required_clusters == 0: > + required_clusters = 1 > + if current_clusters == 0: > + current_clusters = 1 > + > + if required_clusters > current_clusters: > + # Allocate new clusters > + cluster = entry.cluster > + to_add = required_clusters > + for _ in range(current_clusters - 1): > + to_add -= 1 > + cluster = self.next_cluster(cluster) > + assert required_clusters > 0, "No new clusters to allocate" > + assert cluster is not None, "Cluster is None" > + assert ( > + self.next_cluster(cluster) is None > + ), "Cluster is not the last cluster" > + > + # Allocate new clusters > + for _ in range(to_add - 1): > + new_cluster = self.next_free_cluster() > + if allocate_non_continuous: > + new_cluster = self.next_free_cluster_non_continuous() The normal self.next_free_cluster() could be in an else branch. No reason to search for a free cluster when you immediately overwrite it anyway. 
> + self.write_fat_entry(cluster, new_cluster) > + self.write_fat_entry(new_cluster, 0xFFFF) > + cluster = new_cluster > + > + elif required_clusters < current_clusters: > + # Truncate the file > + cluster = entry.cluster > + for _ in range(required_clusters - 1): > + cluster = self.next_cluster(cluster) > + assert cluster is not None, "Cluster is None" > + > + next_cluster = self.next_cluster(cluster) > + # mark last as EOF > + self.write_fat_entry(cluster, 0xFFFF) > + # free the rest > + while next_cluster is not None: > + cluster = next_cluster > + next_cluster = self.next_cluster(next_cluster) > + self.write_fat_entry(cluster, 0) > + > + self.flush_fats() > + > + # verify number of clusters > + cluster = entry.cluster > + count = 0 > + while cluster is not None: > + count += 1 > + affected_clusters.add(cluster) > + cluster = self.next_cluster(cluster) > + assert ( > + count == required_clusters > + ), f"Expected {required_clusters} clusters, got {count}" > + > + # update the size > + entry.size_bytes = new_size > + self.update_direntry(entry) > + > + # trigger every affected cluster > + for cluster in affected_clusters: > + first_sector = self.boot_sector.first_sector_of_cluster(cluster) > + first_sector_data = self.read_sectors(first_sector, 1) > + self.write_sectors(first_sector, first_sector_data) Other than this, the patch looks good to me and we seem to test all the cases that are fixed by the previous patches. Reviewed-by: Kevin Wolf <kw...@redhat.com> Tested-by: Kevin Wolf <kw...@redhat.com> Kevin