extfs prediction.

Jonas 'Sortie' Termansen 2015-10-25 22:37:06 +01:00
parent 805c237a7c
commit 8bc1e6a2c0
5 changed files with 20 additions and 2 deletions


@@ -134,12 +134,18 @@ void Block::FinishWrite()
void Block::Use()
{
	// The block that was used after this one last time.
	Block* prediction = prev_block;
	Unlink();
	Prelink();
	if ( prediction )
		device->predicted_block = prediction;
}
void Block::Unlink()
{
	if ( device->predicted_block == this )
		device->predicted_block = NULL;
	(prev_block ? prev_block->next_block : device->mru_block) = next_block;
	(next_block ? next_block->prev_block : device->lru_block) = prev_block;
	size_t bin = block_id % DEVICE_HASH_LENGTH;
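The hunk above is the heart of the change: Use() moves a block to the front of the recency list, and just before relinking it records prev_block, the block that became most-recently-used right after this one last time, as the device's guess for the next request; Unlink() clears the guess if the predicted block itself leaves the list. A minimal standalone sketch of that bookkeeping, as I read the hunk, with simplified stand-in types rather than the real Block/Device classes:

struct Node
{
	Node* prev; // toward the most-recently-used end
	Node* next; // toward the least-recently-used end
};

struct RecencyList
{
	Node* mru = nullptr;
	Node* lru = nullptr;
	Node* predicted = nullptr; // guess for the next lookup

	void Unlink(Node* n)
	{
		if ( predicted == n )
			predicted = nullptr;
		(n->prev ? n->prev->next : mru) = n->next;
		(n->next ? n->next->prev : lru) = n->prev;
	}

	void Prelink(Node* n) // insert at the most-recently-used end
	{
		n->prev = nullptr;
		n->next = mru;
		if ( mru )
			mru->prev = n;
		mru = n;
		if ( !lru )
			lru = n;
	}

	void Use(Node* n)
	{
		// n->prev is whatever became MRU right after n last time,
		// i.e. "the block that was used after this one last time".
		Node* prediction = n->prev;
		Unlink(n);
		Prelink(n);
		if ( prediction )
			predicted = prediction;
	}
};

With this in place, a repeated sequential scan settles into a state where each Use() predicts exactly the block the next lookup will ask for.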


@@ -48,6 +48,7 @@ Device::Device(int fd, const char* path, uint32_t block_size, bool write)
	this->dirty_block = NULL;
	for ( size_t i = 0; i < DEVICE_HASH_LENGTH; i++ )
		hash_blocks[i] = NULL;
	this->predicted_block = NULL;
	struct stat st;
	fstat(fd, &st);
	this->device_size = st.st_size;
@@ -154,10 +155,15 @@ Block* Device::GetBlockZeroed(uint32_t block_id)
Block* Device::GetCachedBlock(uint32_t block_id)
{
	Block* prediction = predicted_block;
	// TODO: Need to profile how accurate this is. Should speed up repeated
	//       big sequential reads.
	if ( predicted_block && prediction->block_id == block_id )
		return prediction->Refer(), prediction->Use(), prediction;
	size_t bin = block_id % DEVICE_HASH_LENGTH;
	for ( Block* iter = hash_blocks[bin]; iter; iter = iter->next_hashed )
		if ( iter->block_id == block_id )
			return iter->Refer(), iter;
			return iter->Refer(), iter->Use(), iter;
	return NULL;
}
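On the lookup side, GetCachedBlock() now consults the predicted block before walking the hash bucket, and a hit on either path goes through Use() so the recency list and the prediction stay current. Roughly, the fast path behaves like this sketch (simplified, hypothetical names; reference counting via Refer() and the Use() calls are omitted):

#include <cstddef>
#include <cstdint>

struct Block
{
	uint32_t block_id;
	Block* next_hashed;
};

const size_t HASH_LENGTH = 256;
Block* hash_blocks[HASH_LENGTH];
Block* predicted_block;

Block* GetCachedBlock(uint32_t block_id)
{
	// Fast path: check the guess left behind by the previous Use() before
	// touching the hash table at all.
	if ( predicted_block && predicted_block->block_id == block_id )
		return predicted_block;
	// Slow path: the ordinary hash-bucket walk.
	size_t bin = block_id % HASH_LENGTH;
	for ( Block* iter = hash_blocks[bin]; iter; iter = iter->next_hashed )
		if ( iter->block_id == block_id )
			return iter;
	return nullptr;
}

The profiling TODO is the caveat: for non-sequential access the guess presumably rarely matches, and the fast path then costs one extra pointer compare per lookup.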


@@ -39,6 +39,7 @@ public:
	Block* lru_block;
	Block* dirty_block;
	Block* hash_blocks[DEVICE_HASH_LENGTH];
	Block* predicted_block;
	off_t device_size;
	const char* path;
	uint32_t block_size;


@@ -173,7 +173,7 @@ Inode* Filesystem::GetInode(uint32_t inode_id)
	size_t bin = inode_id % INODE_HASH_LENGTH;
	for ( Inode* iter = hash_inodes[bin]; iter; iter = iter->next_hashed )
		if ( iter->inode_id == inode_id )
			return iter->Refer(), iter;
			return iter->Refer(), iter->Use(), iter;
	uint32_t group_id = (inode_id-1) / sb->s_inodes_per_group;
	uint32_t tabel_index = (inode_id-1) % sb->s_inodes_per_group;


@@ -350,6 +350,7 @@ void Inode::Truncate(uint64_t new_size)
	if ( partial )
	{
		Block* partial_block = GetBlock(new_num_blocks-1);
		// TODO: This assumes GetBlock doesn't fail!
		uint8_t* data = partial_block->block_data;
		partial_block->BeginWrite();
		memset(data + partial, 0, filesystem->block_size - partial);
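The new TODO flags that this path dereferences the result of GetBlock() without a check; the commit only records the assumption. A guard, if one were added later, might read like the fragment below (a sketch against the same lines, assuming Truncate stays void):

	Block* partial_block = GetBlock(new_num_blocks-1);
	if ( !partial_block )
		return; // sketch only: a real fix needs a way for Truncate to report failure
	uint8_t* data = partial_block->block_data;
	partial_block->BeginWrite();
	memset(data + partial, 0, filesystem->block_size - partial);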
@@ -823,6 +824,10 @@ ssize_t Inode::WriteAt(const uint8_t* buf, size_t s_count, off_t o_offset)
	uint64_t block_id = offset / filesystem->block_size;
	uint32_t block_offset = offset % filesystem->block_size;
	uint32_t block_left = filesystem->block_size - block_offset;
	// TODO: If we're going to rewrite the whole block, invent a new variant
	//       of GetBlock that doesn't zero new blocks, but gets an
	//       uninitialized block that we promise to initialize. This will
	//       save a zeroing write for every new block allocated here.
	Block* block = GetBlock(block_id);
	if ( !block )
		return sofar ? (ssize_t) sofar : -1;
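The long TODO in this last hunk describes the intended follow-up: GetBlock() zero-fills newly allocated blocks, which is wasted work when the caller is about to overwrite the whole block anyway. Nothing in this commit adds such a variant; if it existed, the call site might look roughly like the sketch below. GetBlockUninitialized() is a hypothetical name, and I am assuming sofar counts the bytes already written out of the s_count request, as the return statement above suggests.

	// Hypothetical sketch of the TODO: take an uninitialized block when the
	// write will overwrite it completely, and fall back to the zero-filling
	// GetBlock() otherwise. GetBlockUninitialized() does not exist in this tree.
	bool whole_block = block_offset == 0 && s_count - sofar >= block_left;
	Block* block = whole_block ? GetBlockUninitialized(block_id) : GetBlock(block_id);
	if ( !block )
		return sofar ? (ssize_t) sofar : -1;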