Add shared memory.

Mirror of https://gitlab.com/sortix/sortix.git
Commit ee1453ad52 (parent 26db34b8ac)
22 changed files with 584 additions and 184 deletions
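The user-visible effect of this commit is that mmap(2) now accepts MAP_SHARED, including anonymous shared mappings that remain shared across fork(). The following is a minimal userspace sketch of that behaviour, modelled on the regress/test-mmap-anon-shared.c test added at the end of this diff; the message strings and plain error handling here are illustrative and not part of the commit itself.

```cpp
#include <sys/mman.h>
#include <sys/wait.h>

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	size_t pagesize = getpagesize();

	// Anonymous shared mapping: parent and child see the same pages.
	char* shared = (char*) mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
	                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if ( shared == MAP_FAILED )
		return 1;

	pid_t child = fork();
	if ( child < 0 )
		return 1;
	if ( child == 0 )
	{
		// The child's write is visible to the parent because the mapping
		// is shared rather than copy-on-write.
		snprintf(shared, pagesize, "written by the child");
		_exit(0);
	}

	int status;
	waitpid(child, &status, 0);
	printf("parent read: %s\n", shared);

	return munmap(shared, pagesize);
}
```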
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, 2014 Jonas 'Sortie' Termansen.
 * Copyright (c) 2012, 2014, 2015, 2016 Jonas 'Sortie' Termansen.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above

@@ -55,11 +55,11 @@ static bool IsInProcessAddressSpace(Process* process)
	return current_address_space == process->addrspace;
}

static struct segment* FindSegment(Process* process, uintptr_t addr)
static Segment* FindSegment(Process* process, uintptr_t addr)
{
	for ( size_t i = 0; i < process->segments_used; i++ )
	{
		struct segment* segment = &process->segments[i];
		Segment* segment = &process->segments[i];
		if ( addr < segment->addr )
			continue;
		if ( segment->addr + segment->size <= addr )

@ -79,7 +79,7 @@ bool CopyToUser(void* userdst_ptr, const void* ksrc_ptr, size_t count)
|
|||
kthread_mutex_lock(&process->segment_lock);
|
||||
while ( count )
|
||||
{
|
||||
struct segment* segment = FindSegment(process, userdst);
|
||||
Segment* segment = FindSegment(process, userdst);
|
||||
if ( !segment || !(segment->prot & PROT_WRITE) )
|
||||
{
|
||||
errno = EFAULT;
|
||||
|
@ -109,7 +109,7 @@ bool CopyFromUser(void* kdst_ptr, const void* usersrc_ptr, size_t count)
|
|||
kthread_mutex_lock(&process->segment_lock);
|
||||
while ( count )
|
||||
{
|
||||
struct segment* segment = FindSegment(process, usersrc);
|
||||
Segment* segment = FindSegment(process, usersrc);
|
||||
if ( !segment || !(segment->prot & PROT_READ) )
|
||||
{
|
||||
errno = EFAULT;
|
||||
|
@ -157,7 +157,7 @@ bool ZeroUser(void* userdst_ptr, size_t count)
|
|||
kthread_mutex_lock(&process->segment_lock);
|
||||
while ( count )
|
||||
{
|
||||
struct segment* segment = FindSegment(process, userdst);
|
||||
Segment* segment = FindSegment(process, userdst);
|
||||
if ( !segment || !(segment->prot & PROT_WRITE) )
|
||||
{
|
||||
errno = EFAULT;
|
||||
|
@ -191,7 +191,7 @@ char* GetStringFromUser(const char* usersrc_str)
|
|||
while ( !done )
|
||||
{
|
||||
uintptr_t current_at = usersrc + result_length;
|
||||
struct segment* segment = FindSegment(process, current_at);
|
||||
Segment* segment = FindSegment(process, current_at);
|
||||
if ( !segment || !(segment->prot & PROT_READ) )
|
||||
{
|
||||
kthread_mutex_unlock(&process->segment_lock);
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <sortix/mount.h>
|
||||
#include <sortix/seek.h>
|
||||
#include <sortix/stat.h>
|
||||
#include <sortix/mman.h>
|
||||
|
||||
#include <sortix/kernel/copy.h>
|
||||
#include <sortix/kernel/descriptor.h>
|
||||
|
@ -38,6 +39,7 @@
|
|||
#include <sortix/kernel/ioctx.h>
|
||||
#include <sortix/kernel/kernel.h>
|
||||
#include <sortix/kernel/kthread.h>
|
||||
#include <sortix/kernel/memorymanagement.h>
|
||||
#include <sortix/kernel/process.h>
|
||||
#include <sortix/kernel/refcount.h>
|
||||
#include <sortix/kernel/string.h>
|
||||
|
@@ -888,4 +890,28 @@ int Descriptor::tcsetattr(ioctx_t* ctx, int actions, const struct termios* tio)
	return vnode->tcsetattr(ctx, actions, tio);
}

addr_t Descriptor::mmap(ioctx_t* ctx, off_t off)
{
	if ( off & (Page::Size() - 1) )
		return errno = EINVAL, 0;
	return vnode->mmap(ctx, off);
}

void Descriptor::munmap(ioctx_t* ctx, off_t off)
{
	assert(!(off & (Page::Size() - 1)));
	return vnode->munmap(ctx, off);
}

int Descriptor::mprotect(ioctx_t* ctx, int prot)
{
	if ( !(dflags & O_READ) )
		return errno = EACCES, -1;
	if ( (prot & (PROT_WRITE | PROT_KWRITE)) && !(dflags & O_WRITE) )
		return errno = EACCES, -1;
	if ( (prot & (PROT_WRITE | PROT_KWRITE)) && dflags & O_APPEND )
		return errno = EACCES, -1;
	return vnode->mprotect(ctx, prot);
}

} // namespace Sortix

|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2011, 2012, 2013, 2014 Jonas 'Sortie' Termansen.
|
||||
* Copyright (c) 2011, 2012, 2013, 2014, 2015, 2016 Jonas 'Sortie' Termansen.
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -276,10 +276,7 @@ uintptr_t Load(const void* file_ptr, size_t file_size, Auxiliary* aux)
|
|||
uintptr_t map_end = Page::AlignUp(pheader->p_vaddr + pheader->p_memsz);
|
||||
size_t map_size = map_end - map_start;
|
||||
|
||||
struct segment segment;
|
||||
segment.addr = map_start;
|
||||
segment.size = map_size;
|
||||
segment.prot = kprot;
|
||||
Segment segment(map_start, map_size, kprot);
|
||||
|
||||
assert(IsUserspaceSegment(&segment));
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Jonas 'Sortie' Termansen.
|
||||
* Copyright (c) 2013, 2014, 2016 Jonas 'Sortie' Termansen.
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -127,7 +127,7 @@ void BlockCache::ReleaseBlock(BlockCacheBlock* block)
|
|||
// put this block into a list of non-present blocks so we can reuse it
|
||||
// later and reallocate a physical frame for it - then we will just
|
||||
// reuse the block's meta information.
|
||||
block->information &= ~(BCACHE_USED | BCACHE_PRESENT);
|
||||
block->information = 0;
|
||||
return;
|
||||
}
|
||||
UnlinkBlock(block);
|
||||
|
@ -136,7 +136,7 @@ void BlockCache::ReleaseBlock(BlockCacheBlock* block)
|
|||
unused_block->prev_block = block;
|
||||
block->next_block = unused_block;
|
||||
block->prev_block = NULL;
|
||||
block->information &= ~BCACHE_USED;
|
||||
block->information = BCACHE_PRESENT;
|
||||
unused_block = block;
|
||||
}
|
||||
|
||||
|
@@ -493,4 +493,33 @@ bool FileCache::ChangeNumBlocks(size_t new_numblocks, bool exact)
	return true;
}

addr_t FileCache::mmap(ioctx_t* /*ctx*/, off_t off)
{
	uintmax_t block_num = off / Page::Size();
	// TODO: Technically this violates POSIX that requires that you can make
	if ( blocks_used <= block_num )
		return errno = EINVAL, 0;
	BlockCacheBlock* block = blocks[block_num];
	assert(block); // TODO: Remove.
	if ( !block )
		return errno = EINVAL, 0;
	block->information |= BCACHE_MMAP;
	uint8_t* block_data = kernel_block_cache->BlockData(block);
	addr_t virt = 0;
	int prot;
	Memory::LookUp((uintptr_t) block_data, &virt, &prot);
	assert(virt);
	// TODO: Prevent truncate() from deallocating this memory!
	return virt;
}

void FileCache::munmap(ioctx_t* /*ctx*/, off_t /*off*/)
{
}

int FileCache::mprotect(ioctx_t* /*ctx*/, int /*prot*/)
{
	return 0;
}

} // namespace Sortix

|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2012, 2013, 2014, 2015 Jonas 'Sortie' Termansen.
|
||||
* Copyright (c) 2012, 2013, 2014, 2015, 2016 Jonas 'Sortie' Termansen.
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -183,6 +183,21 @@ int File::statvfs(ioctx_t* ctx, struct statvfs* stvfs)
|
|||
return common_statvfs(ctx, stvfs, dev);
|
||||
}
|
||||
|
||||
addr_t File::mmap(ioctx_t* ctx, off_t off)
|
||||
{
|
||||
return fcache.mmap(ctx, off);
|
||||
}
|
||||
|
||||
void File::munmap(ioctx_t* ctx, off_t off)
|
||||
{
|
||||
return fcache.munmap(ctx, off);
|
||||
}
|
||||
|
||||
int File::mprotect(ioctx_t* ctx, int prot)
|
||||
{
|
||||
return fcache.mprotect(ctx, prot);
|
||||
}
|
||||
|
||||
Dir::Dir(dev_t dev, ino_t ino, uid_t owner, gid_t group, mode_t mode)
|
||||
{
|
||||
inode_type = INODE_TYPE_DIR;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2012, 2013, 2014, 2015 Jonas 'Sortie' Termansen.
|
||||
* Copyright (c) 2012, 2013, 2014, 2015, 2016 Jonas 'Sortie' Termansen.
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -49,6 +49,9 @@ public:
|
|||
virtual ssize_t tcgetblob(ioctx_t* ctx, const char* name, void* buffer,
|
||||
size_t count);
|
||||
virtual int statvfs(ioctx_t* ctx, struct statvfs* stvfs);
|
||||
virtual addr_t mmap(ioctx_t* ctx, off_t off);
|
||||
virtual void munmap(ioctx_t* ctx, off_t off);
|
||||
virtual int mprotect(ioctx_t* ctx, int prot);
|
||||
|
||||
private:
|
||||
FileCache fcache;
|
||||
|
|
|
@ -256,6 +256,9 @@ public:
|
|||
virtual pid_t tcgetsid(ioctx_t* ctx);
|
||||
virtual int tcsendbreak(ioctx_t* ctx, int duration);
|
||||
virtual int tcsetattr(ioctx_t* ctx, int actions, const struct termios* tio);
|
||||
virtual addr_t mmap(ioctx_t* ctx, off_t off);
|
||||
virtual void munmap(ioctx_t* ctx, off_t off);
|
||||
virtual int mprotect(ioctx_t* ctx, int prot);
|
||||
|
||||
private:
|
||||
bool SendMessage(Channel* channel, size_t type, void* ptr, size_t size,
|
||||
|
@ -1587,6 +1590,21 @@ int Unode::tcsetattr(ioctx_t* ctx, int actions, const struct termios* user_tio)
|
|||
return ret;
|
||||
}
|
||||
|
||||
addr_t Unode::mmap(ioctx_t* /*ctx*/, off_t /*off*/)
|
||||
{
|
||||
return errno = ENODEV, 0;
|
||||
}
|
||||
|
||||
void Unode::munmap(ioctx_t* /*ctx*/, off_t /*off*/)
|
||||
{
|
||||
errno = ENODEV;
|
||||
}
|
||||
|
||||
int Unode::mprotect(ioctx_t* /*ctx*/, int /*prot*/)
|
||||
{
|
||||
return errno = ENODEV, -1;
|
||||
}
|
||||
|
||||
bool Bootstrap(Ref<Inode>* out_root,
|
||||
Ref<Inode>* out_server,
|
||||
const struct stat* rootst)
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
|
||||
#include <sortix/timespec.h>
|
||||
|
||||
#include <sortix/kernel/decl.h>
|
||||
#include <sortix/kernel/kthread.h>
|
||||
#include <sortix/kernel/refcount.h>
|
||||
|
||||
|
@ -110,6 +111,9 @@ public:
|
|||
pid_t tcgetsid(ioctx_t* ctx);
|
||||
int tcsendbreak(ioctx_t* ctx, int duration);
|
||||
int tcsetattr(ioctx_t* ctx, int actions, const struct termios* tio);
|
||||
addr_t mmap(ioctx_t* ctx, off_t off);
|
||||
void munmap(ioctx_t* ctx, off_t off);
|
||||
int mprotect(ioctx_t* ctx, int prot);
|
||||
|
||||
private:
|
||||
Ref<Descriptor> open_elem(ioctx_t* ctx, const char* filename, int flags,
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Jonas 'Sortie' Termansen.
|
||||
* Copyright (c) 2013, 2014, 2016 Jonas 'Sortie' Termansen.
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -40,9 +40,10 @@ struct BlockCacheBlock;
|
|||
class FileCache;
|
||||
//class FileCacheBackend;
|
||||
|
||||
const uintptr_t BCACHE_PRESENT = 1 << 0;
|
||||
const uintptr_t BCACHE_USED = 1 << 1;
|
||||
const uintptr_t BCACHE_MODIFIED = 1 << 2;
|
||||
static const uintptr_t BCACHE_PRESENT = 1 << 0;
|
||||
static const uintptr_t BCACHE_USED = 1 << 1;
|
||||
static const uintptr_t BCACHE_MODIFIED = 1 << 2;
|
||||
static const uintptr_t BCACHE_MMAP = 1 << 3;
|
||||
|
||||
class BlockCache
|
||||
{
|
||||
|
@ -106,6 +107,9 @@ public:
|
|||
ssize_t pwrite(ioctx_t* ctx, const uint8_t* buf, size_t count, off_t off);
|
||||
int truncate(ioctx_t* ctx, off_t length);
|
||||
off_t lseek(ioctx_t* ctx, off_t offset, int whence);
|
||||
addr_t mmap(ioctx_t* ctx, off_t off);
|
||||
void munmap(ioctx_t* ctx, off_t off);
|
||||
int mprotect(ioctx_t* ctx, int prot);
|
||||
//bool ChangeBackend(FileCacheBackend* backend, bool sync_old);
|
||||
off_t GetFileSize();
|
||||
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
|
||||
#include <sortix/timespec.h>
|
||||
|
||||
#include <sortix/kernel/decl.h>
|
||||
#include <sortix/kernel/refcount.h>
|
||||
|
||||
struct dirent;
|
||||
|
@ -114,6 +115,9 @@ public:
|
|||
virtual pid_t tcgetsid(ioctx_t* ctx) = 0;
|
||||
virtual int tcsendbreak(ioctx_t* ctx, int duration) = 0;
|
||||
virtual int tcsetattr(ioctx_t* ctx, int actions, const struct termios* tio) = 0;
|
||||
virtual addr_t mmap(ioctx_t* ctx, off_t off) = 0;
|
||||
virtual void munmap(ioctx_t* ctx, off_t off) = 0;
|
||||
virtual int mprotect(ioctx_t* ctx, int prot) = 0;
|
||||
|
||||
};
|
||||
|
||||
|
@ -207,6 +211,9 @@ public:
|
|||
virtual pid_t tcgetsid(ioctx_t* ctx);
|
||||
virtual int tcsendbreak(ioctx_t* ctx, int duration);
|
||||
virtual int tcsetattr(ioctx_t* ctx, int actions, const struct termios* tio);
|
||||
virtual addr_t mmap(ioctx_t* ctx, off_t off);
|
||||
virtual void munmap(ioctx_t* ctx, off_t off);
|
||||
virtual int mprotect(ioctx_t* ctx, int prot);
|
||||
|
||||
};
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2011, 2012, 2013, 2014 Jonas 'Sortie' Termansen.
|
||||
* Copyright (c) 2011, 2012, 2013, 2014, 2015, 2016 Jonas 'Sortie' Termansen.
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -46,11 +46,9 @@ class Descriptor;
|
|||
class DescriptorTable;
|
||||
class MountTable;
|
||||
class ProcessTable;
|
||||
struct ProcessSegment;
|
||||
struct ProcessTimer;
|
||||
struct ioctx_struct;
|
||||
typedef struct ioctx_struct ioctx_t;
|
||||
struct segment;
|
||||
|
||||
class Process
|
||||
{
|
||||
|
@ -139,7 +137,7 @@ public:
|
|||
bool threads_exiting;
|
||||
|
||||
public:
|
||||
struct segment* segments;
|
||||
Segment* segments;
|
||||
size_t segments_used;
|
||||
size_t segments_length;
|
||||
kthread_mutex_t segment_write_lock;
|
||||
|
@ -170,8 +168,8 @@ public:
|
|||
void AddChildProcess(Process* child);
|
||||
void ScheduleDeath();
|
||||
void AbortConstruction();
|
||||
bool MapSegment(struct segment* result, void* hint, size_t size, int flags,
|
||||
int prot);
|
||||
bool MapSegment(struct segment_location* result, void* hint, size_t size,
|
||||
int flags, int prot);
|
||||
|
||||
public:
|
||||
Process* Fork();
|
||||
|
|
|
@ -55,7 +55,7 @@ public:
|
|||
Ref(const Ref<T>& r) : obj(r.Get()) { if ( obj ) obj->Refer_Renamed(); }
|
||||
template <class U>
|
||||
Ref(const Ref<U>& r) : obj(r.Get()) { if ( obj ) obj->Refer_Renamed(); }
|
||||
~Ref() { if ( obj ) obj->Unref_Renamed(); }
|
||||
~Ref() { if ( obj ) obj->Unref_Renamed(); obj = NULL; }
|
||||
|
||||
Ref& operator=(const Ref r)
|
||||
{
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Jonas 'Sortie' Termansen.
|
||||
* Copyright (c) 2013, 2016 Jonas 'Sortie' Termansen.
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@@ -23,21 +23,35 @@
#include <stddef.h>
#include <stdint.h>

#include <sortix/kernel/descriptor.h>

namespace Sortix {

class Process;

struct segment
struct segment_location
{
	uintptr_t addr;
	size_t size;
};

class Segment : public segment_location
{
public:
	Segment() { } // For operator new[].
	Segment(uintptr_t addr, size_t size, int prot) :
		segment_location({addr, size}), prot(prot), desc(NULL), offset(0) { }
	Segment(uintptr_t addr, size_t size, int prot, Ref<Descriptor> desc, off_t offset) :
		segment_location({addr, size}), prot(prot), desc(desc), offset(offset) { }
	int prot;
	Ref<Descriptor> desc;
	off_t offset;
};

static inline int segmentcmp(const void* a_ptr, const void* b_ptr)
{
	const struct segment* a = (const struct segment*) a_ptr;
	const struct segment* b = (const struct segment*) b_ptr;
	const Segment* a = (const Segment*) a_ptr;
	const Segment* b = (const Segment*) b_ptr;
	return a->addr < b->addr ? -1 :
	       b->addr < a->addr ? 1 :
	       a->size < b->size ? -1 :

@@ -45,13 +59,17 @@ static inline int segmentcmp(const void* a_ptr, const void* b_ptr)
	       0 ;
}

bool AreSegmentsOverlapping(const struct segment* a, const struct segment* b);
bool IsUserspaceSegment(const struct segment* segment);
struct segment* FindOverlappingSegment(Process* process, const struct segment* new_segment);
bool IsSegmentOverlapping(Process* process, const struct segment* new_segment);
bool AddSegment(Process* process, const struct segment* new_segment);
bool PlaceSegment(struct segment* solution, Process* process, void* addr_ptr,
                  size_t size, int flags);
bool AreSegmentsOverlapping(const struct segment_location* a,
                            const struct segment_location* b);
bool IsUserspaceSegment(const Segment* segment);
Segment* FindOverlappingSegment(Process* process,
                                const struct segment_location* location);
bool IsSegmentOverlapping(Process* process, const segment_location* location);
bool AddSegment(Process* process, const Segment* new_segment);
bool PlaceSegment(struct segment_location* solution, Process* process,
                  void* addr_ptr, size_t size, int flags);
void UnmapSegment(Segment* segment);
void UnmapSegmentRange(Segment* segment, size_t offset, size_t size);

} // namespace Sortix

@ -26,6 +26,7 @@
|
|||
|
||||
#include <sortix/timespec.h>
|
||||
|
||||
#include <sortix/kernel/decl.h>
|
||||
#include <sortix/kernel/refcount.h>
|
||||
|
||||
struct dirent;
|
||||
|
@ -107,6 +108,9 @@ public:
|
|||
pid_t tcgetsid(ioctx_t* ctx);
|
||||
int tcsendbreak(ioctx_t* ctx, int duration);
|
||||
int tcsetattr(ioctx_t* ctx, int actions, const struct termios* tio);
|
||||
addr_t mmap(ioctx_t* ctx, off_t off);
|
||||
void munmap(ioctx_t* ctx, off_t off);
|
||||
int mprotect(ioctx_t* ctx, int prot);
|
||||
|
||||
public /*TODO: private*/:
|
||||
Ref<Inode> inode;
|
||||
|
|
|
@ -443,4 +443,19 @@ int AbstractInode::tcsetattr(ioctx_t* /*ctx*/, int /*actions*/, const struct ter
|
|||
return errno = ENOTTY, -1;
|
||||
}
|
||||
|
||||
addr_t AbstractInode::mmap(ioctx_t* /*ctx*/, off_t /*off*/)
|
||||
{
|
||||
return errno = ENODEV, 0;
|
||||
}
|
||||
|
||||
void AbstractInode::munmap(ioctx_t* /*ctx*/, off_t /*off*/)
|
||||
{
|
||||
errno = ENODEV;
|
||||
}
|
||||
|
||||
int AbstractInode::mprotect(ioctx_t* /*ctx*/, int /*prot*/)
|
||||
{
|
||||
return errno = ENODEV, -1;
|
||||
}
|
||||
|
||||
} // namespace Sortix
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2011, 2012, 2013, 2015 Jonas 'Sortie' Termansen.
|
||||
* Copyright (c) 2011, 2012, 2013, 2015, 2016 Jonas 'Sortie' Termansen.
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -25,17 +25,22 @@
|
|||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include <sortix/fcntl.h>
|
||||
#include <sortix/mman.h>
|
||||
#include <sortix/seek.h>
|
||||
|
||||
#include <sortix/kernel/copy.h>
|
||||
#include <sortix/kernel/descriptor.h>
|
||||
#include <sortix/kernel/inode.h>
|
||||
#include <sortix/kernel/ioctx.h>
|
||||
#include <sortix/kernel/kernel.h>
|
||||
#include <sortix/kernel/memorymanagement.h>
|
||||
#include <sortix/kernel/process.h>
|
||||
#include <sortix/kernel/segment.h>
|
||||
#include <sortix/kernel/syscall.h>
|
||||
#include <sortix/kernel/vnode.h>
|
||||
|
||||
#include "fs/kram.h"
|
||||
|
||||
namespace Sortix {
|
||||
|
||||
|
@ -69,54 +74,56 @@ void UnmapMemory(Process* process, uintptr_t addr, size_t size)
|
|||
if ( !size )
|
||||
return;
|
||||
|
||||
struct segment unmap_segment;
|
||||
unmap_segment.addr = addr;
|
||||
unmap_segment.size = size;
|
||||
unmap_segment.prot = 0;
|
||||
while ( struct segment* conflict = FindOverlappingSegment(process,
|
||||
&unmap_segment) )
|
||||
struct segment_location loc;
|
||||
loc.addr = addr;
|
||||
loc.size = size;
|
||||
while ( Segment* conflict = FindOverlappingSegment(process, &loc) )
|
||||
{
|
||||
// Delete the segment if covered entirely by our request.
|
||||
if ( addr <= conflict->addr && conflict->addr + conflict->size <= addr + size )
|
||||
{
|
||||
uintptr_t conflict_offset = (uintptr_t) conflict - (uintptr_t) process->segments;
|
||||
size_t conflict_index = conflict_offset / sizeof(struct segment);
|
||||
Memory::UnmapRange(conflict->addr, conflict->size, PAGE_USAGE_USER_SPACE);
|
||||
Memory::Flush();
|
||||
size_t conflict_index = conflict_offset / sizeof(Segment);
|
||||
UnmapSegment(conflict);
|
||||
conflict->~Segment();
|
||||
if ( conflict_index + 1 == process->segments_used )
|
||||
{
|
||||
process->segments_used--;
|
||||
continue;
|
||||
}
|
||||
process->segments[conflict_index] = process->segments[--process->segments_used];
|
||||
Segment* old = &process->segments[--process->segments_used];
|
||||
Segment* dst = &process->segments[conflict_index];
|
||||
*dst = *old;
|
||||
old->~Segment();
|
||||
// TODO: It's wrong to qsort the Segment class.
|
||||
qsort(process->segments, process->segments_used,
|
||||
sizeof(struct segment), segmentcmp);
|
||||
sizeof(Segment), segmentcmp);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Delete the middle of the segment if covered there by our request.
|
||||
if ( conflict->addr < addr && addr + size - conflict->addr <= conflict->size )
|
||||
{
|
||||
Memory::UnmapRange(addr, size, PAGE_USAGE_USER_SPACE);
|
||||
Memory::Flush();
|
||||
struct segment right_segment;
|
||||
right_segment.addr = addr + size;
|
||||
right_segment.size = conflict->addr + conflict->size - (addr + size);
|
||||
right_segment.prot = conflict->prot;
|
||||
UnmapSegmentRange(conflict, addr - conflict->addr, size);
|
||||
size_t new_addr = addr + size;
|
||||
size_t new_size = conflict->addr + conflict->size - (addr + size);
|
||||
off_t new_offset = conflict->offset + (new_addr - conflict->addr);
|
||||
Segment right_segment(new_addr, new_size, conflict->prot,
|
||||
conflict->desc, new_offset);
|
||||
conflict->size = addr - conflict->addr;
|
||||
// TODO: This shouldn't really fail as we free memory above, but
|
||||
// this code isn't really provably reliable.
|
||||
if ( !AddSegment(process, &right_segment) )
|
||||
PanicF("Unexpectedly unable to split memory mapped segment");
|
||||
Panic("Unexpectedly unable to split memory mapped segment");
|
||||
continue;
|
||||
}
|
||||
|
||||
// Delete the part of the segment covered partially from the left.
|
||||
if ( addr <= conflict->addr )
|
||||
{
|
||||
Memory::UnmapRange(conflict->addr, addr + size - conflict->addr, PAGE_USAGE_USER_SPACE);
|
||||
Memory::Flush();
|
||||
conflict->size = conflict->addr + conflict->size - (addr + size);
|
||||
UnmapSegmentRange(conflict, 0, addr + size - conflict->addr);
|
||||
conflict->size += conflict->addr - (addr + size);
|
||||
conflict->offset += conflict->addr - (addr + size);
|
||||
conflict->addr = addr + size;
|
||||
continue;
|
||||
}
|
||||
|
@ -124,8 +131,8 @@ void UnmapMemory(Process* process, uintptr_t addr, size_t size)
|
|||
// Delete the part of the segment covered partially from the right.
|
||||
if ( conflict->addr <= addr + size )
|
||||
{
|
||||
Memory::UnmapRange(addr, conflict->addr + conflict->size - addr, PAGE_USAGE_USER_SPACE);
|
||||
Memory::Flush();
|
||||
UnmapSegmentRange(conflict, addr - conflict->addr,
|
||||
conflict->addr + conflict->size - addr);
|
||||
conflict->size -= conflict->addr + conflict->size - addr;
|
||||
continue;
|
||||
}
|
||||
|
@ -145,24 +152,28 @@ bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot)
|
|||
// there are no gaps in that region. This is where the operation can fail as
|
||||
// the AddSegment call can run out of memory. There is no harm in splitting
|
||||
// the segments into smaller chunks.
|
||||
bool any_had_desc = false;
|
||||
for ( size_t offset = 0; offset < size; )
|
||||
{
|
||||
struct segment search_region;
|
||||
struct segment_location search_region;
|
||||
search_region.addr = addr + offset;
|
||||
search_region.size = Page::Size();
|
||||
search_region.prot = prot;
|
||||
struct segment* segment = FindOverlappingSegment(process, &search_region);
|
||||
Segment* segment = FindOverlappingSegment(process, &search_region);
|
||||
|
||||
if ( !segment )
|
||||
return errno = EINVAL, false;
|
||||
|
||||
if ( segment->desc )
|
||||
any_had_desc = true;
|
||||
|
||||
// Split the segment into two if it begins before our search region.
|
||||
if ( segment->addr < search_region.addr )
|
||||
{
|
||||
struct segment new_segment;
|
||||
new_segment.addr = search_region.addr;
|
||||
new_segment.size = segment->addr + segment->size - new_segment.addr;
|
||||
new_segment.prot = segment->prot;
|
||||
size_t new_addr = search_region.addr;
|
||||
size_t new_size = segment->size + segment->addr - new_addr;
|
||||
size_t new_offset = segment->offset + segment->addr - new_addr;
|
||||
Segment new_segment(new_addr, new_size, segment->prot,
|
||||
segment->desc, new_offset);
|
||||
segment->size = search_region.addr - segment->addr;
|
||||
|
||||
if ( !AddSegment(process, &new_segment) )
|
||||
|
@ -177,10 +188,11 @@ bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot)
|
|||
// Split the segment into two if it ends after addr + size.
|
||||
if ( size < segment->addr + segment->size - addr )
|
||||
{
|
||||
struct segment new_segment;
|
||||
new_segment.addr = addr + size;
|
||||
new_segment.size = segment->addr + segment->size - new_segment.addr;
|
||||
new_segment.prot = segment->prot;
|
||||
size_t new_addr = addr + size;
|
||||
size_t new_size = segment->size + segment->addr - new_addr;
|
||||
size_t new_offset = segment->offset + segment->addr - new_addr;
|
||||
Segment new_segment(new_addr, new_size, segment->prot,
|
||||
segment->desc, new_offset);
|
||||
segment->size = addr + size - segment->addr;
|
||||
|
||||
if ( !AddSegment(process, &new_segment) )
|
||||
|
@ -195,15 +207,30 @@ bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot)
|
|||
offset += segment->size;
|
||||
}
|
||||
|
||||
// Verify that any backing files allow the new protection.
|
||||
ioctx_t ctx; SetupUserIOCtx(&ctx);
|
||||
for ( size_t offset = 0; any_had_desc && offset < size; )
|
||||
{
|
||||
struct segment_location search_region;
|
||||
search_region.addr = addr + offset;
|
||||
search_region.size = Page::Size();
|
||||
Segment* segment = FindOverlappingSegment(process, &search_region);
|
||||
assert(segment);
|
||||
|
||||
if ( segment->prot != prot &&
|
||||
segment->desc &&
|
||||
segment->desc->mprotect(&ctx, prot) < 0 )
|
||||
return false;
|
||||
}
|
||||
|
||||
// Run through all the segments in the region [addr, addr+size) and change
|
||||
// the permissions and update the permissions of the virtual memory itself.
|
||||
for ( size_t offset = 0; offset < size; )
|
||||
{
|
||||
struct segment search_region;
|
||||
struct segment_location search_region;
|
||||
search_region.addr = addr + offset;
|
||||
search_region.size = Page::Size();
|
||||
search_region.prot = prot;
|
||||
struct segment* segment = FindOverlappingSegment(process, &search_region);
|
||||
Segment* segment = FindOverlappingSegment(process, &search_region);
|
||||
assert(segment);
|
||||
|
||||
if ( segment->prot != prot )
|
||||
|
@ -234,10 +261,7 @@ bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot)
|
|||
|
||||
UnmapMemory(process, addr, size);
|
||||
|
||||
struct segment new_segment;
|
||||
new_segment.addr = addr;
|
||||
new_segment.size = size;
|
||||
new_segment.prot = prot;
|
||||
Segment new_segment(addr, size, prot);
|
||||
|
||||
if ( !MapRange(new_segment.addr, new_segment.size, new_segment.prot, PAGE_USAGE_USER_SPACE) )
|
||||
return false;
|
||||
|
@ -245,8 +269,7 @@ bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot)
|
|||
|
||||
if ( !AddSegment(process, &new_segment) )
|
||||
{
|
||||
UnmapRange(new_segment.addr, new_segment.size, PAGE_USAGE_USER_SPACE);
|
||||
Memory::Flush();
|
||||
UnmapSegment(&new_segment);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -291,9 +314,6 @@ void* sys_mmap(void* addr_ptr, size_t size, int prot, int flags, int fd,
|
|||
// Verify that MAP_PRIVATE and MAP_SHARED are not both set.
|
||||
if ( bool(flags & MAP_PRIVATE) == bool(flags & MAP_SHARED) )
|
||||
return errno = EINVAL, MAP_FAILED;
|
||||
// TODO: MAP_SHARED is not currently supported.
|
||||
if ( flags & MAP_SHARED )
|
||||
return errno = EINVAL, MAP_FAILED;
|
||||
// Verify the file descriptor and the offset is suitably set if needed.
|
||||
if ( !(flags & MAP_ANONYMOUS) &&
|
||||
(fd < 0 || offset < 0 || (offset & (Page::Size()-1))) )
|
||||
|
@ -322,82 +342,174 @@ void* sys_mmap(void* addr_ptr, size_t size, int prot, int flags, int fd,
|
|||
// Verify whether the backing file is usable for memory mapping.
|
||||
ioctx_t ctx; SetupUserIOCtx(&ctx);
|
||||
Ref<Descriptor> desc;
|
||||
if ( !(flags & MAP_ANONYMOUS) )
|
||||
if ( flags & MAP_ANONYMOUS )
|
||||
{
|
||||
// Create an unnamed ramfs file to back this memory mapping.
|
||||
if ( flags & MAP_SHARED )
|
||||
{
|
||||
Ref<Inode> inode(new KRAMFS::File(INODE_TYPE_FILE, S_IFREG, 0, 0,
|
||||
ctx.uid, ctx.gid, 0600));
|
||||
if ( !inode )
|
||||
return MAP_FAILED;
|
||||
Ref<Vnode> vnode(new Vnode(inode, Ref<Vnode>(), 0, 0));
|
||||
inode.Reset();
|
||||
if ( !vnode )
|
||||
return MAP_FAILED;
|
||||
desc = Ref<Descriptor>(new Descriptor(vnode, O_READ | O_WRITE));
|
||||
vnode.Reset();
|
||||
if ( !desc )
|
||||
return MAP_FAILED;
|
||||
if ( (uintmax_t) OFF_MAX < (uintmax_t) size )
|
||||
return errno = EOVERFLOW, MAP_FAILED;
|
||||
if ( desc->truncate(&ctx, size) < 0 )
|
||||
return MAP_FAILED;
|
||||
offset = 0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if ( !(desc = process->GetDescriptor(fd)) )
|
||||
return MAP_FAILED;
|
||||
// Verify that the file is seekable.
|
||||
if ( desc->lseek(&ctx, 0, SEEK_CUR) < 0 )
|
||||
return errno = ENODEV, MAP_FAILED;
|
||||
// Verify that we have read access to the file.
|
||||
if ( desc->read(&ctx, NULL, 0) != 0 )
|
||||
return errno = EACCES, MAP_FAILED;
|
||||
// Verify that we have write access to the file if needed.
|
||||
if ( (prot & PROT_WRITE) && !(flags & MAP_PRIVATE) &&
|
||||
desc->write(&ctx, NULL, 0) != 0 )
|
||||
return errno = EACCES, MAP_FAILED;
|
||||
// Verify if going through the inode mmap interface.
|
||||
if ( flags & MAP_SHARED )
|
||||
{
|
||||
if ( desc->mprotect(&ctx, prot) < 0 )
|
||||
return MAP_FAILED;
|
||||
}
|
||||
// Verify if not going through the inode mmap interface.
|
||||
else if ( flags & MAP_PRIVATE )
|
||||
{
|
||||
// Verify that the file is seekable.
|
||||
if ( desc->lseek(&ctx, 0, SEEK_CUR) < 0 )
|
||||
return errno = ENODEV, MAP_FAILED;
|
||||
// Verify that we have read access to the file.
|
||||
if ( desc->read(&ctx, NULL, 0) != 0 )
|
||||
return errno = EACCES, MAP_FAILED;
|
||||
// Verify that we have write access to the file if needed.
|
||||
if ( (prot & PROT_WRITE) && !(flags & MAP_PRIVATE) &&
|
||||
desc->write(&ctx, NULL, 0) != 0 )
|
||||
return errno = EACCES, MAP_FAILED;
|
||||
}
|
||||
}
|
||||
|
||||
if ( prot & PROT_READ )
|
||||
prot |= PROT_KREAD;
|
||||
if ( prot & PROT_WRITE )
|
||||
prot |= PROT_KWRITE;
|
||||
if ( flags & MAP_PRIVATE )
|
||||
prot |= PROT_FORK;
|
||||
|
||||
ScopedLock lock1(&process->segment_write_lock);
|
||||
ScopedLock lock2(&process->segment_lock);
|
||||
|
||||
// Determine where to put the new segment and its protection.
|
||||
struct segment new_segment;
|
||||
struct segment_location location;
|
||||
if ( flags & MAP_FIXED )
|
||||
new_segment.addr = aligned_addr,
|
||||
new_segment.size = aligned_size;
|
||||
else if ( !PlaceSegment(&new_segment, process, (void*) addr, aligned_size, flags) )
|
||||
return errno = ENOMEM, MAP_FAILED;
|
||||
new_segment.prot = PROT_KWRITE | PROT_FORK;
|
||||
|
||||
// Allocate a memory segment with the desired properties.
|
||||
if ( !Memory::MapMemory(process, new_segment.addr, new_segment.size, new_segment.prot) )
|
||||
return MAP_FAILED;
|
||||
|
||||
// The pread will copy to user-space right requires this lock to be free.
|
||||
lock2.Reset();
|
||||
|
||||
// Read the file contents into the newly allocated memory.
|
||||
if ( !(flags & MAP_ANONYMOUS) )
|
||||
{
|
||||
ioctx_t kctx; SetupKernelIOCtx(&kctx);
|
||||
for ( size_t so_far = 0; so_far < aligned_size; )
|
||||
location.addr = aligned_addr;
|
||||
location.size = aligned_size;
|
||||
}
|
||||
else if ( !PlaceSegment(&location, process, (void*) addr, aligned_size, flags) )
|
||||
return errno = ENOMEM, MAP_FAILED;
|
||||
|
||||
if ( flags & MAP_SHARED )
|
||||
{
|
||||
assert(desc);
|
||||
|
||||
Memory::UnmapMemory(process, location.addr, location.size);
|
||||
|
||||
Segment new_segment(location.addr, 0, prot, desc, offset);
|
||||
|
||||
while ( new_segment.size < location.size )
|
||||
{
|
||||
uint8_t* ptr = (uint8_t*) (new_segment.addr + so_far);
|
||||
size_t left = aligned_size - so_far;
|
||||
off_t pos = offset + so_far;
|
||||
ssize_t num_bytes = desc->pread(&kctx, ptr, left, pos);
|
||||
if ( num_bytes < 0 )
|
||||
off_t offset;
|
||||
if ( __builtin_add_overflow(new_segment.offset, new_segment.size,
|
||||
&offset) )
|
||||
{
|
||||
// TODO: How should this situation be handled? For now we'll
|
||||
// just ignore the error condition.
|
||||
errno = 0;
|
||||
break;
|
||||
errno = EOVERFLOW;
|
||||
Memory::Flush();
|
||||
UnmapSegment(&new_segment);
|
||||
return MAP_FAILED;
|
||||
}
|
||||
if ( !num_bytes )
|
||||
assert(!(offset & (Page::Size() - 1)));
|
||||
|
||||
addr_t addr = desc->mmap(&ctx, offset);
|
||||
if ( !addr )
|
||||
{
|
||||
// We got an unexpected early end-of-file condition, but that's
|
||||
// alright as the MapMemory call zero'd the new memory and we
|
||||
// are expected to zero the remainder.
|
||||
break;
|
||||
Memory::Flush();
|
||||
UnmapSegment(&new_segment);
|
||||
return MAP_FAILED;
|
||||
}
|
||||
so_far += num_bytes;
|
||||
uintptr_t virt = location.addr + new_segment.size;
|
||||
|
||||
if ( !Memory::Map(addr, virt, prot) )
|
||||
{
|
||||
desc->munmap(&ctx, offset);
|
||||
Memory::Flush();
|
||||
UnmapSegment(&new_segment);
|
||||
return MAP_FAILED;
|
||||
}
|
||||
|
||||
new_segment.size += Page::Size();
|
||||
}
|
||||
Memory::Flush();
|
||||
|
||||
if ( !AddSegment(process, &new_segment) )
|
||||
{
|
||||
UnmapSegment(&new_segment);
|
||||
return MAP_FAILED;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
int first_prot = flags & MAP_ANONYMOUS ? prot : PROT_KWRITE | PROT_FORK;
|
||||
Segment new_segment(location.addr, location.size, first_prot);
|
||||
|
||||
// Allocate a memory segment with the desired properties.
|
||||
if ( !Memory::MapMemory(process, new_segment.addr, new_segment.size,
|
||||
new_segment.prot) )
|
||||
return MAP_FAILED;
|
||||
|
||||
// Read the file contents into the newly allocated memory.
|
||||
if ( !(flags & MAP_ANONYMOUS) )
|
||||
{
|
||||
// The pread will copy to user-space right requires this lock to be
|
||||
// free.
|
||||
lock2.Reset();
|
||||
|
||||
ioctx_t kctx; SetupKernelIOCtx(&kctx);
|
||||
for ( size_t so_far = 0; so_far < aligned_size; )
|
||||
{
|
||||
uint8_t* ptr = (uint8_t*) (new_segment.addr + so_far);
|
||||
size_t left = aligned_size - so_far;
|
||||
off_t pos = offset + so_far;
|
||||
ssize_t num_bytes = desc->pread(&kctx, ptr, left, pos);
|
||||
if ( num_bytes < 0 )
|
||||
{
|
||||
// TODO: How should this situation be handled? For now we'll
|
||||
// just ignore the error condition.
|
||||
errno = 0;
|
||||
break;
|
||||
}
|
||||
if ( !num_bytes )
|
||||
{
|
||||
// We got an unexpected early end-of-file condition, but
|
||||
// that's alright as the MapMemory call zero'd the new
|
||||
// memory and we are expected to zero the remainder.
|
||||
break;
|
||||
}
|
||||
so_far += num_bytes;
|
||||
}
|
||||
|
||||
// Finally switch to the desired page protections.
|
||||
kthread_mutex_lock(&process->segment_lock);
|
||||
Memory::ProtectMemory(CurrentProcess(), new_segment.addr,
|
||||
new_segment.size, prot);
|
||||
kthread_mutex_unlock(&process->segment_lock);
|
||||
}
|
||||
}
|
||||
|
||||
// Finally switch to the desired page protections.
|
||||
kthread_mutex_lock(&process->segment_lock);
|
||||
if ( prot & PROT_READ )
|
||||
prot |= PROT_KREAD;
|
||||
if ( prot & PROT_WRITE )
|
||||
prot |= PROT_KWRITE;
|
||||
prot |= PROT_FORK;
|
||||
Memory::ProtectMemory(CurrentProcess(), new_segment.addr, new_segment.size, prot);
|
||||
kthread_mutex_unlock(&process->segment_lock);
|
||||
|
||||
lock1.Reset();
|
||||
|
||||
return (void*) new_segment.addr;
|
||||
return (void*) location.addr;
|
||||
}
|
||||
|
||||
int sys_mprotect(const void* addr_ptr, size_t size, int prot)
|
||||
|
|
|
@ -259,6 +259,7 @@ ssize_t StreamSocket::recv(ioctx_t* ctx, uint8_t* buf, size_t count,
|
|||
ScopedLock lock(&socket_lock);
|
||||
if ( !is_connected )
|
||||
return errno = ENOTCONN, -1;
|
||||
lock.Reset();
|
||||
return incoming.read(ctx, buf, count);
|
||||
}
|
||||
|
||||
|
@ -268,6 +269,7 @@ ssize_t StreamSocket::send(ioctx_t* ctx, const uint8_t* buf, size_t count,
|
|||
ScopedLock lock(&socket_lock);
|
||||
if ( !is_connected )
|
||||
return errno = ENOTCONN, -1;
|
||||
lock.Reset();
|
||||
return outgoing.write(ctx, buf, count);
|
||||
}
|
||||
|
||||
|
|
|
@ -366,12 +366,14 @@ void Process::ResetAddressSpace()
|
|||
assert(Memory::GetAddressSpace() == addrspace);
|
||||
|
||||
for ( size_t i = 0; i < segments_used; i++ )
|
||||
Memory::UnmapRange(segments[i].addr, segments[i].size, PAGE_USAGE_USER_SPACE);
|
||||
|
||||
Memory::Flush();
|
||||
{
|
||||
UnmapSegment(&segments[i]);
|
||||
//segments[i].~Segment();
|
||||
segments[i].desc.Reset();
|
||||
}
|
||||
|
||||
segments_used = segments_length = 0;
|
||||
free(segments);
|
||||
delete[] segments;
|
||||
segments = NULL;
|
||||
}
|
||||
|
||||
|
@ -624,25 +626,68 @@ Process* Process::Fork()
|
|||
return NULL;
|
||||
}
|
||||
|
||||
struct segment* clone_segments = NULL;
|
||||
Segment* clone_segments = NULL;
|
||||
|
||||
// Fork the segment list.
|
||||
kthread_mutex_lock(&segment_lock);
|
||||
bool segment_failure = false;
|
||||
size_t segment_failure_i = 0;
|
||||
size_t segment_failure_o = 0;
|
||||
if ( segments )
|
||||
{
|
||||
size_t segments_size = sizeof(struct segment) * segments_used;
|
||||
if ( !(clone_segments = (struct segment*) malloc(segments_size)) )
|
||||
clone_segments = new Segment[segments_used];
|
||||
if ( !clone_segments )
|
||||
{
|
||||
kthread_mutex_unlock(&segment_lock);
|
||||
delete clone;
|
||||
return NULL;
|
||||
}
|
||||
memcpy(clone_segments, segments, segments_size);
|
||||
|
||||
for ( size_t i = 0; i < segments_used; i++ )
|
||||
clone_segments[i] = segments[i];
|
||||
|
||||
ioctx_t ctx; SetupUserIOCtx(&ctx);
|
||||
for ( size_t i = 0; i < segments_used; i++ )
|
||||
{
|
||||
for ( size_t o = 0;
|
||||
clone_segments[i].desc && o < clone_segments[i].size;
|
||||
o += Page::Size() )
|
||||
{
|
||||
off_t offset = clone_segments[i].offset + o;
|
||||
if ( !clone_segments[i].desc->mmap(&ctx, offset) )
|
||||
{
|
||||
segment_failure = true;
|
||||
segment_failure_i = i;
|
||||
segment_failure_o = o;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if ( segment_failure )
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Fork address-space here and copy memory.
|
||||
clone->addrspace = Memory::Fork();
|
||||
clone->addrspace = !segment_failure? Memory::Fork() : 0;
|
||||
kthread_mutex_unlock(&segment_lock);
|
||||
if ( !clone->addrspace )
|
||||
{
|
||||
free(clone_segments);
|
||||
ioctx_t ctx; SetupUserIOCtx(&ctx);
|
||||
for ( size_t i = 0; i < segments_used; i++ )
|
||||
{
|
||||
if ( segment_failure && i <= segment_failure_i )
|
||||
break;
|
||||
for ( size_t o = 0;
|
||||
clone_segments[i].desc && o < clone_segments[i].size;
|
||||
o += Page::Size() )
|
||||
{
|
||||
if ( segment_failure && o <= segment_failure_o )
|
||||
break;
|
||||
off_t offset = clone_segments[i].offset + o;
|
||||
clone_segments[i].desc->munmap(&ctx, offset);
|
||||
}
|
||||
}
|
||||
delete[] clone_segments;
|
||||
delete clone;
|
||||
return NULL;
|
||||
}
|
||||
|
@ -744,8 +789,8 @@ void Process::ResetForExecute()
|
|||
ResetAddressSpace();
|
||||
}
|
||||
|
||||
bool Process::MapSegment(struct segment* result, void* hint, size_t size,
|
||||
int flags, int prot)
|
||||
bool Process::MapSegment(struct segment_location* result, void* hint,
|
||||
size_t size, int flags, int prot)
|
||||
{
|
||||
// process->segment_write_lock is held at this point.
|
||||
// process->segment_lock is held at this point.
|
||||
|
@ -755,7 +800,7 @@ bool Process::MapSegment(struct segment* result, void* hint, size_t size,
|
|||
|
||||
if ( !PlaceSegment(result, this, hint, size, flags) )
|
||||
return false;
|
||||
if ( !Memory::MapMemory(this, result->addr, result->size, result->prot = prot) )
|
||||
if ( !Memory::MapMemory(this, result->addr, result->size, prot) )
|
||||
{
|
||||
// The caller is expected to self-destruct in this case, so the
|
||||
// segment just created is not removed.
|
||||
|
@ -849,11 +894,11 @@ int Process::Execute(const char* programname, const uint8_t* program,
|
|||
for ( int i = 0; i < envc; i++ )
|
||||
arg_size += strlen(envp[i]) + 1;
|
||||
|
||||
struct segment arg_segment;
|
||||
struct segment stack_segment;
|
||||
struct segment raw_tls_segment;
|
||||
struct segment tls_segment;
|
||||
struct segment auxcode_segment;
|
||||
struct segment_location arg_segment;
|
||||
struct segment_location stack_segment;
|
||||
struct segment_location raw_tls_segment;
|
||||
struct segment_location tls_segment;
|
||||
struct segment_location auxcode_segment;
|
||||
|
||||
kthread_mutex_lock(&segment_write_lock);
|
||||
kthread_mutex_lock(&segment_lock);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Jonas 'Sortie' Termansen.
|
||||
* Copyright (c) 2013, 2016 Jonas 'Sortie' Termansen.
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -27,6 +27,7 @@
|
|||
#include <sortix/mman.h>
|
||||
|
||||
#include <sortix/kernel/decl.h>
|
||||
#include <sortix/kernel/ioctx.h>
|
||||
#include <sortix/kernel/kernel.h>
|
||||
#include <sortix/kernel/memorymanagement.h>
|
||||
#include <sortix/kernel/process.h>
|
||||
|
@ -35,12 +36,13 @@
|
|||
|
||||
namespace Sortix {
|
||||
|
||||
bool AreSegmentsOverlapping(const struct segment* a, const struct segment* b)
|
||||
bool AreSegmentsOverlapping(const struct segment_location* a,
|
||||
const struct segment_location* b)
|
||||
{
|
||||
return a->addr < b->addr + b->size && b->addr < a->addr + a->size;
|
||||
}
|
||||
|
||||
bool IsUserspaceSegment(const struct segment* segment)
|
||||
bool IsUserspaceSegment(const Segment* segment)
|
||||
{
|
||||
uintptr_t userspace_addr;
|
||||
size_t userspace_size;
|
||||
|
@ -53,29 +55,31 @@ bool IsUserspaceSegment(const struct segment* segment)
|
|||
return true;
|
||||
}
|
||||
|
||||
struct segment* FindOverlappingSegment(Process* process, const struct segment* new_segment)
|
||||
Segment* FindOverlappingSegment(Process* process,
|
||||
const struct segment_location* location)
|
||||
{
|
||||
// process->segment_lock is held at this point.
|
||||
|
||||
// TODO: Speed up using binary search.
|
||||
for ( size_t i = 0; i < process->segments_used; i++ )
|
||||
{
|
||||
struct segment* segment = &process->segments[i];
|
||||
if ( AreSegmentsOverlapping(segment, new_segment) )
|
||||
Segment* segment = &process->segments[i];
|
||||
if ( AreSegmentsOverlapping(segment, location) )
|
||||
return segment;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bool IsSegmentOverlapping(Process* process, const struct segment* new_segment)
|
||||
bool IsSegmentOverlapping(Process* process,
|
||||
const struct segment_location* location)
|
||||
{
|
||||
// process->segment_lock is held at this point.
|
||||
|
||||
return FindOverlappingSegment(process, new_segment) != NULL;
|
||||
return FindOverlappingSegment(process, location) != NULL;
|
||||
}
|
||||
|
||||
bool AddSegment(Process* process, const struct segment* new_segment)
|
||||
bool AddSegment(Process* process, const Segment* new_segment)
|
||||
{
|
||||
// process->segment_lock is held at this point.
|
||||
|
||||
|
@ -86,11 +90,12 @@ bool AddSegment(Process* process, const struct segment* new_segment)
|
|||
{
|
||||
size_t new_length = process->segments_length ?
|
||||
process->segments_length * 2 : 8;
|
||||
size_t new_size = new_length * sizeof(struct segment);
|
||||
struct segment* new_segments =
|
||||
(struct segment*) realloc(process->segments, new_size);
|
||||
Segment* new_segments = new Segment[new_length];
|
||||
if ( !new_segments )
|
||||
return false;
|
||||
for ( size_t i = 0; i < process->segments_used; i++ )
|
||||
new_segments[i] = process->segments[i];
|
||||
delete[] process->segments;
|
||||
process->segments = new_segments;
|
||||
process->segments_length = new_length;
|
||||
}
|
||||
|
@ -99,7 +104,8 @@ bool AddSegment(Process* process, const struct segment* new_segment)
|
|||
process->segments[process->segments_used++] = *new_segment;
|
||||
|
||||
// Sort the segment list after address.
|
||||
qsort(process->segments, process->segments_used, sizeof(struct segment),
|
||||
// TODO: It's wrong to qsort the Segment class.
|
||||
qsort(process->segments, process->segments_used, sizeof(Segment),
|
||||
segmentcmp);
|
||||
|
||||
return true;
|
||||
|
@ -107,7 +113,7 @@ bool AddSegment(Process* process, const struct segment* new_segment)
|
|||
|
||||
class segment_gaps
|
||||
{
|
||||
typedef yielder_iterator<segment_gaps, struct segment> my_iterator;
|
||||
typedef yielder_iterator<segment_gaps, Segment> my_iterator;
|
||||
|
||||
public:
|
||||
segment_gaps(finished_yielder) : process(0) { }
|
||||
|
@ -121,7 +127,7 @@ public:
|
|||
Memory::GetUserVirtualArea(&userspace_addr, &userspace_size);
|
||||
}
|
||||
|
||||
bool yield(struct segment* result)
|
||||
bool yield(Segment* result)
|
||||
{
|
||||
// process->segment_lock is held at this point.
|
||||
|
||||
|
@ -200,8 +206,8 @@ private:
|
|||
|
||||
};
|
||||
|
||||
bool PlaceSegment(struct segment* solution, Process* process, void* addr_ptr,
|
||||
size_t size, int flags)
|
||||
bool PlaceSegment(struct segment_location* solution, Process* process,
|
||||
void* addr_ptr, size_t size, int flags)
|
||||
{
|
||||
// process->segment_lock is held at this point.
|
||||
|
||||
|
@ -212,9 +218,9 @@ bool PlaceSegment(struct segment* solution, Process* process, void* addr_ptr,
|
|||
size = Page::AlignUp(size);
|
||||
bool found_any = false;
|
||||
size_t best_distance = 0;
|
||||
struct segment best;
|
||||
struct segment_location best;
|
||||
|
||||
for ( struct segment gap : segment_gaps(process) )
|
||||
for ( Segment gap : segment_gaps(process) )
|
||||
{
|
||||
if ( gap.size < size )
|
||||
continue;
|
||||
|
@ -222,20 +228,17 @@ bool PlaceSegment(struct segment* solution, Process* process, void* addr_ptr,
|
|||
{
|
||||
solution->addr = addr;
|
||||
solution->size = size;
|
||||
solution->prot = 0;
|
||||
return true;
|
||||
}
|
||||
struct segment attempt;
|
||||
struct segment_location attempt;
|
||||
size_t distance;
|
||||
attempt.addr = gap.addr;
|
||||
attempt.size = size;
|
||||
attempt.prot = 0;
|
||||
distance = addr < attempt.addr ? attempt.addr - addr : addr - attempt.addr;
|
||||
if ( !found_any|| distance < best_distance )
|
||||
found_any = true, best_distance = distance, best = attempt;
|
||||
attempt.addr = gap.addr + gap.size - size;
|
||||
attempt.size = size;
|
||||
attempt.prot = 0;
|
||||
distance = addr < attempt.addr ? attempt.addr - addr : addr - attempt.addr;
|
||||
if ( !found_any|| distance < best_distance )
|
||||
found_any = true, best_distance = distance, best = attempt;
|
||||
|
@@ -244,4 +247,31 @@ bool PlaceSegment(struct segment* solution, Process* process, void* addr_ptr,
	return *solution = best, found_any;
}

void UnmapSegment(Segment* segment)
{
	UnmapSegmentRange(segment, 0, segment->size);
}

void UnmapSegmentRange(Segment* segment, size_t offset, size_t size)
{
	assert(offset <= segment->size);
	assert(size <= segment->size - offset);
	if ( !size )
		return;
	if ( segment->desc )
	{
		for ( size_t i = 0; i < size; i += Page::Size() )
			Memory::Unmap(segment->addr + offset + i);
		Memory::Flush();
		ioctx_t ctx; SetupUserIOCtx(&ctx);
		for ( size_t i = 0; i < size; i += Page::Size() )
			segment->desc->munmap(&ctx, segment->offset + i);
	}
	else
	{
		Memory::UnmapRange(segment->addr, segment->size, PAGE_USAGE_USER_SPACE);
		Memory::Flush();
	}
}

} // namespace Sortix

@ -452,4 +452,19 @@ int Vnode::tcsetattr(ioctx_t* ctx, int actions, const struct termios* tio)
|
|||
return inode->tcsetattr(ctx, actions, tio);
|
||||
}
|
||||
|
||||
addr_t Vnode::mmap(ioctx_t* ctx, off_t off)
|
||||
{
|
||||
return inode->mmap(ctx, off);
|
||||
}
|
||||
|
||||
void Vnode::munmap(ioctx_t* ctx, off_t off)
|
||||
{
|
||||
return inode->munmap(ctx, off);
|
||||
}
|
||||
|
||||
int Vnode::mprotect(ioctx_t* ctx, int prot)
|
||||
{
|
||||
return inode->mprotect(ctx, prot);
|
||||
}
|
||||
|
||||
} // namespace Sortix
|
||||
|
|
|
@@ -16,6 +16,7 @@ regress \

TESTS:=\
test-fmemopen \
test-mmap-anon-shared \
test-pthread-argv \
test-pthread-basic \
test-pthread-main-join \

regress/test-mmap-anon-shared.c (new file, 57 lines)

@@ -0,0 +1,57 @@
/*
 * Copyright (c) 2016 Jonas 'Sortie' Termansen.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * test-mmap-anon-shared.c
 * Tests whether anonymous shared memory works.
 */

#include <sys/mman.h>
#include <sys/wait.h>

#include <unistd.h>

#include "test.h"

int main(void)
{
	const char* magic = "Tests whether anonymous shared memory works";
	size_t pagesize = getpagesize();
	test_assert(strlen(magic) < pagesize);

	void* shared = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
	                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if ( shared == MAP_FAILED )
		test_error(errno, "mmap(MAP_SHARED | MAP_ANONYMOUS)");

	pid_t child = fork();
	if ( child < 0 )
		test_error(errno, "fork");

	if ( child == 0 )
	{
		strlcpy((char*) shared, magic, pagesize);
		_exit(0);
	}

	int status;
	waitpid(child, &status, 0);

	test_assert(strncmp((const char*) shared, magic, pagesize) == 0);

	if ( munmap(shared, pagesize) < 0 )
		test_error(errno, "munmap");

	return 0;
}