# frozen_string_literal: true

require 'spec_helper'

RSpec.describe PoolRepository do
  describe 'associations' do
    it { is_expected.to belong_to(:shard) }
    it { is_expected.to belong_to(:source_project) }
    it { is_expected.to have_many(:member_projects) }
  end

  describe 'validations' do
    # NOTE(review): presumably a persisted record is needed so the presence
    # matchers run against real data — confirm against the factory setup.
    let!(:pool_repository) { create(:pool_repository) }

    it { is_expected.to validate_presence_of(:shard) }
    it { is_expected.to validate_presence_of(:source_project) }
  end

  describe '#disk_path' do
    it 'sets the hashed disk_path' do
      pool = create(:pool_repository)

      # Pool repositories live under the @pools prefix, sharded into two
      # 2-hex-digit directories followed by the full 64-hex-digit hash.
      expect(pool.disk_path).to match(%r{\A@pools/\h{2}/\h{2}/\h{64}})
    end
  end

  describe '#unlink_repository' do
    let(:pool) { create(:pool_repository, :ready) }
    let(:source_repository) { pool.source_project.repository }

    # The member repository's alternates file; it exists only while the
    # repository is linked to the object pool.
    let(:alternates_file) do
      member_repo_path = File.join(TestEnv.repos_path, source_repository.relative_path)
      File.join(member_repo_path, 'objects', 'info', 'alternates')
    end

    before do
      pool.link_repository(source_repository)
    end

    context 'when the last member leaves' do
      it 'schedules pool removal' do
        # The mock must be set up before the call that triggers it.
        expect(::ObjectPool::DestroyWorker).to receive(:perform_async).with(pool.id).and_call_original

        pool.unlink_repository(source_repository)

        expect(File).not_to exist(alternates_file)
      end
    end

    context 'when skipping disconnect' do
      it 'does not change the alternates file' do
        contents_before = File.read(alternates_file)

        pool.unlink_repository(source_repository, disconnect: false)

        expect(File.read(alternates_file)).to eq(contents_before)
      end
    end

    context 'when the second member leaves' do
      it 'does not schedule pool removal' do
        # Add a second member so the pool is not emptied by the unlink below.
        another_project = create(:project, :repository, pool_repository: pool)
        pool.link_repository(another_project.repository)

        expect(::ObjectPool::DestroyWorker).not_to receive(:perform_async).with(pool.id)

        pool.unlink_repository(source_repository)

        expect(File).not_to exist(alternates_file)
      end
    end
  end
end