# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Projects::ForkService do
  include ProjectForksHelper
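
  # Assumption based on the option name and the expectations below:
  # `fork_project(..., using_service: true)` from ProjectForksHelper exercises
  # described_class (Projects::ForkService) itself rather than building the
  # fork directly from factories.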

  shared_examples 'forks count cache refresh' do
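    # Including contexts are expected to define `from_project` and `to_user` via `let`.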
    it 'flushes the forks count cache of the source project', :clean_gitlab_redis_cache do
      expect(from_project.forks_count).to be_zero

      fork_project(from_project, to_user, using_service: true)
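      # `forks_count` appears to be memoized through BatchLoader for the current
      # executor; clearing it lets the next read observe the refreshed counter cache.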
      BatchLoader::Executor.clear_current

      expect(from_project.forks_count).to eq(1)
    end
  end

  context 'when forking a new project' do
    describe 'fork by user' do
      before do
        @from_user = create(:user)
        @from_namespace = @from_user.namespace
        avatar = fixture_file_upload("spec/fixtures/dk.png", "image/png")
        @from_project = create(:project,
                               :repository,
                               creator_id: @from_user.id,
                               namespace: @from_namespace,
                               star_count: 107,
                               avatar: avatar,
                               description: 'wow such project',
                               external_authorization_classification_label: 'classification-label')
        @to_user = create(:user)
        @to_namespace = @to_user.namespace
        @from_project.add_user(@to_user, :developer)
      end

      context 'fork project' do
        context 'when forker is a guest' do
          before do
            @guest = create(:user)
            @from_project.add_user(@guest, :guest)
          end

          subject { fork_project(@from_project, @guest, using_service: true) }

          it { is_expected.not_to be_persisted }
          it { expect(subject.errors[:forked_from_project_id]).to eq(['is forbidden']) }

          it 'does not create a fork network' do
            expect { subject }.not_to change { @from_project.reload.fork_network }
          end
        end

        it_behaves_like 'forks count cache refresh' do
          let(:from_project) { @from_project }
          let(:to_user) { @to_user }
        end

        describe "successfully creates project in the user namespace" do
          let(:to_project) { fork_project(@from_project, @to_user, namespace: @to_user.namespace, using_service: true) }

          it { expect(to_project).to be_persisted }
          it { expect(to_project.errors).to be_empty }
          it { expect(to_project.owner).to eq(@to_user) }
          it { expect(to_project.namespace).to eq(@to_user.namespace) }
          it { expect(to_project.star_count).to be_zero }
          it { expect(to_project.description).to eq(@from_project.description) }
          it { expect(to_project.avatar.file).to be_exists }
          it { expect(to_project.ci_config_path).to eq(@from_project.ci_config_path) }
          it { expect(to_project.external_authorization_classification_label).to eq(@from_project.external_authorization_classification_label) }

          # This test is here because we had a bug where the from-project lost its
          # avatar after being forked.
          # https://gitlab.com/gitlab-org/gitlab-foss/issues/26158
          it "after forking the from-project still has its avatar" do
            # If we do not fork the project first we cannot detect the bug.
            expect(to_project).to be_persisted

            expect(@from_project.avatar.file).to be_exists
          end

          it_behaves_like 'forks count cache refresh' do
            let(:from_project) { @from_project }
            let(:to_user) { @to_user }
          end

          it 'creates a fork network with the new project and the root project set' do
            to_project
            fork_network = @from_project.reload.fork_network

            expect(fork_network).not_to be_nil
            expect(fork_network.root_project).to eq(@from_project)
            expect(fork_network.projects).to contain_exactly(@from_project, to_project)
          end

          it 'imports the repository of the forked project', :sidekiq_might_not_need_inline do
            to_project = fork_project(@from_project, @to_user, repository: true, using_service: true)

            expect(to_project.empty_repo?).to be_falsy
          end
        end

        context 'creating a fork of a fork' do
          let(:from_forked_project) { fork_project(@from_project, @to_user, using_service: true) }
          let(:other_namespace) do
            group = create(:group)
            group.add_owner(@to_user)
            group
          end

          let(:to_project) { fork_project(from_forked_project, @to_user, namespace: other_namespace, using_service: true) }

          it 'sets the root of the network to the root project' do
            expect(to_project.fork_network.root_project).to eq(@from_project)
          end

          it 'sets the forked_from_project on the membership' do
            expect(to_project.fork_network_member.forked_from_project).to eq(from_forked_project)
          end

          context 'when the forked project has higher visibility than the root project' do
            let(:root_project) { create(:project, :public) }

            it 'successfully creates a fork of the fork with correct visibility' do
              forked_project = fork_project(root_project, @to_user, using_service: true)

              root_project.update!(visibility_level: Gitlab::VisibilityLevel::INTERNAL)

              # Forked project visibility is not affected by root project visibility change
              expect(forked_project).to have_attributes(visibility_level: Gitlab::VisibilityLevel::PUBLIC)

              fork_of_the_fork = fork_project(forked_project, @to_user, namespace: other_namespace, using_service: true)

              expect(fork_of_the_fork).to be_valid
              expect(fork_of_the_fork).to have_attributes(visibility_level: Gitlab::VisibilityLevel::PUBLIC)
            end
          end

          it_behaves_like 'forks count cache refresh' do
            let(:from_project) { from_forked_project }
            let(:to_user) { @to_user }
          end
        end
      end

      context 'project already exists' do
        it "fails due to validation, not transaction failure" do
          @existing_project = create(:project, :repository, creator_id: @to_user.id, name: @from_project.name, namespace: @to_namespace)
          @to_project = fork_project(@from_project, @to_user, namespace: @to_namespace, using_service: true)
          expect(@existing_project).to be_persisted

          expect(@to_project).not_to be_persisted
          expect(@to_project.errors[:name]).to eq(['has already been taken'])
          expect(@to_project.errors[:path]).to eq(['has already been taken'])
        end
      end

      context 'repository in legacy storage already exists' do
        let(:fake_repo_path) { File.join(TestEnv.repos_path, @to_user.namespace.full_path, "#{@from_project.path}.git") }
        let(:params) { { namespace: @to_user.namespace, using_service: true } }

        before do
          stub_application_setting(hashed_storage_enabled: false)
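          # Pre-create a bare repository at the path the fork would occupy on legacy
          # (path-based) storage, so the service's on-disk validation has something
          # to collide with.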
          TestEnv.create_bare_repository(fake_repo_path)
        end

        after do
          FileUtils.rm_rf(fake_repo_path)
        end

        subject { fork_project(@from_project, @to_user, params) }

        it 'does not allow creation' do
          expect(subject).not_to be_persisted
          expect(subject.errors.messages).to have_key(:base)
          expect(subject.errors.messages[:base].first).to match('There is already a repository with that name on disk')
        end

        context 'when repository disk validation is explicitly skipped' do
          let(:params) { super().merge(skip_disk_validation: true) }

          it 'allows fork project creation' do
            expect(subject).to be_persisted
            expect(subject.errors.messages).to be_empty
          end
        end
      end

      context "CI/CD settings" do
        let(:to_project) { fork_project(@from_project, @to_user, using_service: true) }

        context "when origin has git depth specified" do
          before do
            @from_project.update!(ci_default_git_depth: 42)
          end

          it "inherits default_git_depth from the origin project" do
            expect(to_project.ci_default_git_depth).to eq(42)
          end
        end

        context "when origin does not define git depth" do
          before do
            @from_project.update!(ci_default_git_depth: nil)
          end

          it "the fork has git depth set to 0" do
            expect(to_project.ci_default_git_depth).to eq(0)
          end
        end
      end

      context "when project has restricted visibility level" do
        context "and only one visibility level is restricted" do
          before do
            @from_project.update!(visibility_level: Gitlab::VisibilityLevel::INTERNAL)
            stub_application_setting(restricted_visibility_levels: [Gitlab::VisibilityLevel::INTERNAL])
          end

          it "creates fork with lowest level" do
            forked_project = fork_project(@from_project, @to_user, using_service: true)

            expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
          end
        end

        context "and all visibility levels are restricted" do
          before do
            stub_application_setting(restricted_visibility_levels: [Gitlab::VisibilityLevel::PUBLIC, Gitlab::VisibilityLevel::INTERNAL, Gitlab::VisibilityLevel::PRIVATE])
          end

          it "creates fork with private visibility level" do
            forked_project = fork_project(@from_project, @to_user, using_service: true)

            expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
          end
        end
      end

      context 'when forking is disabled' do
        before do
          @from_project.project_feature.update_attribute(
            :forking_access_level, ProjectFeature::DISABLED)
        end

        it 'fails' do
          to_project = fork_project(@from_project, @to_user, namespace: @to_user.namespace, using_service: true)

          expect(to_project.errors[:forked_from_project_id]).to eq(['is forbidden'])
        end
      end
    end

    describe 'fork to namespace' do
      before do
        @group_owner = create(:user)
        @developer = create(:user)
        @project = create(:project, :repository,
                          creator_id: @group_owner.id,
                          star_count: 777,
                          description: 'Wow, such a cool project!',
                          ci_config_path: 'debian/salsa-ci.yml')
        @group = create(:group)
        @group.add_user(@group_owner, GroupMember::OWNER)
        @group.add_user(@developer, GroupMember::DEVELOPER)
        @project.add_user(@developer, :developer)
        @project.add_user(@group_owner, :developer)
        @opts = { namespace: @group, using_service: true }
      end

      context 'fork project for group' do
        it 'group owner successfully forks project into the group' do
          to_project = fork_project(@project, @group_owner, @opts)

          expect(to_project).to be_persisted
          expect(to_project.errors).to be_empty
          expect(to_project.owner).to eq(@group)
          expect(to_project.namespace).to eq(@group)
          expect(to_project.name).to eq(@project.name)
          expect(to_project.path).to eq(@project.path)
          expect(to_project.description).to eq(@project.description)
          expect(to_project.ci_config_path).to eq(@project.ci_config_path)
          expect(to_project.star_count).to be_zero
        end
      end

      context 'fork project for group when user not owner' do
        it 'group developer fails to fork project into the group' do
          to_project = fork_project(@project, @developer, @opts)

          expect(to_project.errors[:namespace]).to eq(['is not valid'])
        end
      end

      context 'project already exists in group' do
        it 'fails due to validation, not transaction failure' do
          existing_project = create(:project, :repository,
                                    name: @project.name,
                                    namespace: @group)
          to_project = fork_project(@project, @group_owner, @opts)
          expect(existing_project.persisted?).to be_truthy
          expect(to_project.errors[:name]).to eq(['has already been taken'])
          expect(to_project.errors[:path]).to eq(['has already been taken'])
        end
      end

      context 'when the namespace has a lower visibility level than the project' do
        it 'creates the project with the lower visibility level' do
          public_project = create(:project, :public)
          private_group = create(:group, :private)
          group_owner = create(:user)
          private_group.add_owner(group_owner)

          forked_project = fork_project(public_project, group_owner, namespace: private_group, using_service: true)

          expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
        end
      end
    end

    describe 'fork with optional attributes' do
      let(:public_project) { create(:project, :public) }

      it 'sets optional attributes to specified values' do
        forked_project = fork_project(
          public_project,
          nil,
          namespace: public_project.namespace,
          path: 'forked',
          name: 'My Fork',
          description: 'Description',
          visibility: 'internal',
          using_service: true
        )

        expect(forked_project.path).to eq('forked')
        expect(forked_project.name).to eq('My Fork')
        expect(forked_project.description).to eq('Description')
        expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::INTERNAL)
      end

      it 'sets visibility level to private if an unknown visibility is requested' do
        forked_project = fork_project(public_project, nil, using_service: true, visibility: 'unknown')

        expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
      end

      it 'sets visibility level to project visibility level if requested visibility is greater' do
        private_project = create(:project, :private)

        forked_project = fork_project(private_project, nil, using_service: true, visibility: 'public')

        expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
      end

      it 'sets visibility level to target namespace visibility level if requested visibility is greater' do
        private_group = create(:group, :private)

        forked_project = fork_project(public_project, nil, namespace: private_group, using_service: true, visibility: 'public')

        expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
      end

      it 'copies project features visibility settings to the fork', :aggregate_failures do
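        # Builds something like { "issues_access_level" => ProjectFeature::PRIVATE, ... }
        # with one entry per project feature.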
        attrs = ProjectFeature::FEATURES.to_h do |f|
          ["#{f}_access_level", ProjectFeature::PRIVATE]
        end

        public_project.project_feature.update!(attrs)

        user = create(:user, developer_projects: [public_project])
        forked_project = described_class.new(public_project, user).execute

        expect(forked_project.project_feature.slice(attrs.keys)).to eq(attrs)
      end
    end
  end

  context 'when a project is already forked' do
    it 'creates a new pool repository after the project is moved to a new shard' do
      project = create(:project, :public, :repository)
      fork_before_move = fork_project(project, nil, using_service: true)

      # Stub everything required to move a project to a Gitaly shard that does not exist
      allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('default').and_call_original
      allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('test_second_storage').and_return(SecureRandom.uuid)
      stub_storage_settings('test_second_storage' => { 'path' => TestEnv::SECOND_STORAGE_PATH })
      allow_any_instance_of(Gitlab::Git::Repository).to receive(:create_repository)
        .and_return(true)
      allow_any_instance_of(Gitlab::Git::Repository).to receive(:replicate)
      allow_any_instance_of(Gitlab::Git::Repository).to receive(:checksum)
        .and_return(::Gitlab::Git::BLANK_SHA)

      storage_move = create(
        :project_repository_storage_move,
        :scheduled,
        container: project,
        destination_storage_name: 'test_second_storage'
      )
      Projects::UpdateRepositoryStorageService.new(storage_move).execute
      fork_after_move = fork_project(project.reload, nil, using_service: true)
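      # A pool repository is tied to a shard (hence the per-shard lookups below), so
      # the move should leave one pool on 'default' and create a second one on
      # 'test_second_storage'.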
      pool_repository_before_move = PoolRepository.joins(:shard)
        .find_by(source_project: project, shards: { name: 'default' })
      pool_repository_after_move = PoolRepository.joins(:shard)
        .find_by(source_project: project, shards: { name: 'test_second_storage' })

      expect(fork_before_move.pool_repository).to eq(pool_repository_before_move)
      expect(fork_after_move.pool_repository).to eq(pool_repository_after_move)
    end
  end

  # Forks of public projects are deduplicated through Git object pools. A fork used
  # to be a full deep copy of everything on disk via `git clone`; that keeps
  # repositories isolated, but at creation time the clone is 100% identical to the
  # origin, so the object directory is almost pure duplication. An object pool is a
  # third repository that essentially exists only for its 'objects' directory: it is
  # registered as the alternate object location of each member repository, so when an
  # object is missing locally, Git looks in the pool instead. Garbage collection also
  # checks the alternate location, which lets Git throw away the duplicate copy in
  # the member repository while the pool's copy remains as is.
  #
  # A pool's origin is, for now, always a repository that is not itself a fork. When
  # the root of a fork network is forked, the fork still clones the full repository,
  # and the pool repository is created asynchronously. Either can finish first; the
  # Join ObjectPool operation is idempotent, so scheduling it twice has the same
  # effect as scheduling it once.
  #
  # Two migrations back this state on the Rails side: pool_repositories gained a
  # state column managed by a state machine (allowing hooks on transitions), and a
  # source_project_id column whose unique index lets the database resolve races when
  # creating a record, and which links a pool back to the root of its fork network.
  #
  # Object pools are only available for public projects that use hashed storage, and
  # only when forking from the root of the fork network (that is, when the project
  # being forked is not itself a fork). ObjectPool and PoolRepository are alike but
  # distinct: ObjectPool is whatever is stored on disk and managed by Gitaly, while
  # PoolRepository is the record in the database.
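  #
  # As a rough sketch of the mechanism (the pool path is illustrative, not the exact
  # Gitaly layout), a member repository points at its pool through the alternates file:
  #
  #   $ cat member.git/objects/info/alternates
  #   ../../@pools/aa/bb/<pool-hash>.git/objects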
  context 'when forking with object pools' do
    let(:fork_from_project) { create(:project, :repository, :public) }
    let(:forker) { create(:user) }

    context 'when no pool exists' do
      it 'creates a new object pool' do
        forked_project = fork_project(fork_from_project, forker, using_service: true)

        expect(forked_project.pool_repository).to eq(fork_from_project.pool_repository)
      end
    end

    context 'when a pool already exists' do
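      # `let!` is eager, so the pool record exists before the fork is created and
      # the fork should join it rather than get a fresh pool.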
      let!(:pool_repository) { create(:pool_repository, source_project: fork_from_project) }

      it 'joins the object pool' do
        forked_project = fork_project(fork_from_project, forker, using_service: true)

        expect(forked_project.pool_repository).to eq(fork_from_project.pool_repository)
      end
    end
  end

  context 'when linking fork to an existing project' do
    let(:fork_from_project) { create(:project, :public) }
    let(:fork_to_project) { create(:project, :public) }
    let(:user) do
      create(:user).tap { |u| fork_to_project.add_maintainer(u) }
    end

    subject { described_class.new(fork_from_project, user) }
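
    # In link mode the service receives an existing project via `execute(fork_to_project)`
    # instead of creating one; the helper below reads the resulting relation back
    # through the fork network membership.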
    def forked_from_project(project)
      project.fork_network_member&.forked_from_project
    end

    context 'if project is already forked' do
      it 'does not create fork relation' do
        allow(fork_to_project).to receive(:forked?).and_return(true)
        expect(forked_from_project(fork_to_project)).to be_nil
        expect(subject.execute(fork_to_project)).to be_nil
        expect(forked_from_project(fork_to_project)).to be_nil
      end
    end

    context 'if project is not forked' do
      it 'creates fork relation' do
        expect(fork_to_project.forked?).to be_falsy
        expect(forked_from_project(fork_to_project)).to be_nil

        subject.execute(fork_to_project)

        fork_to_project.reload

        expect(fork_to_project.forked?).to be true
        expect(forked_from_project(fork_to_project)).to eq fork_from_project
        expect(fork_to_project.forked_from_project).to eq fork_from_project
      end

      it 'flushes the forks count cache of the source project' do
        expect(fork_from_project.forks_count).to be_zero

        subject.execute(fork_to_project)
        BatchLoader::Executor.clear_current

        expect(fork_from_project.forks_count).to eq(1)
      end

      context 'if the fork is not allowed' do
        let(:fork_from_project) { create(:project, :private) }

        it 'does not delete the LFS objects' do
          create(:lfs_objects_project, project: fork_to_project)

          expect { subject.execute(fork_to_project) }
            .not_to change { fork_to_project.lfs_objects_projects.size }
        end
      end
    end
  end

  describe '#valid_fork_targets' do
    let(:finder_mock) { instance_double('ForkTargetsFinder', execute: ['finder_return_value']) }
    let(:current_user) { instance_double('User') }
    let(:project) { instance_double('Project') }

    before do
      allow(ForkTargetsFinder).to receive(:new).with(project, current_user).and_return(finder_mock)
    end

    it 'returns whatever finder returns' do
      expect(described_class.new(project, current_user).valid_fork_targets).to eq ['finder_return_value']
    end
  end

  describe '#valid_fork_target?' do
    let(:project) { Project.new }
    let(:params) { {} }

    context 'when target is not passed' do
      subject { described_class.new(project, user, params).valid_fork_target? }

      context 'when current user is an admin' do
        let(:user) { build(:user, :admin) }

        it { is_expected.to be_truthy }
      end

      context 'when current user is not an admin' do
        let(:user) { create(:user) }

        let(:finder_mock) { instance_double('ForkTargetsFinder', execute: [user.namespace]) }
        let(:project) { create(:project) }

        before do
          allow(ForkTargetsFinder).to receive(:new).with(project, user).and_return(finder_mock)
        end

        context 'when target namespace is in valid fork targets' do
          let(:params) { { namespace: user.namespace } }

          it { is_expected.to be_truthy }
        end

        context 'when target namespace is not in valid fork targets' do
          let(:params) { { namespace: create(:group) } }

          it { is_expected.to be_falsey }
        end
      end
    end

    context 'when target is passed' do
      let(:target) { create(:group) }

      subject { described_class.new(project, user, params).valid_fork_target?(target) }

      context 'when current user is an admin' do
        let(:user) { build(:user, :admin) }

        it { is_expected.to be_truthy }
      end

      context 'when current user is not an admin' do
        let(:user) { create(:user) }

        before do
          allow(ForkTargetsFinder).to receive(:new).with(project, user).and_return(finder_mock)
        end

        context 'when target namespace is in valid fork targets' do
          let(:finder_mock) { instance_double('ForkTargetsFinder', execute: [target]) }

          it { is_expected.to be_truthy }
        end

        context 'when target namespace is not in valid fork targets' do
          let(:finder_mock) { instance_double('ForkTargetsFinder', execute: [create(:group)]) }

          it { is_expected.to be_falsey }
        end
      end
    end
  end
end