gitlab-org--gitlab-foss/spec/services/projects/destroy_service_spec.rb

# frozen_string_literal: true

require 'spec_helper'

describe Projects::DestroyService do
  include ProjectForksHelper

  let!(:user) { create(:user) }
  let!(:project) { create(:project, :repository, namespace: user.namespace) }
  let!(:path) do
    Gitlab::GitalyClient::StorageSettings.allow_disk_access do
      project.repository.path_to_repo
    end
  end
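
  # remove_path is the "+<id>+deleted.git" location the repository is moved to before
  # it is removed; the "Sidekiq fake" examples below assert against this intermediate path.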
  let!(:remove_path) { path.sub(/\.git\Z/, "+#{project.id}+deleted.git") }
  let!(:async) { false } # execute or async_execute

  before do
    stub_container_registry_config(enabled: true)
    stub_container_registry_tags(repository: :any, tags: [])
  end

  shared_examples 'deleting the project' do
    it 'deletes the project' do
      expect(Project.unscoped.all).not_to include(project)
      expect(project.gitlab_shell.exists?(project.repository_storage, path + '.git')).to be_falsey
      expect(project.gitlab_shell.exists?(project.repository_storage, remove_path + '.git')).to be_falsey
    end
  end

  shared_examples 'deleting the project with pipeline and build' do
    context 'with pipeline and build' do # which has optimistic locking
      let!(:pipeline) { create(:ci_pipeline, project: project) }
      let!(:build) { create(:ci_build, :artifacts, pipeline: pipeline) }

      before do
        perform_enqueued_jobs do
          destroy_project(project, user, {})
        end
      end

      it_behaves_like 'deleting the project'
    end
  end
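
  # Parameterized with the error message the service is expected to record in
  # `projects.delete_error` when an async destroy fails.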
  shared_examples 'handles errors thrown during async destroy' do |error_message|
    it 'does not allow the error to bubble up' do
      expect do
        perform_enqueued_jobs { destroy_project(project, user, {}) }
      end.not_to raise_error
    end

    it 'unmarks the project as "pending deletion"' do
      perform_enqueued_jobs { destroy_project(project, user, {}) }

      expect(project.reload.pending_delete).to be(false)
    end

    it 'stores an error message in `projects.delete_error`' do
      perform_enqueued_jobs { destroy_project(project, user, {}) }

      expect(project.reload.delete_error).to be_present
      expect(project.delete_error).to include(error_message)
    end
  end

  context 'Sidekiq inline' do
    before do
      # Run Sidekiq immediately to check that the renamed repository will be removed
      perform_enqueued_jobs { destroy_project(project, user, {}) }
    end

    it_behaves_like 'deleting the project'

    context 'when the project has remote mirrors' do
      let!(:project) do
        create(:project, :repository, namespace: user.namespace).tap do |project|
          project.remote_mirrors.create(url: 'http://test.com')
        end
      end
      let!(:async) { true }

      it 'destroys them' do
        expect(RemoteMirror.count).to eq(0)
      end
    end

    it 'invalidates personal_project_count cache' do
      expect(user).to receive(:invalidate_personal_projects_count)

      destroy_project(project, user)
    end

    context 'when project has exports' do
      let!(:project_with_export) do
        create(:project, :repository, namespace: user.namespace).tap do |project|
          create(:import_export_upload,
                 project: project,
                 export_file: fixture_file_upload('spec/fixtures/project_export.tar.gz'))
        end
      end
      let!(:async) { true }

      it 'destroys project and export' do
        expect { destroy_project(project_with_export, user) }.to change(ImportExportUpload, :count).by(-1)

        expect(Project.all).not_to include(project_with_export)
      end
    end
  end
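
  # With Sidekiq faked, the repository removal job is only enqueued, so the project
  # record is gone but the renamed "+deleted" copy is expected to remain on disk.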
  context 'Sidekiq fake' do
    before do
      # Don't run Sidekiq, to check that the renamed repository still exists
      Sidekiq::Testing.fake! { destroy_project(project, user, {}) }
    end

    it { expect(Project.all).not_to include(project) }
    it { expect(Dir.exist?(path)).to be_falsey }
    it { expect(Dir.exist?(remove_path)).to be_truthy }
  end
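
  # The team ends up with two members (the namespace owner plus the developer added
  # below) and should still have both after the destroy aborts on the Redis error.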
  context 'when flushing caches fails' do
    before do
      new_user = create(:user)
      project.team.add_user(new_user, Gitlab::Access::DEVELOPER)
      allow_any_instance_of(described_class).to receive(:flush_caches).and_raise(::Redis::CannotConnectError)
    end

    it 'keeps project team intact upon an error' do
      perform_enqueued_jobs do
        destroy_project(project, user, {})
      rescue ::Redis::CannotConnectError
      end

      expect(project.team.members.count).to eq 2
    end
  end

  context 'with async_execute' do
    let(:async) { true }

    context 'async delete of project with private issue visibility' do
      before do
        project.project_feature.update_attribute("issues_access_level", ProjectFeature::PRIVATE)

        # Run Sidekiq immediately to check that the renamed repository will be removed
        perform_enqueued_jobs { destroy_project(project, user, {}) }
      end

      it_behaves_like 'deleting the project'
    end

    it_behaves_like 'deleting the project with pipeline and build'

    context 'errors' do
      context 'when `remove_legacy_registry_tags` fails' do
        before do
          expect_any_instance_of(described_class)
            .to receive(:remove_legacy_registry_tags).and_return(false)
        end

        it_behaves_like 'handles errors thrown during async destroy', "Failed to remove some tags"
      end

      context 'when `remove_repository` fails' do
        before do
          expect_any_instance_of(described_class)
            .to receive(:remove_repository).and_return(false)
        end

        it_behaves_like 'handles errors thrown during async destroy', "Failed to remove project repository"
      end

      context 'when `execute` raises expected error' do
        before do
          expect_any_instance_of(Project)
            .to receive(:destroy!).and_raise(StandardError.new("Other error message"))
        end

        it_behaves_like 'handles errors thrown during async destroy', "Other error message"
      end

      context 'when `execute` raises unexpected error' do
        before do
          expect_any_instance_of(Project)
            .to receive(:destroy!).and_raise(Exception.new('Other error message'))
        end

        it 'allows error to bubble up and rolls back project deletion' do
          expect do
            perform_enqueued_jobs { destroy_project(project, user, {}) }
          end.to raise_error(Exception, 'Other error message')

          expect(project.reload.pending_delete).to be(false)
          expect(project.delete_error).to include("Other error message")
        end
      end
    end
  end
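
  # The top-level `before` stubs the registry as enabled with no tags; these contexts
  # override the tag stubs so that destroying the project has to delete registry tags.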
  describe 'container registry' do
    context 'when there are regular container repositories' do
      let(:container_repository) { create(:container_repository) }

      before do
        stub_container_registry_tags(repository: project.full_path + '/image',
                                     tags: ['tag'])
        project.container_repositories << container_repository
      end

      context 'when image repository deletion succeeds' do
        it 'removes tags' do
          expect_any_instance_of(ContainerRepository)
            .to receive(:delete_tags!).and_return(true)

          destroy_project(project, user)
        end
      end

      context 'when image repository deletion fails' do
        it 'raises an exception' do
          expect_any_instance_of(ContainerRepository)
            .to receive(:delete_tags!).and_raise(RuntimeError)

          expect(destroy_project(project, user)).to be false
        end
      end
    end

    context 'when there are tags for legacy root repository' do
      before do
        stub_container_registry_tags(repository: project.full_path,
                                     tags: ['tag'])
      end

      context 'when image repository tags deletion succeeds' do
        it 'removes tags' do
          expect_any_instance_of(ContainerRepository)
            .to receive(:delete_tags!).and_return(true)

          destroy_project(project, user)
        end
      end

      context 'when image repository tags deletion fails' do
        it 'raises an exception' do
          expect_any_instance_of(ContainerRepository)
            .to receive(:delete_tags!).and_return(false)

          expect(destroy_project(project, user)).to be false
        end
      end
    end
  end

  context 'for a forked project with LFS objects' do
    let(:forked_project) { fork_project(project, user) }

    before do
      project.lfs_objects << create(:lfs_object)
      forked_project.reload
    end

    it 'destroys the fork' do
      expect { destroy_project(forked_project, user) }
        .not_to raise_error
    end
  end

  context 'as the root of a fork network' do
    let!(:fork_network) { create(:fork_network, root_project: project) }

    it 'updates the fork network with the project name' do
      destroy_project(project, user)

      fork_network.reload

      expect(fork_network.deleted_root_project_name).to eq(project.full_name)
      expect(fork_network.root_project).to be_nil
    end
  end
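
  # The service moves repositories to a "<disk_path>+<id>#{DELETED_FLAG}" name and
  # schedules GitlabShellWorker to remove them later (5 minutes in the regular phase,
  # 10 minutes for stale cleanup), which is what these examples assert.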
  context 'repository +deleted path removal' do
    def removal_path(path)
      "#{path}+#{project.id}#{described_class::DELETED_FLAG}"
    end

    context 'regular phase' do
      it 'schedules +deleted removal of existing repos' do
        service = described_class.new(project, user, {})
        allow(service).to receive(:schedule_stale_repos_removal)

        expect(GitlabShellWorker).to receive(:perform_in)
          .with(5.minutes, :remove_repository, project.repository_storage, removal_path(project.disk_path))

        service.execute
      end
    end

    context 'stale cleanup' do
      let!(:async) { true }

      it 'schedules +deleted wiki and repo removal' do
        allow(ProjectDestroyWorker).to receive(:perform_async)

        expect(GitlabShellWorker).to receive(:perform_in)
          .with(10.minutes, :remove_repository, project.repository_storage, removal_path(project.disk_path))
        expect(GitlabShellWorker).to receive(:perform_in)
          .with(10.minutes, :remove_repository, project.repository_storage, removal_path(project.wiki.disk_path))

        destroy_project(project, user, {})
      end
    end
  end
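
  # The `before` block runs the destroy with Sidekiq faked, so the repository is left at
  # the renamed "+deleted" path; the example then rolls it back to its original path.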
  context '#attempt_restore_repositories' do
    let(:path) { project.disk_path + '.git' }

    before do
      expect(project.gitlab_shell.exists?(project.repository_storage, path)).to be_truthy
      expect(project.gitlab_shell.exists?(project.repository_storage, remove_path)).to be_falsey

      # Don't run Sidekiq, to check that the renamed repository still exists
      Sidekiq::Testing.fake! { destroy_project(project, user, {}) }

      expect(project.gitlab_shell.exists?(project.repository_storage, path)).to be_falsey
      expect(project.gitlab_shell.exists?(project.repository_storage, remove_path)).to be_truthy
    end

    it 'restores the repositories' do
      Sidekiq::Testing.fake! { described_class.new(project, user).attempt_repositories_rollback }

      expect(project.gitlab_shell.exists?(project.repository_storage, path)).to be_truthy
      expect(project.gitlab_shell.exists?(project.repository_storage, remove_path)).to be_falsey
    end
  end
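
  # Test helper: drives the service through `async_execute` or `execute` depending on
  # the `async` let, so individual contexts can opt in with `let(:async) { true }`.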
  def destroy_project(project, user, params = {})
    if async
      Projects::DestroyService.new(project, user, params).async_execute
    else
      Projects::DestroyService.new(project, user, params).execute
    end
  end
end