# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Projects::UpdateRemoteMirrorService do
  let_it_be(:project) { create(:project, :repository, lfs_enabled: true) }
  let_it_be(:remote_project) { create(:forked_project_with_submodules) }
  let_it_be(:remote_mirror) { create(:remote_mirror, project: project, enabled: true) }

  subject(:service) { described_class.new(project, project.creator) }

  # Retry strategy under test (from the commit "Rework retry strategy for
  # remote mirrors"):
  #
  # Prevention of running 2 simultaneous updates: instead of using
  # `RemoteMirror#update_status` and raising an error when the same mirror
  # is already being updated, we now use `Gitlab::ExclusiveLease`. When we
  # fail to obtain a lease in 3 tries, 30 seconds apart, we bail and
  # reschedule; we reschedule faster for protected branches. If the mirror
  # has already run since it was scheduled, the job is skipped.
  #
  # Error handling, remote side: when an update fails because of a
  # `Gitlab::Git::CommandError`, the cause may be on the remote side (for
  # example, diverged branches), so we don't track the error in Sentry.
  # We try 3 times, scheduled 1 or 5 minutes apart; in between, the mirror
  # is marked as "to_retry" and the error is visible to the user on the
  # settings page. After 3 tries we mark the mirror as failed and notify
  # the user; the next mirroring event triggers a fresh update attempt.
  #
  # Error handling, our side: if an unexpected error occurs, we mark the
  # mirror as failed, but still retry the job through the regular Sidekiq
  # retries with backoff, as before. The error is reported to Sentry, since
  # it's likely we need to do something about it.
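  #
  # A minimal sketch of the lease pattern above, assuming a hypothetical
  # lease key and timeout (an illustration, not the service's actual
  # implementation):
  #
  #   lease_key = "update_remote_mirror:#{remote_mirror.id}"
  #   lease = Gitlab::ExclusiveLease.new(lease_key, timeout: 30.seconds)
  #
  #   if (uuid = lease.try_obtain)
  #     begin
  #       remote_mirror.update_repository
  #     ensure
  #       Gitlab::ExclusiveLease.cancel(lease_key, uuid)
  #     end
  #   else
  #     # lease held by another update: reschedule, bailing after 3 tries
  #   end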
  describe '#execute' do
    let(:retries) { 0 }

    subject(:execute!) { service.execute(remote_mirror, retries) }
    before do
      project.repository.add_branch(project.first_owner, 'existing-branch', 'master')

      allow(remote_mirror)
        .to receive(:update_repository)
        .and_return(double(divergent_refs: []))
    end
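
    # `update_repository` is stubbed so these examples exercise the
    # service's status handling without pushing to a real remote.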

    it 'does not fetch the remote repository' do
      # See https://gitlab.com/gitlab-org/gitaly/-/issues/2670
      expect(project.repository).not_to receive(:fetch_remote)

      execute!
    end

    it 'marks the mirror as started when beginning' do
      expect(remote_mirror).to receive(:update_start!).and_call_original

      execute!
    end

    it 'marks the mirror as successfully finished' do
      result = execute!

      expect(result[:status]).to eq(:success)
      expect(remote_mirror).to be_finished
    end

    it 'marks the mirror as failed and raises the error when an unexpected error occurs' do
      allow(remote_mirror).to receive(:update_repository).and_raise('Badly broken')

      expect { execute! }.to raise_error(/Badly broken/)

      expect(remote_mirror).to be_failed
      expect(remote_mirror.last_error).to include('Badly broken')
    end

    context 'when the URL is blocked' do
      before do
        allow(Gitlab::UrlBlocker).to receive(:blocked_url?).and_return(true)
      end

      it 'hard retries and returns error status' do
        expect(execute!).to eq(status: :error, message: 'The remote mirror URL is invalid.')
        expect(remote_mirror).to be_to_retry
      end

      context 'when retries are exceeded' do
        let(:retries) { 4 }

        it 'hard fails and returns error status' do
          expect(execute!).to eq(status: :error, message: 'The remote mirror URL is invalid.')
          expect(remote_mirror).to be_failed
        end
      end
    end
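
    # A hedged sketch (hypothetical, for illustration) of the decision the
    # two examples above exercise: once the caller-supplied retry count
    # exceeds the maximum, the service presumably hard-fails the mirror
    # instead of scheduling another retry:
    #
    #   if retries > max_retries
    #     remote_mirror.hard_fail!(error_message)
    #   else
    #     remote_mirror.hard_retry!(error_message)
    #   end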

    context "when given URLs containing escaped elements" do
      it_behaves_like "URLs containing escaped elements return expected status" do
        let(:result) { execute! }

        before do
          allow(remote_mirror).to receive(:url).and_return(url)
        end
      end
    end

    context 'when the update fails because of a `Gitlab::Git::CommandError`' do
      before do
        allow(remote_mirror).to receive(:update_repository)
          .and_raise(Gitlab::Git::CommandError.new('update failed'))
      end

      it 'wraps `Gitlab::Git::CommandError`s in a service error' do
        expect(execute!).to eq(status: :error, message: 'update failed')
      end

      it 'marks the mirror as to be retried' do
        execute!

        expect(remote_mirror).to be_to_retry
        expect(remote_mirror.last_error).to include('update failed')
      end

      it "marks the mirror as failed after #{described_class::MAX_TRIES} tries" do
        service.execute(remote_mirror, described_class::MAX_TRIES)

        expect(remote_mirror).to be_failed
        expect(remote_mirror.last_error).to include('update failed')
      end
    end

    context 'when there are divergent refs' do
      it 'marks the mirror as failed and sets an error message' do
        response = double(divergent_refs: %w[refs/heads/master refs/heads/develop])
        expect(remote_mirror).to receive(:update_repository).and_return(response)

        execute!

        expect(remote_mirror).to be_failed
        expect(remote_mirror.last_error).to include("Some refs have diverged")
        expect(remote_mirror.last_error).to include("refs/heads/master\n")
        expect(remote_mirror.last_error).to include("refs/heads/develop")
      end
    end
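
    # The assertions above match fragments of a message presumably built by
    # joining the divergent ref names with newlines, e.g. (hypothetical
    # format, which would explain why "refs/heads/master" is followed by a
    # newline while the last ref is not):
    #
    #   "Some refs have diverged:\n\n#{divergent_refs.join("\n")}"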

    context "sending lfs objects" do
      let_it_be(:lfs_pointer) { create(:lfs_objects_project, project: project) }

      before do
        stub_lfs_setting(enabled: true)
      end

      it 'pushes LFS objects to an HTTP repository' do
        expect_next_instance_of(Lfs::PushService) do |service|
          expect(service).to receive(:execute)
        end
        expect(Gitlab::AppJsonLogger).not_to receive(:info)

        execute!

        expect(remote_mirror.update_status).to eq('finished')
        expect(remote_mirror.last_error).to be_nil
      end

      context 'when LFS objects fail to push' do
        before do
          expect_next_instance_of(Lfs::PushService) do |service|
            expect(service).to receive(:execute).and_return({ status: :error, message: 'unauthorized' })
          end
        end

        context 'when remote_mirror_fail_on_lfs feature flag enabled' do
          it 'fails update' do
            expect(Gitlab::AppJsonLogger).to receive(:info).with(
              hash_including(message: "Error synching remote mirror")).and_call_original

            execute!

            expect(remote_mirror.update_status).to eq('failed')
            expect(remote_mirror.last_error).to eq("Error synchronizing LFS files:\n\nunauthorized\n\n")
          end
        end

        context 'when remote_mirror_fail_on_lfs feature flag is disabled' do
          before do
            stub_feature_flags(remote_mirror_fail_on_lfs: false)
          end

          it 'does not fail update' do
            expect(Gitlab::AppJsonLogger).to receive(:info).with(
              hash_including(message: "Error synching remote mirror")).and_call_original

            execute!

            expect(remote_mirror.update_status).to eq('finished')
            expect(remote_mirror.last_error).to be_nil
          end
        end
      end

      context 'with SSH repository' do
        let(:ssh_mirror) { create(:remote_mirror, project: project, enabled: true) }

        before do
          allow(ssh_mirror)
            .to receive(:update_repository)
            .and_return(double(divergent_refs: []))
        end

        it 'does nothing to an SSH repository' do
          ssh_mirror.update!(url: 'ssh://example.com')

          expect_any_instance_of(Lfs::PushService).not_to receive(:execute)

          service.execute(ssh_mirror, retries)
        end

        it 'does nothing if LFS is disabled' do
          expect(project).to receive(:lfs_enabled?) { false }

          expect_any_instance_of(Lfs::PushService).not_to receive(:execute)

          service.execute(ssh_mirror, retries)
        end

        it 'does nothing if non-password auth is specified' do
          ssh_mirror.update!(auth_method: 'ssh_public_key')

          expect_any_instance_of(Lfs::PushService).not_to receive(:execute)

          service.execute(ssh_mirror, retries)
        end
      end
    end
  end
end