# frozen_string_literal: true

class RemoteMirror < ApplicationRecord
  include AfterCommitQueue
  include MirrorAuthentication
  include SafeUrl

  # Retry strategy for remote mirrors
  #
  # Preventing two simultaneous updates: instead of relying on
  # `RemoteMirror#update_status` and raising an error when a mirror is already
  # running, we take a `Gitlab::ExclusiveLease` per mirror. If the lease cannot
  # be obtained after 3 tries, 30 seconds apart, we bail and reschedule the job
  # (with a shorter delay for mirrors limited to protected branches). If the
  # mirror has already been updated since the job was scheduled, the job is
  # skipped.
  #
  # Errors on the remote side: when an update fails with a
  # `Gitlab::Git::CommandError` (for example, because branches have diverged),
  # the error is not reported to Sentry, since it is unlikely we can fix it
  # ourselves. The update is retried up to 3 times, scheduled 1 or 5 minutes
  # apart; in between, the mirror is marked as "to_retry" and the error is
  # shown to the user on the settings page. After 3 failed tries the mirror is
  # marked as failed and the user is notified; the next mirroring event
  # triggers a new refresh.
  #
  # Errors on our side: when an unexpected error occurs, the mirror is marked
  # as failed, but the job is still retried through the regular Sidekiq retries
  # with backoff, as before. The error is reported to Sentry, since it likely
  # requires action on our side.
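  #
  # A minimal sketch (not part of this class) of how the updating worker is
  # assumed to guard against concurrent updates with `Gitlab::ExclusiveLease`;
  # the lease key and timeout below are illustrative only:
  #
  #   lease_key = "remote_mirror_update:#{mirror.id}"
  #   lease = Gitlab::ExclusiveLease.new(lease_key, timeout: 30.seconds)
  #
  #   if (uuid = lease.try_obtain)
  #     begin
  #       mirror.update_repository
  #     ensure
  #       Gitlab::ExclusiveLease.cancel(lease_key, uuid)
  #     end
  #   else
  #     # could not get the lease: reschedule with mirror.backoff_delay
  #   end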
  MAX_FIRST_RUNTIME = 3.hours
  MAX_INCREMENTAL_RUNTIME = 1.hour
  PROTECTED_BACKOFF_DELAY = 1.minute
  UNPROTECTED_BACKOFF_DELAY = 5.minutes

  attr_encrypted :credentials,
                 key: Settings.attr_encrypted_db_key_base,
                 marshal: true,
                 encode: true,
                 mode: :per_attribute_iv_and_salt,
                 insecure_mode: true,
                 algorithm: 'aes-256-cbc'

  belongs_to :project, inverse_of: :remote_mirrors

  validates :url, presence: true, public_url: { schemes: %w(ssh git http https), allow_blank: true, enforce_user: true }

  before_save :set_new_remote_name, if: :mirror_url_changed?

  after_save :set_override_remote_mirror_available, unless: -> { Gitlab::CurrentSettings.current_application_settings.mirror_available }
  after_save :refresh_remote, if: :saved_change_to_mirror_url?
  after_update :reset_fields, if: :saved_change_to_mirror_url?

  after_commit :remove_remote, on: :destroy

  before_validation :store_credentials

  scope :enabled, -> { where(enabled: true) }
  scope :started, -> { with_update_status(:started) }
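
  # Mirror updates that have been running longer than expected:
  # MAX_INCREMENTAL_RUNTIME for mirrors that have completed an update before
  # (last_update_at is set), MAX_FIRST_RUNTIME for first-time updates.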
  scope :stuck, -> do
    started
      .where('(last_update_started_at < ? AND last_update_at IS NOT NULL)',
             MAX_INCREMENTAL_RUNTIME.ago)
      .or(where('(last_update_started_at < ? AND last_update_at IS NULL)',
                MAX_FIRST_RUNTIME.ago))
  end

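  # Lifecycle of a single mirror update:
  # none -> started -> finished | failed | to_retry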
  state_machine :update_status, initial: :none do
    event :update_start do
      transition any => :started
    end

    event :update_finish do
      transition started: :finished
    end

    event :update_fail do
      transition started: :failed
    end

    event :update_retry do
      transition started: :to_retry
    end

    state :started
    state :finished
    state :failed
    state :to_retry

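    # Bookkeeping around each transition: emit a metric, stamp the relevant
    # timestamps, and notify the user after the commit when an update fails.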
    after_transition any => :started do |remote_mirror, _|
      Gitlab::Metrics.add_event(:remote_mirrors_running)

      remote_mirror.update(last_update_started_at: Time.now)
    end

    after_transition started: :finished do |remote_mirror, _|
      Gitlab::Metrics.add_event(:remote_mirrors_finished)

      timestamp = Time.now
      remote_mirror.update!(
        last_update_at: timestamp,
        last_successful_update_at: timestamp,
        last_error: nil,
        error_notification_sent: false
      )
    end

    after_transition started: :failed do |remote_mirror|
      Gitlab::Metrics.add_event(:remote_mirrors_failed)

      remote_mirror.update(last_update_at: Time.now)

      remote_mirror.run_after_commit do
        RemoteMirrorNotificationWorker.perform_async(remote_mirror.id)
      end
    end
  end

  def remote_name
    super || fallback_remote_name
  end

  def update_failed?
    update_status == 'failed'
  end

  def update_in_progress?
    update_status == 'started'
  end

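  # Performs the actual push to the remote, with the options assembled in
  # #options_for_update.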
  def update_repository
    Gitlab::Git::RemoteMirror.new(
      project.repository.raw,
      remote_name,
      **options_for_update
    ).update
  end

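  # Options passed to the update: divergent-ref handling, an optional
  # protected-branch filter, and SSH credentials when the mirror URL uses SSH.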
  def options_for_update
    options = {
      keep_divergent_refs: keep_divergent_refs?
    }

    if only_protected_branches?
      options[:only_branches_matching] = project.protected_branches.pluck(:name)
    end

    if ssh_mirror_url?
      if ssh_key_auth? && ssh_private_key.present?
        options[:ssh_key] = ssh_private_key
      end

      if ssh_known_hosts.present?
        options[:known_hosts] = ssh_known_hosts
      end
    end

    options
  end

  def sync?
    enabled?
  end

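  # Schedules a mirror update. If an update started within the backoff window,
  # the job is delayed by #backoff_delay instead of being enqueued immediately.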
  def sync
    return unless sync?

    if recently_scheduled?
      RepositoryUpdateRemoteMirrorWorker.perform_in(backoff_delay, self.id, Time.now)
    else
      RepositoryUpdateRemoteMirrorWorker.perform_async(self.id, Time.now)
    end
  end

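  # A mirror only counts as enabled when the flag is set and the project
  # still allows remote mirrors, has a repository, and is not pending deletion.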
  def enabled
    return false unless project && super
    return false unless project.remote_mirror_available?
    return false unless project.repository_exists?
    return false if project.pending_delete?

    true
  end
  alias_method :enabled?, :enabled

  def disabled?
    !enabled?
  end

  def updated_since?(timestamp)
    return false if failed?

    last_update_started_at && last_update_started_at > timestamp
  end

  def mark_for_delete_if_blank_url
    mark_for_destruction if url.blank?
  end

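  # Stores the last error with credentials stripped from any embedded URLs,
  # so it is safe to display to users.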
  def update_error_message(error_message)
    self.last_error = Gitlab::UrlSanitizer.sanitize(error_message)
  end

  def mark_for_retry!(error_message)
    update_error_message(error_message)
    update_retry!
  end

  def mark_as_failed!(error_message)
    update_error_message(error_message)
    update_fail!
  end

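  # Credentials embedded in the URL are moved into the encrypted `credentials`
  # attribute and only the sanitized URL is persisted; the reader rebuilds the
  # full URL from both parts.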
  def url=(value)
    super(value) && return unless Gitlab::UrlSanitizer.valid?(value)

    mirror_url = Gitlab::UrlSanitizer.new(value)
    self.credentials ||= {}
    self.credentials = self.credentials.merge(mirror_url.credentials)

    super(mirror_url.sanitized_url)
  end

  def url
    if super
      Gitlab::UrlSanitizer.new(super, credentials: credentials).full_url
    end
  rescue
    super
  end

  def safe_url
    super(usernames_whitelist: %w[git])
  end

  def ensure_remote!
    return unless project
    return unless remote_name && remote_url

    # If this fails or the remote already exists, we won't know due to
    # https://gitlab.com/gitlab-org/gitaly/issues/1317
    project.repository.add_remote(remote_name, remote_url)
  end

  def after_sent_notification
    update_column(:error_notification_sent, true)
  end

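  # Mirrors limited to protected branches are rescheduled sooner than mirrors
  # that push all branches.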
  def backoff_delay
    if self.only_protected_branches
      PROTECTED_BACKOFF_DELAY
    else
      UNPROTECTED_BACKOFF_DELAY
    end
  end

  def max_runtime
    last_update_at.present? ? MAX_INCREMENTAL_RUNTIME : MAX_FIRST_RUNTIME
  end

  private

  def store_credentials
    # This is a necessary workaround for attr_encrypted, which doesn't otherwise
    # notice that the credentials have changed
    self.credentials = self.credentials
  end

  # The remote URL omits any password if SSH public-key authentication is in use
  def remote_url
    return url unless ssh_key_auth? && password.present?

    Gitlab::UrlSanitizer.new(read_attribute(:url), credentials: { user: user }).full_url
  rescue
    super
  end

  def fallback_remote_name
    return unless id

    "remote_mirror_#{id}"
  end

  def recently_scheduled?
    return false unless self.last_update_started_at

    self.last_update_started_at >= Time.now - backoff_delay
  end

  def reset_fields
    update_columns(
      last_error: nil,
      last_update_at: nil,
      last_successful_update_at: nil,
      update_status: 'finished',
      error_notification_sent: false
    )
  end

  def set_override_remote_mirror_available
    enabled = read_attribute(:enabled)

    project.update(remote_mirror_available_overridden: enabled)
  end

  def set_new_remote_name
    self.remote_name = "remote_mirror_#{SecureRandom.hex}"
  end

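  # After the mirror URL or credentials change, the remote registered under
  # the previous name is removed asynchronously and a remote with the new
  # name is added.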
  def refresh_remote
    return unless project

    # Before adding a new remote we have to delete the data from
    # the previous remote name
    prev_remote_name = remote_name_before_last_save || fallback_remote_name
    run_after_commit do
      project.repository.async_remove_remote(prev_remote_name)
    end

    project.repository.add_remote(remote_name, remote_url)
  end

  def remove_remote
    return unless project # could be pending to delete so don't need to touch the git repository

    project.repository.async_remove_remote(remote_name)
  end

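  # The mirror URL is considered changed when either the URL itself or the
  # stored credentials change.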
  def mirror_url_changed?
    url_changed? || credentials_changed?
  end

  def saved_change_to_mirror_url?
    saved_change_to_url? || saved_change_to_credentials?
  end
end

RemoteMirror.prepend_if_ee('EE::RemoteMirror')