# frozen_string_literal: true

class RemoteMirror < ApplicationRecord
  include AfterCommitQueue
  include MirrorAuthentication
  include SafeUrl
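
  # Maximum time a single mirror update may run before the mirror is
  # considered stuck (see the .stuck scope and #max_runtime). The first update
  # of a mirror is allowed more time than incremental updates.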
  MAX_FIRST_RUNTIME = 3.hours
  MAX_INCREMENTAL_RUNTIME = 1.hour
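
  # Backoff used when rescheduling an update that started too recently (see
  # #backoff_delay); mirrors that push only protected branches retry sooner.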
  PROTECTED_BACKOFF_DELAY = 1.minute
  UNPROTECTED_BACKOFF_DELAY = 5.minutes

  attr_encrypted :credentials,
                 key: Settings.attr_encrypted_db_key_base,
                 marshal: true,
                 encode: true,
                 mode: :per_attribute_iv_and_salt,
                 insecure_mode: true,
                 algorithm: 'aes-256-cbc'

  belongs_to :project, inverse_of: :remote_mirrors

  validates :url, presence: true, public_url: { schemes: %w(ssh git http https), allow_blank: true, enforce_user: true }

  after_save :set_override_remote_mirror_available, unless: -> { Gitlab::CurrentSettings.current_application_settings.mirror_available }
  after_update :reset_fields, if: :saved_change_to_mirror_url?

  before_validation :store_credentials

  scope :enabled, -> { where(enabled: true) }
  scope :started, -> { with_update_status(:started) }
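
  # Mirrors whose update has been running longer than the allowed runtime and
  # should be picked up again.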
  scope :stuck, -> do
    started
      .where('(last_update_started_at < ? AND last_update_at IS NOT NULL)',
             MAX_INCREMENTAL_RUNTIME.ago)
      .or(where('(last_update_started_at < ? AND last_update_at IS NULL)',
                MAX_FIRST_RUNTIME.ago))
  end
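
  # Lifecycle of a mirror update: :none -> :started, then :finished, :failed
  # or :to_retry. The transition callbacks record timing, clear or keep the
  # last error and trigger failure notifications.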
  state_machine :update_status, initial: :none do
    event :update_start do
      transition any => :started
    end

    event :update_finish do
      transition started: :finished
    end

    event :update_fail do
      transition started: :failed
    end

    event :update_retry do
      transition started: :to_retry
    end

    state :started
    state :finished
    state :failed
    state :to_retry

    after_transition any => :started do |remote_mirror, _|
      Gitlab::Metrics.add_event(:remote_mirrors_running)

      remote_mirror.update(last_update_started_at: Time.current)
    end

    after_transition started: :finished do |remote_mirror, _|
      Gitlab::Metrics.add_event(:remote_mirrors_finished)

      timestamp = Time.current
      remote_mirror.update!(
        last_update_at: timestamp,
        last_successful_update_at: timestamp,
        last_error: nil,
        error_notification_sent: false
      )
    end

    after_transition started: :failed do |remote_mirror|
      remote_mirror.send_failure_notifications
    end
  end
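
  # Convenience predicates over the update_status state machine column.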
  def update_failed?
    update_status == 'failed'
  end

  def update_in_progress?
    update_status == 'started'
  end
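
  # Performs the actual push to the remote through Gitlab::Git::RemoteMirror,
  # using the options assembled in #options_for_update.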
  def update_repository
    Gitlab::Git::RemoteMirror.new(
      project.repository.raw,
      remote_url,
      **options_for_update
    ).update
  end

  def options_for_update
    options = {
      keep_divergent_refs: keep_divergent_refs?
    }

    if only_protected_branches?
      options[:only_branches_matching] = project.protected_branches.pluck(:name)
    end

    if ssh_mirror_url?
      if ssh_key_auth? && ssh_private_key.present?
        options[:ssh_key] = ssh_private_key
      end

      if ssh_known_hosts.present?
        options[:known_hosts] = ssh_known_hosts
      end
    end

    options
  end
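
  # Scheduling: #sync enqueues RepositoryUpdateRemoteMirrorWorker for enabled
  # mirrors, applying a backoff delay when an update started recently.
  # A minimal usage sketch (assuming `project` has remote mirrors configured):
  #
  #   project.remote_mirrors.enabled.each(&:sync)
  #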
  def sync?
    enabled?
  end

  def sync
    return unless sync?

    if schedule_with_delay?
      RepositoryUpdateRemoteMirrorWorker.perform_in(backoff_delay, self.id, Time.current)
    else
      RepositoryUpdateRemoteMirrorWorker.perform_async(self.id, Time.current)
    end
  end
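
  # Overrides the enabled reader: a mirror only counts as enabled when
  # mirroring is available for the project, the repository exists and the
  # project is not pending deletion.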
  def enabled
    return false unless project && super
    return false unless project.remote_mirror_available?
    return false unless project.repository_exists?
    return false if project.pending_delete?

    true
  end
  alias_method :enabled?, :enabled

  def disabled?
    !enabled?
  end
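
  # True when an update started after the given timestamp; scheduled jobs use
  # this to skip mirrors that have already been serviced. Always false once
  # the mirror has failed.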
  def updated_since?(timestamp)
    return false if failed?

    last_update_started_at && last_update_started_at > timestamp
  end

  def mark_for_delete_if_blank_url
    mark_for_destruction if url.blank?
  end

  def update_error_message(error_message)
    self.last_error = Gitlab::UrlSanitizer.sanitize(error_message)
  end

  def mark_for_retry!(error_message)
    update_error_message(error_message)
    update_retry!
  end

  def mark_as_failed!(error_message)
    update_error_message(error_message)
    update_fail!
  end

  # Force the mirror into the retry state
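  # without going through the state machine, so transition callbacks do not
  # run and the record is saved without validation.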
  def hard_retry!(error_message)
    update_error_message(error_message)
    self.update_status = :to_retry

    save!(validate: false)
  end

  # Force the mirror into the failed state
  def hard_fail!(error_message)
    update_error_message(error_message)
    self.update_status = :failed

    save!(validate: false)

    send_failure_notifications
  end
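
  # Assigning a URL extracts any embedded credentials into the encrypted
  # credentials attribute and stores only the sanitized URL; reading it back
  # re-adds the credentials.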
  def url=(value)
    super(value) && return unless Gitlab::UrlSanitizer.valid?(value)

    mirror_url = Gitlab::UrlSanitizer.new(value)
    self.credentials ||= {}
    self.credentials = self.credentials.merge(mirror_url.credentials)

    super(mirror_url.sanitized_url)
  end

  def url
    if super
      Gitlab::UrlSanitizer.new(super, credentials: credentials).full_url
    end
  rescue StandardError
    super
  end

  def safe_url
    super(allowed_usernames: %w[git])
  end

  def bare_url
    Gitlab::UrlSanitizer.new(read_attribute(:url)).full_url
  end

  def after_sent_notification
    update_column(:error_notification_sent, true)
  end
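
  # How long to wait before rescheduling, depending on whether only protected
  # branches are mirrored.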
  def backoff_delay
    if self.only_protected_branches
      PROTECTED_BACKOFF_DELAY
    else
      UNPROTECTED_BACKOFF_DELAY
    end
  end

  def max_runtime
    last_update_at.present? ? MAX_INCREMENTAL_RUNTIME : MAX_FIRST_RUNTIME
  end
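
  # Emits the failure metric, schedules a user notification after commit and
  # stamps last_update_at so the failed attempt is recorded.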
  def send_failure_notifications
    Gitlab::Metrics.add_event(:remote_mirrors_failed)

    run_after_commit do
      RemoteMirrorNotificationWorker.perform_async(id)
    end

    self.last_update_at = Time.current
    save!(validate: false)
  end

  private

  def store_credentials
    # This is a necessary workaround for attr_encrypted, which doesn't otherwise
    # notice that the credentials have changed
    self.credentials = self.credentials
  end

  # The remote URL omits any password if SSH public-key authentication is in use
  def remote_url
    return url unless ssh_key_auth? && password.present?

    Gitlab::UrlSanitizer.new(read_attribute(:url), credentials: { user: user }).full_url
  rescue StandardError
    super
  end
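
  # Delay the update when the previous one started within the backoff window,
  # unless the :remote_mirror_no_delay ops feature flag is enabled or no
  # update has started yet.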
  def schedule_with_delay?
    return false if Feature.enabled?(:remote_mirror_no_delay, project, type: :ops)
    return false unless self.last_update_started_at

    self.last_update_started_at >= Time.current - backoff_delay
  end

  def reset_fields
    update_columns(
      last_error: nil,
      last_update_at: nil,
      last_successful_update_at: nil,
      update_status: 'finished',
      error_notification_sent: false
    )
  end

  def set_override_remote_mirror_available
    enabled = read_attribute(:enabled)

    project.update(remote_mirror_available_overridden: enabled)
  end

  def mirror_url_changed?
    url_changed? || attribute_changed?(:credentials)
  end

  def saved_change_to_mirror_url?
    saved_change_to_url? || saved_change_to_credentials?
  end
end
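
# prepend_mod_with allows EE (and other GitLab editions) to extend this model.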
RemoteMirror.prepend_mod_with('RemoteMirror')