# frozen_string_literal: true
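
# Sidekiq worker that runs a single background (data) migration via
# Gitlab::BackgroundMigration.perform. Jobs of the same migration class are
# spaced out with an exclusive lease so the database has time to recover
# between batches.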
class BackgroundMigrationWorker # rubocop:disable Scalability/IdempotentWorker
  include ApplicationWorker
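
  # The maximum number of times a job will try to obtain the exclusive lease
  # (being rescheduled in between attempts) before it gives up and only logs
  # a warning.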
  MAX_LEASE_ATTEMPTS = 5

  data_consistency :always

  sidekiq_options retry: 3

  feature_category :database
  urgency :throttled
  loggable_arguments 0, 1
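
  # Jobs for this worker are typically enqueued by (post-deployment) migrations
  # rather than being called directly. As an illustrative sketch only (the
  # migration class and arguments below are hypothetical):
  #
  #   BackgroundMigrationWorker.perform_async('MyMigrationClass', [from_id, to_id])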

  # The minimum amount of time between processing two jobs of the same migration
  # class.
  #
  # This interval is set to 2 minutes so autovacuuming and other
  # maintenance related tasks have plenty of time to clean up after a migration
  # has been performed.
  def self.minimum_interval
    2.minutes.to_i
  end

  # Performs the background migration.
  #
  # See Gitlab::BackgroundMigration.perform for more information.
  #
  # class_name - The class name of the background migration to run.
  # arguments - The arguments to pass to the migration class.
  # lease_attempts - The number of times we will try to obtain an exclusive
  #   lease on the class before giving up. See the merge request below for
  #   more discussion:
  #   https://gitlab.com/gitlab-org/gitlab/-/merge_requests/45298#note_434304956
  def perform(class_name, arguments = [], lease_attempts = MAX_LEASE_ATTEMPTS)
    with_context(caller_id: class_name.to_s) do
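      # A rescheduled job is enqueued with fewer lease attempts than the
      # default, which is how we detect that this run is a retry.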
      retried = lease_attempts != MAX_LEASE_ATTEMPTS
      attempts_left = lease_attempts - 1
      should_perform, ttl = perform_and_ttl(class_name, attempts_left, retried)
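
      # perform_and_ttl returns [nil, nil] when the job should be dropped
      # entirely, in which case we neither run nor reschedule it.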
      break if should_perform.nil?

      if should_perform
        Gitlab::BackgroundMigration.perform(class_name, arguments)
      else
        # If the lease could not be obtained this means either another process is
        # running a migration of this class or we ran one recently. In this case
        # we'll reschedule the job in such a way that it is picked up again around
        # the time the lease expires.
        self.class
          .perform_in(ttl || self.class.minimum_interval, class_name, arguments, attempts_left)
      end
    end
  end
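
  # Returns a two-element array: whether the migration should be performed and
  # the TTL left on the lease. [true, nil] is returned in the test environment,
  # and [nil, nil] when the job should be dropped after exhausting its attempts.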
  def perform_and_ttl(class_name, attempts_left, retried)
    # In test environments `perform_in` will run right away. This can then
    # lead to stack level errors in the above `#perform`. To work around this
    # we'll just perform the migration right away in the test environment.
    return [true, nil] if always_perform?
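
    # Run the migration only when we hold the per-class lease and replication
    # lag is within bounds.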
    lease = lease_for(class_name, retried)
    lease_obtained = !!lease.try_obtain
    healthy_db = healthy_database?
    perform = lease_obtained && healthy_db

    database_unhealthy_counter.increment if lease_obtained && !healthy_db

    # When the DB is unhealthy or the lease can't be obtained after several tries,
    # give up on the job and log a warning. Otherwise we could end up in an
    # infinite rescheduling loop. Jobs can be tracked in the database with
    # Gitlab::Database::BackgroundMigrationJob.
    if !perform && attempts_left < 0
      msg = if !lease_obtained
              'Job could not get an exclusive lease after several tries. Giving up.'
            else
              'Database was unhealthy after several tries. Giving up.'
            end

      Sidekiq.logger.warn(class: class_name, message: msg, job_id: jid)

      return [nil, nil]
    end

    [perform, lease.ttl]
  end
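
  # Builds the exclusive lease used to throttle jobs of the given migration
  # class. The lease TTL is the minimum interval, which is what spaces out
  # jobs of the same migration class.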
  def lease_for(class_name, retried)
    Gitlab::ExclusiveLease
      .new(lease_key_for(class_name, retried), timeout: self.class.minimum_interval)
  end

  def lease_key_for(class_name, retried)
    key = "#{self.class.name}:#{class_name}"
    # We use a different exclusive lock key for retried jobs to allow them to run
    # concurrently with the scheduled jobs.
    # See https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68763 for more information.
    key += ":retried" if retried
    key
  end

  def always_perform?
    Rails.env.test?
  end

  # Returns true if the database is healthy enough to allow the migration to be
  # performed.
  def healthy_database?
    !Postgresql::ReplicationSlot.lag_too_great?
  end

  def database_unhealthy_counter
    Gitlab::Metrics.counter(
      :background_migration_database_health_reschedules,
      'The number of times a background migration is rescheduled because the database is unhealthy.'
    )
  end
end