gitlab-org--gitlab-foss/app/models/ci/build_trace_chunk.rb

module Ci
  class BuildTraceChunk < ActiveRecord::Base
    include FastDestroyAll
    extend Gitlab::Ci::Model

    belongs_to :build, class_name: "Ci::Build", foreign_key: :build_id

    default_value_for :data_store, :redis

    WriteError = Class.new(StandardError)

    CHUNK_SIZE = 128.kilobytes
    CHUNK_REDIS_TTL = 1.week
    WRITE_LOCK_RETRY = 10
    WRITE_LOCK_SLEEP = 0.01.seconds
    WRITE_LOCK_TTL = 1.minute

    enum data_store: {
      redis: 1,
      db: 2
    }
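
    # The enum above gives each chunk `redis?` / `db?` predicates and
    # class-level `redis` / `db` scopes (standard Rails enum behavior);
    # both are relied on throughout this file.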

    class << self
      def redis_data_key(build_id, chunk_index)
        "gitlab:ci:trace:#{build_id}:chunks:#{chunk_index}"
      end
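
      # For illustration (values assumed): redis_data_key(42, 0) returns
      # "gitlab:ci:trace:42:chunks:0".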

      def redis_data_keys
        # NOTE: `redis` is the enum scope (chunks whose data lives in Redis),
        # not a Redis connection.
        redis.pluck(:build_id, :chunk_index).map do |data|
          redis_data_key(data.first, data.second)
        end
      end

      def redis_delete_data(keys)
        return if keys.empty?

        Gitlab::Redis::SharedState.with do |redis|
          redis.del(keys)
        end
      end

      ##
      # FastDestroyAll concerns
      def begin_fast_destroy
        redis_data_keys
      end

      ##
      # FastDestroyAll concerns
      def finalize_fast_destroy(keys)
        redis_delete_data(keys)
      end
    end
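
    # A sketch of the FastDestroyAll flow above (assuming the concern's usual
    # contract): `begin_fast_destroy` snapshots the Redis keys before the
    # rows are bulk-deleted, and `finalize_fast_destroy` receives that
    # snapshot afterwards to purge the external Redis data, so no orphaned
    # keys are left behind.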

    ##
    # Data is memoized for optimizing #size and #end_offset
    def data
      @data ||= get_data.to_s
    end

    def truncate(offset = 0)
      raise ArgumentError, 'Offset is out of range' if offset > size || offset < 0
      return if offset == size # Skip the following process as it doesn't affect anything

      self.append("", offset)
    end

    def append(new_data, offset)
      raise ArgumentError, 'Offset is out of range' if offset > size || offset < 0
      raise ArgumentError, 'Chunk size overflow' if CHUNK_SIZE < (offset + new_data.bytesize)

      set_data(data.byteslice(0, offset) + new_data)
    end
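
    # A hypothetical usage sketch (`chunk` is assumed for illustration):
    #
    #   chunk.append("new log output", chunk.size) # write at the current end
    #   chunk.truncate(100)                        # keep only the first 100 bytes
    #
    # Note that truncate is just append with an empty string at the cut point.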

    def size
      data&.bytesize.to_i
    end

    def start_offset
      chunk_index * CHUNK_SIZE
    end

    def end_offset
      start_offset + size
    end

    def range
      (start_offset...end_offset)
    end
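
    # Worked example (values assumed): with CHUNK_SIZE = 128.kilobytes
    # (131072 bytes), a full chunk at chunk_index 2 covers
    # range (262144...393216) of the overall trace.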

    def use_database!
      in_lock do
        break if db?
        break unless size > 0

        self.update!(raw_data: data, data_store: :db)
        self.class.redis_delete_data([redis_data_key])
      end
    end
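
    # Ordering note on use_database!: the row is updated to `:db` first and
    # the Redis key deleted only afterwards, so an interruption in between
    # leaves the data duplicated rather than lost.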

    private

    def get_data
      if redis?
        redis_data
      elsif db?
        raw_data
      else
        raise 'Unsupported data store'
      end&.force_encoding(Encoding::BINARY) # Redis/Database return UTF-8 string as default
    end

    def set_data(value)
      raise ArgumentError, 'too much data' if value.bytesize > CHUNK_SIZE

      in_lock do
        if redis?
          redis_set_data(value)
        elsif db?
          self.raw_data = value
        else
          raise 'Unsupported data store'
        end

        @data = value
        save! if changed?
      end

      schedule_to_db if full?
    end

    def schedule_to_db
      return if db?

      Ci::BuildTraceChunkFlushWorker.perform_async(id)
    end

    def full?
      size == CHUNK_SIZE
    end
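
    # Flush path in short: once appends fill a chunk to exactly CHUNK_SIZE,
    # `set_data` enqueues Ci::BuildTraceChunkFlushWorker, which presumably
    # invokes `use_database!` (the worker lives outside this file) to move
    # the bytes out of Redis asynchronously.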

    def redis_data
      Gitlab::Redis::SharedState.with do |redis|
        redis.get(redis_data_key)
      end
    end

    def redis_set_data(data)
      Gitlab::Redis::SharedState.with do |redis|
        redis.set(redis_data_key, data, ex: CHUNK_REDIS_TTL)
      end
    end
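
    # Each write re-sets the key with a fresh CHUNK_REDIS_TTL (1 week), so
    # actively written chunks keep being renewed while stale ones eventually
    # expire on their own.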

    def redis_data_key
      self.class.redis_data_key(build_id, chunk_index)
    end

    def in_lock
      write_lock_key = "trace_write:#{build_id}:chunks:#{chunk_index}"
      lease = Gitlab::ExclusiveLease.new(write_lock_key, timeout: WRITE_LOCK_TTL)
      retry_count = 0

      until uuid = lease.try_obtain
        # Keep trying until we obtain the lease. To prevent hammering Redis too
        # much we'll wait for a bit between retries.
        sleep(WRITE_LOCK_SLEEP)
        break if WRITE_LOCK_RETRY < (retry_count += 1)
      end

      raise WriteError, 'Failed to obtain write lock' unless uuid

      self.reload if self.persisted?
      return yield
    ensure
      Gitlab::ExclusiveLease.cancel(write_lock_key, uuid)
    end
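
    # Back-of-the-envelope on the lock: with WRITE_LOCK_RETRY = 10 and
    # WRITE_LOCK_SLEEP = 0.01 seconds, a contended caller makes up to 11
    # failed `try_obtain` attempts (~0.1 s of sleeping) before WriteError is
    # raised; WRITE_LOCK_TTL = 1.minute bounds how long a crashed holder can
    # block others.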
  end
end