Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2021-08-13 15:11:15 +00:00
parent d7ba2e953a
commit 7134e029c5
44 changed files with 841 additions and 348 deletions

View File

@ -312,6 +312,9 @@ export default {
id: milestoneId,
});
},
setWeight(weight) {
this.$set(this.board, 'weight', weight);
},
},
};
</script>
@ -381,6 +384,7 @@ export default {
@set-board-labels="setBoardLabels"
@set-assignee="setAssignee"
@set-milestone="setMilestone"
@set-weight="setWeight"
/>
</form>
</gl-modal>

View File

@ -30,7 +30,7 @@ export default {
<gl-button
category="tertiary"
size="small"
class="float-right gl-text-gray-900! js-sidebar-dropdown-toggle gl-mr-n2"
class="float-right js-sidebar-dropdown-toggle gl-mr-n2"
data-qa-selector="labels_edit_button"
@click="toggleDropdownContents"
>

View File

@ -78,6 +78,10 @@ class ProjectTeam
members.where(id: member_user_ids)
end
def members_with_access_levels(access_levels = [])
fetch_members(access_levels)
end
def guests
@guests ||= fetch_members(Gitlab::Access::GUEST)
end

View File

@ -124,7 +124,6 @@ module Auth
end
def migration_eligible(project, actions)
return unless actions.include?('push')
return unless Feature.enabled?(:container_registry_migration_phase1)
# The migration process will start by allowing only specific test and gitlab-org projects using the

View File

@ -17,7 +17,9 @@ module Packages
})
end
::Packages::Composer::CacheUpdateWorker.perform_async(created_package.project_id, created_package.name, nil)
unless Feature.enabled?(:remove_composer_v1_cache_code, project)
::Packages::Composer::CacheUpdateWorker.perform_async(created_package.project_id, created_package.name, nil)
end
created_package
end

View File

@ -35,8 +35,24 @@ module Gitlab
Gitlab::GithubImport::ObjectCounter.increment(project, object_type, :imported)
info(project.id, message: 'importer finished')
rescue KeyError => e
# This exception will be more useful in development when a new
# Representation is created but the developer forgot to add a
# `:github_id` field.
Gitlab::Import::ImportFailureService.track(
project_id: project.id,
error_source: importer_class.name,
exception: e,
fail_import: true
)
raise(e)
rescue StandardError => e
error(project.id, e, hash)
Gitlab::Import::ImportFailureService.track(
project_id: project.id,
error_source: importer_class.name,
exception: e
)
end
def object_type
@ -62,22 +78,6 @@ module Gitlab
Logger.info(log_attributes(project_id, extra))
end
def error(project_id, exception, data = {})
Logger.error(
log_attributes(
project_id,
message: 'importer failed',
'error.message': exception.message,
'github.data': data
)
)
Gitlab::ErrorTracking.track_and_raise_exception(
exception,
log_attributes(project_id, import_source: :github)
)
end
def log_attributes(project_id, extra = {})
extra.merge(
project_id: project_id,

View File

@ -17,13 +17,10 @@ module Gitlab
sidekiq_options dead: false, retry: 5
sidekiq_retries_exhausted do |msg, e|
Logger.error(
event: :github_importer_exhausted,
message: msg['error_message'],
class: msg['class'],
args: msg['args'],
exception_message: e.message,
exception_backtrace: e.backtrace
Gitlab::Import::ImportFailureService.track(
project_id: msg['args'][0],
exception: e,
fail_import: true
)
end
end

View File

@ -15,7 +15,14 @@ module Gitlab
info(project_id, message: 'stage finished')
rescue StandardError => e
error(project_id, e)
Gitlab::Import::ImportFailureService.track(
project_id: project_id,
exception: e,
error_source: self.class.name,
fail_import: abort_on_failure
)
raise(e)
end
# client - An instance of Gitlab::GithubImport::Client.
@ -34,25 +41,14 @@ module Gitlab
end
# rubocop: enable CodeReuse/ActiveRecord
def abort_on_failure
false
end
private
def info(project_id, extra = {})
Logger.info(log_attributes(project_id, extra))
end
def error(project_id, exception)
Logger.error(
log_attributes(
project_id,
message: 'stage failed',
'error.message': exception.message
)
)
Gitlab::ErrorTracking.track_and_raise_exception(
exception,
log_attributes(project_id, import_source: :github)
)
Gitlab::GithubImport::Logger.info(log_attributes(project_id, extra))
end
def log_attributes(project_id, extra = {})

View File

@ -4,7 +4,7 @@ module Deployments
class HooksWorker # rubocop:disable Scalability/IdempotentWorker
include ApplicationWorker
data_consistency :always
data_consistency :delayed, feature_flag: :load_balancing_for_deployments_hooks_worker
queue_namespace :deployment
feature_category :continuous_delivery

View File

@ -28,7 +28,7 @@ module Gitlab
info(project.id, message: "starting importer", importer: 'Importer::RepositoryImporter')
importer = Importer::RepositoryImporter.new(project, client)
return unless importer.execute
importer.execute
counter.increment
@ -41,6 +41,10 @@ module Gitlab
'The number of imported GitHub repositories'
)
end
def abort_on_failure
true
end
end
end
end

View File

@ -5,6 +5,8 @@ module Gitlab
module StuckImportJob
extend ActiveSupport::Concern
StuckImportJobError = Class.new(StandardError)
IMPORT_JOBS_EXPIRATION = 24.hours.seconds.to_i
included do
@ -34,9 +36,9 @@ module Gitlab
end
def mark_imports_without_jid_as_failed!
enqueued_import_states_without_jid.each do |import_state|
import_state.mark_as_failed(error_message)
end.size
enqueued_import_states_without_jid
.each(&method(:mark_as_failed))
.size
end
def mark_imports_with_jid_as_failed!
@ -58,9 +60,20 @@ module Gitlab
job_ids: completed_import_state_jids
)
completed_import_states.each do |import_state|
import_state.mark_as_failed(error_message)
end.size
completed_import_states
.each(&method(:mark_as_failed))
.size
end
def mark_as_failed(import_state)
raise StuckImportJobError, error_message
rescue StuckImportJobError => e
Gitlab::Import::ImportFailureService.track(
import_state: import_state,
exception: e,
error_source: self.class.name,
fail_import: true
)
end
def enqueued_import_states

View File

@ -0,0 +1,8 @@
---
name: load_balancing_for_deployments_hooks_worker
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67878
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/338342
milestone: '14.2'
type: development
group: group::release
default_enabled: false

View File

@ -0,0 +1,8 @@
---
name: remove_composer_v1_cache_code
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67843
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/338264
milestone: '14.2'
type: development
group: group::package
default_enabled: false

View File

@ -70,6 +70,7 @@ exceptions:
- HDD
- HEAD
- HIPAA
- HLL
- HTML
- HTTP
- HTTPS

View File

@ -21,7 +21,7 @@ When you are optimizing your SQL queries, there are two dimensions to pay attent
| Queries in a migration | `100ms` | This is different than the total [migration time](migration_style_guide.md#how-long-a-migration-should-take). |
| Concurrent operations in a migration | `5min` | Concurrent operations do not block the database, but they block the GitLab update. This includes operations such as `add_concurrent_index` and `add_concurrent_foreign_key`. |
| Background migrations | `1s` | |
| Service Ping | `1s` | See the [Service Ping docs](service_ping/index.md#developing-and-testing-service-ping) for more details. |
| Service Ping | `1s` | See the [Service Ping docs](service_ping/index.md#develop-and-test-service-ping) for more details. |
- When analyzing your query's performance, pay attention to if the time you are seeing is on a [cold or warm cache](#cold-and-warm-cache). These guidelines apply for both cache types.
- When working with batched queries, change the range and batch size to see how it affects the query timing and caching.

View File

@ -82,9 +82,9 @@ Registration is not yet required for participation, but will be added in a futur
## View the Service Ping payload **(FREE SELF)**
You can view the exact JSON payload sent to GitLab Inc. in the administration panel. To view the payload:
You can view the exact JSON payload sent to GitLab Inc. in the Admin Area. To view the payload:
1. Sign in as a user with [Administrator](../../user/permissions.md) permissions.
1. Sign in as a user with the [Administrator](../../user/permissions.md) role.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > Metrics and profiling**.
1. Expand the **Usage statistics** section.
@ -106,7 +106,7 @@ configuration file.
To disable Service Ping in the GitLab UI:
1. Sign in as a user with [Administrator](../../user/permissions.md) permissions.
1. Sign in as a user with the [Administrator](../../user/permissions.md) role.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > Metrics and profiling**.
1. Expand the **Usage statistics** section.
@ -116,7 +116,7 @@ To disable Service Ping in the GitLab UI:
### Disable Service Ping using the configuration file
To disable Service Ping and prevent it from being configured in the future through
the admin area:
the Admin Area:
**For installations using the Linux package:**
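For example, a minimal sketch, assuming the Omnibus key `usage_ping_enabled` (confirm the key name against your version's `gitlab.rb` template):

```ruby
# /etc/gitlab/gitlab.rb — assumed setting name for illustration.
# Disables Service Ping at the configuration level.
gitlab_rails['usage_ping_enabled'] = false
```

Reconfigure afterwards with `sudo gitlab-ctl reconfigure` for the change to take effect.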
@ -238,9 +238,9 @@ There are several types of counters in `usage_data.rb`:
- **Redis Counters:** Used for in-memory counts.
NOTE:
Only use the provided counter methods. Each counter method contains a built in fail safe to isolate each counter to avoid breaking the entire Service Ping.
Only use the provided counter methods. Each counter method contains a built-in fail-safe mechanism that isolates each counter to avoid breaking the entire Service Ping process.
### Using instrumentation classes
### Instrumentation classes
We recommend you use [instrumentation classes](metrics_instrumentation.md) in `usage_data.rb` where possible.
@ -253,7 +253,7 @@ You should add it to `usage_data.rb` as follows:
boards: add_metric('CountBoardsMetric', time_frame: 'all'),
```
### Why batch counting
### Batch counting
For large tables, PostgreSQL can take a long time to count rows due to MVCC [(Multi-version Concurrency Control)](https://en.wikipedia.org/wiki/Multiversion_concurrency_control). Batch counting is a counting method where a single large query is broken into multiple smaller queries. For example, instead of a single query querying 1,000,000 records, with batch counting, you can execute 100 queries of 10,000 records each. Batch counting is useful for avoiding database timeouts as each batch query is significantly shorter than one single long running query.
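As a rough sketch of the idea (a hand-rolled illustration, not the actual `count` helper; the model and batch size are arbitrary):

```ruby
# Hand-rolled sketch of batch counting over primary-key ranges.
# Each query scans at most batch_size ids, so no single statement
# runs long enough to hit a database timeout.
def batched_count(relation, batch_size: 10_000)
  start_id  = relation.minimum(:id)
  finish_id = relation.maximum(:id)
  return 0 unless start_id && finish_id

  (start_id..finish_id).step(batch_size).sum do |lower|
    relation.where(id: lower...(lower + batch_size)).count
  end
end
```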
@ -266,18 +266,18 @@ For GitLab.com, there are extremely large tables with 15 second query timeouts,
| `merge_request_diff_files` | 1082 |
| `events` | 514 |
The following operation methods are available for your use:
The following operation methods are available:
- [Ordinary Batch Counters](#ordinary-batch-counters)
- [Distinct Batch Counters](#distinct-batch-counters)
- [Sum Batch Operation](#sum-batch-operation)
- [Add Operation](#add-operation)
- [Estimated Batch Counters](#estimated-batch-counters)
- [Ordinary batch counters](#ordinary-batch-counters)
- [Distinct batch counters](#distinct-batch-counters)
- [Sum batch operation](#sum-batch-operation)
- [Add operation](#add-operation)
- [Estimated batch counters](#estimated-batch-counters)
Batch counting requires indexes on columns to calculate max, min, and range queries. In some cases,
you may need to add a specialized index on the columns involved in a counter.
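For example, a hypothetical migration using the `add_concurrent_index` helper mentioned earlier (table, column, and index name are placeholders):

```ruby
# frozen_string_literal: true

# Hypothetical migration; table, column, and index name are illustrative.
class AddAuthorIdIndexForCounters < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  # add_concurrent_index must run outside a transaction.
  disable_ddl_transaction!

  INDEX_NAME = 'index_issues_on_author_id_for_counters'

  def up
    add_concurrent_index :issues, :author_id, name: INDEX_NAME
  end

  def down
    remove_concurrent_index_by_name :issues, INDEX_NAME
  end
end
```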
### Ordinary Batch Counters
### Ordinary batch counters
Handles `ActiveRecord::StatementInvalid` error
@ -301,7 +301,7 @@ count(::Clusters::Cluster.aws_installed.enabled, :cluster_id)
count(::Clusters::Cluster.aws_installed.enabled, :cluster_id, start: ::Clusters::Cluster.minimum(:id), finish: ::Clusters::Cluster.maximum(:id))
```
### Distinct Batch Counters
### Distinct batch counters
Handles `ActiveRecord::StatementInvalid` error
@ -319,7 +319,7 @@ Arguments:
- `end`: custom end of the batch counting to avoid complex min calculations
WARNING:
Counting over non-unique columns can lead to performance issues. Take a look at the [iterating tables in batches](../iterating_tables_in_batches.md) guide for more details.
Counting over non-unique columns can lead to performance issues. For more information, see the [iterating tables in batches](../iterating_tables_in_batches.md) guide.
Examples:
@ -329,7 +329,7 @@ distinct_count(::Note.with_suggestions.where(time_period), :author_id, start: ::
distinct_count(::Clusters::Applications::CertManager.where(time_period).available.joins(:cluster), 'clusters.user_id')
```
### Sum Batch Operation
### Sum batch operation
Handles `ActiveRecord::StatementInvalid` error
@ -351,7 +351,7 @@ Examples:
sum(JiraImportState.finished, :imported_issues_count)
```
### Grouping & Batch Operations
### Grouping and batch operations
The `count`, `distinct_count`, and `sum` batch counters can accept an `ActiveRecord::Relation`
object, which groups by a specified column. With a grouped relation, the methods do batch counting,
@ -370,7 +370,7 @@ sum(Issue.group(:state_id), :weight))
# returns => {1=>3542, 2=>6820}
```
### Add Operation
### Add operation
Handles `StandardError`.
@ -380,7 +380,7 @@ Sum the values given as parameters.
Method: `add(*args)`
Examples
Examples:
```ruby
project_imports = distinct_count(::Project.where.not(import_type: nil), :creator_id)
@ -389,7 +389,7 @@ bulk_imports = distinct_count(::BulkImport, :user_id)
add(project_imports, bulk_imports)
```
### Estimated Batch Counters
### Estimated batch counters
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/48233) in GitLab 13.7.
@ -424,7 +424,7 @@ The method includes the following prerequisites:
1. The supplied `relation` must include the primary key defined as the numeric column.
For example: `id bigint NOT NULL`.
1. The `estimate_batch_distinct_count` can handle a joined relation. To use its ability to
count non-unique columns, the joined relation **must NOT** have a one-to-many relationship,
count non-unique columns, the joined relation **must not** have a one-to-many relationship,
such as `has_many :boards`.
1. Both `start` and `finish` arguments should always represent primary key relationship values,
even if the estimated count refers to another column, for example:
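A sketch of such a call (the relation and bounds are illustrative):

```ruby
# start/finish are primary-key bounds even though the distinct count
# is taken over another column (author_id here).
estimate_batch_distinct_count(
  ::Note,
  :author_id,
  start: ::Note.minimum(:id),
  finish: ::Note.maximum(:id)
)
```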
@ -468,7 +468,7 @@ When instrumenting metric with usage of estimated batch counter please add
...
```
### Redis Counters
### Redis counters
Handles `::Redis::CommandError` and `Gitlab::UsageDataCounters::BaseCounter::UnknownEvent`.
Returns -1 when a block is sent, or a hash with all values set to -1 when a `counter(Gitlab::UsageDataCounters)` is sent
@ -481,14 +481,14 @@ Arguments:
- `counter`: a counter from `Gitlab::UsageDataCounters`, that has `fallback_totals` method implemented
- or a `block`: which is evaluated
#### Ordinary Redis Counters
#### Ordinary Redis counters
Examples of implementation:
- Using Redis methods [`INCR`](https://redis.io/commands/incr), [`GET`](https://redis.io/commands/get), and [`Gitlab::UsageDataCounters::WikiPageCounter`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/wiki_page_counter.rb)
- Using Redis methods [`HINCRBY`](https://redis.io/commands/hincrby), [`HGETALL`](https://redis.io/commands/hgetall), and [`Gitlab::UsageCounters::PodLogs`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_counters/pod_logs.rb)
##### UsageData API Tracking
##### UsageData API tracking
<!-- There's nearly identical content in `##### Adding new events`. If you fix errors here, you may need to fix the same errors in the other location. -->
@ -510,7 +510,7 @@ Examples of implementation:
| :-------- | :--- | :------- | :---------- |
| `event` | string | yes | The event name to be tracked |
Response
Response:
- `200` if event was tracked
- `400 Bad request` if event parameter is missing
@ -527,7 +527,7 @@ Examples of implementation:
api.trackRedisCounterEvent('my_already_defined_event_name'),
```
#### Redis HLL Counters
#### Redis HLL counters
WARNING:
HyperLogLog (HLL) is a probabilistic algorithm and its **results always include some small error**. According to [Redis documentation](https://redis.io/commands/pfcount), data from
@ -537,7 +537,7 @@ With `Gitlab::UsageDataCounters::HLLRedisCounter` we have available data structu
Implemented using Redis methods [PFADD](https://redis.io/commands/pfadd) and [PFCOUNT](https://redis.io/commands/pfcount).
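Conceptually, with the raw `redis` gem (a sketch, not the counter class itself; the key name is made up):

```ruby
require 'redis'

redis = Redis.new

# PFADD registers members in the HyperLogLog structure; re-adding
# the same member does not change the estimate.
redis.pfadd('hll_sketch_example', 'user_42')
redis.pfadd('hll_sketch_example', 'user_42')
redis.pfadd('hll_sketch_example', 'user_7')

# PFCOUNT returns the approximate number of unique members (~2 here).
redis.pfcount('hll_sketch_example')
```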
##### Adding new events
##### Add new events
1. Define events in [`known_events`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events/).
@ -716,7 +716,7 @@ Next, get the unique events for the current week.
##### Recommendations
We have the following recommendations for [Adding new events](#adding-new-events):
We have the following recommendations for [adding new events](#add-new-events):
- Event aggregation: weekly.
- Key expiry time:
@ -726,7 +726,7 @@ We have the following recommendations for [Adding new events](#adding-new-events
- For feature flags triggered by another service, set `default_enabled: false`.
- Events can be triggered using the `UsageData` API, which helps when there are > 10 events per change
##### Enable/Disable Redis HLL tracking
##### Enable or disable Redis HLL tracking
Events are tracked behind optional [feature flags](../feature_flags/index.md) due to concerns for Redis performance and scalability.
@ -752,8 +752,8 @@ We can also disable tracking completely by using the global flag:
All events added in [`known_events/common.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events/common.yml) are automatically added to Service Data generation under the `redis_hll_counters` key. This column is stored in [version-app as a JSON](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/db/schema.rb#L209).
For each event we add metrics for the weekly and monthly time frames, and totals for each where applicable:
- `#{event_name}_weekly`: Data for 7 days for daily [aggregation](#adding-new-events) events and data for the last complete week for weekly [aggregation](#adding-new-events) events.
- `#{event_name}_monthly`: Data for 28 days for daily [aggregation](#adding-new-events) events and data for the last 4 complete weeks for weekly [aggregation](#adding-new-events) events.
- `#{event_name}_weekly`: Data for 7 days for daily [aggregation](#add-new-events) events and data for the last complete week for weekly [aggregation](#add-new-events) events.
- `#{event_name}_monthly`: Data for 28 days for daily [aggregation](#add-new-events) events and data for the last 4 complete weeks for weekly [aggregation](#add-new-events) events.
The Redis HLL implementation automatically calculates total metrics if there is more than one metric for the same category, aggregation, and Redis slot.
@ -786,7 +786,7 @@ Example of `redis_hll_counters` data:
}
```
Example usage:
Example:
```ruby
# Redis Counters
@ -802,7 +802,7 @@ Gitlab::UsageDataCounters::HLLRedisCounter.track_event('users_expanding_vulnerab
redis_usage_data { Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(event_names: 'users_expanding_vulnerabilities', start_date: 28.days.ago, end_date: Date.current) }
```
### Alternative Counters
### Alternative counters
Handles `StandardError` and falls back to -1, so that a single exception does not cause all measures to fail.
Mainly used for settings and configurations.
@ -815,7 +815,7 @@ Arguments:
- or a `block`: which is evaluated
- `fallback: -1`: the common value used for any metrics that are failing.
Usage:
Example:
```ruby
alt_usage_data { Gitlab::VERSION }
@ -823,25 +823,25 @@ alt_usage_data { Gitlab::CurrentSettings.uuid }
alt_usage_data(999)
```
### Adding counters to build new metrics
### Add counters to build new metrics
When adding the results of two counters, use the `add` Service Data method that
handles fallback values and exceptions. It also generates a valid [SQL export](#exporting-service-ping-sql-queries-and-definitions).
handles fallback values and exceptions. It also generates a valid [SQL export](#export-service-ping-sql-queries-and-definitions).
Example usage:
Example:
```ruby
add(User.active, User.bot)
```
### Prometheus Queries
### Prometheus queries
In those cases where operational metrics should be part of Service Ping, a database or Redis query is unlikely
to provide useful data. Instead, Prometheus might be more appropriate, because most GitLab architectural
components publish metrics to it that can be queried back, aggregated, and included as Service Data.
NOTE:
Prometheus as a data source for Service Ping is currently only available for single-node Omnibus installations
Prometheus as a data source for Service Ping is only available for single-node Omnibus installations
that are running the [bundled Prometheus](../../administration/monitoring/prometheus/index.md) instance.
To query Prometheus for metrics, a helper method is available to `yield` a fully configured
@ -854,10 +854,10 @@ with_prometheus_client do |client|
end
```
Please refer to [the `PrometheusClient` definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/prometheus_client.rb)
Refer to [the `PrometheusClient` definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/prometheus_client.rb)
for how to use its API to query for data.
### Fallback values for UsagePing
### Fallback values for Service Ping
We return fallback values in these cases:
@ -867,11 +867,11 @@ We return fallback values in these cases:
| Timeouts, general failures | -1 |
| Standard errors in counters | -2 |
## Developing and testing Service Ping
## Develop and test Service Ping
### 1. Naming and placing the metrics
### 1. Name and place the metric
Add the metric in one of the top level keys
Add the metric in one of the top level keys:
- `settings`: for settings related metrics.
- `counts_weekly`: for counters that have data for the most recent 7 days.
@ -884,8 +884,8 @@ The metric YAML generator can suggest a metric name for you. To generate a metri
first instrument the metric at the provided `key_path`, generate the metrics YAML definition, then
return to the instrumentation and update it.
1. Add the metric instrumentation within `lib/gitlab/usage_data.rb` inside one
of the [top level keys](index.md#1-naming-and-placing-the-metrics) using any name you choose.
1. Add the metric instrumentation to `lib/gitlab/usage_data.rb` inside one
of the [top level keys](index.md#1-name-and-place-the-metric), using any name you choose.
1. Run the [metrics YAML generator](metrics_dictionary.md#metrics-definition-and-validation).
1. Use the metric name suggestion to select a suitable metric name.
1. Update the instrumentation you created in the first step and change the metric name to the suggested name.
@ -950,19 +950,19 @@ We also use `#database-lab` and [explain.depesz.com](https://explain.depesz.com/
### 6. Add new metric to Versions Application
Check if new metrics need to be added to the Versions Application. See `usage_data` [schema](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/db/schema.rb#L147) and Service Data [parameters accepted](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/app/services/usage_ping.rb). Any metrics added under the `counts` key are saved in the `stats` column.
Check if the new metric must be added to the Versions Application. See `usage_data` [schema](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/db/schema.rb#L147) and Service Data [parameters accepted](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/app/services/usage_ping.rb). Any metrics added under the `counts` key are saved in the `stats` column.
### 7. Add the feature label
Add the `feature` label to the Merge Request for new Service Ping metrics. These are user-facing changes and are part of expanding the Service Ping feature.
### 8. Add a changelog
### 8. Add a changelog entry
Ensure you comply with the [Changelog entries guide](../changelog.md).
### 9. Ask for a Product Intelligence Review
### 9. Ask for a Product Intelligence review
On GitLab.com, we have DangerBot set up to monitor Product Intelligence related files and DangerBot recommends a [Product Intelligence review](review_guidelines.md).
On GitLab.com, we have DangerBot set up to monitor Product Intelligence related files and recommend a [Product Intelligence review](review_guidelines.md).
### 10. Verify your metric
@ -1029,15 +1029,15 @@ Three kinds of components may export data to Prometheus, and are included in Ser
This is the recommended approach to test Prometheus based Service Ping.
The easiest way to verify your changes is to build a new Omnibus image from your code branch by using CI, then download the image
The easiest way to verify your changes is to build a new Omnibus image from your code branch using CI/CD, download the image,
and run a local container instance:
1. From your merge request, click on the `qa` stage, then trigger the `package-and-qa` job. This job triggers an Omnibus
build in a [downstream pipeline of the `omnibus-gitlab-mirror` project](https://gitlab.com/gitlab-org/build/omnibus-gitlab-mirror/-/pipelines).
1. From your merge request, select the `qa` stage, then trigger the `package-and-qa` job. This job triggers an Omnibus
build in a [downstream pipeline of the `omnibus-gitlab-mirror` project](https://gitlab.com/gitlab-org/build/omnibus-gitlab-mirror/-/pipelines).
1. In the downstream pipeline, wait for the `gitlab-docker` job to finish.
1. Open the job logs and locate the full container name including the version. It takes the following form: `registry.gitlab.com/gitlab-org/build/omnibus-gitlab-mirror/gitlab-ee:<VERSION>`.
1. On your local machine, make sure you are signed in to the GitLab Docker registry. You can find the instructions for this in
[Authenticate to the GitLab Container Registry](../../user/packages/container_registry/index.md#authenticate-with-the-container-registry).
[Authenticate to the GitLab Container Registry](../../user/packages/container_registry/index.md#authenticate-with-the-container-registry).
1. Once signed in, download the new image by using `docker pull registry.gitlab.com/gitlab-org/build/omnibus-gitlab-mirror/gitlab-ee:<VERSION>`
1. For more information about working with and running Omnibus GitLab containers in Docker, please refer to [GitLab Docker images](https://docs.gitlab.com/omnibus/docker/README.html) in the Omnibus documentation.
@ -1053,10 +1053,10 @@ By default, it already comes with a fully configured Prometheus service that is
but with the following limitations:
- It does not run a `gitlab-exporter` instance, so several `process_*` metrics from services such as Gitaly may be missing.
- While it runs a `node_exporter`, `docker-compose` services emulate hosts, meaning that it would normally report itself to not be associated
with any of the other services that are running. That is not how node metrics are reported in a production setup, where `node_exporter`
always runs as a process alongside other GitLab components on any given node. From Service Ping's perspective none of the node data would therefore
appear to be associated to any of the services running, because they all appear to be running on different hosts. To alleviate this problem, the `node_exporter` in GCK was arbitrarily "assigned" to the `web` service, meaning only for this service `node_*` metrics appears in Service Ping.
- While it runs a `node_exporter`, `docker-compose` services emulate hosts, meaning that it normally reports itself as not associated
with any of the other running services. That is not how node metrics are reported in a production setup, where `node_exporter`
always runs as a process alongside other GitLab components on any given node. For Service Ping, none of the node data would therefore
appear to be associated to any of the services running, because they all appear to be running on different hosts. To alleviate this problem, the `node_exporter` in GCK was arbitrarily "assigned" to the `web` service, meaning only for this service `node_*` metrics appears in Service Ping.
## Aggregated metrics
@ -1065,7 +1065,10 @@ appear to be associated to any of the services running, because they all appear
WARNING:
This feature is intended solely for internal GitLab use.
To add data for aggregated metrics into Service Ping payload you should add corresponding definition at [`config/metrics/aggregates/*.yaml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/aggregates/) for metrics available at Community Edition and at [`ee/config/metrics/aggregates/*.yaml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/aggregates/) for Enterprise Edition ones.
To add data for aggregated metrics to the Service Ping payload, add a corresponding definition to:
- [`config/metrics/aggregates/*.yaml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/aggregates/) for metrics available in the Community Edition.
- [`ee/config/metrics/aggregates/*.yaml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/aggregates/) for metrics available in the Enterprise Edition.
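For orientation, a hypothetical entry could look like this (field names are illustrative; treat the existing files in those directories as the authoritative schema):

```yaml
# Hypothetical aggregated metric definition; event names are placeholders.
- name: example_activity_union
  operator: OR
  source: redis
  time_frame:
    - 7d
    - 28d
  events:
    - 'i_example_event_a'
    - 'i_example_event_b'
```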
Each aggregate definition includes the following parts:
@ -1454,7 +1457,7 @@ The following is example content of the Service Ping payload.
In GitLab 13.5, `pg_system_id` was added to send the [PostgreSQL system identifier](https://www.2ndquadrant.com/en/blog/support-for-postgresqls-system-identifier-in-barman/).
## Exporting Service Ping SQL queries and definitions
## Export Service Ping SQL queries and definitions
Two Rake tasks exist to export Service Ping definitions.
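Assuming the task names below (verify with `bin/rake -T gitlab:usage_data`), the output can be redirected to files:

```shell
# Assumed task names; list them with `bin/rake -T gitlab:usage_data` to confirm.
bin/rake gitlab:usage_data:dump_sql_in_yaml > ~/service_ping_queries.yaml
bin/rake gitlab:usage_data:dump_sql_in_json > ~/service_ping_queries.json
```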
@ -1514,25 +1517,25 @@ you are not impacted by this bug.
#### Check if you are affected
You can check if you were affected by this bug by using the Admin area or by
You can check if you were affected by this bug by using the Admin Area or by
checking the configuration file of your GitLab instance:
- Using the Admin area:
- Using the Admin Area:
1. On the top bar, go to the admin area (**{admin}**).
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > Metrics and profiling**.
1. Expand **Usage Statistics**.
1. Are you able to check/uncheck the checkbox to disable Service Ping?
1. Are you able to check or uncheck the checkbox to disable Service Ping?
- If _yes_, your GitLab instance is not affected by this bug.
- If you can't check/uncheck the checkbox, you are affected by this bug.
Read below [how to fix this](#how-to-fix-the-cannot-disable-service-ping-bug).
- If you can't check or uncheck the checkbox, you are affected by this bug.
See the steps on [how to fix this](#how-to-fix-the-cannot-disable-service-ping-bug).
- Checking your GitLab instance configuration file:
To check whether you're impacted by this bug, check your instance configuration
settings. The configuration file in which Service Ping can be disabled will depend
on your installation and deployment method, but it will typically be one of the following:
settings. The configuration file in which Service Ping can be disabled depends
on your installation and deployment method, but is typically one of the following:
- `/etc/gitlab/gitlab.rb` for Omnibus GitLab Linux Package and Docker.
- `charts.yaml` for GitLab Helm and cloud-native Kubernetes deployments.
@ -1576,7 +1579,7 @@ To work around this bug, you have two options:
sudo gitlab-ctl reconfigure
```
1. In GitLab, on the top bar, go to the admin area (**{admin}**).
1. In GitLab, on the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Settings > Metrics and profiling**.
1. Expand **Usage Statistics**.
1. Clear the **Enable service ping** checkbox.

View File

@ -51,7 +51,7 @@ are regular backend changes.
#### The Product Intelligence **reviewer** should
- Perform a first-pass review on the merge request and suggest improvements to the author.
- Check the [metrics location](index.md#1-naming-and-placing-the-metrics) in
- Check the [metrics location](index.md#1-name-and-place-the-metric) in
the Service Ping JSON payload.
- Suggest that the author checks the [naming suggestion](index.md#how-to-get-a-metric-name-suggestion) while
generating the metric's YAML definition.

View File

@ -120,6 +120,20 @@ For example:
| Amir | `ami` | Yes |
| Amir | `amr` | No |
### Export seat usage
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/262877) in GitLab 14.2.
To export seat usage data as a CSV file:
1. On the top bar, select **Menu > Groups** and find your group.
1. On the left sidebar, select **Settings > Billing**.
1. Under **Seats currently in use**, select **See usage**.
1. Select **Export list**.
The generated list contains all seats being used,
and is not affected by the current search.
## Subscription expiry
When your subscription expires, you can continue to use paid features of GitLab for 14 days.

View File

@ -521,8 +521,7 @@ API requests to add a new user to a project are not possible.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/287940) in GitLab 14.2.
FLAG:
On self-managed GitLab, by default this feature is not available. To make it available per group, ask an administrator to [enable the :ff_group_membership_export flag](../../administration/feature_flags.md). On GitLab.com, this feature is not available.
The feature is not ready for production use.
On self-managed GitLab, by default this feature is available. To hide the feature per group, ask an administrator to [disable the :ff_group_membership_export flag](../../administration/feature_flags.md). On GitLab.com, this feature is available.
You can export a list of members in a group as a CSV.

View File

@ -228,3 +228,16 @@ it's recommended to keep snippets' repositories as compact as possible.
For more information about tools to compact repositories,
see the documentation on [reducing repository size](../user/project/repository/reducing_the_repo_size_using_git.md).
### Cannot enter text into the snippet text box
If the text area after the filename field is disabled and prevents you from
creating a new snippet, use this workaround:
1. Enter a title for your snippet.
1. Scroll to the bottom of the **Files** field, then select
**Add another file**. GitLab displays a second set of fields to add a second file.
1. In the filename field for the second file, enter a filename to avoid a [known copy-pasting bug](https://gitlab.com/gitlab-org/gitlab/-/issues/22870).
1. Enter any string into the text area for the second file.
1. Scroll back to the first filename, and select **Delete file**.
1. Create the rest of your file, and select **Create snippet** when done.

View File

@ -72,6 +72,17 @@ module Backup
end
end
def remove_tmp
# delete tmp inside backups
progress.print "Deleting backups/tmp ... "
if FileUtils.rm_rf(File.join(backup_path, "tmp"))
progress.puts "done".color(:green)
else
puts "deleting backups/tmp failed".color(:red)
end
end
def remove_old
# delete backups
progress.print "Deleting old backups ... "

View File

@ -133,6 +133,11 @@ module Gitlab
# as this is dependent on an execution context
return ROLE_UNKNOWN if connection.is_a?(ConnectionProxy)
# During application init we might receive `NullPool`
return ROLE_UNKNOWN unless connection.respond_to?(:pool) &&
connection.pool.respond_to?(:db_config) &&
connection.pool.db_config.respond_to?(:name)
if connection.pool.db_config.name.ends_with?(LoadBalancer::REPLICA_SUFFIX)
ROLE_REPLICA
else

View File

@ -35,7 +35,11 @@ module Gitlab
yield object
end
rescue StandardError => e
error(project.id, e)
Gitlab::Import::ImportFailureService.track(
project_id: project.id,
error_source: importer_class.name,
exception: e
)
end
end
end

View File

@ -59,8 +59,6 @@ module Gitlab
Repositories::HousekeepingService.new(project, :gc).execute
true
rescue Gitlab::Git::Repository::NoRepository, Gitlab::Shell::Error => e
fail_import("Failed to import the repository: #{e.message}")
end
def import_wiki_repository
@ -70,7 +68,8 @@ module Gitlab
rescue ::Gitlab::Git::CommandError => e
if e.message !~ /repository not exported/
project.create_wiki
fail_import("Failed to import the wiki: #{e.message}")
raise e
else
true
end
@ -84,11 +83,6 @@ module Gitlab
project.update_column(:last_repository_updated_at, Time.zone.now)
end
def fail_import(message)
project.import_state.mark_as_failed(message)
false
end
private
def default_branch

View File

@ -49,9 +49,14 @@ module Gitlab
retval
rescue StandardError => e
error(project.id, e)
Gitlab::Import::ImportFailureService.track(
project_id: project.id,
error_source: self.class.name,
exception: e,
fail_import: abort_on_failure
)
raise e
raise(e)
end
# Imports all the objects in sequence in the current thread.
@ -165,6 +170,10 @@ module Gitlab
raise NotImplementedError
end
def abort_on_failure
false
end
# Any options to be passed to the method used for retrieving the data to
# import.
def collection_options
@ -177,21 +186,6 @@ module Gitlab
Logger.info(log_attributes(project_id, extra))
end
def error(project_id, exception)
Logger.error(
log_attributes(
project_id,
message: 'importer failed',
'error.message': exception.message
)
)
Gitlab::ErrorTracking.track_exception(
exception,
log_attributes(project_id, import_source: :github)
)
end
def log_attributes(project_id, extra = {})
extra.merge(
project_id: project_id,

View File

@ -0,0 +1,76 @@
# frozen_string_literal: true
module Gitlab
module Import
class ImportFailureService
def self.track(
exception:,
import_state: nil,
project_id: nil,
error_source: nil,
fail_import: false
)
new(
exception: exception,
import_state: import_state,
project_id: project_id,
error_source: error_source
).execute(fail_import: fail_import)
end
def initialize(exception:, import_state: nil, project_id: nil, error_source: nil)
if import_state.blank? && project_id.blank?
raise ArgumentError, 'import_state OR project_id must be provided'
end
if project_id.blank?
@import_state = import_state
@project = import_state.project
else
@project = Project.find(project_id)
@import_state = @project.import_state
end
@exception = exception
@error_source = error_source
end
def execute(fail_import:)
track_exception
persist_failure
import_state.mark_as_failed(exception.message) if fail_import
end
private
attr_reader :exception, :import_state, :project, :error_source
def track_exception
attributes = {
import_type: project.import_type,
project_id: project.id,
source: error_source
}
Gitlab::Import::Logger.error(
attributes.merge(
message: 'importer failed',
'error.message': exception.message
)
)
Gitlab::ErrorTracking.track_exception(exception, attributes)
end
def persist_failure
project.import_failures.create(
source: error_source,
exception_class: exception.class.to_s,
exception_message: exception.message.truncate(255),
correlation_id_value: Labkit::Correlation::CorrelationId.current_or_new_id
)
end
end
end
end
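For reference, callers elsewhere in this commit invoke the new service as follows (arguments taken from the importer changes above; `abort_on_failure` comes from the including class):

```ruby
# Log and persist the failure; optionally mark the whole import as failed.
Gitlab::Import::ImportFailureService.track(
  project_id: project.id,
  error_source: self.class.name,
  exception: e,
  fail_import: abort_on_failure
)
```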

View File

@ -91,6 +91,8 @@ namespace :gitlab do
backup.cleanup
end
backup.remove_tmp
puts "Warning: Your gitlab.rb and gitlab-secrets.json files contain sensitive data \n" \
"and are not included in this backup. You will need to restore these files manually.".color(:red)
puts "Restore task is done."

View File

@ -5437,12 +5437,18 @@ msgstr ""
msgid "BoardScope|Select milestone"
msgstr ""
msgid "BoardScope|Select weight"
msgstr ""
msgid "BoardScope|Started"
msgstr ""
msgid "BoardScope|Upcoming"
msgstr ""
msgid "BoardScope|Weight"
msgstr ""
msgid "Boards"
msgstr ""

View File

@ -88,6 +88,27 @@ RSpec.describe Backup::Manager do
end
end
describe '#remove_tmp' do
let(:path) { File.join(Gitlab.config.backup.path, 'tmp') }
before do
allow(FileUtils).to receive(:rm_rf).and_return(true)
end
it 'removes backups/tmp dir' do
subject.remove_tmp
expect(FileUtils).to have_received(:rm_rf).with(path)
end
it 'prints running task with a done confirmation' do
subject.remove_tmp
expect(progress).to have_received(:print).with('Deleting backups/tmp ... ')
expect(progress).to have_received(:puts).with('done')
end
end
describe '#remove_old' do
let(:files) do
[

View File

@ -304,6 +304,15 @@ RSpec.describe Gitlab::Database::LoadBalancing do
end
end
context 'when the NullPool is used for connection' do
let(:pool) { ActiveRecord::ConnectionAdapters::NullPool.new }
let(:connection) { double(:connection, pool: pool) }
it 'returns unknown' do
expect(described_class.db_role_for_connection(connection)).to eq(:unknown)
end
end
context 'when the load balancing is configured' do
let(:db_host) { ActiveRecord::Base.connection_pool.db_config.host }
let(:proxy) { described_class::ConnectionProxy.new([db_host]) }

View File

@ -3,7 +3,8 @@
require 'spec_helper'
RSpec.describe Gitlab::GithubImport::Importer::LfsObjectsImporter do
let(:project) { double(:project, id: 4, import_source: 'foo/bar') }
let_it_be(:project) { create(:project, :import_started) }
let(:client) { double(:client) }
let(:download_link) { "http://www.gitlab.com/lfs_objects/oid" }
@ -61,24 +62,12 @@ RSpec.describe Gitlab::GithubImport::Importer::LfsObjectsImporter do
.and_raise(exception)
end
expect(Gitlab::GithubImport::Logger)
.to receive(:error)
expect(Gitlab::Import::ImportFailureService)
.to receive(:track)
.with(
message: 'importer failed',
project_id: project.id,
parallel: false,
importer: 'Gitlab::GithubImport::Importer::LfsObjectImporter',
'error.message': 'Invalid Project URL'
)
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(
exception,
import_source: :github,
parallel: false,
project_id: project.id,
importer: 'Gitlab::GithubImport::Importer::LfsObjectImporter'
exception: exception,
error_source: 'Gitlab::GithubImport::Importer::LfsObjectImporter'
).and_call_original
importer.execute

View File

@ -211,17 +211,6 @@ RSpec.describe Gitlab::GithubImport::Importer::RepositoryImporter do
expect(importer.import_repository).to eq(true)
end
it 'marks the import as failed when an error was raised' do
expect(project).to receive(:ensure_repository)
.and_raise(Gitlab::Git::Repository::NoRepository)
expect(importer)
.to receive(:fail_import)
.and_return(false)
expect(importer.import_repository).to eq(false)
end
end
describe '#import_wiki_repository' do
@ -234,28 +223,40 @@ RSpec.describe Gitlab::GithubImport::Importer::RepositoryImporter do
expect(importer.import_wiki_repository).to eq(true)
end
it 'marks the import as failed and creates an empty repo if an error was raised' do
expect(wiki_repository)
.to receive(:import_repository)
.with(importer.wiki_url)
.and_raise(Gitlab::Git::CommandError)
context 'when it raises a Gitlab::Git::CommandError' do
context 'when the error is not a "repository not exported"' do
it 'creates the wiki and re-raises the exception' do
exception = Gitlab::Git::CommandError.new
expect(importer)
.to receive(:fail_import)
.and_return(false)
expect(wiki_repository)
.to receive(:import_repository)
.with(importer.wiki_url)
.and_raise(exception)
expect(project)
.to receive(:create_wiki)
expect(project)
.to receive(:create_wiki)
expect(importer.import_wiki_repository).to eq(false)
end
end
expect { importer.import_wiki_repository }
.to raise_error(exception)
end
end
describe '#fail_import' do
it 'marks the import as failed' do
expect(project.import_state).to receive(:mark_as_failed).with('foo')
context 'when the error is a "repository not exported"' do
it 'returns true' do
exception = Gitlab::Git::CommandError.new('repository not exported')
expect(importer.fail_import('foo')).to eq(false)
expect(wiki_repository)
.to receive(:import_repository)
.with(importer.wiki_url)
.and_raise(exception)
expect(project)
.not_to receive(:create_wiki)
expect(importer.import_wiki_repository)
.to eq(true)
end
end
end
end

View File

@ -5,6 +5,10 @@ require 'spec_helper'
RSpec.describe Gitlab::GithubImport::ParallelScheduling do
let(:importer_class) do
Class.new do
def self.name
'MyImporter'
end
include(Gitlab::GithubImport::ParallelScheduling)
def importer_class
@ -21,7 +25,8 @@ RSpec.describe Gitlab::GithubImport::ParallelScheduling do
end
end
let(:project) { double(:project, id: 4, import_source: 'foo/bar') }
let_it_be(:project) { create(:project, :import_started, import_source: 'foo/bar') }
let(:client) { double(:client) }
describe '#parallel?' do
@ -100,46 +105,109 @@ RSpec.describe Gitlab::GithubImport::ParallelScheduling do
importer.execute
end
it 'logs the error when it fails' do
exception = StandardError.new('some error')
context 'when abort_on_failure is false' do
it 'logs the error when it fails' do
exception = StandardError.new('some error')
importer = importer_class.new(project, client, parallel: false)
importer = importer_class.new(project, client, parallel: false)
expect(importer)
.to receive(:sequential_import)
.and_raise(exception)
expect(importer)
.to receive(:sequential_import)
.and_raise(exception)
expect(Gitlab::GithubImport::Logger)
.to receive(:info)
.with(
message: 'starting importer',
parallel: false,
project_id: project.id,
importer: 'Class'
)
expect(Gitlab::GithubImport::Logger)
.to receive(:info)
.with(
message: 'starting importer',
parallel: false,
project_id: project.id,
importer: 'Class'
)
expect(Gitlab::GithubImport::Logger)
.to receive(:error)
.with(
message: 'importer failed',
project_id: project.id,
parallel: false,
importer: 'Class',
'error.message': 'some error'
)
expect(Gitlab::Import::ImportFailureService)
.to receive(:track)
.with(
project_id: project.id,
exception: exception,
error_source: 'MyImporter',
fail_import: false
).and_call_original
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(
exception,
parallel: false,
project_id: project.id,
import_source: :github,
importer: 'Class'
)
.and_call_original
expect { importer.execute }
.to raise_error(exception)
expect { importer.execute }.to raise_error(exception)
expect(project.import_state.reload.status).to eq('started')
expect(project.import_failures).not_to be_empty
expect(project.import_failures.last.exception_class).to eq('StandardError')
expect(project.import_failures.last.exception_message).to eq('some error')
end
end
context 'when abort_on_failure is true' do
let(:importer_class) do
Class.new do
def self.name
'MyImporter'
end
include(Gitlab::GithubImport::ParallelScheduling)
def importer_class
Class
end
def object_type
:dummy
end
def collection_method
:issues
end
def abort_on_failure
true
end
end
end
it 'logs the error when it fails and marks import as failed' do
exception = StandardError.new('some error')
importer = importer_class.new(project, client, parallel: false)
expect(importer)
.to receive(:sequential_import)
.and_raise(exception)
expect(Gitlab::GithubImport::Logger)
.to receive(:info)
.with(
message: 'starting importer',
parallel: false,
project_id: project.id,
importer: 'Class'
)
expect(Gitlab::Import::ImportFailureService)
.to receive(:track)
.with(
project_id: project.id,
exception: exception,
error_source: 'MyImporter',
fail_import: true
).and_call_original
expect { importer.execute }
.to raise_error(exception)
expect(project.import_state.reload.status).to eq('failed')
expect(project.import_state.last_error).to eq('some error')
expect(project.import_failures).not_to be_empty
expect(project.import_failures.last.exception_class).to eq('StandardError')
expect(project.import_failures.last.exception_message).to eq('some error')
end
end
end

View File

@ -0,0 +1,136 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Import::ImportFailureService do
let_it_be(:import_type) { 'import_type' }
let_it_be(:project) do
create(
:project,
:import_started,
import_type: import_type
)
end
let(:import_state) { project.import_state }
let(:exception) { StandardError.new('some error') }
shared_examples 'logs the exception and fails the import' do
it 'when the failure does not abort the import' do
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(
exception,
project_id: project.id,
import_type: import_type,
source: 'SomeImporter'
)
expect(Gitlab::Import::Logger)
.to receive(:error)
.with(
message: 'importer failed',
'error.message': 'some error',
project_id: project.id,
import_type: import_type,
source: 'SomeImporter'
)
described_class.track(**arguments)
expect(project.import_state.reload.status).to eq('failed')
expect(project.import_failures).not_to be_empty
expect(project.import_failures.last.exception_class).to eq('StandardError')
expect(project.import_failures.last.exception_message).to eq('some error')
end
end
shared_examples 'logs the exception and does not fail the import' do
it 'when the failure does not abort the import' do
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(
exception,
project_id: project.id,
import_type: import_type,
source: 'SomeImporter'
)
expect(Gitlab::Import::Logger)
.to receive(:error)
.with(
message: 'importer failed',
'error.message': 'some error',
project_id: project.id,
import_type: import_type,
source: 'SomeImporter'
)
described_class.track(**arguments)
expect(project.import_state.reload.status).to eq('started')
expect(project.import_failures).not_to be_empty
expect(project.import_failures.last.exception_class).to eq('StandardError')
expect(project.import_failures.last.exception_message).to eq('some error')
end
end
context 'when using the project as reference' do
context 'when it fails the import' do
let(:arguments) do
{
project_id: project.id,
exception: exception,
error_source: 'SomeImporter',
fail_import: true
}
end
it_behaves_like 'logs the exception and fails the import'
end
context 'when it does not fail the import' do
let(:arguments) do
{
project_id: project.id,
exception: exception,
error_source: 'SomeImporter',
fail_import: false
}
end
it_behaves_like 'logs the exception and does not fail the import'
end
end
context 'when using the import_state as reference' do
context 'when it fails the import' do
let(:arguments) do
{
import_state: import_state,
exception: exception,
error_source: 'SomeImporter',
fail_import: true
}
end
it_behaves_like 'logs the exception and fails the import'
end
context 'when it does not fail the import' do
let(:arguments) do
{
import_state: import_state,
exception: exception,
error_source: 'SomeImporter',
fail_import: false
}
end
it_behaves_like 'logs the exception and does not fail the import'
end
end
end

View File

@ -193,6 +193,36 @@ RSpec.describe ProjectTeam do
end
end
describe '#members_with_access_levels' do
let_it_be(:maintainer) { create(:user) }
let_it_be(:developer) { create(:user) }
let_it_be(:guest) { create(:user) }
let_it_be(:project) { create(:project, namespace: maintainer.namespace) }
let_it_be(:access_levels) { [Gitlab::Access::DEVELOPER, Gitlab::Access::MAINTAINER] }
subject(:members_with_access_levels) { project.team.members_with_access_levels(access_levels) }
before do
project.team.add_developer(developer)
project.team.add_maintainer(maintainer)
project.team.add_guest(guest)
end
context 'with access_levels' do
it 'filters members who have given access levels' do
expect(members_with_access_levels).to contain_exactly(developer, maintainer)
end
end
context 'without access_levels' do
let_it_be(:access_levels) { [] }
it 'returns empty array' do
expect(members_with_access_levels).to be_empty
end
end
end
describe '#add_users' do
let(:user1) { create(:user) }
let(:user2) { create(:user) }

View File

@ -17,11 +17,6 @@ RSpec.describe Auth::ContainerRegistryAuthenticationService do
project.add_developer(current_user)
end
shared_examples 'an unmodified token' do
it_behaves_like 'a valid token'
it { expect(payload['access']).not_to include(have_key('migration_eligible')) }
end
shared_examples 'a modified token with migration eligibility' do |eligible|
it_behaves_like 'a valid token'
it { expect(payload['access']).to include(include('migration_eligible' => eligible)) }
@ -71,7 +66,7 @@ RSpec.describe Auth::ContainerRegistryAuthenticationService do
{ scopes: ["repository:#{project.full_path}:pull"] }
end
it_behaves_like 'an unmodified token'
it_behaves_like 'a modified token'
end
context 'with push action' do
@ -82,20 +77,12 @@ RSpec.describe Auth::ContainerRegistryAuthenticationService do
it_behaves_like 'a modified token'
end
context 'with multiple actions including push' do
context 'with multiple actions' do
let(:current_params) do
{ scopes: ["repository:#{project.full_path}:pull,push,delete"] }
end
it_behaves_like 'a modified token'
end
context 'with multiple actions excluding push' do
let(:current_params) do
{ scopes: ["repository:#{project.full_path}:pull,delete"] }
end
it_behaves_like 'an unmodified token'
end
end
end

View File

@ -6,7 +6,7 @@ RSpec.describe Packages::Composer::CreatePackageService do
let_it_be(:package_name) { 'composer-package-name' }
let_it_be(:json) { { name: package_name }.to_json }
let_it_be(:project) { create(:project, :custom_repo, files: { 'composer.json' => json } ) }
let_it_be(:project) { create(:project, :custom_repo, files: { 'composer.json' => json }) }
let_it_be(:user) { create(:user) }
let(:params) do
@ -24,13 +24,30 @@ RSpec.describe Packages::Composer::CreatePackageService do
let(:created_package) { Packages::Package.composer.last }
shared_examples 'using the cache update worker' do
context 'with remove_composer_v1_cache_code enabled' do
it 'does not enqueue a cache update job' do
expect(::Packages::Composer::CacheUpdateWorker).not_to receive(:perform_async)
subject
end
end
context 'with remove_composer_v1_cache_code disabled' do
it 'enqueues a cache update job' do
stub_feature_flags(remove_composer_v1_cache_code: false)
expect(::Packages::Composer::CacheUpdateWorker).to receive(:perform_async).with(project.id, package_name, nil)
subject
end
end
end
context 'without an existing package' do
context 'with a branch' do
let(:branch) { project.repository.find_branch('master') }
it 'creates the package' do
expect(::Packages::Composer::CacheUpdateWorker).to receive(:perform_async).with(project.id, package_name, nil)
expect { subject }
.to change { Packages::Package.composer.count }.by(1)
.and change { Packages::Composer::Metadatum.count }.by(1)
@ -47,6 +64,7 @@ RSpec.describe Packages::Composer::CreatePackageService do
it_behaves_like 'assigns build to package'
it_behaves_like 'assigns status to package'
it_behaves_like 'using the cache update worker'
end
context 'with a tag' do
@ -57,8 +75,6 @@ RSpec.describe Packages::Composer::CreatePackageService do
end
it 'creates the package' do
expect(::Packages::Composer::CacheUpdateWorker).to receive(:perform_async).with(project.id, package_name, nil)
expect { subject }
.to change { Packages::Package.composer.count }.by(1)
.and change { Packages::Composer::Metadatum.count }.by(1)
@ -73,6 +89,7 @@ RSpec.describe Packages::Composer::CreatePackageService do
it_behaves_like 'assigns build to package'
it_behaves_like 'assigns status to package'
it_behaves_like 'using the cache update worker'
end
end
@ -85,12 +102,12 @@ RSpec.describe Packages::Composer::CreatePackageService do
end
it 'does not create a new package' do
expect(::Packages::Composer::CacheUpdateWorker).to receive(:perform_async).with(project.id, package_name, nil)
expect { subject }
.to change { Packages::Package.composer.count }.by(0)
.and change { Packages::Composer::Metadatum.count }.by(0)
end
it_behaves_like 'using the cache update worker'
end
context 'belonging to another project' do
@ -108,12 +125,12 @@ RSpec.describe Packages::Composer::CreatePackageService do
let!(:other_package) { create(:package, name: package_name, version: 'dev-master', project: other_project) }
it 'creates the package' do
expect(::Packages::Composer::CacheUpdateWorker).to receive(:perform_async).with(project.id, package_name, nil)
expect { subject }
.to change { Packages::Package.composer.count }.by(1)
.and change { Packages::Composer::Metadatum.count }.by(1)
end
it_behaves_like 'using the cache update worker'
end
end
end

View File

@ -69,6 +69,10 @@ RSpec.shared_examples 'a browsable' do
end
RSpec.shared_examples 'an accessible' do
before do
stub_feature_flags(container_registry_migration_phase1: false)
end
let(:access) do
[{ 'type' => 'repository',
'name' => project.full_path,

View File

@ -490,8 +490,7 @@ RSpec.describe 'gitlab:app namespace rake task', :delete do
'lfs.tar.gz',
'pages.tar.gz',
'registry.tar.gz',
'repositories',
'tmp'
'repositories'
)
end

View File

@ -21,6 +21,12 @@ RSpec.describe Gitlab::GithubImport::ObjectImporter do
end.new
end
let_it_be(:project) { create(:project, :import_started) }
let(:importer_class) { double(:importer_class, name: 'klass_name') }
let(:importer_instance) { double(:importer_instance) }
let(:client) { double(:client) }
before do
stub_const('MockRepresantation', Class.new do
include Gitlab::GithubImport::Representation::ToHash
@ -39,11 +45,6 @@ RSpec.describe Gitlab::GithubImport::ObjectImporter do
end
describe '#import', :clean_gitlab_redis_cache do
let(:importer_class) { double(:importer_class, name: 'klass_name') }
let(:importer_instance) { double(:importer_instance) }
let(:project) { double(:project, full_path: 'foo/bar', id: 1) }
let(:client) { double(:client) }
before do
expect(worker)
.to receive(:importer_class)
@ -65,7 +66,7 @@ RSpec.describe Gitlab::GithubImport::ObjectImporter do
.with(
github_id: 1,
message: 'starting importer',
project_id: 1,
project_id: project.id,
importer: 'klass_name'
)
@ -74,7 +75,7 @@ RSpec.describe Gitlab::GithubImport::ObjectImporter do
.with(
github_id: 1,
message: 'importer finished',
project_id: 1,
project_id: project.id,
importer: 'klass_name'
)
@ -106,59 +107,36 @@ RSpec.describe Gitlab::GithubImport::ObjectImporter do
importer: 'klass_name'
)
expect(Gitlab::GithubImport::Logger)
.to receive(:error)
expect(Gitlab::Import::ImportFailureService)
.to receive(:track)
.with(
github_id: 1,
message: 'importer failed',
project_id: project.id,
importer: 'klass_name',
'error.message': 'some error',
'github.data': {
'github_id' => 1,
'number' => 10
}
exception: exception,
error_source: 'klass_name'
)
.and_call_original
expect(Gitlab::ErrorTracking)
.to receive(:track_and_raise_exception)
.with(
exception,
import_source: :github,
github_id: 1,
project_id: 1,
importer: 'klass_name'
).and_call_original
worker.import(project, client, { 'number' => 10, 'github_id' => 1 })
expect { worker.import(project, client, { 'number' => 10, 'github_id' => 1 }) }
.to raise_error(exception)
expect(project.import_state.reload.status).to eq('started')
expect(project.import_failures).not_to be_empty
expect(project.import_failures.last.exception_class).to eq('StandardError')
expect(project.import_failures.last.exception_message).to eq('some error')
end
it 'logs error when representation does not have a github_id' do
expect(importer_class).not_to receive(:new)
expect(Gitlab::GithubImport::Logger)
.to receive(:error)
expect(Gitlab::Import::ImportFailureService)
.to receive(:track)
.with(
github_id: nil,
message: 'importer failed',
project_id: project.id,
importer: 'klass_name',
'error.message': 'key not found: :github_id',
'github.data': {
'number' => 10
}
exception: a_kind_of(KeyError),
error_source: 'klass_name',
fail_import: true
)
expect(Gitlab::ErrorTracking)
.to receive(:track_and_raise_exception)
.with(
an_instance_of(KeyError),
import_source: :github,
github_id: nil,
project_id: 1,
importer: 'klass_name'
).and_call_original
.and_call_original
expect { worker.import(project, client, { 'number' => 10 }) }
.to raise_error(KeyError, 'key not found: :github_id')
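These examples exercise `Gitlab::Import::ImportFailureService.track`, whose implementation is outside this diff. A rough sketch of the interface the assertions rely on, assuming it persists an `ImportFailure` record and optionally fails the import state (helper and column names are assumptions):
module Gitlab
  module Import
    class ImportFailureService
      # Sketch: record the exception against the project and, when
      # fail_import is true, mark the import itself as failed.
      def self.track(project_id:, exception:, error_source: nil, fail_import: false)
        project = Project.find_by_id(project_id)
        return unless project
        project.import_failures.create!(
          exception_class: exception.class.name,
          exception_message: exception.message,
          source: error_source # column name assumed
        )
        project.import_state.mark_as_failed(exception.message) if fail_import
      end
    end
  end
end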

View File

@ -3,7 +3,8 @@
require 'spec_helper'
RSpec.describe Gitlab::GithubImport::StageMethods do
let(:project) { create(:project) }
let_it_be(:project) { create(:project, :import_started, import_url: 'https://t0ken@github.com/repo/repo.git') }
let(:worker) do
Class.new do
def self.name
@ -15,8 +16,6 @@ RSpec.describe Gitlab::GithubImport::StageMethods do
end
describe '#perform' do
let(:project) { create(:project, import_url: 'https://t0ken@github.com/repo/repo.git') }
it 'returns if no project could be found' do
expect(worker).not_to receive(:try_import)
@ -55,46 +54,100 @@ RSpec.describe Gitlab::GithubImport::StageMethods do
worker.perform(project.id)
end
it 'logs error when import fails' do
exception = StandardError.new('some error')
context 'when abort_on_failure is false' do
it 'logs error when import fails' do
exception = StandardError.new('some error')
allow(worker)
.to receive(:find_project)
.with(project.id)
.and_return(project)
allow(worker)
.to receive(:find_project)
.with(project.id)
.and_return(project)
expect(worker)
.to receive(:try_import)
.and_raise(exception)
expect(worker)
.to receive(:try_import)
.and_raise(exception)
expect(Gitlab::GithubImport::Logger)
.to receive(:info)
.with(
message: 'starting stage',
project_id: project.id,
import_stage: 'DummyStage'
)
expect(Gitlab::GithubImport::Logger)
.to receive(:info)
.with(
message: 'starting stage',
project_id: project.id,
import_stage: 'DummyStage'
)
expect(Gitlab::GithubImport::Logger)
.to receive(:error)
.with(
message: 'stage failed',
project_id: project.id,
import_stage: 'DummyStage',
'error.message': 'some error'
)
expect(Gitlab::Import::ImportFailureService)
.to receive(:track)
.with(
project_id: project.id,
exception: exception,
error_source: 'DummyStage',
fail_import: false
).and_call_original
expect(Gitlab::ErrorTracking)
.to receive(:track_and_raise_exception)
.with(
exception,
import_source: :github,
project_id: project.id,
import_stage: 'DummyStage'
)
.and_call_original
expect { worker.perform(project.id) }
.to raise_error(exception)
expect { worker.perform(project.id) }.to raise_error(exception)
expect(project.import_state.reload.status).to eq('started')
expect(project.import_failures).not_to be_empty
expect(project.import_failures.last.exception_class).to eq('StandardError')
expect(project.import_failures.last.exception_message).to eq('some error')
end
end
context 'when abort_on_failure is true' do
let(:worker) do
Class.new do
def self.name
'DummyStage'
end
def abort_on_failure
true
end
include(Gitlab::GithubImport::StageMethods)
end.new
end
it 'logs, captures and re-raises the exception and also marks the import as failed' do
exception = StandardError.new('some error')
allow(worker)
.to receive(:find_project)
.with(project.id)
.and_return(project)
expect(worker)
.to receive(:try_import)
.and_raise(exception)
expect(Gitlab::GithubImport::Logger)
.to receive(:info)
.with(
message: 'starting stage',
project_id: project.id,
import_stage: 'DummyStage'
)
expect(Gitlab::Import::ImportFailureService)
.to receive(:track)
.with(
project_id: project.id,
exception: exception,
error_source: 'DummyStage',
fail_import: true
).and_call_original
expect { worker.perform(project.id) }.to raise_error(exception)
expect(project.import_state.reload.status).to eq('failed')
expect(project.import_state.last_error).to eq('some error')
expect(project.import_failures).not_to be_empty
expect(project.import_failures.last.exception_class).to eq('StandardError')
expect(project.import_failures.last.exception_message).to eq('some error')
end
end
end
@ -126,16 +179,14 @@ RSpec.describe Gitlab::GithubImport::StageMethods do
end
describe '#find_project' do
let(:import_state) { create(:import_state, project: project) }
it 'returns a Project for an existing ID' do
import_state.update_column(:status, 'started')
project.import_state.update_column(:status, 'started')
expect(worker.find_project(project.id)).to eq(project)
end
it 'returns nil for a project that failed importing' do
import_state.update_column(:status, 'failed')
project.import_state.update_column(:status, 'failed')
expect(worker.find_project(project.id)).to be_nil
end
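The two examples above pin down a `#find_project` that only yields projects whose import is still running; a sketch under that assumption (predicate name assumed):
# Only projects whose import is still marked as started are returned,
# so a failed import halts any further stages.
def find_project(id)
  project = Project.find_by_id(id)
  project if project&.import_state&.started?
end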

View File

@ -49,5 +49,10 @@ RSpec.describe Deployments::HooksWorker do
worker.perform(deployment_id: deployment.id, status_changed_at: status_changed_at)
end
it_behaves_like 'worker with data consistency',
described_class,
feature_flag: :load_balancing_for_deployments_hooks_worker,
data_consistency: :delayed
end
end
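The `'worker with data consistency'` shared example checks a declaration made in the worker class itself; roughly, under the parameters named above (a sketch, not the worker's full source):
module Deployments
  class HooksWorker
    include ApplicationWorker
    # Opt into reading from a slightly lagged database replica, gated
    # behind the feature flag exercised by the spec above.
    data_consistency :delayed, feature_flag: :load_balancing_for_deployments_hooks_worker
    def perform(params = {})
      # ...
    end
  end
end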

View File

@ -4,6 +4,7 @@ require 'spec_helper'
RSpec.describe Gitlab::GithubImport::Stage::ImportRepositoryWorker do
let(:project) { double(:project, id: 4) }
let(:worker) { described_class.new }
describe '#import' do
@ -36,15 +37,19 @@ RSpec.describe Gitlab::GithubImport::Stage::ImportRepositoryWorker do
context 'when the import fails' do
it 'does not schedule the importing of the base data' do
client = double(:client)
exception_class = Gitlab::Git::Repository::NoRepository
expect_next_instance_of(Gitlab::GithubImport::Importer::RepositoryImporter) do |instance|
expect(instance).to receive(:execute).and_return(false)
expect(instance).to receive(:execute).and_raise(exception_class)
end
expect(Gitlab::GithubImport::Stage::ImportBaseDataWorker)
.not_to receive(:perform_async)
worker.import(client, project)
expect(worker.abort_on_failure).to eq(true)
expect { worker.import(client, project) }
.to raise_error(exception_class)
end
end
end
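The `worker.abort_on_failure` assertion implies the stage worker now overrides the hook, presumably along these lines:
# A repository import failure should abort the whole GitHub import
# rather than let later stages run against a missing repository.
def abort_on_failure
  true
end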

View File

@ -0,0 +1,36 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Import::StuckImportJob do
let_it_be(:project) { create(:project, :import_started, import_source: 'foo/bar') }
let(:worker) do
Class.new do
def self.name
'MyStuckProjectImportsWorker'
end
include(Gitlab::Import::StuckImportJob)
def track_metrics(...)
nil
end
def enqueued_import_states
ProjectImportState.with_status([:scheduled, :started])
end
end.new
end
it 'marks the stuck import project as failed and tracks the error on import_failures' do
worker.perform
expect(project.import_state.reload.status).to eq('failed')
expect(project.import_state.last_error).to eq('Import timed out. Import took longer than 86400 seconds')
expect(project.import_failures).not_to be_empty
expect(project.import_failures.last.exception_class).to eq('Gitlab::Import::StuckImportJob::StuckImportJobError')
expect(project.import_failures.last.exception_message).to eq('Import timed out. Import took longer than 86400 seconds')
end
end
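The concern under test is new in this commit; a condensed sketch of the behaviour these assertions pin down, with names inferred from the expected messages (the staleness check that selects genuinely stuck imports is omitted here):
module Gitlab
  module Import
    module StuckImportJob
      StuckImportJobError = Class.new(StandardError)
      IMPORT_TIMEOUT_SECONDS = 86_400 # per the expected error message
      def perform
        enqueued_import_states.each do |import_state|
          message = "Import timed out. Import took longer than #{IMPORT_TIMEOUT_SECONDS} seconds"
          # Fail the import state and record the failure so it appears in
          # project.import_failures, as the example asserts.
          import_state.mark_as_failed(message)
          Gitlab::Import::ImportFailureService.track(
            project_id: import_state.project_id,
            exception: StuckImportJobError.new(message)
          )
        end
      end
    end
  end
end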