Add latest changes from gitlab-org/gitlab@master
This commit is contained in:
parent
d29c19034b
commit
926428e5ab
|
@ -574,7 +574,6 @@ RSpec/EmptyLineAfterFinalLetItBe:
|
|||
- ee/spec/services/requirements_management/update_requirement_service_spec.rb
|
||||
- ee/spec/services/resource_access_tokens/create_service_spec.rb
|
||||
- ee/spec/services/resource_access_tokens/revoke_service_spec.rb
|
||||
- ee/spec/services/todo_service_spec.rb
|
||||
- ee/spec/support/shared_examples/graphql/geo/geo_registries_resolver_shared_examples.rb
|
||||
- ee/spec/support/shared_examples/graphql/mutations/set_multiple_assignees_shared_examples.rb
|
||||
- ee/spec/support/shared_examples/quick_actions/issue/status_page_quick_actions_shared_examples.rb
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
# frozen_string_literal: true
|
||||
|
||||
class BugzillaService < IssueTrackerService
|
||||
include ActionView::Helpers::UrlHelper
|
||||
|
||||
validates :project_url, :issues_url, :new_issue_url, presence: true, public_url: true, if: :activated?
|
||||
|
||||
def title
|
||||
|
@ -8,7 +10,12 @@ class BugzillaService < IssueTrackerService
|
|||
end
|
||||
|
||||
def description
|
||||
s_('IssueTracker|Bugzilla issue tracker')
|
||||
s_("IssueTracker|Use Bugzilla as this project's issue tracker.")
|
||||
end
|
||||
|
||||
def help
|
||||
docs_link = link_to _('Learn more.'), Rails.application.routes.url_helpers.help_page_url('user/project/integrations/bugzilla'), target: '_blank', rel: 'noopener noreferrer'
|
||||
s_("IssueTracker|Use Bugzilla as this project's issue tracker. %{docs_link}").html_safe % { docs_link: docs_link.html_safe }
|
||||
end
|
||||
|
||||
def self.to_param
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
---
|
||||
title: Add --ee option to Usage Metric Definition generator to fill correct tier and distribution
|
||||
merge_request: 59942
|
||||
author:
|
||||
type: other
|
|
@ -0,0 +1,5 @@
|
|||
---
|
||||
title: Update Bugzilla integration UI text
|
||||
merge_request: 60080
|
||||
author:
|
||||
type: other
|
|
@ -77,7 +77,7 @@ To have a summary and then a list of projects and their attachments using hashed
|
|||
WARNING:
|
||||
In GitLab 13.0, [hashed storage](../repository_storage_types.md#hashed-storage)
|
||||
is enabled by default and the legacy storage is deprecated.
|
||||
Support for legacy storage will be removed in GitLab 14.0. If you're on GitLab
|
||||
GitLab 14.0 eliminates support for legacy storage. If you're on GitLab
|
||||
13.0 and later, switching new projects to legacy storage is not possible.
|
||||
The option to choose between hashed and legacy storage in the admin area has
|
||||
been disabled.
|
||||
|
@ -114,7 +114,7 @@ There is a specific queue you can watch to see how long it will take to finish:
|
|||
After it reaches zero, you can confirm every project has been migrated by running the commands above.
|
||||
If you find it necessary, you can run this migration script again to schedule missing projects.
|
||||
|
||||
Any error or warning will be logged in Sidekiq's log file.
|
||||
Any error or warning is logged in Sidekiq's log file.
|
||||
|
||||
If [Geo](../geo/index.md) is enabled, each project that is successfully migrated
|
||||
generates an event to replicate the changes on any **secondary** nodes.
|
||||
|
@ -127,12 +127,12 @@ commands below that helps you inspect projects and attachments in both legacy an
|
|||
WARNING:
|
||||
In GitLab 13.0, [hashed storage](../repository_storage_types.md#hashed-storage)
|
||||
is enabled by default and the legacy storage is deprecated.
|
||||
Support for legacy storage will be removed in GitLab 14.0. If you're on GitLab
|
||||
GitLab 14.0 eliminates support for legacy storage. If you're on GitLab
|
||||
13.0 and later, switching new projects to legacy storage is not possible.
|
||||
The option to choose between hashed and legacy storage in the admin area has
|
||||
been disabled.
|
||||
|
||||
This task will schedule all your existing projects and associated attachments to be rolled back to the
|
||||
This task schedules all your existing projects and associated attachments to be rolled back to the
|
||||
legacy storage type.
|
||||
|
||||
- **Omnibus installation**
|
||||
|
@ -161,7 +161,7 @@ On the **Queues** tab, you can watch the `hashed_storage:hashed_storage_project_
|
|||
|
||||
After it reaches zero, you can confirm every project has been rolled back by running the commands above.
|
||||
If some projects weren't rolled back, you can run this rollback script again to schedule further rollbacks.
|
||||
Any error or warning will be logged in Sidekiq's log file.
|
||||
Any error or warning is logged in Sidekiq's log file.
|
||||
|
||||
If you have a Geo setup, the rollback will not be reflected automatically
|
||||
on the **secondary** node. You may need to wait for a backfill operation to kick-in and remove
|
||||
|
|
|
@ -113,7 +113,7 @@ If you want to be flexible about growing your hard drive space in the future con
|
|||
|
||||
Apart from a local hard drive you can also mount a volume that supports the network file system (NFS) protocol. This volume might be located on a file server, a network attached storage (NAS) device, a storage area network (SAN) or on an Amazon Web Services (AWS) Elastic Block Store (EBS) volume.
|
||||
|
||||
If you have enough RAM and a recent CPU the speed of GitLab is mainly limited by hard drive seek times. Having a fast drive (7200 RPM and up) or a solid state drive (SSD) will improve the responsiveness of GitLab.
|
||||
If you have enough RAM and a recent CPU the speed of GitLab is mainly limited by hard drive seek times. Having a fast drive (7200 RPM and up) or a solid state drive (SSD) improves the responsiveness of GitLab.
|
||||
|
||||
NOTE:
|
||||
Since file system performance may affect the overall performance of GitLab,
|
||||
|
@ -141,7 +141,7 @@ The following is the recommended minimum Memory hardware guidance for a handful
|
|||
- More users? Consult the [reference architectures page](../administration/reference_architectures/index.md)
|
||||
|
||||
In addition to the above, we generally recommend having at least 2GB of swap on your server,
|
||||
even if you currently have enough available RAM. Having swap will help reduce the chance of errors occurring
|
||||
even if you currently have enough available RAM. Having swap helps to reduce the chance of errors occurring
|
||||
if your available memory changes. We also recommend configuring the kernel's swappiness setting
|
||||
to a low value like `10` to make the most of your RAM while still having the swap
|
||||
available when needed.
|
||||
|
@ -204,7 +204,7 @@ The recommended number of workers is calculated as the highest of the following:
|
|||
For example a node with 4 cores should be configured with 3 Puma workers.
|
||||
|
||||
You can increase the number of Puma workers, providing enough CPU and memory capacity is available.
|
||||
A higher number of Puma workers will usually help to reduce the response time of the application
|
||||
A higher number of Puma workers usually helps to reduce the response time of the application
|
||||
and increase the ability to handle parallel requests. You must perform testing to verify the
|
||||
optimal settings for your infrastructure.
|
||||
|
||||
|
@ -214,7 +214,7 @@ The recommended number of threads is dependent on several factors, including tot
|
|||
of [legacy Rugged code](../administration/gitaly/index.md#direct-access-to-git-in-gitlab).
|
||||
|
||||
- If the operating system has a maximum 2 GB of memory, the recommended number of threads is `1`.
|
||||
A higher value will result in excess swapping, and decrease performance.
|
||||
A higher value results in excess swapping, and decreases performance.
|
||||
- If legacy Rugged code is in use, the recommended number of threads is `1`.
|
||||
- In all other cases, the recommended number of threads is `4`. We don't recommend setting this
|
||||
higher, due to how [Ruby MRI multi-threading](https://en.wikipedia.org/wiki/Global_interpreter_lock)
|
||||
|
@ -230,7 +230,7 @@ If you have a 1GB machine we recommend to configure only two Unicorn workers to
|
|||
swapping.
|
||||
|
||||
As long as you have enough available CPU and memory capacity, it's okay to increase the number of
|
||||
Unicorn workers and this will usually help to reduce the response time of the applications and
|
||||
Unicorn workers and this usually helps to reduce the response time of the applications and
|
||||
increase the ability to handle parallel requests.
|
||||
|
||||
To change the Unicorn workers when you have the Omnibus package (which defaults to the
|
||||
|
@ -248,8 +248,7 @@ On a very active server (10,000 billable users) the Sidekiq process can use 1GB+
|
|||
|
||||
As of Omnibus GitLab 9.0, [Prometheus](https://prometheus.io) and its related
|
||||
exporters are enabled by default, to enable easy and in depth monitoring of
|
||||
GitLab. Approximately 200MB of memory will be consumed by these processes, with
|
||||
default settings.
|
||||
GitLab. With default settings, these processes consume approximately 200MB of memory.
|
||||
|
||||
If you would like to disable Prometheus and its exporters or read more information
|
||||
about it, check the [Prometheus documentation](../administration/monitoring/prometheus/index.md).
|
||||
|
@ -277,9 +276,9 @@ The GitLab Runner server requirements depend on:
|
|||
- Resources required to run build jobs.
|
||||
- Job concurrency settings.
|
||||
|
||||
Since the nature of the jobs varies for each use case, you will need to experiment by adjusting the job concurrency to get the optimum setting.
|
||||
Since the nature of the jobs varies for each use case, you need to experiment by adjusting the job concurrency to get the optimum setting.
|
||||
|
||||
For reference, GitLab.com's [auto-scaling shared runner](../user/gitlab_com/index.md#shared-runners) is configured so that a **single job** will run in a **single instance** with:
|
||||
For reference, GitLab.com's [auto-scaling shared runner](../user/gitlab_com/index.md#shared-runners) is configured so that a **single job** runs in a **single instance** with:
|
||||
|
||||
- 1vCPU.
|
||||
- 3.75GB of RAM.
|
||||
|
|
|
@ -19,11 +19,11 @@ for all projects:
|
|||
1. Go to **Admin Area > Settings > CI/CD**.
|
||||
1. Check (or uncheck to disable) the box that says **Default to Auto DevOps pipeline for all projects**.
|
||||
1. Optionally, set up the [Auto DevOps base domain](../../../topics/autodevops/index.md#auto-devops-base-domain)
|
||||
which is going to be used for Auto Deploy and Auto Review Apps.
|
||||
which is used for Auto Deploy and Auto Review Apps.
|
||||
1. Hit **Save changes** for the changes to take effect.
|
||||
|
||||
From now on, every existing project and newly created ones that don't have a
|
||||
`.gitlab-ci.yml`, will use the Auto DevOps pipelines.
|
||||
`.gitlab-ci.yml`, use the Auto DevOps pipelines.
|
||||
|
||||
If you want to disable it for a specific project, you can do so in
|
||||
[its settings](../../../topics/autodevops/index.md#enable-or-disable-auto-devops).
|
||||
|
@ -49,13 +49,13 @@ To change it at the:
|
|||
1. Change the value of maximum artifacts size (in MB).
|
||||
1. Click **Save changes** for the changes to take effect.
|
||||
|
||||
- Group level (this will override the instance setting):
|
||||
- Group level (this overrides the instance setting):
|
||||
|
||||
1. Go to the group's **Settings > CI/CD > General Pipelines**.
|
||||
1. Change the value of **maximum artifacts size (in MB)**.
|
||||
1. Click **Save changes** for the changes to take effect.
|
||||
|
||||
- Project level (this will override the instance and group settings):
|
||||
- Project level (this overrides the instance and group settings):
|
||||
|
||||
1. Go to the project's **Settings > CI/CD > General Pipelines**.
|
||||
1. Change the value of **maximum artifacts size (in MB)**.
|
||||
|
@ -80,7 +80,7 @@ This setting is set per job and can be overridden in
|
|||
To disable the expiration, set it to `0`. The default unit is in seconds.
|
||||
|
||||
NOTE:
|
||||
Any changes to this setting will apply to new artifacts only. The expiration time will not
|
||||
Any changes to this setting apply to new artifacts only. The expiration time is not
|
||||
be updated for artifacts created before this setting was changed.
|
||||
The administrator may need to manually search for and expire previously-created
|
||||
artifacts, as described in the [troubleshooting documentation](../../../administration/troubleshooting/gitlab_rails_cheat_sheet.md#remove-artifacts-more-than-a-week-old).
|
||||
|
@ -117,7 +117,7 @@ All application settings have a [customizable cache expiry interval](../../../ad
|
|||
|
||||
If you have enabled shared runners for your GitLab instance, you can limit their
|
||||
usage by setting a maximum number of pipeline minutes that a group can use on
|
||||
shared runners per month. Setting this to `0` (default value) will grant
|
||||
shared runners per month. Setting this to `0` (default value) grants
|
||||
unlimited pipeline minutes. While build limits are stored as minutes, the
|
||||
counting is done in seconds. Usage resets on the first day of each month.
|
||||
On GitLab.com, the quota is calculated based on your
|
||||
|
@ -157,18 +157,18 @@ Archiving jobs is useful for reducing the CI/CD footprint on the system by
|
|||
removing some of the capabilities of the jobs (metadata needed to run the job),
|
||||
but persisting the traces and artifacts for auditing purposes.
|
||||
|
||||
To set the duration for which the jobs will be considered as old and expired:
|
||||
To set the duration for which the jobs are considered as old and expired:
|
||||
|
||||
1. Go to **Admin Area > Settings > CI/CD**.
|
||||
1. Expand the **Continuous Integration and Deployment** section.
|
||||
1. Set the value of **Archive jobs**.
|
||||
1. Hit **Save changes** for the changes to take effect.
|
||||
|
||||
Once that time passes, the jobs will be archived and no longer able to be
|
||||
Once that time passes, the jobs are archived and can no longer be
|
||||
retried. Make it empty to never expire jobs. It has to be no less than 1 day,
|
||||
for example: <code>15 days</code>, <code>1 month</code>, <code>2 years</code>.
|
||||
|
||||
As of June 22, 2020 the [value is set](../../gitlab_com/index.md#gitlab-cicd) to 3 months on GitLab.com. Jobs created before that date will be archived after September 22, 2020.
|
||||
As of June 22, 2020 the [value is set](../../gitlab_com/index.md#gitlab-cicd) to 3 months on GitLab.com. Jobs created before that date were archived after September 22, 2020.
|
||||
|
||||
## Default CI configuration path
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ MR is merged.
|
|||
Collecting the coverage information is done via GitLab CI/CD's
|
||||
[artifacts reports feature](../../../ci/yaml/README.md#artifactsreports).
|
||||
You can specify one or more coverage reports to collect, including wildcard paths.
|
||||
GitLab will then take the coverage information in all the files and combine it
|
||||
GitLab then takes the coverage information in all the files and combines it
|
||||
together.
|
||||
|
||||
For the coverage analysis to work, you have to provide a properly formatted
|
||||
|
@ -41,14 +41,14 @@ Other coverage analysis frameworks support the format out of the box, for exampl
|
|||
- [Coverage.py](https://coverage.readthedocs.io/en/coverage-5.0.4/cmd.html#xml-reporting) (Python)
|
||||
|
||||
Once configured, if you create a merge request that triggers a pipeline which collects
|
||||
coverage reports, the coverage will be shown in the diff view. This includes reports
|
||||
from any job in any stage in the pipeline. The coverage will be displayed for each line:
|
||||
coverage reports, the coverage is shown in the diff view. This includes reports
|
||||
from any job in any stage in the pipeline. The coverage displays for each line:
|
||||
|
||||
- `covered` (green): lines which have been checked at least once by tests
|
||||
- `no test coverage` (orange): lines which are loaded but never executed
|
||||
- no coverage information: lines which are non-instrumented or not loaded
|
||||
|
||||
Hovering over the coverage bar will provide further information, such as the number
|
||||
Hovering over the coverage bar provides further information, such as the number
|
||||
of times the line was checked by tests.
|
||||
|
||||
NOTE:
|
||||
|
@ -69,8 +69,8 @@ For the coverage report to properly match the files displayed on a merge request
|
|||
must contain the full path relative to the project root. But in some coverage analysis frameworks, the generated
|
||||
Cobertura XML has the `filename` path relative to the class package directory instead.
|
||||
|
||||
To make an intelligent guess on the project root relative `class` path, the Cobertura XML parser will attempt to build the
|
||||
full path by doing following:
|
||||
To make an intelligent guess on the project root relative `class` path, the Cobertura XML parser attempts to build the
|
||||
full path by doing the following:
|
||||
|
||||
1. Extract a portion of the `source` paths from the `sources` element and combine them with the class `filename` path.
|
||||
1. Check if the candidate path exists in the project.
|
||||
|
@ -93,16 +93,16 @@ And the `sources` from Cobertura XML with paths in the format of `<CI_BUILDS_DIR
|
|||
</sources>
|
||||
```
|
||||
|
||||
The parser will extract `Auth` and `Lib/Utils` from the sources and use these as basis to determine the class path relative to
|
||||
The parser extracts `Auth` and `Lib/Utils` from the sources and uses these as a basis to determine the class path relative to
|
||||
the project root, combining these extracted sources and the class filename.
|
||||
|
||||
If for example there is a `class` element with the `filename` value of `User.cs`, the parser will take the first candidate path
|
||||
that matches which is `Auth/User.cs`.
|
||||
If for example there is a `class` element with the `filename` value of `User.cs`, the parser takes the first candidate path
|
||||
that matches, which is `Auth/User.cs`.
|
||||
|
||||
For each `class` element, the parser will attempt to look for a match for each extracted `source` path up to `100` iterations. If it reaches this limit without finding a matching path in the file tree, the class will not be included in the final coverage report.
|
||||
For each `class` element, the parser attempts to look for a match for each extracted `source` path up to `100` iterations. If it reaches this limit without finding a matching path in the file tree, the class will not be included in the final coverage report.
|
||||
|
||||
NOTE:
|
||||
The automatic class path correction only works on `source` paths in the format of `<CI_BUILDS_DIR>/<PROJECT_FULL_PATH>/...`. If `source` will be ignored if the path does not follow this pattern. The parser will assume that
|
||||
The automatic class path correction only works on `source` paths in the format of `<CI_BUILDS_DIR>/<PROJECT_FULL_PATH>/...`. If `source` will be ignored if the path does not follow this pattern. The parser assumes that
|
||||
the `filename` of a `class` element contains the full path relative to the project root.
|
||||
|
||||
## Example test coverage configurations
|
||||
|
|
|
@ -10,14 +10,24 @@ module Gitlab
|
|||
|
||||
argument :category, type: :string, desc: "Category name"
|
||||
argument :event, type: :string, desc: "Event name"
|
||||
class_option :ee, type: :boolean, optional: true, default: false, desc: 'Indicates if metric is for ee'
|
||||
|
||||
def create_metrics
|
||||
Gitlab::UsageMetricDefinitionGenerator.start(["#{key_path}_weekly", '--dir', '7d'])
|
||||
Gitlab::UsageMetricDefinitionGenerator.start(["#{key_path}_monthly", '--dir', '28d'])
|
||||
weekly_params = ["#{key_path}_weekly", '--dir', '7d']
|
||||
weekly_params << '--ee' if ee?
|
||||
Gitlab::UsageMetricDefinitionGenerator.start(weekly_params)
|
||||
|
||||
monthly_params = ["#{key_path}_monthly", '--dir', '28d']
|
||||
monthly_params << '--ee' if ee?
|
||||
Gitlab::UsageMetricDefinitionGenerator.start(monthly_params)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def ee?
|
||||
options[:ee]
|
||||
end
|
||||
|
||||
def key_path
|
||||
"redis_hll_counters.#{category}.#{event}"
|
||||
end
|
||||
|
|
|
@ -17998,10 +17998,13 @@ msgstr ""
|
|||
msgid "IssueBoards|Switch board"
|
||||
msgstr ""
|
||||
|
||||
msgid "IssueTracker|Bugzilla issue tracker"
|
||||
msgid "IssueTracker|Custom issue tracker"
|
||||
msgstr ""
|
||||
|
||||
msgid "IssueTracker|Custom issue tracker"
|
||||
msgid "IssueTracker|Use Bugzilla as this project's issue tracker."
|
||||
msgstr ""
|
||||
|
||||
msgid "IssueTracker|Use Bugzilla as this project's issue tracker. %{docs_link}"
|
||||
msgstr ""
|
||||
|
||||
msgid "IssueTracker|Use IBM Engineering Workflow Management as this project's issue tracker."
|
||||
|
|
|
@ -18,6 +18,10 @@ RSpec.describe Gitlab::UsageMetricDefinition::RedisHllGenerator do
|
|||
stub_prometheus_queries
|
||||
end
|
||||
|
||||
after do
|
||||
FileUtils.rm_rf(temp_dir)
|
||||
end
|
||||
|
||||
it 'creates metric definition files' do
|
||||
described_class.new(args).invoke_all
|
||||
|
||||
|
@ -27,4 +31,27 @@ RSpec.describe Gitlab::UsageMetricDefinition::RedisHllGenerator do
|
|||
expect(YAML.safe_load(File.read(weekly_metric_definition_path))).to include("key_path" => "redis_hll_counters.test_category.i_test_event_weekly")
|
||||
expect(YAML.safe_load(File.read(monthly_metric_definition_path))).to include("key_path" => "redis_hll_counters.test_category.i_test_event_monthly")
|
||||
end
|
||||
|
||||
context 'with ee option' do
|
||||
let(:weekly_metric_definition_path) { Dir.glob(File.join(temp_dir, 'ee/config/metrics/counts_7d/*i_test_event_weekly.yml')).first }
|
||||
let(:monthly_metric_definition_path) { Dir.glob(File.join(temp_dir, 'ee/config/metrics/counts_28d/*i_test_event_monthly.yml')).first }
|
||||
|
||||
let(:weekly_metric_definition) { YAML.safe_load(File.read(weekly_metric_definition_path)) }
|
||||
let(:monthly_metric_definition) { YAML.safe_load(File.read(monthly_metric_definition_path)) }
|
||||
|
||||
before do
|
||||
stub_const("#{Gitlab::UsageMetricDefinitionGenerator}::TOP_LEVEL_DIR", 'config')
|
||||
stub_const("#{Gitlab::UsageMetricDefinitionGenerator}::TOP_LEVEL_DIR_EE", File.join(temp_dir, 'ee'))
|
||||
end
|
||||
|
||||
it 'creates metric definition files' do
|
||||
described_class.new(args, { 'ee': true }).invoke_all
|
||||
|
||||
expect(weekly_metric_definition).to include("key_path" => "redis_hll_counters.test_category.i_test_event_weekly")
|
||||
expect(weekly_metric_definition["distribution"]).to include('ee')
|
||||
|
||||
expect(monthly_metric_definition).to include("key_path" => "redis_hll_counters.test_category.i_test_event_monthly")
|
||||
expect(monthly_metric_definition["distribution"]).to include('ee')
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
Loading…
Reference in New Issue