Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2021-05-14 03:10:11 +00:00
parent 980faa8f34
commit 67cc693c9a
40 changed files with 574 additions and 282 deletions

View File

@ -834,10 +834,10 @@ GfmAutoComplete.Members = {
const lowercaseQuery = query.toLowerCase();
const { nameOrUsernameStartsWith, nameOrUsernameIncludes } = GfmAutoComplete.Members;
return sortBy(members, [
return sortBy(
members.filter((member) => nameOrUsernameIncludes(member, lowercaseQuery)),
(member) => (nameOrUsernameStartsWith(member, lowercaseQuery) ? -1 : 0),
(member) => (nameOrUsernameIncludes(member, lowercaseQuery) ? -1 : 0),
]);
);
},
};
GfmAutoComplete.Labels = {

View File

@ -62,6 +62,7 @@ class Clusters::ClustersController < Clusters::BaseController
def show
if params[:tab] == 'integrations'
@prometheus_integration = Clusters::IntegrationPresenter.new(@cluster.find_or_build_integration_prometheus)
@elastic_stack_integration = Clusters::IntegrationPresenter.new(@cluster.find_or_build_integration_elastic_stack)
end
end

View File

@ -24,7 +24,7 @@ module Clusters
end
def cluster_integration_params
params.require(:integration).permit(:application_type, :enabled)
params.permit(integration: [:enabled, :application_type]).require(:integration)
end
def cluster
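The cluster_integration_params rework above permits the nested keys first and then requires the :integration scope. A minimal sketch, not part of the commit (the standalone setup and the extra key are purely illustrative), of what the new call yields for a typical request:

require 'action_controller'

params = ActionController::Parameters.new(
  integration: { application_type: 'elastic_stack', enabled: true, extra: 'ignored' }
)

# Permit the nested keys, then require the :integration scope.
permitted = params.permit(integration: [:enabled, :application_type]).require(:integration)

permitted.to_h # => { "enabled" => true, "application_type" => "elastic_stack" }
# The unpermitted "extra" key is dropped here; a missing :integration key
# would still raise ActionController::ParameterMissing.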

View File

@ -12,7 +12,10 @@ module SshKeysHelper
message: _('This action cannot be undone, and will permanently delete the %{key} SSH key') % { key: key.title },
okVariant: 'danger',
okTitle: _('Delete')
}
},
toggle: 'tooltip',
placement: 'top',
container: 'body'
}
end
end

View File

@ -3,9 +3,9 @@
module Clusters
module Applications
class ElasticStack < ApplicationRecord
VERSION = '3.0.0'
include ::Clusters::Concerns::ElasticsearchClient
ELASTICSEARCH_PORT = 9200
VERSION = '3.0.0'
self.table_name = 'clusters_applications_elastic_stacks'
@ -13,10 +13,23 @@ module Clusters
include ::Clusters::Concerns::ApplicationStatus
include ::Clusters::Concerns::ApplicationVersion
include ::Clusters::Concerns::ApplicationData
include ::Gitlab::Utils::StrongMemoize
default_value_for :version, VERSION
after_destroy do
cluster&.find_or_build_integration_elastic_stack&.update(enabled: false, chart_version: nil)
end
state_machine :status do
after_transition any => [:installed] do |application|
application.cluster&.find_or_build_integration_elastic_stack&.update(enabled: true, chart_version: application.version)
end
after_transition any => [:uninstalled] do |application|
application.cluster&.find_or_build_integration_elastic_stack&.update(enabled: false, chart_version: nil)
end
end
def chart
'elastic-stack/elastic-stack'
end
@ -51,31 +64,6 @@ module Clusters
super.merge('wait-for-elasticsearch.sh': File.read("#{Rails.root}/vendor/elastic_stack/wait-for-elasticsearch.sh"))
end
def elasticsearch_client(timeout: nil)
strong_memoize(:elasticsearch_client) do
next unless kube_client
proxy_url = kube_client.proxy_url('service', service_name, ::Clusters::Applications::ElasticStack::ELASTICSEARCH_PORT, Gitlab::Kubernetes::Helm::NAMESPACE)
Elasticsearch::Client.new(url: proxy_url) do |faraday|
# ensures headers containing auth data are appended to original client options
faraday.headers.merge!(kube_client.headers)
# ensure TLS certs are properly verified
faraday.ssl[:verify] = kube_client.ssl_options[:verify_ssl]
faraday.ssl[:cert_store] = kube_client.ssl_options[:cert_store]
faraday.options.timeout = timeout unless timeout.nil?
end
rescue Kubeclient::HttpError => error
# If users have mistakenly set parameters or removed the depended clusters,
# `proxy_url` could raise an exception because gitlab can not communicate with the cluster.
# We check for a nil client in downstream use and behaviour is equivalent to an empty state
log_exception(error, :failed_to_create_elasticsearch_client)
nil
end
end
def chart_above_v2?
Gem::Version.new(version) >= Gem::Version.new('2.0.0')
end
@ -106,10 +94,6 @@ module Clusters
]
end
def kube_client
cluster&.kubeclient&.core_client
end
def migrate_to_3_script
return [] if !updating? || chart_above_v3?

View File

@ -52,6 +52,7 @@ module Clusters
has_one :platform_kubernetes, class_name: 'Clusters::Platforms::Kubernetes', inverse_of: :cluster, autosave: true
has_one :integration_prometheus, class_name: 'Clusters::Integrations::Prometheus', inverse_of: :cluster
has_one :integration_elastic_stack, class_name: 'Clusters::Integrations::ElasticStack', inverse_of: :cluster
def self.has_one_cluster_application(name) # rubocop:disable Naming/PredicateName
application = APPLICATIONS[name.to_s]
@ -104,6 +105,7 @@ module Clusters
delegate :available?, to: :application_ingress, prefix: true, allow_nil: true
delegate :available?, to: :application_knative, prefix: true, allow_nil: true
delegate :available?, to: :application_elastic_stack, prefix: true, allow_nil: true
delegate :available?, to: :integration_elastic_stack, prefix: true, allow_nil: true
delegate :external_ip, to: :application_ingress, prefix: true, allow_nil: true
delegate :external_hostname, to: :application_ingress, prefix: true, allow_nil: true
@ -284,6 +286,10 @@ module Clusters
integration_prometheus || build_integration_prometheus
end
def find_or_build_integration_elastic_stack
integration_elastic_stack || build_integration_elastic_stack
end
def provider
if gcp?
provider_gcp
@ -318,6 +324,22 @@ module Clusters
platform_kubernetes.kubeclient if kubernetes?
end
def elastic_stack_adapter
application_elastic_stack || integration_elastic_stack
end
def elasticsearch_client
elastic_stack_adapter&.elasticsearch_client
end
def elastic_stack_available?
if application_elastic_stack_available? || integration_elastic_stack_available?
true
else
false
end
end
def kubernetes_namespace_for(environment, deployable: environment.last_deployable)
if deployable && environment.project_id != deployable.project_id
raise ArgumentError, 'environment.project_id must match deployable.project_id'

View File

@ -6,6 +6,8 @@ module Clusters
extend ActiveSupport::Concern
included do
include ::Clusters::Concerns::KubernetesLogger
belongs_to :cluster, class_name: 'Clusters::Cluster', foreign_key: :cluster_id
validates :cluster, presence: true
@ -79,24 +81,6 @@ module Clusters
# Override if your application needs any action after
# being uninstalled by Helm
end
def logger
@logger ||= Gitlab::Kubernetes::Logger.build
end
def log_exception(error, event)
logger.error({
exception: error.class.name,
status_code: error.error_code,
cluster_id: cluster&.id,
application_id: id,
class_name: self.class.name,
event: event,
message: error.message
})
Gitlab::ErrorTracking.track_exception(error, cluster_id: cluster&.id, application_id: id)
end
end
end
end

View File

@ -0,0 +1,38 @@
# frozen_string_literal: true
module Clusters
module Concerns
module ElasticsearchClient
include ::Gitlab::Utils::StrongMemoize
ELASTICSEARCH_PORT = 9200
ELASTICSEARCH_NAMESPACE = 'gitlab-managed-apps'
def elasticsearch_client(timeout: nil)
strong_memoize(:elasticsearch_client) do
kube_client = cluster&.kubeclient&.core_client
next unless kube_client
proxy_url = kube_client.proxy_url('service', service_name, ELASTICSEARCH_PORT, ELASTICSEARCH_NAMESPACE)
Elasticsearch::Client.new(url: proxy_url) do |faraday|
# ensures headers containing auth data are appended to original client options
faraday.headers.merge!(kube_client.headers)
# ensure TLS certs are properly verified
faraday.ssl[:verify] = kube_client.ssl_options[:verify_ssl]
faraday.ssl[:cert_store] = kube_client.ssl_options[:cert_store]
faraday.options.timeout = timeout unless timeout.nil?
end
rescue Kubeclient::HttpError => error
# If users have mistakenly set parameters or removed the depended clusters,
# `proxy_url` could raise an exception because gitlab can not communicate with the cluster.
# We check for a nil client in downstream use and behaviour is equivalent to an empty state
log_exception(error, :failed_to_create_elasticsearch_client)
nil
end
end
end
end
end
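The concern extracted here is shared by the existing Clusters::Applications::ElasticStack model and the new Clusters::Integrations::ElasticStack model. A rough sketch of the contract it assumes (ExampleElasticStackAdapter is hypothetical): the including class supplies cluster and service_name, and log_exception comes from the KubernetesLogger concern also added in this commit.

class ExampleElasticStackAdapter < ApplicationRecord
  include ::Clusters::Concerns::ElasticsearchClient
  include ::Clusters::Concerns::KubernetesLogger

  belongs_to :cluster, class_name: 'Clusters::Cluster'

  # Name of the in-cluster Service the Elasticsearch API is proxied through.
  def service_name
    'elastic-stack-elasticsearch-master'
  end
end

# Returns an Elasticsearch::Client proxied through the cluster's Kubernetes API,
# or nil when there is no kubeclient or the cluster is unreachable.
# ExampleElasticStackAdapter.first&.elasticsearch_client(timeout: 10)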

View File

@ -0,0 +1,27 @@
# frozen_string_literal: true
module Clusters
module Concerns
module KubernetesLogger
def logger
@logger ||= Gitlab::Kubernetes::Logger.build
end
def log_exception(error, event)
logger.error(
{
exception: error.class.name,
status_code: error.error_code,
cluster_id: cluster&.id,
application_id: id,
class_name: self.class.name,
event: event,
message: error.message
}
)
Gitlab::ErrorTracking.track_exception(error, cluster_id: cluster&.id, application_id: id)
end
end
end
end

View File

@ -0,0 +1,38 @@
# frozen_string_literal: true
module Clusters
module Integrations
class ElasticStack < ApplicationRecord
include ::Clusters::Concerns::ElasticsearchClient
include ::Clusters::Concerns::KubernetesLogger
self.table_name = 'clusters_integration_elasticstack'
self.primary_key = :cluster_id
belongs_to :cluster, class_name: 'Clusters::Cluster', foreign_key: :cluster_id
validates :cluster, presence: true
validates :enabled, inclusion: { in: [true, false] }
def available?
enabled
end
def service_name
chart_above_v3? ? 'elastic-stack-elasticsearch-master' : 'elastic-stack-elasticsearch-client'
end
def chart_above_v2?
return true if chart_version.nil?
Gem::Version.new(chart_version) >= Gem::Version.new('2.0.0')
end
def chart_above_v3?
return true if chart_version.nil?
Gem::Version.new(chart_version) >= Gem::Version.new('3.0.0')
end
end
end
end
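A console-style sketch (illustration only; cluster stands for any Clusters::Cluster) of how the new integration record behaves with the chart-version helpers above:

integration = cluster.find_or_build_integration_elastic_stack

integration.update(enabled: true, chart_version: '3.0.0')
integration.available?      # => true
integration.chart_above_v2? # => true
integration.chart_above_v3? # => true
integration.service_name    # => "elastic-stack-elasticsearch-master"

# A nil chart_version is treated as current, so both version checks still pass.
integration.update(chart_version: nil)
integration.chart_above_v3? # => true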

View File

@ -406,7 +406,7 @@ class Environment < ApplicationRecord
end
def elastic_stack_available?
!!deployment_platform&.cluster&.application_elastic_stack_available?
!!deployment_platform&.cluster&.elastic_stack_available?
end
def rollout_status

View File

@ -76,7 +76,7 @@ module Clusters
def gitlab_managed_apps_logs_path
return unless logs_project && can_read_cluster?
if cluster.application_elastic_stack&.available?
if cluster.elastic_stack_adapter&.available?
elasticsearch_project_logs_path(logs_project, cluster_id: cluster.id, format: :json)
else
k8s_project_logs_path(logs_project, cluster_id: cluster.id, format: :json)

View File

@ -28,6 +28,6 @@ class ClusterEntity < Grape::Entity
end
expose :enable_advanced_logs_querying do |cluster|
cluster.application_elastic_stack_available?
cluster.elastic_stack_available?
end
end

View File

@ -27,12 +27,15 @@ module Clusters
private
def integration
case params[:application_type]
when 'prometheus'
cluster.find_or_build_integration_prometheus
else
raise ArgumentError, "invalid application_type: #{params[:application_type]}"
end
@integration ||= \
case params[:application_type]
when 'prometheus'
cluster.find_or_build_integration_prometheus
when 'elastic_stack'
cluster.find_or_build_integration_elastic_stack
else
raise ArgumentError, "invalid application_type: #{params[:application_type]}"
end
end
def authorized?
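A usage sketch for the integration resolution above, mirroring the service specs further down in this commit (project, cluster, and current_user are assumed to be in scope): the create service now resolves either integration from the application_type param.

Clusters::Integrations::CreateService.new(
  container: project,
  cluster: cluster,
  current_user: current_user,
  params: { application_type: 'elastic_stack', enabled: true }
).execute
# => success response; cluster.integration_elastic_stack is persisted and enabled.
# An unrecognised application_type still raises ArgumentError.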

View File

@ -24,7 +24,7 @@ module PodLogs
end
def get_raw_pods(result)
client = cluster&.application_elastic_stack&.elasticsearch_client
client = cluster&.elasticsearch_client
return error(_('Unable to connect to Elasticsearch')) unless client
result[:raw_pods] = ::Gitlab::Elasticsearch::Logs::Pods.new(client).pods(namespace)
@ -66,11 +66,9 @@ module PodLogs
end
def pod_logs(result)
client = cluster&.application_elastic_stack&.elasticsearch_client
client = cluster&.elasticsearch_client
return error(_('Unable to connect to Elasticsearch')) unless client
chart_above_v2 = cluster.application_elastic_stack.chart_above_v2?
response = ::Gitlab::Elasticsearch::Logs::Lines.new(client).pod_logs(
namespace,
pod_name: result[:pod_name],
@ -79,7 +77,7 @@ module PodLogs
start_time: result[:start_time],
end_time: result[:end_time],
cursor: result[:cursor],
chart_above_v2: chart_above_v2
chart_above_v2: cluster.elastic_stack_adapter.chart_above_v2?
)
result.merge!(response)

View File

@ -1,19 +1,29 @@
.settings.expanded.border-0.m-0
%p
= s_('ClusterIntegration|Integrations enable you to integrate your cluster as part of your GitLab workflow.')
= s_('ClusterIntegration|Integrations allow you to use applications installed in your cluster as part of your GitLab workflow.')
= link_to _('Learn more'), help_page_path('user/clusters/integrations.md'), target: '_blank'
.settings-content#advanced-settings-section
.settings-content#integrations-settings-section
- if can?(current_user, :admin_cluster, @cluster)
.sub-section.form-group
= form_for @prometheus_integration, url: @cluster.integrations_path, as: :integration, method: :post, html: { class: 'js-cluster-integrations-form' } do |form|
= form.hidden_field :application_type
.form-group
= form_for @prometheus_integration, as: :integration, namespace: :prometheus, url: @cluster.integrations_path, method: :post, html: { class: 'js-cluster-integrations-form' } do |prometheus_form|
= prometheus_form.hidden_field :application_type
.form-group.gl-form-group
.gl-form-checkbox.custom-control.custom-checkbox
= form.check_box :enabled, { class: 'custom-control-input'}
= form.label :enabled, s_('ClusterIntegration|Enable Prometheus integration'), class: 'custom-control-label'
.gl-form-group
= prometheus_form.check_box :enabled, class: 'custom-control-input'
= prometheus_form.label :enabled, s_('ClusterIntegration|Enable Prometheus integration'), class: 'custom-control-label'
.form-text.text-gl-muted
- link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: help_page_path("user/clusters/integrations", anchor: "prometheus-cluster-integration") }
- link_end = '</a>'.html_safe
= html_escape(s_('ClusterIntegration|Before you enable this integration, follow the %{link_start}documented process%{link_end}.')) % { link_start: link_start, link_end: link_end }
= form.submit _('Save changes'), class: 'btn gl-button btn-success'
= s_('ClusterIntegration|Allows GitLab to query a specifically configured in-cluster Prometheus for metrics.')
= link_to _('More information.'), help_page_path("user/clusters/integrations", anchor: "prometheus-cluster-integration"), target: '_blank'
= prometheus_form.submit _('Save changes'), class: 'btn gl-button btn-success'
.sub-section.form-group
= form_for @elastic_stack_integration, as: :integration, namespace: :elastic_stack, url: @cluster.integrations_path, method: :post, html: { class: 'js-cluster-integrations-form' } do |elastic_stack_form|
= elastic_stack_form.hidden_field :application_type
.form-group.gl-form-group
.gl-form-checkbox.custom-control.custom-checkbox
= elastic_stack_form.check_box :enabled, class: 'custom-control-input'
= elastic_stack_form.label :enabled, s_('ClusterIntegration|Enable Elastic Stack integration'), class: 'custom-control-label'
.form-text.text-gl-muted
= s_('ClusterIntegration|Allows GitLab to query a specifically configured in-cluster Elasticsearch for pod logs.')
= link_to _('More information.'), help_page_path("user/clusters/integrations", anchor: "elastic-stack-cluster-integration"), target: '_blank'
= elastic_stack_form.submit _('Save changes'), class: 'btn gl-button btn-success'

View File

@ -28,4 +28,4 @@
%span.key-created-at.gl-display-flex.gl-align-items-center
- if key.can_delete?
.gl-ml-3
= render 'shared/ssh_keys/key_delete', html_class: "btn gl-button btn-icon btn-danger js-confirm-modal-button", button_data: ssh_key_delete_modal_data(key, path_to_key(key, is_admin))
= render 'shared/ssh_keys/key_delete', html_class: "btn gl-button btn-icon btn-default js-confirm-modal-button", button_data: ssh_key_delete_modal_data(key, path_to_key(key, is_admin))

View File

@ -1,6 +1,9 @@
- title = _('Delete Key')
- aria = { label: title }
- if defined?(text)
= button_to text, '#', class: html_class, data: button_data
= button_to text, '#', class: html_class, data: button_data, title: title, aria: aria
- else
= button_to '#', class: html_class, data: button_data do
= button_to '#', class: html_class, data: button_data, title: title, aria: aria do
%span.sr-only= _('Delete')
= sprite_icon('remove')

View File

@ -0,0 +1,5 @@
---
title: Updating button variant and adding tooltip for the SSH delete key button.
merge_request: 61626
author:
type: other

View File

@ -0,0 +1,5 @@
---
title: Add Elastic Stack cluster integration
merge_request: 61077
author:
type: added

View File

@ -0,0 +1,15 @@
# frozen_string_literal: true
class CreateClustersIntegrationElasticstack < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
def change
create_table_with_constraints :clusters_integration_elasticstack, id: false do |t|
t.timestamps_with_timezone null: false
t.references :cluster, primary_key: true, default: nil, index: false, foreign_key: { on_delete: :cascade }
t.boolean :enabled, null: false, default: false
t.text :chart_version
t.text_limit :chart_version, 10
end
end
end

View File

@ -0,0 +1 @@
c4593c1638f937618ecf3ae94a409e550dce93cc190989f581fb0007e591696d

View File

@ -11719,6 +11719,15 @@ CREATE SEQUENCE clusters_id_seq
ALTER SEQUENCE clusters_id_seq OWNED BY clusters.id;
CREATE TABLE clusters_integration_elasticstack (
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
cluster_id bigint NOT NULL,
enabled boolean DEFAULT false NOT NULL,
chart_version text,
CONSTRAINT check_f8d671ce04 CHECK ((char_length(chart_version) <= 10))
);
CREATE TABLE clusters_integration_prometheus (
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
@ -20796,6 +20805,9 @@ ALTER TABLE ONLY clusters_applications_prometheus
ALTER TABLE ONLY clusters_applications_runners
ADD CONSTRAINT clusters_applications_runners_pkey PRIMARY KEY (id);
ALTER TABLE ONLY clusters_integration_elasticstack
ADD CONSTRAINT clusters_integration_elasticstack_pkey PRIMARY KEY (cluster_id);
ALTER TABLE ONLY clusters_integration_prometheus
ADD CONSTRAINT clusters_integration_prometheus_pkey PRIMARY KEY (cluster_id);
@ -27064,6 +27076,9 @@ ALTER TABLE ONLY boards_epic_board_positions
ALTER TABLE ONLY vulnerability_finding_links
ADD CONSTRAINT fk_rails_cbdfde27ce FOREIGN KEY (vulnerability_occurrence_id) REFERENCES vulnerability_occurrences(id) ON DELETE CASCADE;
ALTER TABLE ONLY clusters_integration_elasticstack
ADD CONSTRAINT fk_rails_cc5ba8f658 FOREIGN KEY (cluster_id) REFERENCES clusters(id) ON DELETE CASCADE;
ALTER TABLE ONLY issues_self_managed_prometheus_alert_events
ADD CONSTRAINT fk_rails_cc5d88bbb0 FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE;

View File

@ -1087,7 +1087,7 @@ POST /groups/:id/hooks
| `confidential_note_events` | boolean | no | Trigger hook on confidential note events |
| `job_events` | boolean | no | Trigger hook on job events |
| `pipeline_events` | boolean | no | Trigger hook on pipeline events |
| `wiki_page_events` | boolean | no | Trigger hook on wiki events |
| `wiki_page_events` | boolean | no | Trigger hook on wiki page events |
| `deployment_events` | boolean | no | Trigger hook on deployment events |
| `releases_events` | boolean | no | Trigger hook on release events |
| `subgroup_events` | boolean | no | Trigger hook on subgroup events |
@ -1116,7 +1116,7 @@ PUT /groups/:id/hooks/:hook_id
| `confidential_note_events` | boolean | no | Trigger hook on confidential note events |
| `job_events` | boolean | no | Trigger hook on job events |
| `pipeline_events` | boolean | no | Trigger hook on pipeline events |
| `wiki_events` | boolean | no | Trigger hook on wiki events |
| `wiki_page_events` | boolean | no | Trigger hook on wiki page events |
| `deployment_events` | boolean | no | Trigger hook on deployment events |
| `releases_events` | boolean | no | Trigger hook on release events |
| `subgroup_events` | boolean | no | Trigger hook on subgroup events |

View File

@ -457,7 +457,7 @@ Parameters:
| `author_id` | integer | no | Returns merge requests created by the given user `id`. Mutually exclusive with `author_username`. _([Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/13060) in GitLab 9.5)_. |
| `author_username` | string | no | Returns merge requests created by the given `username`. Mutually exclusive with `author_id`. _([Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/13060) in GitLab 12.10)_. |
| `assignee_id` | integer | no | Returns merge requests assigned to the given user `id`. `None` returns unassigned merge requests. `Any` returns merge requests with an assignee. _([Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/13060) in GitLab 9.5)_. |
| `approver_ids` **(PREMIUM))** | integer array | no | Returns merge requests which have specified all the users with the given `id`s as individual approvers. `None` returns merge requests without approvers. `Any` returns merge requests with an approver. |
| `approver_ids` **(PREMIUM)** | integer array | no | Returns merge requests which have specified all the users with the given `id`s as individual approvers. `None` returns merge requests without approvers. `Any` returns merge requests with an approver. |
| `approved_by_ids` **(PREMIUM)** | integer array | no | Returns merge requests which have been approved by all the users with the given `id`s (Max: 5). `None` returns merge requests with no approvals. `Any` returns merge requests with an approval. |
| `reviewer_id` | integer | no | Returns merge requests which have the user as a [reviewer](../user/project/merge_requests/getting_started.md#reviewer) with the given user `id`. `None` returns merge requests with no reviewers. `Any` returns merge requests with any reviewer. Mutually exclusive with `reviewer_username`. |
| `reviewer_username` | string | no | Returns merge requests which have the user as a [reviewer](../user/project/merge_requests/getting_started.md#reviewer) with the given `username`. `None` returns merge requests with no reviewers. `Any` returns merge requests with any reviewer. Mutually exclusive with `reviewer_id`. |

View File

@ -2222,7 +2222,7 @@ PUT /projects/:id/hooks/:hook_id
| `tag_push_events` | boolean | **{dotted-circle}** No | Trigger hook on tag push events. |
| `token` | string | **{dotted-circle}** No | Secret token to validate received payloads; this isn't returned in the response. |
| `url` | string | **{check-circle}** Yes | The hook URL. |
| `wiki_events` | boolean | **{dotted-circle}** No | Trigger hook on wiki events. |
| `wiki_page_events` | boolean | **{dotted-circle}** No | Trigger hook on wiki page events. |
| `releases_events` | boolean | **{dotted-circle}** No | Trigger hook on release events. |
### Delete project hook

View File

@ -21,8 +21,6 @@ Out-of-the-box management systems can decrease hours spent on maintaining toolch
Watch our ["Mastering continuous software development"](https://about.gitlab.com/webcast/mastering-ci-cd/)
webcast to learn about continuous methods and how the GitLab built-in CI can help you simplify and scale software development.
## Overview
Continuous Integration works by pushing small code chunks to your
application's codebase hosted in a Git repository, and to every
push, run a pipeline of scripts to build, test, and validate the
@ -50,42 +48,6 @@ read the [Introduction to CI/CD with GitLab](introduction/index.md).
<iframe src="https://www.youtube.com/embed/1iXFbchozdY" frameborder="0" allowfullscreen="true"> </iframe>
</figure>
## Getting started
GitLab CI/CD is configured by a file called `.gitlab-ci.yml` placed
at the repository's root. This file creates a [pipeline](pipelines/index.md), which runs for changes to the code in the repository. Pipelines consist of one or more stages that run in order and can each contain one or more jobs that run in parallel. These jobs (or scripts) get executed by the [GitLab Runner](https://docs.gitlab.com/runner/) agent.
To get started with GitLab CI/CD, we recommend you read through
the following documents:
- [Get started with GitLab CI/CD](quick_start/index.md).
- [Fundamental pipeline architectures](pipelines/pipeline_architectures.md).
- [GitLab CI/CD basic workflow](introduction/index.md#gitlab-cicd-workflow).
- [Step-by-step guide for writing `.gitlab-ci.yml` for the first time](../user/project/pages/getting_started/pages_from_scratch.md).
If you're migrating from another CI/CD tool, check out our handy references:
- [Migrating from CircleCI](migration/circleci.md)
- [Migrating from Jenkins](migration/jenkins.md)
You can also get started by using one of the
[`.gitlab-ci.yml` templates](https://gitlab.com/gitlab-org/gitlab-foss/tree/master/lib/gitlab/ci/templates)
available through the UI. You can use them by creating a new file,
choosing a template that suits your application, and adjusting it
to your needs:
![Use a YAML template](img/add_file_template_11_10.png)
While building your `.gitlab-ci.yml`, you can use the [CI/CD configuration visualization](pipeline_editor/index.md#visualize-ci-configuration) to facilitate your writing experience.
For a broader overview, see the [CI/CD getting started](quick_start/index.md) guide.
After you're familiar with how GitLab CI/CD works, see the
[`.gitlab-ci.yml` full reference](yaml/README.md)
for all the attributes you can set and use.
GitLab CI/CD and [shared runners](runners/README.md#shared-runners) are enabled on GitLab.com and available for all users, limited only by the [pipeline quota](../user/gitlab_com/index.md#shared-runners).
## Concepts
GitLab CI/CD uses a number of concepts to describe and run your build and deploy.

View File

@ -65,7 +65,7 @@ GitLab [Auto Build](../../../topics/autodevops/stages.md#auto-build)
and [Container Registry](../../../user/packages/container_registry/index.md).
1. Go to **ecs-demo** project on GitLab.
1. Click **Setup up CI/CD**. It brings you to a [`.gitlab-ci.yml`](../../README.md#getting-started)
1. Click **Setup up CI/CD**. It brings you to a `.gitlab-ci.yml`
creation form.
1. Copy and paste the following content into the empty `.gitlab-ci.yml`. This defines
[a pipeline for continuous deployment to ECS](../index.md#deploy-your-application-to-the-aws-elastic-container-service-ecs).

View File

@ -103,7 +103,6 @@ To generate an API Fuzzing configuration snippet:
1. Complete the form as needed. Read below for more information on available configuration options.
1. Select **Generate code snippet**.
A modal opens with the YAML snippet corresponding to the options you've selected in the form.
![API Fuzzing configuration snippet](img/api_fuzzing_configuration_snippet_v13.10.png)
1. Choose one of the following actions:
1. Select **Copy code and open `.gitlab-ci.yml` file** to copy the snippet to your clipboard and
be redirected to your project's `.gitlab-ci.yml` file where you can paste the YAML

View File

@ -78,6 +78,8 @@ An asset that has the potential to be vulnerable, identified in a project by an
include but are not restricted to source code, binary packages, containers, dependencies, networks,
applications, and infrastructure.
Findings are all potential vulnerability items scanners identify in MRs/feature branches. Only after merging to default does a finding become a [vulnerability](#vulnerability).
### Insignificant finding
A legitimate finding that a particular customer doesn't care about.
@ -153,6 +155,8 @@ A flaw that has a negative impact on the security of its environment. Vulnerabil
error or weakness, and don't describe where the error is located (see [finding](#finding)).
Each vulnerability maps to a unique finding.
Vulnerabilities exist in the default branch. Findings (see [finding](#finding)) are all potential vulnerability items scanners identify in MRs/feature branches. Only after merging to default does a finding become a vulnerability.
### Vulnerability finding
When a [report finding](#report-finding) is stored to the database, it becomes a vulnerability

View File

@ -10,7 +10,9 @@ GitLab provides several ways to integrate applications to your
Kubernetes cluster.
To enable cluster integrations, first add a Kubernetes cluster to a GitLab
[project](../project/clusters/add_remove_clusters.md) or [group](../group/clusters/index.md#group-level-kubernetes-clusters).
[project](../project/clusters/add_remove_clusters.md) or
[group](../group/clusters/index.md#group-level-kubernetes-clusters) or
[instance](../instance/clusters/index.md).
## Prometheus cluster integration
@ -20,33 +22,33 @@ You can integrate your Kubernetes cluster with
[Prometheus](https://prometheus.io/) for monitoring key metrics of your
apps directly from the GitLab UI.
[Alerts](../../operations/metrics/alerts.md) are not currently
supported.
[Alerts](../../operations/metrics/alerts.md) can be configured the same way as
for [external Prometheus instances](../../operations/metrics/alerts.md#external-prometheus-instances).
Once enabled, you will see metrics from services available in the
Once enabled, you can see metrics from services available in the
[metrics library](../project/integrations/prometheus_library/index.md).
Prerequisites:
### Prometheus Prerequisites
To benefit from this integration, you must have Prometheus
installed in your cluster with the following requirements:
To use this integration:
1. Prometheus must be installed inside the `gitlab-managed-apps` namespace.
1. Prometheus must be installed in your cluster in the `gitlab-managed-apps` namespace.
1. The `Service` resource for Prometheus must be named `prometheus-prometheus-server`.
You can use the following commands to install Prometheus to meet the requirements for cluster integrations:
You can manage your Prometheus however you like, but as an example, you can set
it up using [Helm](https://helm.sh/) as follows:
```shell
# Create the require Kubernetes namespace
# Create the required Kubernetes namespace
kubectl create ns gitlab-managed-apps
# Download Helm chart values that are compatible with the requirements above.
# You should substitute the tag that corresponds to the GitLab version in the url
# You should substitute the tag that corresponds to the GitLab version in the URL
# - https://gitlab.com/gitlab-org/gitlab/-/raw/<tag>/vendor/prometheus/values.yaml
#
wget https://gitlab.com/gitlab-org/gitlab/-/raw/v13.9.0-ee/vendor/prometheus/values.yaml
# Add the Prometheus community helm repo
# Add the Prometheus community Helm chart repository
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
# Install Prometheus
@ -65,6 +67,65 @@ To enable the Prometheus integration for your cluster:
**Operations > Kubernetes**.
- For a [group-level cluster](../group/clusters/index.md), navigate to your group's
**Kubernetes** page.
- For an [instance-level cluster](../instance/clusters/index.md), navigate to your instance's
**Kubernetes** page.
1. Select the **Integrations** tab.
1. Check the **Enable Prometheus integration** checkbox.
1. Click **Save changes**.
1. Go to the **Health** tab to see your cluster's metrics.
## Elastic Stack cluster integration
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/61077) in GitLab 13.12.
You can integrate your cluster with [Elastic
Stack](https://www.elastic.co/elastic-stack) to index and [query your pod
logs](../project/clusters/kubernetes_pod_logs.md).
### Elastic Stack Prerequisites
To use this integration:
1. Elasticsearch 7.x must be installed in your cluster in the
`gitlab-managed-apps` namespace.
1. The `Service` resource must be called `elastic-stack-elasticsearch-master`
and expose the Elasticsearch API on port `9200`.
1. The logs are expected to be [Filebeat container logs](https://www.elastic.co/guide/en/beats/filebeat/7.x/filebeat-input-container.html)
following the [7.x log structure](https://www.elastic.co/guide/en/beats/filebeat/7.x/exported-fields-log.html)
and include [Kubernetes metadata](https://www.elastic.co/guide/en/beats/filebeat/7.x/add-kubernetes-metadata.html).
You can manage your Elastic Stack however you like, but as an example, you can
use [this Elastic Stack chart](https://gitlab.com/gitlab-org/charts/elastic-stack) to get up and
running:
```shell
# Create the required Kubernetes namespace
kubectl create namespace gitlab-managed-apps
# Download Helm chart values that are compatible with the requirements above.
# You should substitute the tag that corresponds to the GitLab version in the URL
# - https://gitlab.com/gitlab-org/gitlab/-/raw/<tag>/vendor/elastic_stack/values.yaml
#
wget https://gitlab.com/gitlab-org/gitlab/-/raw/v13.9.0-ee/vendor/elastic_stack/values.yaml
# Add the GitLab Helm chart repository
helm repo add gitlab https://charts.gitlab.io
# Install Elastic Stack
helm install elastic-stack gitlab/elastic-stack -n gitlab-managed-apps --values values.yaml
```
### Enable Elastic Stack integration for your cluster
To enable the Elastic Stack integration for your cluster:
1. Go to the cluster's page:
- For a [project-level cluster](../project/clusters/index.md), navigate to your project's
**Operations > Kubernetes**.
- For a [group-level cluster](../group/clusters/index.md), navigate to your group's
**Kubernetes** page.
- For an [instance-level cluster](../instance/clusters/index.md), navigate to your instance's
**Kubernetes** page.
1. Select the **Integrations** tab.
1. Check the **Enable Elastic Stack integration** checkbox.
1. Click **Save changes**.

View File

@ -6949,6 +6949,12 @@ msgstr ""
msgid "ClusterIntegration|Allow GitLab to manage namespaces and service accounts for this cluster."
msgstr ""
msgid "ClusterIntegration|Allows GitLab to query a specifically configured in-cluster Elasticsearch for pod logs."
msgstr ""
msgid "ClusterIntegration|Allows GitLab to query a specifically configured in-cluster Prometheus for metrics."
msgstr ""
msgid "ClusterIntegration|Alternatively, "
msgstr ""
@ -6991,9 +6997,6 @@ msgstr ""
msgid "ClusterIntegration|Base domain"
msgstr ""
msgid "ClusterIntegration|Before you enable this integration, follow the %{link_start}documented process%{link_end}."
msgstr ""
msgid "ClusterIntegration|Blocking mode"
msgstr ""
@ -7159,6 +7162,9 @@ msgstr ""
msgid "ClusterIntegration|Enable Cloud Run for Anthos"
msgstr ""
msgid "ClusterIntegration|Enable Elastic Stack integration"
msgstr ""
msgid "ClusterIntegration|Enable Prometheus integration"
msgstr ""
@ -7306,7 +7312,7 @@ msgstr ""
msgid "ClusterIntegration|Integration enabled"
msgstr ""
msgid "ClusterIntegration|Integrations enable you to integrate your cluster as part of your GitLab workflow."
msgid "ClusterIntegration|Integrations allow you to use applications installed in your cluster as part of your GitLab workflow."
msgstr ""
msgid "ClusterIntegration|Issuer Email"
@ -10593,6 +10599,9 @@ msgstr ""
msgid "Delete Comment"
msgstr ""
msgid "Delete Key"
msgstr ""
msgid "Delete Value Stream"
msgstr ""

View File

@ -0,0 +1,12 @@
# frozen_string_literal: true
FactoryBot.define do
factory :clusters_integrations_elastic_stack, class: 'Clusters::Integrations::ElasticStack' do
cluster factory: %i(cluster provided_by_gcp)
enabled { true }
trait :disabled do
enabled { false }
end
end
end
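In specs, the factory above can be built in either state (create is FactoryBot; sketch only):

create(:clusters_integrations_elastic_stack)            # enabled, cluster provided by GCP
create(:clusters_integrations_elastic_stack, :disabled) # enabled: false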

View File

@ -691,12 +691,9 @@ describe('GfmAutoComplete', () => {
{ search: 'ErlindaMayert nicolle' },
{ search: 'PhoebeSchaden salina' },
{ search: 'KinaCummings robena' },
// Remaining members are grouped last
{ search: 'Administrator root' },
{ search: 'AntoineLedner ammie' },
];
it('sorts by match with start of name/username, then match with any part of name/username, and maintains sort order', () => {
it('filters out non-matches, then puts matches with start of name/username first', () => {
expect(GfmAutoComplete.Members.sort(query, items)).toMatchObject(expected);
});
});

View File

@ -10,6 +10,41 @@ RSpec.describe Clusters::Applications::ElasticStack do
include_examples 'cluster application version specs', :clusters_applications_elastic_stack
include_examples 'cluster application helm specs', :clusters_applications_elastic_stack
describe 'cluster.integration_elastic_stack state synchronization' do
let!(:application) { create(:clusters_applications_elastic_stack) }
let(:cluster) { application.cluster }
let(:integration) { cluster.integration_elastic_stack }
describe 'after_destroy' do
it 'disables the corresponding integration' do
application.destroy!
expect(integration).not_to be_enabled
end
end
describe 'on install' do
it 'enables the corresponding integration' do
application.make_scheduled!
application.make_installing!
application.make_installed!
expect(integration).to be_enabled
end
end
describe 'on uninstall' do
it 'disables the corresponding integration' do
application.make_scheduled!
application.make_installing!
application.make_installed!
application.make_externally_uninstalled!
expect(integration).not_to be_enabled
end
end
end
describe '#install_command' do
let!(:elastic_stack) { create(:clusters_applications_elastic_stack) }
@ -138,78 +173,5 @@ RSpec.describe Clusters::Applications::ElasticStack do
end
end
describe '#elasticsearch_client' do
context 'cluster is nil' do
it 'returns nil' do
expect(subject.cluster).to be_nil
expect(subject.elasticsearch_client).to be_nil
end
end
context "cluster doesn't have kubeclient" do
let(:cluster) { create(:cluster) }
subject { create(:clusters_applications_elastic_stack, cluster: cluster) }
it 'returns nil' do
expect(subject.elasticsearch_client).to be_nil
end
end
context 'cluster has kubeclient' do
let(:cluster) { create(:cluster, :project, :provided_by_gcp) }
let(:kubernetes_url) { subject.cluster.platform_kubernetes.api_url }
let(:kube_client) { subject.cluster.kubeclient.core_client }
subject { create(:clusters_applications_elastic_stack, cluster: cluster) }
before do
subject.cluster.platform_kubernetes.namespace = 'a-namespace'
stub_kubeclient_discover(cluster.platform_kubernetes.api_url)
create(:cluster_kubernetes_namespace,
cluster: cluster,
cluster_project: cluster.cluster_project,
project: cluster.cluster_project.project)
end
it 'creates proxy elasticsearch_client' do
expect(subject.elasticsearch_client).to be_instance_of(Elasticsearch::Transport::Client)
end
it 'copies proxy_url, options and headers from kube client to elasticsearch_client' do
expect(Elasticsearch::Client)
.to(receive(:new))
.with(url: a_valid_url)
.and_call_original
client = subject.elasticsearch_client
faraday_connection = client.transport.connections.first.connection
expect(faraday_connection.headers["Authorization"]).to eq(kube_client.headers[:Authorization])
expect(faraday_connection.ssl.cert_store).to be_instance_of(OpenSSL::X509::Store)
expect(faraday_connection.ssl.verify).to eq(1)
expect(faraday_connection.options.timeout).to be_nil
end
context 'when cluster is not reachable' do
before do
allow(kube_client).to receive(:proxy_url).and_raise(Kubeclient::HttpError.new(401, 'Unauthorized', nil))
end
it 'returns nil' do
expect(subject.elasticsearch_client).to be_nil
end
end
context 'when timeout is provided' do
it 'sets timeout in elasticsearch_client' do
client = subject.elasticsearch_client(timeout: 123)
faraday_connection = client.transport.connections.first.connection
expect(faraday_connection.options.timeout).to eq(123)
end
end
end
end
it_behaves_like 'cluster-based #elasticsearch_client', :clusters_applications_elastic_stack
end

View File

@ -0,0 +1,19 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Clusters::Integrations::ElasticStack do
include KubernetesHelpers
include StubRequests
describe 'associations' do
it { is_expected.to belong_to(:cluster).class_name('Clusters::Cluster') }
end
describe 'validations' do
it { is_expected.to validate_presence_of(:cluster) }
it { is_expected.not_to allow_value(nil).for(:enabled) }
end
it_behaves_like 'cluster-based #elasticsearch_client', :clusters_integrations_elastic_stack
end

View File

@ -6,79 +6,64 @@ RSpec.describe Clusters::Integrations::CreateService, '#execute' do
let_it_be(:project) { create(:project) }
let_it_be_with_reload(:cluster) { create(:cluster, :provided_by_gcp, projects: [project]) }
let(:params) do
{ application_type: 'prometheus', enabled: true }
end
let(:service) do
described_class.new(container: project, cluster: cluster, current_user: project.owner, params: params)
end
it 'creates a new Prometheus instance' do
expect(service.execute).to be_success
shared_examples_for 'a cluster integration' do |application_type|
let(:integration) { cluster.public_send("integration_#{application_type}") }
expect(cluster.integration_prometheus).to be_present
expect(cluster.integration_prometheus).to be_persisted
expect(cluster.integration_prometheus).to be_enabled
end
context 'enabled param is false' do
let(:params) do
{ application_type: 'prometheus', enabled: false }
end
it 'creates a new uninstalled Prometheus instance' do
expect(service.execute).to be_success
expect(cluster.integration_prometheus).to be_present
expect(cluster.integration_prometheus).to be_persisted
expect(cluster.integration_prometheus).not_to be_enabled
end
end
context 'unauthorized user' do
let(:service) do
unauthorized_user = create(:user)
described_class.new(container: project, cluster: cluster, current_user: unauthorized_user, params: params)
end
it 'does not create a new Prometheus instance' do
expect(service.execute).to be_error
expect(cluster.integration_prometheus).to be_nil
end
end
context 'prometheus record exists' do
before do
create(:clusters_integrations_prometheus, cluster: cluster)
end
it 'updates the Prometheus instance' do
expect(service.execute).to be_success
expect(cluster.integration_prometheus).to be_present
expect(cluster.integration_prometheus).to be_persisted
expect(cluster.integration_prometheus).to be_enabled
end
context 'enabled param is false' do
context 'when enabled param is true' do
let(:params) do
{ application_type: 'prometheus', enabled: false }
{ application_type: application_type, enabled: true }
end
it 'updates the Prometheus instance as uninstalled' do
it 'creates a new enabled integration' do
expect(service.execute).to be_success
expect(cluster.integration_prometheus).to be_present
expect(cluster.integration_prometheus).to be_persisted
expect(cluster.integration_prometheus).not_to be_enabled
expect(integration).to be_present
expect(integration).to be_persisted
expect(integration).to be_enabled
end
end
context 'when enabled param is false' do
let(:params) do
{ application_type: application_type, enabled: false }
end
it 'creates a new disabled integration' do
expect(service.execute).to be_success
expect(integration).to be_present
expect(integration).to be_persisted
expect(integration).not_to be_enabled
end
end
context 'when integration already exists' do
before do
create(:"clusters_integrations_#{application_type}", cluster: cluster, enabled: false)
end
let(:params) do
{ application_type: application_type, enabled: true }
end
it 'updates the integration' do
expect(integration).not_to be_enabled
expect(service.execute).to be_success
expect(integration.reload).to be_enabled
end
end
end
context 'for an un-supported application type' do
it_behaves_like 'a cluster integration', 'prometheus'
it_behaves_like 'a cluster integration', 'elastic_stack'
context 'when application_type is invalid' do
let(:params) do
{ application_type: 'something_else', enabled: true }
end
@ -87,4 +72,22 @@ RSpec.describe Clusters::Integrations::CreateService, '#execute' do
expect { service.execute}.to raise_error(ArgumentError)
end
end
context 'when user is unauthorized' do
let(:params) do
{ application_type: 'prometheus', enabled: true }
end
let(:service) do
unauthorized_user = create(:user)
described_class.new(container: project, cluster: cluster, current_user: unauthorized_user, params: params)
end
it 'returns error and does not create a new integration record' do
expect(service.execute).to be_error
expect(cluster.integration_prometheus).to be_nil
end
end
end

View File

@ -0,0 +1,82 @@
# frozen_string_literal: true
# Input
# - factory: [:clusters_applications_elastic_stack, :clusters_integrations_elastic_stack]
RSpec.shared_examples 'cluster-based #elasticsearch_client' do |factory|
describe '#elasticsearch_client' do
context 'cluster is nil' do
subject { build(factory, cluster: nil) }
it 'returns nil' do
expect(subject.cluster).to be_nil
expect(subject.elasticsearch_client).to be_nil
end
end
context "cluster doesn't have kubeclient" do
let(:cluster) { create(:cluster) }
subject { create(factory, cluster: cluster) }
it 'returns nil' do
expect(subject.elasticsearch_client).to be_nil
end
end
context 'cluster has kubeclient' do
let(:cluster) { create(:cluster, :project, :provided_by_gcp) }
let(:kubernetes_url) { subject.cluster.platform_kubernetes.api_url }
let(:kube_client) { subject.cluster.kubeclient.core_client }
subject { create(factory, cluster: cluster) }
before do
subject.cluster.platform_kubernetes.namespace = 'a-namespace'
stub_kubeclient_discover(cluster.platform_kubernetes.api_url)
create(:cluster_kubernetes_namespace,
cluster: cluster,
cluster_project: cluster.cluster_project,
project: cluster.cluster_project.project)
end
it 'creates proxy elasticsearch_client' do
expect(subject.elasticsearch_client).to be_instance_of(Elasticsearch::Transport::Client)
end
it 'copies proxy_url, options and headers from kube client to elasticsearch_client' do
expect(Elasticsearch::Client)
.to(receive(:new))
.with(url: a_valid_url)
.and_call_original
client = subject.elasticsearch_client
faraday_connection = client.transport.connections.first.connection
expect(faraday_connection.headers["Authorization"]).to eq(kube_client.headers[:Authorization])
expect(faraday_connection.ssl.cert_store).to be_instance_of(OpenSSL::X509::Store)
expect(faraday_connection.ssl.verify).to eq(1)
expect(faraday_connection.options.timeout).to be_nil
end
context 'when cluster is not reachable' do
before do
allow(kube_client).to receive(:proxy_url).and_raise(Kubeclient::HttpError.new(401, 'Unauthorized', nil))
end
it 'returns nil' do
expect(subject.elasticsearch_client).to be_nil
end
end
context 'when timeout is provided' do
it 'sets timeout in elasticsearch_client' do
client = subject.elasticsearch_client(timeout: 123)
faraday_connection = client.transport.connections.first.connection
expect(faraday_connection.options.timeout).to eq(123)
end
end
end
end
end

View File

@ -2,7 +2,7 @@
RSpec.shared_examples '#create_or_update action' do
let(:params) do
{ integration: { application_type: Clusters::Applications::Prometheus.application_name, enabled: true } }
{ integration: { application_type: 'prometheus', enabled: true } }
end
let(:path) { raise NotImplementedError }

View File

@ -11,6 +11,14 @@ elasticsearch:
filebeat:
enabled: true
extraVolumes:
- name: varlog
hostPath:
path: /var/log
extraVolumeMounts:
- name: varlog
mountPath: /var/log
readOnly: true
filebeatConfig:
filebeat.yml: |
output.file.enabled: false
@ -22,6 +30,28 @@ filebeat:
index: "filebeat-%{[agent.version]}-%{+yyyy.MM.dd}"
filebeat.inputs:
- type: container
format: cri
paths:
- '/var/log/containers/*.log'
json.keys_under_root: true
json.ignore_decoding_error: true
processors:
- add_id:
target_field: tie_breaker_id
- add_cloud_metadata: ~
- add_kubernetes_metadata:
host: ${NODE_NAME}
matchers:
- logs_path:
logs_path: "/var/log/containers/"
- decode_json_fields:
fields: ["message"]
when:
equals:
kubernetes.container.namespace: "gitlab-managed-apps"
kubernetes.container.name: "modsecurity-log"
- type: container
format: docker
paths:
- '/var/lib/docker/containers/*/*.log'
json.keys_under_root: true