Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2021-06-10 21:10:02 +00:00
parent d715acda3b
commit b20c558db2
72 changed files with 830 additions and 1511 deletions

View File

@@ -158,15 +158,6 @@
- *node-modules-cache-push
- *assets-cache-push
.use-pg11:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.7.2.patched-golang-1.16-git-2.31-lfs-2.9-chrome-89-node-14.15-yarn-1.22-postgresql-11-graphicsmagick-1.3.36"
services:
- name: postgres:11.6
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
- name: redis:5.0-alpine
variables:
POSTGRES_HOST_AUTH_METHOD: trust
.use-pg12:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.7.2.patched-golang-1.16-git-2.31-lfs-2.9-chrome-89-node-14.15-yarn-1.22-postgresql-12-graphicsmagick-1.3.36"
services:
@@ -176,17 +167,6 @@
variables:
POSTGRES_HOST_AUTH_METHOD: trust
.use-pg11-ee:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.7.2.patched-golang-1.16-git-2.31-lfs-2.9-chrome-89-node-14.15-yarn-1.22-postgresql-11-graphicsmagick-1.3.36"
services:
- name: postgres:11.6
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
- name: redis:5.0-alpine
- name: elasticsearch:7.11.1
command: ["elasticsearch", "-E", "discovery.type=single-node"]
variables:
POSTGRES_HOST_AUTH_METHOD: trust
.use-pg12-ee:
image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.7.2.patched-golang-1.16-git-2.31-lfs-2.9-chrome-89-node-14.15-yarn-1.22-postgresql-12-graphicsmagick-1.3.36"
services:

View File

@@ -58,11 +58,6 @@
- !reference [.base-script, script]
- rspec_paralellized_job "--tag ~quarantine --tag ~geo --tag level:migration"
.rspec-base-pg11:
extends:
- .rspec-base
- .use-pg11
.rspec-base-pg12:
extends:
- .rspec-base
@@ -75,11 +70,6 @@
- .use-pg12
needs: ["setup-test-env", "retrieve-tests-metadata", "compile-test-assets as-if-foss", "detect-tests"]
.rspec-ee-base-pg11:
extends:
- .rspec-base
- .use-pg11-ee
.rspec-ee-base-pg12:
extends:
- .rspec-base
@@ -91,11 +81,6 @@
- !reference [.base-script, script]
- rspec_paralellized_job "--tag ~quarantine --tag geo"
.rspec-ee-base-geo-pg11:
extends:
- .rspec-ee-base-geo
- .use-pg11-ee
.rspec-ee-base-geo-pg12:
extends:
- .rspec-ee-base-geo
@@ -298,16 +283,6 @@ rspec system pg12 minimal:
- .minimal-rspec-tests
- .rails:rules:ee-and-foss-system:minimal
# Dedicated job to test DB library code against PG11.
# Note that these are already tested against PG12 in the `rspec unit pg12` / `rspec-ee unit pg12` jobs.
rspec db-library-code pg11:
extends:
- .rspec-base-pg11
- .rails:rules:ee-and-foss-db-library-code
script:
- !reference [.base-script, script]
- rspec_db_library_code
rspec fast_spec_helper:
extends:
- .rspec-base-pg12
@@ -668,80 +643,6 @@ db:rollback geo:
# EE: default refs (MRs, default branch, schedules) jobs #
##################################################
##########################################
# EE/FOSS: default branch nightly scheduled jobs #
rspec migration pg11:
extends:
- .rspec-base-pg11
- .rspec-base-migration
- .rails:rules:default-branch-schedule-nightly--code-backstage
- .rspec-migration-parallel
rspec unit pg11:
extends:
- .rspec-base-pg11
- .rails:rules:default-branch-schedule-nightly--code-backstage
- .rspec-unit-parallel
rspec integration pg11:
extends:
- .rspec-base-pg11
- .rails:rules:default-branch-schedule-nightly--code-backstage
- .rspec-integration-parallel
rspec system pg11:
extends:
- .rspec-base-pg11
- .rails:rules:default-branch-schedule-nightly--code-backstage
- .rspec-system-parallel
# EE/FOSS: default branch nightly scheduled jobs #
##########################################
#####################################
# EE: default branch nightly scheduled jobs #
rspec-ee migration pg11:
extends:
- .rspec-ee-base-pg11
- .rspec-base-migration
- .rails:rules:default-branch-schedule-nightly--code-backstage-ee-only
- .rspec-ee-migration-parallel
rspec-ee unit pg11:
extends:
- .rspec-ee-base-pg11
- .rails:rules:default-branch-schedule-nightly--code-backstage-ee-only
- .rspec-ee-unit-parallel
rspec-ee integration pg11:
extends:
- .rspec-ee-base-pg11
- .rails:rules:default-branch-schedule-nightly--code-backstage-ee-only
- .rspec-ee-integration-parallel
rspec-ee system pg11:
extends:
- .rspec-ee-base-pg11
- .rails:rules:default-branch-schedule-nightly--code-backstage-ee-only
- .rspec-ee-system-parallel
rspec-ee unit pg11 geo:
extends:
- .rspec-ee-base-geo-pg11
- .rails:rules:default-branch-schedule-nightly--code-backstage-ee-only
- .rspec-ee-unit-geo-parallel
rspec-ee integration pg11 geo:
extends:
- .rspec-ee-base-geo-pg11
- .rails:rules:default-branch-schedule-nightly--code-backstage-ee-only
rspec-ee system pg11 geo:
extends:
- .rspec-ee-base-geo-pg11
- .rails:rules:default-branch-schedule-nightly--code-backstage-ee-only
# EE: default branch nightly scheduled jobs #
#####################################
##################################################
# EE: Canonical MR pipelines
rspec fail-fast:

View File

@@ -41,17 +41,10 @@ export default {
toggleEnabled: true,
envScope: '*',
baseDomainField: '',
externalIp: '',
};
},
computed: {
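// mapState maps each listed store field onto a reactive computed property of this component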
...mapState([
'enabled',
'editable',
'environmentScope',
'baseDomain',
'applicationIngressExternalIp',
]),
...mapState(['enabled', 'editable', 'environmentScope', 'baseDomain']),
canSubmit() {
return (
this.enabled !== this.toggleEnabled ||
@@ -64,7 +57,6 @@ export default {
this.toggleEnabled = this.enabled;
this.envScope = this.environmentScope;
this.baseDomainField = this.baseDomain;
this.externalIp = this.applicationIngressExternalIp;
},
};
</script>
@@ -135,13 +127,6 @@ export default {
<gl-link :href="autoDevopsHelpPath" target="_blank">{{ content }}</gl-link>
</template>
</gl-sprintf>
<div v-if="applicationIngressExternalIp" class="js-ingress-domain-help-text inline">
{{ s__('ClusterIntegration|Alternatively, ') }}
<gl-sprintf :message="s__('ClusterIntegration|%{externalIp}.nip.io')">
<template #externalIp>{{ externalIp }}</template>
</gl-sprintf>
{{ s__('ClusterIntegration|can be used instead of a custom domain. ') }}
</div>
<gl-sprintf
class="inline"
:message="s__('ClusterIntegration|%{linkStart}More information%{linkEnd}')"

View File

@@ -6,7 +6,6 @@ export default (initialState = {}) => {
editable: parseBoolean(initialState.editable),
environmentScope: initialState.environmentScope,
baseDomain: initialState.baseDomain,
applicationIngressExternalIp: initialState.applicationIngressExternalIp,
autoDevopsHelpPath: initialState.autoDevopsHelpPath,
externalEndpointHelpPath: initialState.externalEndpointHelpPath,
};

View File

@@ -1,4 +1,3 @@
import { scaleLinear, scaleThreshold } from 'd3-scale';
import { select } from 'd3-selection';
import dateFormat from 'dateformat';
import $ from 'jquery';
@@ -8,7 +7,7 @@ import axios from '~/lib/utils/axios_utils';
import { getDayName, getDayDifference } from '~/lib/utils/datetime_utility';
import { n__, s__, __ } from '~/locale';
const d3 = { select, scaleLinear, scaleThreshold };
const d3 = { select };
const firstDayOfWeekChoices = Object.freeze({
sunday: 0,
@@ -16,6 +15,14 @@ const firstDayOfWeekChoices = Object.freeze({
saturday: 6,
});
const CONTRIB_LEGENDS = [
{ title: __('No contributions'), min: 0 },
{ title: __('1-9 contributions'), min: 1 },
{ title: __('10-19 contributions'), min: 10 },
{ title: __('20-29 contributions'), min: 20 },
{ title: __('30+ contributions'), min: 30 },
];
const LOADING_HTML = `
<div class="text-center">
<div class="spinner spinner-md"></div>
@@ -42,7 +49,17 @@ function formatTooltipText({ date, count }) {
return `${contribText}<br /><span class="gl-text-gray-300">${dateDayName} ${dateText}</span>`;
}
const initColorKey = () => d3.scaleLinear().range(['#acd5f2', '#254e77']).domain([0, 3]);
// Return the contribution level from the number of contributions
export const getLevelFromContributions = (count) => {
if (count <= 0) {
return 0;
}
const nextLevel = CONTRIB_LEGENDS.findIndex(({ min }) => count < min);
// If there is no higher level, we are at the end
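// e.g. count = 15: the first legend with min > 15 is index 3 ({ min: 20 }), so the level is 3 - 1 = 2 ('10-19 contributions')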
return nextLevel >= 0 ? nextLevel - 1 : CONTRIB_LEGENDS.length - 1;
};
export default class ActivityCalendar {
constructor(
@@ -111,10 +128,6 @@ export default class ActivityCalendar {
innerArray.push({ count, date, day });
}
// Init color functions
this.colorKey = initColorKey();
this.color = this.initColor();
// Init the svg element
this.svg = this.renderSvg(container, group);
this.renderDays();
@@ -180,9 +193,7 @@
.attr('y', (stamp) => this.dayYPos(stamp.day))
.attr('width', this.daySize)
.attr('height', this.daySize)
.attr('fill', (stamp) =>
stamp.count !== 0 ? this.color(Math.min(stamp.count, 40)) : '#ededed',
)
.attr('data-level', (stamp) => getLevelFromContributions(stamp.count))
.attr('title', (stamp) => formatTooltipText(stamp))
.attr('class', 'user-contrib-cell has-tooltip')
.attr('data-html', true)
@@ -246,50 +257,24 @@
}
renderKey() {
const keyValues = [
__('No contributions'),
__('1-9 contributions'),
__('10-19 contributions'),
__('20-29 contributions'),
__('30+ contributions'),
];
const keyColors = [
'#ededed',
this.colorKey(0),
this.colorKey(1),
this.colorKey(2),
this.colorKey(3),
];
this.svg
.append('g')
.attr('transform', `translate(18, ${this.daySizeWithSpace * 8 + 16})`)
.selectAll('rect')
.data(keyColors)
.data(CONTRIB_LEGENDS)
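// one legend cell is rendered per contribution bucket; the datum index doubles as its data-level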
.enter()
.append('rect')
.attr('width', this.daySize)
.attr('height', this.daySize)
.attr('x', (color, i) => this.daySizeWithSpace * i)
.attr('x', (_, i) => this.daySizeWithSpace * i)
.attr('y', 0)
.attr('fill', (color) => color)
.attr('class', 'has-tooltip')
.attr('title', (color, i) => keyValues[i])
.attr('data-level', (_, i) => i)
.attr('class', 'user-contrib-cell has-tooltip contrib-legend')
.attr('title', (x) => x.title)
.attr('data-container', 'body')
.attr('data-html', true);
}
initColor() {
const colorRange = [
'#ededed',
this.colorKey(0),
this.colorKey(1),
this.colorKey(2),
this.colorKey(3),
];
return d3.scaleThreshold().domain([0, 10, 20, 30]).range(colorRange);
}
clickDay(stamp) {
if (this.currentSelectedDate !== stamp.date) {
this.currentSelectedDate = stamp.date;

View File

@@ -30,6 +30,16 @@
cursor: pointer;
stroke: $black;
}
// `app/assets/javascripts/pages/users/activity_calendar.js` sets this attribute
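// One `&[data-level='N'] { fill: ...; }` rule is generated per entry in $calendar-activity-colors (level 0 = #ededed through level 4 = #254e77)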
@for $i from 1 through length($calendar-activity-colors) {
$color: nth($calendar-activity-colors, $i);
$level: $i - 1;
&[data-level='#{$level}'] {
fill: $color;
}
}
}
.user-contrib-text {

View File

@@ -716,6 +716,18 @@ $job-line-number-width: 50px;
$job-line-number-margin: 43px;
$job-arrow-margin: 55px;
/*
* Calendar
*/
// See https://gitlab.com/gitlab-org/gitlab/-/issues/332150 to align with Pajamas Design System
$calendar-activity-colors: (
#ededed,
#acd5f2,
#7fa8c9,
#527ba0,
#254e77,
);
/*
* Commit Page
*/

View File

@@ -15,6 +15,10 @@ module Ci
next unless job
validate_job!(job)
if job.user && Feature.enabled?(:ci_scoped_job_token, job.project, default_enabled: :yaml)
job.user.set_ci_job_token_scope!(job)
end
end
end
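For context, the scope above is only recorded while the `ci_scoped_job_token` feature flag is enabled for the job's project; the flag's YAML definition is added later in this commit. A hedged console sketch using the standard feature-flag API, where `project` is a hypothetical record:

```ruby
# Enable the flag for a single project (actor-based gate).
Feature.enable(:ci_scoped_job_token, project)
Feature.enabled?(:ci_scoped_job_token, project, default_enabled: :yaml) # => true

# Disable it again; the YAML default (`default_enabled: false`) then applies.
Feature.disable(:ci_scoped_job_token, project)
```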

View File

@@ -0,0 +1,33 @@
# frozen_string_literal: true
# The connection between a source project (which defines the job token scope)
# and a target project which is the one allowed to be accessed by the job token.
module Ci
module JobToken
class ProjectScopeLink < ApplicationRecord
self.table_name = 'ci_job_token_project_scope_links'
belongs_to :source_project, class_name: 'Project'
belongs_to :target_project, class_name: 'Project'
belongs_to :added_by, class_name: 'User'
scope :from_project, ->(project) { where(source_project: project) }
scope :to_project, ->(project) { where(target_project: project) }
validates :source_project, presence: true
validates :target_project, presence: true
validate :not_self_referential_link
private
def not_self_referential_link
return unless source_project && target_project
if source_project == target_project
self.errors.add(:target_project, _("can't be the same as the source project"))
end
end
end
end
end
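For illustration, a minimal Rails-console sketch of how this model behaves; `source`, `target`, and `maintainer` are hypothetical records, not part of this change:

```ruby
# Hypothetical persisted records; any two projects and a user would do.
source     = Project.find(1)
target     = Project.find(2)
maintainer = User.find(3)

# Allow job tokens issued for `source` to access `target`.
Ci::JobToken::ProjectScopeLink.create!(
  source_project: source,
  target_project: target,
  added_by: maintainer
)

Ci::JobToken::ProjectScopeLink.from_project(source).to_project(target).exists? # => true

# Self-referential links are rejected by the custom validation.
link = Ci::JobToken::ProjectScopeLink.new(source_project: source, target_project: source)
link.valid?                  # => false
link.errors[:target_project] # => ["can't be the same as the source project"]
```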

View File

@@ -0,0 +1,40 @@
# frozen_string_literal: true
# This model represents the surface where a CI_JOB_TOKEN can be used.
# A Scope is initialized with the project that the job token belongs to,
# and indicates what are all the other projects that the token could access.
#
# By default a job token can only access its own project, which is the same
# project that defines the scope.
# By adding ScopeLinks to the scope we can allow other projects to be accessed
# by the job token. This works as an allowlist of projects for a job token.
#
# If a project is not included in the scope we should not allow the job user
# to access it since operations using CI_JOB_TOKEN should be considered untrusted.
module Ci
module JobToken
class Scope
attr_reader :source_project
def initialize(project)
@source_project = project
end
def includes?(target_project)
target_project.id == source_project.id ||
Ci::JobToken::ProjectScopeLink.from_project(source_project).to_project(target_project).exists?
end
def all_projects
Project.from_union([
Project.id_in(source_project),
Project.where_exists(
Ci::JobToken::ProjectScopeLink
.from_project(source_project)
.where('projects.id = ci_job_token_project_scope_links.target_project_id'))
], remove_duplicates: false)
end
end
end
end
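Continuing the hedged sketch from the previous file, the scope always covers its own project plus any project linked to it:

```ruby
scope = Ci::JobToken::Scope.new(source)

scope.includes?(source)  # => true, a token always covers its own project
scope.includes?(target)  # => true, because of the ProjectScopeLink created above
scope.all_projects       # => a relation containing both `source` and `target`
```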

View File

@@ -1,12 +0,0 @@
# frozen_string_literal: true
# This model is not intended to be used.
# It is a temporary reference to the old non-partitioned
# web_hook_logs table.
# Please refer to https://gitlab.com/groups/gitlab-org/-/epics/5558
# for details.
# rubocop:disable Gitlab/NamespacedClass: This is a temporary class with no relevant namespace
# WebHook, WebHookLog and all hooks are defined outside of a namespace
class WebHookLogArchived < ApplicationRecord
self.table_name = 'web_hook_logs_archived'
end

View File

@@ -1923,6 +1923,20 @@ class User < ApplicationRecord
confirmed? && !blocked? && !ghost?
end
# This attribute hosts a Ci::JobToken::Scope object which is set when
# the user is authenticated successfully via CI_JOB_TOKEN.
def ci_job_token_scope
Gitlab::SafeRequestStore[ci_job_token_scope_cache_key]
end
def set_ci_job_token_scope!(job)
Gitlab::SafeRequestStore[ci_job_token_scope_cache_key] = Ci::JobToken::Scope.new(job.project)
end
def from_ci_job_token?
ci_job_token_scope.present?
end
protected
# override, from Devise::Validatable
@@ -2086,6 +2100,10 @@ class User < ApplicationRecord
def update_highest_role_attribute
id
end
def ci_job_token_scope_cache_key
"users:#{id}:ci:job_token_scope"
end
end
User.prepend_mod_with('User')
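A rough sketch of the request lifecycle, assuming `job` is a `Ci::Build` whose token authenticated the request, and that a request store is active (`Gitlab::SafeRequestStore` discards writes outside a request):

```ruby
user = job.user

user.from_ci_job_token?            # => false, nothing cached yet
user.set_ci_job_token_scope!(job)  # caches a Ci::JobToken::Scope for this request
user.from_ci_job_token?            # => true
user.ci_job_token_scope.includes?(job.project) # => true, own project is always in scope
```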

View File

@@ -84,6 +84,10 @@ module PolicyActor
def password_expired?
false
end
def from_ci_job_token?
false
end
end
PolicyActor.prepend_mod_with('PolicyActor')

View File

@@ -75,6 +75,11 @@ class ProjectPolicy < BasePolicy
user.is_a?(DeployToken) && user.has_access_to?(project) && user.write_package_registry
end
desc "If user is authenticated via CI job token then the target project should be in scope"
condition(:project_allowed_for_job_token) do
!@user&.from_ci_job_token? || @user.ci_job_token_scope.includes?(project)
end
with_scope :subject
condition(:forking_allowed) do
@subject.feature_available?(:forking, @user)
@@ -508,6 +513,8 @@ class ProjectPolicy < BasePolicy
enable :read_project_for_iids
end
rule { ~project_allowed_for_job_token }.prevent_all
rule { can?(:public_access) }.policy do
enable :read_package
enable :read_project
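A hedged sketch of the net effect, reusing the hypothetical records from the model sketches above: once a user carries a job token scope, any project outside that scope is denied wholesale, while in-scope projects still go through the normal permission checks:

```ruby
user.set_ci_job_token_scope!(job)  # `job` belongs to `source`
other = Project.find(42)           # hypothetical project with no scope link

Ability.allowed?(user, :read_project, other)        # => false, via ~project_allowed_for_job_token
Ability.allowed?(user, :read_project, job.project)  # usual policy result, the rule does not interfere
```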

View File

@@ -27,9 +27,6 @@
provider_type: @cluster.provider_type,
pre_installed_knative: @cluster.knative_pre_installed? ? 'true': 'false',
help_path: help_page_path('user/project/clusters/index.md'),
helm_help_path: help_page_path('user/clusters/applications.md', anchor: 'helm'),
ingress_help_path: help_page_path('user/clusters/applications.md', anchor: 'determining-the-external-endpoint-automatically'),
ingress_dns_help_path: help_page_path('user/clusters/applications.md', anchor: 'pointing-your-dns-at-the-external-endpoint'),
environments_help_path: help_page_path('ci/environments/index.md', anchor: 'create-a-static-environment'),
clusters_help_path: help_page_path('user/project/clusters/index.md', anchor: 'deploying-to-a-kubernetes-cluster'),
deploy_boards_help_path: help_page_path('user/project/deploy_boards.md', anchor: 'enabling-deploy-boards'),
@@ -51,13 +48,20 @@
= render 'banner'
.gl-alert.gl-alert-warning{ role: 'alert' }
= sprite_icon('warning', css_class: "gl-alert-icon gl-alert-icon-no-title gl-icon")
%button.js-close.gl-alert-dismiss{ type: 'button', 'aria-label' => _('Dismiss'), data: { testid: 'dismiss-one-click-application-removal' } }
= sprite_icon('close', css_class: 'gl-icon')
.gl-alert-body
= s_('ClusterApplicationsRemoved|One-click application management was removed in GitLab 14.0. Your applications are still installed in your cluster, and integrations continue working.')
= link_to _('More information.'), help_page_path("user/clusters/applications"), target: '_blank'
- if cluster_created?(@cluster)
.js-toggle-container
%ul.nav-links.mobile-separator.nav.nav-tabs{ role: 'tablist' }
= render 'details_tab'
= render_if_exists 'clusters/clusters/environments_tab'
= render 'clusters/clusters/health_tab'
= render 'applications_tab'
= render 'integrations_tab'
= render 'advanced_settings_tab'

View File

@@ -0,0 +1,8 @@
---
name: ci_scoped_job_token
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/62733
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/332272
milestone: '14.0'
type: development
group: group::pipeline execution
default_enabled: false

View File

@@ -0,0 +1,24 @@
# frozen_string_literal: true
class CreateCiJobTokenProjectScopeLinks < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
def up
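# with_lock_retries wraps the DDL in lock-timeout-and-retry logic so it cannot block concurrent database traffic for long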
with_lock_retries do
create_table :ci_job_token_project_scope_links, if_not_exists: true do |t|
t.belongs_to :source_project, index: false, null: false, foreign_key: { to_table: :projects, on_delete: :cascade }
t.belongs_to :target_project, null: false, foreign_key: { to_table: :projects, on_delete: :cascade }
t.belongs_to :added_by, foreign_key: { to_table: :users, on_delete: :nullify }
t.datetime_with_timezone :created_at, null: false
t.index [:source_project_id, :target_project_id], unique: true, name: 'i_ci_job_token_project_scope_links_on_source_and_target_project'
end
end
end
def down
with_lock_retries do
drop_table :ci_job_token_project_scope_links, if_exists: true
end
end
end

View File

@@ -0,0 +1,43 @@
# frozen_string_literal: true
class DropNonPartitionedWebHookLogs < ActiveRecord::Migration[6.1]
include Gitlab::Database::MigrationHelpers
include Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers
DOWNTIME = false
def up
drop_nonpartitioned_archive_table(:web_hook_logs)
end
def down
execute(<<~SQL)
CREATE TABLE web_hook_logs_archived (
id integer NOT NULL,
web_hook_id integer NOT NULL,
trigger character varying,
url character varying,
request_headers text,
request_data text,
response_headers text,
response_body text,
response_status character varying,
execution_duration double precision,
internal_error_message character varying,
created_at timestamp without time zone NOT NULL,
updated_at timestamp without time zone NOT NULL
);
ALTER TABLE ONLY web_hook_logs_archived ADD CONSTRAINT web_hook_logs_archived_pkey PRIMARY KEY (id);
CREATE INDEX index_web_hook_logs_on_created_at_and_web_hook_id ON web_hook_logs_archived USING btree (created_at, web_hook_id);
CREATE INDEX index_web_hook_logs_on_web_hook_id ON web_hook_logs_archived USING btree (web_hook_id);
ALTER TABLE ONLY web_hook_logs_archived ADD CONSTRAINT fk_rails_666826e111 FOREIGN KEY (web_hook_id) REFERENCES web_hooks(id) ON DELETE CASCADE;
SQL
with_lock_retries do
create_trigger_to_sync_tables(:web_hook_logs, :web_hook_logs_archived, 'id')
end
end
end

View File

@@ -0,0 +1 @@
8c0661a42edbdb79be283df0e88879707ef34ba3fe21b6756b21cd99ea9f05de

View File

@@ -0,0 +1 @@
de8468173d8a7499d03b84913cf071af8842a2f47d5f85908af20bf7c71dc96b

View File

@@ -41,62 +41,6 @@ RETURN NULL;
END
$$;
CREATE FUNCTION table_sync_function_29bc99d6db() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF (TG_OP = 'DELETE') THEN
DELETE FROM web_hook_logs_archived where id = OLD.id;
ELSIF (TG_OP = 'UPDATE') THEN
UPDATE web_hook_logs_archived
SET web_hook_id = NEW.web_hook_id,
trigger = NEW.trigger,
url = NEW.url,
request_headers = NEW.request_headers,
request_data = NEW.request_data,
response_headers = NEW.response_headers,
response_body = NEW.response_body,
response_status = NEW.response_status,
execution_duration = NEW.execution_duration,
internal_error_message = NEW.internal_error_message,
created_at = NEW.created_at,
updated_at = NEW.updated_at
WHERE web_hook_logs_archived.id = NEW.id;
ELSIF (TG_OP = 'INSERT') THEN
INSERT INTO web_hook_logs_archived (id,
web_hook_id,
trigger,
url,
request_headers,
request_data,
response_headers,
response_body,
response_status,
execution_duration,
internal_error_message,
created_at,
updated_at)
VALUES (NEW.id,
NEW.web_hook_id,
NEW.trigger,
NEW.url,
NEW.request_headers,
NEW.request_data,
NEW.response_headers,
NEW.response_body,
NEW.response_status,
NEW.execution_duration,
NEW.internal_error_message,
NEW.created_at,
NEW.updated_at);
END IF;
RETURN NULL;
END
$$;
COMMENT ON FUNCTION table_sync_function_29bc99d6db() IS 'Partitioning migration: table sync for web_hook_logs table';
CREATE FUNCTION trigger_07c94931164e() RETURNS trigger
LANGUAGE plpgsql
AS $$
@@ -10777,6 +10721,23 @@ CREATE SEQUENCE ci_job_artifacts_id_seq
ALTER SEQUENCE ci_job_artifacts_id_seq OWNED BY ci_job_artifacts.id;
CREATE TABLE ci_job_token_project_scope_links (
id bigint NOT NULL,
source_project_id bigint NOT NULL,
target_project_id bigint NOT NULL,
added_by_id bigint,
created_at timestamp with time zone NOT NULL
);
CREATE SEQUENCE ci_job_token_project_scope_links_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE ci_job_token_project_scope_links_id_seq OWNED BY ci_job_token_project_scope_links.id;
CREATE TABLE ci_job_variables (
id bigint NOT NULL,
key character varying NOT NULL,
@@ -19351,22 +19312,6 @@ CREATE SEQUENCE vulnerability_user_mentions_id_seq
ALTER SEQUENCE vulnerability_user_mentions_id_seq OWNED BY vulnerability_user_mentions.id;
CREATE TABLE web_hook_logs_archived (
id integer NOT NULL,
web_hook_id integer NOT NULL,
trigger character varying,
url character varying,
request_headers text,
request_data text,
response_headers text,
response_body text,
response_status character varying,
execution_duration double precision,
internal_error_message character varying,
created_at timestamp without time zone NOT NULL,
updated_at timestamp without time zone NOT NULL
);
CREATE SEQUENCE web_hook_logs_id_seq
START WITH 1
INCREMENT BY 1
@@ -19699,6 +19644,8 @@ ALTER TABLE ONLY ci_instance_variables ALTER COLUMN id SET DEFAULT nextval('ci_i
ALTER TABLE ONLY ci_job_artifacts ALTER COLUMN id SET DEFAULT nextval('ci_job_artifacts_id_seq'::regclass);
ALTER TABLE ONLY ci_job_token_project_scope_links ALTER COLUMN id SET DEFAULT nextval('ci_job_token_project_scope_links_id_seq'::regclass);
ALTER TABLE ONLY ci_job_variables ALTER COLUMN id SET DEFAULT nextval('ci_job_variables_id_seq'::regclass);
ALTER TABLE ONLY ci_minutes_additional_packs ALTER COLUMN id SET DEFAULT nextval('ci_minutes_additional_packs_id_seq'::regclass);
@@ -20881,6 +20828,9 @@ ALTER TABLE ONLY ci_instance_variables
ALTER TABLE ONLY ci_job_artifacts
ADD CONSTRAINT ci_job_artifacts_pkey PRIMARY KEY (id);
ALTER TABLE ONLY ci_job_token_project_scope_links
ADD CONSTRAINT ci_job_token_project_scope_links_pkey PRIMARY KEY (id);
ALTER TABLE ONLY ci_job_variables
ADD CONSTRAINT ci_job_variables_pkey PRIMARY KEY (id);
@@ -22129,9 +22079,6 @@ ALTER TABLE ONLY vulnerability_statistics
ALTER TABLE ONLY vulnerability_user_mentions
ADD CONSTRAINT vulnerability_user_mentions_pkey PRIMARY KEY (id);
ALTER TABLE ONLY web_hook_logs_archived
ADD CONSTRAINT web_hook_logs_archived_pkey PRIMARY KEY (id);
ALTER TABLE ONLY web_hook_logs
ADD CONSTRAINT web_hook_logs_pkey PRIMARY KEY (id, created_at);
@@ -22335,6 +22282,8 @@ CREATE INDEX finding_evidences_on_vulnerability_occurrence_id ON vulnerability_f
CREATE INDEX finding_links_on_vulnerability_occurrence_id ON vulnerability_finding_links USING btree (vulnerability_occurrence_id);
CREATE UNIQUE INDEX i_ci_job_token_project_scope_links_on_source_and_target_project ON ci_job_token_project_scope_links USING btree (source_project_id, target_project_id);
CREATE INDEX idx_analytics_devops_adoption_segments_on_namespace_id ON analytics_devops_adoption_segments USING btree (namespace_id);
CREATE INDEX idx_audit_events_part_on_entity_id_desc_author_id_created_at ON ONLY audit_events USING btree (entity_id, entity_type, id DESC, author_id, created_at);
@@ -22815,6 +22764,10 @@ CREATE INDEX index_ci_job_artifacts_on_project_id ON ci_job_artifacts USING btre
CREATE INDEX index_ci_job_artifacts_on_project_id_for_security_reports ON ci_job_artifacts USING btree (project_id) WHERE (file_type = ANY (ARRAY[5, 6, 7, 8]));
CREATE INDEX index_ci_job_token_project_scope_links_on_added_by_id ON ci_job_token_project_scope_links USING btree (added_by_id);
CREATE INDEX index_ci_job_token_project_scope_links_on_target_project_id ON ci_job_token_project_scope_links USING btree (target_project_id);
CREATE INDEX index_ci_job_variables_on_job_id ON ci_job_variables USING btree (job_id);
CREATE UNIQUE INDEX index_ci_job_variables_on_key_and_job_id ON ci_job_variables USING btree (key, job_id);
@@ -24973,10 +24926,6 @@ CREATE UNIQUE INDEX index_vulns_user_mentions_on_vulnerability_id ON vulnerabili
CREATE UNIQUE INDEX index_vulns_user_mentions_on_vulnerability_id_and_note_id ON vulnerability_user_mentions USING btree (vulnerability_id, note_id);
CREATE INDEX index_web_hook_logs_on_created_at_and_web_hook_id ON web_hook_logs_archived USING btree (created_at, web_hook_id);
CREATE INDEX index_web_hook_logs_on_web_hook_id ON web_hook_logs_archived USING btree (web_hook_id);
CREATE INDEX index_web_hook_logs_part_on_created_at_and_web_hook_id ON ONLY web_hook_logs USING btree (created_at, web_hook_id);
CREATE INDEX index_web_hook_logs_part_on_web_hook_id ON ONLY web_hook_logs USING btree (web_hook_id);
@@ -25359,8 +25308,6 @@ ALTER INDEX product_analytics_events_experimental_pkey ATTACH PARTITION gitlab_p
ALTER INDEX product_analytics_events_experimental_pkey ATTACH PARTITION gitlab_partitions_static.product_analytics_events_experimental_63_pkey;
CREATE TRIGGER table_sync_trigger_b99eb6998c AFTER INSERT OR DELETE OR UPDATE ON web_hook_logs FOR EACH ROW EXECUTE FUNCTION table_sync_function_29bc99d6db();
CREATE TRIGGER trigger_07c94931164e BEFORE INSERT OR UPDATE ON push_event_payloads FOR EACH ROW EXECUTE FUNCTION trigger_07c94931164e();
CREATE TRIGGER trigger_21e7a2602957 BEFORE INSERT OR UPDATE ON ci_build_needs FOR EACH ROW EXECUTE FUNCTION trigger_21e7a2602957();
@@ -26493,6 +26440,9 @@ ALTER TABLE ONLY metrics_dashboard_annotations
ALTER TABLE ONLY wiki_page_slugs
ADD CONSTRAINT fk_rails_358b46be14 FOREIGN KEY (wiki_page_meta_id) REFERENCES wiki_page_meta(id) ON DELETE CASCADE;
ALTER TABLE ONLY ci_job_token_project_scope_links
ADD CONSTRAINT fk_rails_35f7f506ce FOREIGN KEY (added_by_id) REFERENCES users(id) ON DELETE SET NULL;
ALTER TABLE ONLY board_labels
ADD CONSTRAINT fk_rails_362b0600a3 FOREIGN KEY (label_id) REFERENCES labels(id) ON DELETE CASCADE;
@@ -26628,6 +26578,9 @@ ALTER TABLE ONLY upcoming_reconciliations
ALTER TABLE ONLY ci_pipeline_artifacts
ADD CONSTRAINT fk_rails_4a70390ca6 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY ci_job_token_project_scope_links
ADD CONSTRAINT fk_rails_4b2ee3290b FOREIGN KEY (source_project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY group_deletion_schedules
ADD CONSTRAINT fk_rails_4b8c694a6c FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE;
@@ -26823,9 +26776,6 @@ ALTER TABLE ONLY operations_feature_flags_clients
ALTER TABLE ONLY namespace_admin_notes
ADD CONSTRAINT fk_rails_666166ea7b FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE CASCADE;
ALTER TABLE ONLY web_hook_logs_archived
ADD CONSTRAINT fk_rails_666826e111 FOREIGN KEY (web_hook_id) REFERENCES web_hooks(id) ON DELETE CASCADE;
ALTER TABLE ONLY analytics_cycle_analytics_project_value_streams
ADD CONSTRAINT fk_rails_669f4ba293 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
@@ -26844,6 +26794,9 @@ ALTER TABLE ONLY vulnerability_finding_evidence_headers
ALTER TABLE ONLY geo_hashed_storage_migrated_events
ADD CONSTRAINT fk_rails_687ed7d7c5 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY ci_job_token_project_scope_links
ADD CONSTRAINT fk_rails_6904b38465 FOREIGN KEY (target_project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY plan_limits
ADD CONSTRAINT fk_rails_69f8b6184f FOREIGN KEY (plan_id) REFERENCES plans(id) ON DELETE CASCADE;

View File

@@ -142,8 +142,9 @@ different places.
To view the IP address of a shared runner, you must have admin access to
the GitLab instance. To determine this:
1. Visit **Admin Area > Overview > Runners**.
1. Look for the runner in the table and you should see a column for **IP Address**.
1. On the top bar, select **Menu >** **{admin}** **Admin**.
1. On the left sidebar, select **Overview > Runners**.
1. Find the runner in the table and view the **IP Address** column.
![shared runner IP address](img/shared_runner_ip_address.png)

View File

@@ -215,7 +215,7 @@ click NodeExporter "./architecture.html#node-exporter"
### Component legend
- ✅ - Installed by default
- ⚙ - Requires additional configuration, or GitLab Managed Apps
- ⚙ - Requires additional configuration
- ⤓ - Manual installation required
- ❌ - Not supported or no instructions available
- N/A - Not applicable

View File

@@ -480,11 +480,7 @@ because of 2 reasons:
### PostgreSQL versions testing
Our test suite runs against PG12 as GitLab.com runs on PG12 and
[Omnibus defaults to PG12 for new installs and upgrades](https://docs.gitlab.com/omnibus/package-information/postgresql_versions.html).
Our test suite is currently running against PG11, since GitLab.com still runs on PG11.
We do run our test suite against PG11 on nightly scheduled pipelines as well as upon specific
database library changes in MRs and `master` pipelines (with the `rspec db-library-code pg11` job).
[Omnibus defaults to PG12 for new installs and upgrades](https://docs.gitlab.com/omnibus/package-information/postgresql_versions.html).
#### Current versions testing
@@ -502,7 +498,6 @@ We follow the [PostgreSQL versions shipped with Omnibus GitLab](https://docs.git
| PostgreSQL version | 13.11 (April 2021) | 13.12 (May 2021) | 14.0 (June 2021?) |
| -------------------| ---------------------- | ---------------------- | ---------------------- |
| PG12 | `nightly` | MRs/`2-hour`/`nightly` | MRs/`2-hour`/`nightly` |
| PG11 | MRs/`2-hour`/`nightly` | `nightly` | `nightly` |
### Test jobs
@@ -730,8 +725,6 @@ that are scoped to a single [configuration keyword](../ci/yaml/README.md#job-key
| `.qa-cache` | Allows a job to use a default `cache` definition suitable for QA tasks. |
| `.yarn-cache` | Allows a job to use a default `cache` definition suitable for frontend jobs that do a `yarn install`. |
| `.assets-compile-cache` | Allows a job to use a default `cache` definition suitable for frontend jobs that compile assets. |
| `.use-pg11` | Allows a job to run the `postgres` 11 and `redis` services (see [`.gitlab/ci/global.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitlab/ci/global.gitlab-ci.yml) for the specific versions of the services). |
| `.use-pg11-ee` | Same as `.use-pg11` but also use an `elasticsearch` service (see [`.gitlab/ci/global.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitlab/ci/global.gitlab-ci.yml) for the specific version of the service). |
| `.use-pg12` | Allows a job to use the `postgres` 12 and `redis` services (see [`.gitlab/ci/global.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitlab/ci/global.gitlab-ci.yml) for the specific versions of the services). |
| `.use-pg12-ee` | Same as `.use-pg12` but also use an `elasticsearch` service (see [`.gitlab/ci/global.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitlab/ci/global.gitlab-ci.yml) for the specific version of the service). |
| `.use-kaniko` | Allows a job to use the `kaniko` tool to build Docker images. |

View File

@@ -73,7 +73,7 @@ production services. GitLab provides centralized, aggregated log storage for you
distributed application, enabling you to collect logs across multiple services and
infrastructure.
- [View logs of pods or managed applications](../user/project/clusters/kubernetes_pod_logs.md)
- [View logs of pods](../user/project/clusters/kubernetes_pod_logs.md)
in connected Kubernetes clusters.
## Manage your infrastructure in code

View File

@@ -217,8 +217,46 @@ links:
## Troubleshooting
When troubleshooting issues with a managed Prometheus app, it is often useful to
[view the Prometheus UI](../../../user/project/integrations/prometheus.md#access-the-ui-of-a-prometheus-managed-application-in-kubernetes).
### Accessing the UI of Prometheus in Kubernetes
When troubleshooting issues with an in-cluster Prometheus, it can help to
view the Prometheus UI. In the example below, we assume the Prometheus
server to be the pod `prometheus-prometheus-server` in the `gitlab-managed-apps`
namespace:
1. Find the name of the Prometheus pod in the user interface of your Kubernetes
provider, such as GKE, or by running the following `kubectl` command in your
terminal. For example:
```shell
kubectl get pods -n gitlab-managed-apps | grep 'prometheus-prometheus-server'
```
The command should return a result like the following example, where
`prometheus-prometheus-server-55b4bd64c9-dpc6b` is the name of the Prometheus pod:
```plaintext
gitlab-managed-apps prometheus-prometheus-server-55b4bd64c9-dpc6b 2/2 Running 0 71d
```
1. Run a `kubectl port-forward` command. In the following example, `9090` is the
Prometheus server's listening port:
```shell
kubectl port-forward prometheus-prometheus-server-55b4bd64c9-dpc6b 9090:9090 -n gitlab-managed-apps
```
The `port-forward` command forwards all requests sent to your system's `9090` port
to the `9090` port of the Prometheus pod. If the `9090` port on your system is used
by another application, you can change the port number before the colon to your
desired port. For example, to forward port `8080` of your local system, change the
command to:
```shell
kubectl port-forward prometheus-prometheus-server-55b4bd64c9-dpc6b 8080:9090 -n gitlab-managed-apps
```
1. Open `localhost:9090` in your browser to display the Prometheus user interface.
### "No data found" error on Metrics dashboard page

View File

@@ -16,10 +16,10 @@ critical. For GitLab to display your information in charts, you must:
For an overview, see [How to instrument Prometheus metrics in GitLab](https://www.youtube.com/watch?v=tuI2oJ3TTB4).
1. **Expose metrics for capture** - Make logs, metrics, and traces available for capture.
1. [**Configure Prometheus to gather metrics**](#configure-prometheus-to-gather-metrics) -
Deploy managed applications like Elasticsearch, Prometheus, and Jaeger to gather
Use applications like Elasticsearch, Prometheus, and Jaeger to gather
the data you've exposed.
1. **GitLab collects metrics** - GitLab uses Prometheus to scrape the data you've
captured in your managed apps, and prepares the data for display. To learn more, read
captured in your applications, and prepares the data for display. To learn more, read
[Collect and process metrics](#collect-and-process-metrics).
1. **Display charts in the GitLab user interface** - GitLab converts your metrics
into easy-to-read charts on a default dashboard. You can create as many custom charts
@@ -34,30 +34,10 @@ your Prometheus integration depends on where your apps are running:
- **For manually-configured Prometheus** -
[Specify your Prometheus server](../../user/project/integrations/prometheus.md#manual-configuration-of-prometheus),
and define at least one environment.
- **For GitLab-managed Prometheus** - GitLab can
[deploy and manage Prometheus](../../user/project/integrations/prometheus.md#managed-prometheus-on-kubernetes) for you.
You must also complete a code deployment, as described in
[Deploy code with GitLab-managed Prometheus](#deploy-code-with-gitlab-managed-prometheus),
for the **Operations > Metrics** page to contain data.
### Deploy code with GitLab-managed Prometheus
For GitLab-managed Prometheus, you can set up [Auto DevOps](../../topics/autodevops/index.md)
to quickly create a deployment:
1. Navigate to your project's **Operations > Kubernetes** page.
1. Ensure that, in addition to Prometheus, you also have GitLab Runner and Ingress
installed.
1. After installing Ingress, copy its endpoint.
1. Navigate to your project's **Settings > CI/CD** page. In the
**Auto DevOps** section, select a deployment strategy and save your changes.
1. On the same page, in the **Variables** section, add a variable named
`KUBE_INGRESS_BASE_DOMAIN` with the value of the Ingress endpoint you
copied previously. Leave the type as **Variable**.
1. Navigate to your project's **{rocket}** **CI/CD > Pipelines** page, and run a
pipeline on any branch.
1. When the pipeline has run successfully, graphs are available on the
**Operations > Metrics** page.
- **For a cluster-integrated Prometheus** - GitLab can query
[an in-cluster Prometheus](../../user/clusters/integrations.md#prometheus-cluster-integration).
You must also complete a code deployment to your cluster for the **Operations > Metrics**
page to contain data. You can do this using [Auto DevOps](../../topics/autodevops/quick_start_guide.md).
![Monitoring Dashboard](img/prometheus_monitoring_dashboard_v13_3.png)

View File

@@ -66,8 +66,7 @@ To make full use of Auto DevOps with Kubernetes, you need:
Runners should be registered as [shared runners](../../ci/runners/runners_scope.md#shared-runners)
for the entire GitLab instance, or [specific runners](../../ci/runners/runners_scope.md#specific-runners)
that are assigned to specific projects (the default if you've installed the
GitLab Runner managed application).
that are assigned to specific projects.
- **Prometheus** (for [Auto Monitoring](stages.md#auto-monitoring))

View File

@@ -6,12 +6,15 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# GitLab Managed Apps (DEPRECATED) **(FREE)**
NOTE:
The new recommended way to manage cluster applications is to use the [cluster management project template](management_project_template.md).
If you want to migrate your GitLab Managed Apps management to this template, see [migrating from GitLab managed apps to project template](migrating_from_gma_to_project_template.md).
**GitLab Managed Apps** was created to help you configure applications in your
cluster directly from GitLab. You could use this feature through two different
methods: "one-click install" and "CI/CD template". Both methods are **deprecated**:
- The **one-click install** method was deprecated in GitLab 13.9 and **will be
removed** in GitLab 14.0.
- The **one-click install** method was [removed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/63348) in GitLab 14.0.
- The **CI/CD template method** was deprecated in GitLab 13.12 and is scheduled
to be removed in GitLab 15.0.
@@ -19,15 +22,6 @@ Both methods were limiting as you couldn't fully customize your third-party apps
through GitLab Managed Apps. Therefore, we decided to deprecate this feature and provide
better [GitOps-driven alternatives](https://about.gitlab.com/direction/configure/kubernetes_management/#gitlab-managed-applications) to our users, such as [cluster integrations](integrations.md#cluster-integrations) and [cluster management project](management_project.md).
Read the sections below, according to the installation method you chose, to learn
how to keep your apps up and running:
- [One-click install method](#install-with-one-click-deprecated)
- [CI/CD template method](#install-using-gitlab-cicd-deprecated)
NOTE:
Despite being deprecated, the recommended way to install GitLab integrated applications is the GitLab CI/CD method presented below. We are working on a [cluster management project template](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) with a simple upgrade path from the CI/CD-based method.
## Install using GitLab CI/CD (DEPRECATED)
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/20822) in GitLab 12.6.
@@ -37,8 +31,8 @@ WARNING:
The GitLab Managed Apps CI/CD installation method was [deprecated in 13.12](https://gitlab.com/gitlab-org/gitlab/-/issues/327908).
Your applications continue to work. However, we no longer support and maintain the GitLab CI/CD template for
Managed Apps (`Managed-Cluster-Applications.gitlab-ci.yml`).
As a replacement, we are working on a [cluster management project template](https://gitlab.com/gitlab-org/gitlab/-/issues/327908),
still to be released.
The new recommended way to manage cluster applications is to use the [cluster management project template](management_project_template.md).
If you want to migrate your GitLab Managed Apps management to this template, see [migrating from GitLab managed apps to project template](migrating_from_gma_to_project_template.md).
The CI/CD template was the primary method for installing applications to clusters via GitLab Managed Apps
and customizing them through Helm.
@@ -821,11 +815,6 @@ management project. Refer to the
[chart](https://gitlab.com/gitlab-org/charts/elastic-stack) for all
available configuration options.
NOTE:
In this alpha implementation of installing Elastic Stack through CI, reading the
environment logs through Elasticsearch is unsupported. This is supported if
[installed with the UI](#elastic-stack).
Support for installing the Elastic Stack managed application is provided by the
GitLab APM group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
@@ -1033,6 +1022,17 @@ GitLab Container Security group. If you run into unknown issues,
at least 2 people from the
[Container Security group](https://about.gitlab.com/handbook/product/categories/#container-security-group).
## Install with one click (REMOVED)
> [Removed](https://gitlab.com/groups/gitlab-org/-/epics/4280) in GitLab 14.0.
The one-click installation method was deprecated in GitLab 13.9 and removed in [GitLab 14.0](https://gitlab.com/groups/gitlab-org/-/epics/4280).
The removal does not break or uninstall any apps you have installed; it only
removes the "Applications" tab from the cluster page.
Follow the process to [take ownership of your GitLab Managed Apps](#take-ownership-of-your-gitlab-managed-apps).
If you are not yet on GitLab 14.0, you can refer to [an older version of this document](https://docs.gitlab.com/13.12/ee/user/clusters/applications.html#install-with-one-click-deprecated).
## Browse applications logs
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/36769) in GitLab 13.2.
@@ -1040,558 +1040,11 @@ at least 2 people from the
Logs produced by pods running **GitLab Managed Apps** can be browsed using
[**Log Explorer**](../project/clusters/kubernetes_pod_logs.md).
## Install with one click (DEPRECATED)
WARNING:
The one-click installation method was deprecated in GitLab 13.9 and will be removed in [GitLab 14.0](https://gitlab.com/groups/gitlab-org/-/epics/4280).
The removal does not break or uninstall any apps you have installed, but removes the GitLab UI page
for installing and updating your GitLab Managed Apps.
Follow the process to [take ownership of your GitLab Managed Apps](#take-ownership-of-your-gitlab-managed-apps).
Applications managed by GitLab are installed onto the `gitlab-managed-apps`
namespace. This namespace:
- Is different from the namespace used for project deployments.
- Is created once.
- Has a non-configurable name.
To view a list of available applications to install for a:
- [Project-level cluster](../project/clusters/index.md), navigate to your project's
**Operations > Kubernetes**.
- [Group-level cluster](../group/clusters/index.md), navigate to your group's
**Kubernetes** page.
You can install the following applications with one click:
- [Helm](#helm)
- [Ingress](#ingress)
- [cert-manager](#cert-manager)
- [Prometheus](#prometheus)
- [GitLab Runner](#gitlab-runner)
- [JupyterHub](#jupyterhub)
- [Knative](#knative)
- [Crossplane](#crossplane)
- [Elastic Stack](#elastic-stack)
With the exception of Knative, the applications are installed in a dedicated
namespace called `gitlab-managed-apps`.
Some applications are installable only for a project-level cluster.
Support for installing these applications in a group-level cluster is
planned for future releases.
For updates, see the [issue tracking progress](https://gitlab.com/gitlab-org/gitlab/-/issues/24411).
WARNING:
If you have an existing Kubernetes cluster with Helm already installed,
you should be careful as GitLab cannot detect it. In this case, installing
Helm with the applications results in the cluster having it twice, which
can lead to confusion during deployments.
In GitLab versions 11.6 and greater, Helm is upgraded to the latest version
supported by GitLab before installing any of the applications.
### Helm
> - Introduced in GitLab 10.2 for project-level clusters.
> - Introduced in GitLab 11.6 for group-level clusters.
> - [Uses a local Tiller](https://gitlab.com/gitlab-org/gitlab/-/issues/209736) in GitLab 13.2 and later.
> - [Uses Helm 3](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/46267) for clusters created with GitLab 13.6 and later.
> - [Offers legacy Tiller removal](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47457) in GitLab 13.7 and later.
[Helm](https://helm.sh/docs/) is a package manager for Kubernetes and is
used to install the GitLab-managed apps. GitLab runs each `helm` command
in a pod in the `gitlab-managed-apps` namespace inside the cluster.
- For clusters created in GitLab 13.6 and newer, GitLab uses Helm 3 to manage
applications.
- For clusters created on versions of GitLab prior to 13.6, GitLab uses Helm 2
with a local [Tiller](https://v2.helm.sh/docs/glossary/#tiller) server. Prior
to [GitLab 13.2](https://gitlab.com/gitlab-org/gitlab/-/issues/209736), GitLab
used an in-cluster Tiller server in the `gitlab-managed-apps` namespace. You
can safely uninstall the server from the GitLab application page if you have
previously installed it. This doesn't affect your other applications.
The GitLab Helm integration does not support installing applications behind a proxy,
but a [workaround](../../topics/autodevops/index.md#install-applications-behind-a-proxy)
is available.
#### Upgrade a cluster to Helm 3
GitLab does not offer a way to migrate existing application management
on existing clusters from Helm 2 to Helm 3. To migrate a cluster to Helm 3:
1. Uninstall all applications on your cluster.
1. [Remove the cluster integration](../project/clusters/add_remove_clusters.md#removing-integration).
1. [Re-add the cluster](../project/clusters/add_remove_clusters.md#existing-kubernetes-cluster) as
an existing cluster.
### cert-manager
> Introduced in GitLab 11.6 for project- and group-level clusters.
[cert-manager](https://cert-manager.io/docs/) is a native Kubernetes certificate
management controller that helps with issuing certificates. Installing
cert-manager on your cluster issues a certificate by [Let's Encrypt](https://letsencrypt.org/)
and ensures that certificates are valid and up-to-date.
The chart used to install this application depends on the version of GitLab used. In:
- GitLab 12.3 and newer, the [`jetstack/cert-manager`](https://github.com/jetstack/cert-manager)
chart is used with a
[`values.yaml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/vendor/cert_manager/values.yaml)
file.
- GitLab 12.2 and older, the
[`stable/cert-manager`](https://github.com/helm/charts/tree/master/stable/cert-manager)
chart was used.
If you installed cert-manager prior to GitLab 12.3, Let's Encrypt
[blocks requests](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753)
from older versions of `cert-manager`. To resolve this:
1. [Back up any additional configuration](https://cert-manager.io/docs/tutorials/backup/).
1. Uninstall cert-manager.
1. Install cert-manager again.
### GitLab Runner
> - Introduced in GitLab 10.6 for project-level clusters.
> - Introduced in GitLab 11.10 for group-level clusters.
[GitLab Runner](https://docs.gitlab.com/runner/) is the open source project that
is used to run your jobs and send the results back to GitLab. It's used in
conjunction with [GitLab CI/CD](../../ci/README.md), the open-source continuous
integration service included with GitLab that coordinates the jobs.
If the project is on GitLab.com, [shared runners](../gitlab_com/index.md#shared-runners)
are available. You don't have to deploy one if they are enough for your
needs. If a project-specific runner is desired, or there are no shared runners,
you can deploy one.
The deployed runner is set as **privileged**. Root access to the underlying
server is required to build Docker images, so it's the default. Be sure to read
the [security implications](../project/clusters/index.md#security-implications)
before deploying one.
The [`runner/gitlab-runner`](https://gitlab.com/gitlab-org/charts/gitlab-runner)
chart is used to install this application, using
[a preconfigured `values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/blob/main/values.yaml)
file. Customizing the installation by modifying this file is not supported. This
also means you cannot modify `config.toml` file for this Runner. If you want to
have that possibility and still deploy Runner in Kubernetes, consider using the
[Cluster management project](management_project.md) or installing Runner manually
via [GitLab Runner Helm Chart](https://docs.gitlab.com/runner/install/kubernetes.html).
### Ingress
> - Introduced in GitLab 10.2 for project-level clusters.
> - Introduced in GitLab 11.6 for group-level clusters.
[Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/)
provides load balancing, SSL termination, and name-based virtual hosting
out of the box. It acts as a web proxy for your applications and is useful
if you want to use [Auto DevOps](../../topics/autodevops/index.md) or deploy your own web apps.
The Ingress Controller installed is
[Ingress-NGINX](https://kubernetes.io/docs/concepts/services-networking/ingress/),
which is supported by the Kubernetes community.
With the following procedure, a load balancer must be installed in your cluster
to obtain the endpoint. You can use either
Ingress, or Knative's own load balancer ([Istio](https://istio.io)) if using Knative.
To publish your web application, you first need to find the endpoint, which is either an IP
address or a hostname associated with your load balancer.
To install it, click on the **Install** button for Ingress. GitLab attempts
to determine the external endpoint and it should be available in a few minutes.
#### Determining the external endpoint automatically
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/17052) in GitLab 10.6.
After you install Ingress, the external endpoint should be available in a few minutes.
NOTE:
This endpoint can be used for the
[Auto DevOps base domain](../../topics/autodevops/index.md#auto-devops-base-domain)
using the `KUBE_INGRESS_BASE_DOMAIN` environment variable.
If the endpoint doesn't appear and your cluster runs on Google Kubernetes Engine:
1. [Examine your Kubernetes cluster](https://console.cloud.google.com/kubernetes)
on Google Kubernetes Engine to ensure there are no errors on its nodes.
1. Ensure you have enough [Quotas](https://console.cloud.google.com/iam-admin/quotas)
on Google Kubernetes Engine. For more information, see
[Resource Quotas](https://cloud.google.com/compute/quotas).
1. Review [Google Cloud's Status](https://status.cloud.google.com/) for service
disruptions.
The [`stable/nginx-ingress`](https://github.com/helm/charts/tree/master/stable/nginx-ingress)
chart is used to install this application with a
[`values.yaml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/vendor/ingress/values.yaml)
file.
After installing, you may see a `?` for **Ingress IP Address** depending on the
cloud provider. For EKS specifically, this is because the ELB is created
with a DNS name, not an IP address. If GitLab is still unable to
determine the endpoint of your Ingress or Knative application, you can
[determine it manually](#determining-the-external-endpoint-manually).
#### Determining the external endpoint manually
See the [Base domain section](../project/clusters/index.md#base-domain) for a
guide on how to determine the external endpoint manually.
#### Using a static IP
By default, an ephemeral external IP address is associated to the cluster's load
balancer. If you associate the ephemeral IP with your DNS and the IP changes,
your apps aren't reachable, and you'd have to change the DNS record again.
To avoid that, change it into a static reserved IP.
Read how to [promote an ephemeral external IP address in GKE](https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address#promote_ephemeral_ip).
#### Pointing your DNS at the external endpoint
After you have set up the external endpoint, associate it with a
[wildcard DNS record](https://en.wikipedia.org/wiki/Wildcard_DNS_record) (such
as `*.example.com.`) to reach your apps. If your external endpoint is an IP
address, use an A record. If your external endpoint is a hostname, use a CNAME
record.
### JupyterHub
> - Introduced in GitLab 11.0 for project-level clusters.
> - Introduced in GitLab 12.3 for group and instance-level clusters.
[JupyterHub](https://jupyterhub.readthedocs.io/en/stable/) is a multi-user service
for managing notebooks across a team. [Jupyter Notebooks](https://jupyter-notebook.readthedocs.io/en/latest/)
provide a web-based interactive programming environment used for data analysis,
visualization, and machine learning.
The [`jupyter/jupyterhub`](https://jupyterhub.github.io/helm-chart/)
chart is used to install this application with a
[`values.yaml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/vendor/jupyter/values.yaml)
file.
Authentication is enabled only for [project members](../project/members/index.md)
for project-level clusters and group members for group-level clusters with
[Developer or higher](../permissions.md) access to the associated project or group.
GitLab uses a [custom Jupyter image](https://gitlab.com/gitlab-org/jupyterhub-user-image/blob/master/Dockerfile)
that installs additional relevant packages on top of the base Jupyter. Ready-to-use
DevOps Runbooks built with Nurtch's [Rubix library](https://github.com/Nurtch/rubix)
are also available.
More information on creating executable runbooks can be found in
[our Runbooks documentation](../project/clusters/runbooks/index.md#configure-an-executable-runbook-with-gitlab).
Ingress must be installed and have an IP address assigned before
JupyterHub can be installed.
#### Jupyter Git Integration
> - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/28783) in GitLab 12.0 for project-level clusters.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/32512) in GitLab 12.3 for group and instance-level clusters.
When installing JupyterHub onto your Kubernetes cluster,
[JupyterLab's Git extension](https://github.com/jupyterlab/jupyterlab-git)
is provisioned and configured using the authenticated user's:
- Name.
- Email.
- Newly created access token.
JupyterLab's Git extension enables full version control of your notebooks, and
issuance of Git commands in Jupyter. You can issue Git commands through the
**Git** tab on the left panel, or through Jupyter's command-line prompt.
JupyterLab's Git extension stores the user token in the JupyterHub DB in encrypted
format, and in the single-user Jupyter instance as plain text, because
[Git requires storing credentials as plain text](https://git-scm.com/docs/git-credential-store).
Potentially, if a nefarious user finds a way to read from the file system in the
single-user Jupyter instance, they could retrieve the token.
![Jupyter's Git Extension](img/jupyter-git-extension.gif)
You can clone repositories from the files tab in Jupyter:
![Jupyter clone repository](img/jupyter-gitclone.png)
### Knative
> - Introduced in GitLab 11.5 for project-level clusters.
> - Introduced in GitLab 12.3 for group- and instance-level clusters.
[Knative](https://cloud.google.com/knative/) provides a platform to
create, deploy, and manage serverless workloads from a Kubernetes
cluster. It's used in conjunction with, and includes,
[Istio](https://istio.io) to provide an external IP address for all
programs hosted by Knative.
The [`knative/knative`](https://storage.googleapis.com/triggermesh-charts)
chart is used to install this application.
During installation, you must enter a wildcard domain where your applications
are exposed. Configure your DNS server to use the external IP address for that
domain. Applications created and installed are accessible as
`<program_name>.<kubernetes_namespace>.<domain_name>`. This requires
your Kubernetes cluster to have
[RBAC enabled](../project/clusters/add_remove_clusters.md#rbac-cluster-resources).
### Prometheus
> - Introduced in GitLab 10.4 for project-level clusters.
> - Introduced in GitLab 11.11 for group-level clusters.
[Prometheus](https://prometheus.io/docs/introduction/overview/) is an
open-source monitoring and alerting system you can use to supervise your
deployed applications.
GitLab is able to monitor applications by using the
[Prometheus integration](../project/integrations/prometheus.md). Kubernetes container CPU and
memory metrics are collected, and response metrics are also retrieved
from NGINX Ingress.
The [`stable/prometheus`](https://github.com/helm/charts/tree/master/stable/prometheus)
chart is used to install this application with a
[`values.yaml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/vendor/prometheus/values.yaml)
file.
To enable monitoring, install Prometheus into the cluster with the **Install**
button.
### Crossplane
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/34702) in GitLab 12.5 for project-level clusters.
[Crossplane](https://crossplane.github.io/docs/v0.9/) is a multi-cloud control plane
to help you manage applications and infrastructure across multiple clouds. It extends the
Kubernetes API using:
- Custom resources.
- Controllers that watch those custom resources.
Crossplane allows provisioning and lifecycle management of infrastructure components
across cloud providers in a uniform manner by abstracting cloud provider-specific
configurations.
The Crossplane GitLab-managed application:
- Installs Crossplane with a provider of choice on a Kubernetes cluster attached to the
project repository.
- Can then be used to provision infrastructure or managed applications such as
PostgreSQL (for example, CloudSQL on GCP or RDS on AWS) and other services
required by the application, through the Auto DevOps pipeline.
The [`alpha/crossplane`](https://github.com/crossplane/crossplane/tree/v0.4.1/cluster/charts/crossplane) chart v0.4.1 is used to
install Crossplane with the
[`values.yaml`](https://github.com/crossplane/crossplane/blob/master/cluster/charts/crossplane/values.yaml.tmpl)
file.
For information about configuring Crossplane installed on the cluster, see
[Crossplane configuration](crossplane.md).
### Elastic Stack
> Introduced in GitLab 12.7 for project- and group-level clusters.
[Elastic Stack](https://www.elastic.co/elastic-stack) is a complete end-to-end
log analysis solution that helps you search, analyze, and visualize logs
generated from different machines.
GitLab can gather logs from pods in your cluster. Filebeat runs as a DaemonSet
on each node in your cluster, and ships container logs to Elasticsearch for
querying. GitLab then connects to Elasticsearch for logs, instead of the
Kubernetes API, giving you access to more advanced querying capabilities. Log
data is deleted after 30 days, using [Curator](https://www.elastic.co/guide/en/elasticsearch/client/curator/5.5/about.html).
The Elastic Stack cluster application is intended as a log aggregation solution
and is not related to our [Advanced Search](../search/advanced_search.md)
functionality, which uses a separate Elasticsearch cluster.
To enable log shipping:
1. Ensure your cluster contains at least three nodes of instance types larger
than `f1-micro`, `g1-small`, or `n1-standard-1`.
1. Navigate to **Operations > Kubernetes**.
1. In **Kubernetes Cluster**, select a cluster.
1. In the **Applications** section, find **Elastic Stack**, and then select
**Install**.
The [`gitlab/elastic-stack`](https://gitlab.com/gitlab-org/charts/elastic-stack)
chart is used to install this application with a
[`values.yaml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/vendor/elastic_stack/values.yaml)
file. The chart deploys three identical Elasticsearch pods, which can't be
colocated, and each requires one CPU and 2 GB of RAM, making them
incompatible with clusters containing fewer than three nodes or consisting of
`f1-micro`, `g1-small`, `n1-standard-1`, or `*-highcpu-2` instance types.
#### Optional: deploy Kibana to perform advanced queries
If you are an advanced user and have direct access to your Kubernetes cluster
using `kubectl` and `helm`, you can deploy Kibana manually. The following assumes
that `helm` has been [initialized](https://v2.helm.sh/docs/helm/) with `helm init`.
Save the following to `kibana.yml`:
```yaml
elasticsearch:
enabled: false
filebeat:
enabled: false
kibana:
enabled: true
elasticsearchHosts: http://elastic-stack-elasticsearch-master.gitlab-managed-apps.svc.cluster.local:9200
```
Then install it on your cluster:
```shell
helm repo add gitlab https://charts.gitlab.io
helm install --name kibana gitlab/elastic-stack --values kibana.yml
```
To access Kibana, forward the port to your local machine:
```shell
kubectl port-forward svc/kibana-kibana 5601:5601
```
Then, you can visit Kibana at `http://localhost:5601`.
## Upgrading applications
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/24789) in GitLab 11.8.
The applications below can be upgraded.
| Application | GitLab version |
| ----------- | -------------- |
| GitLab Runner | 11.8+ |
To upgrade an application:
1. For a:
- [Project-level cluster](../project/clusters/index.md),
navigate to your project's **Operations > Kubernetes**.
- [Group-level cluster](../group/clusters/index.md),
navigate to your group's **Kubernetes** page.
1. Select your cluster.
1. If an upgrade is available, the **Upgrade** button is displayed. Click the button to upgrade.
Upgrades reset values back to the values built into the `runner` chart, plus the values set by
[`values.yaml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/vendor/runner/values.yaml).
## Uninstalling applications
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/60665) in GitLab 11.11.
The applications below can be uninstalled.
| Application | GitLab version | Notes |
| ----------- | -------------- | ----- |
| cert-manager | 12.2+ | The associated private key is deleted and cannot be restored. Deployed applications continue to use HTTPS, but certificates aren't renewed. Before uninstalling, you may want to [back up your configuration](https://cert-manager.io/docs/tutorials/backup/) or [revoke your certificates](https://letsencrypt.org/docs/revoking/). |
| GitLab Runner | 12.2+ | Any running pipelines are canceled. |
| Helm | 12.2+ | The associated Tiller pod, the `gitlab-managed-apps` namespace, and all of its resources are deleted and cannot be restored. |
| Ingress | 12.1+ | The associated load balancer and IP are deleted and cannot be restored. Furthermore, it can only be uninstalled if JupyterHub is not installed. |
| JupyterHub | 12.1+ | All data not committed to GitLab are deleted and cannot be restored. |
| Knative | 12.1+ | The associated IP address is deleted and cannot be restored. |
| Prometheus | 11.11+ | All data are deleted and cannot be restored. |
| Crossplane | 12.5+ | All data are deleted and cannot be restored. |
| Elastic Stack | 12.7+ | All data are deleted and cannot be restored. |
| Sentry | 12.6+ | The PostgreSQL persistent volume remains and should be manually removed for complete uninstall. |
To uninstall an application:
1. For a:
- [Project-level cluster](../project/clusters/index.md),
navigate to your project's **Operations > Kubernetes**.
- [Group-level cluster](../group/clusters/index.md),
navigate to your group's **Kubernetes** page.
1. Select your cluster.
1. Click the **Uninstall** button for the application.
Support for uninstalling all applications is planned for progressive rollout.
To follow progress, see the [relevant epic](https://gitlab.com/groups/gitlab-org/-/epics/1201).
## Troubleshooting applications
Applications can fail with the following error:
```plaintext
Error: remote error: tls: bad certificate
```
To avoid installation errors:
- Before starting the installation of applications, make sure that time is synchronized
between your GitLab server and your Kubernetes cluster.
- Ensure certificates are not out of sync. When installing applications, GitLab
expects a new cluster with no previous installation of Helm.
You can confirm that the certificates match by using `kubectl`:
```shell
kubectl get configmaps/values-content-configuration-ingress -n gitlab-managed-apps -o \
"jsonpath={.data['cert\.pem']}" | base64 -d > a.pem
kubectl get secrets/tiller-secret -n gitlab-managed-apps -o "jsonpath={.data['ca\.crt']}" | base64 -d > b.pem
diff a.pem b.pem
```
### Error installing managed apps on EKS cluster
If you're using a managed cluster on AWS EKS, and you are not able to install some of the managed
apps, consider checking the logs.
You can check the logs by running the following commands:
```shell
kubectl get pods --all-namespaces
kubectl get services --all-namespaces
```
If you get the `Failed to assign an IP address to container` error, it's probably due to the
instance type you've specified in the AWS configuration.
The number and size of nodes might not provide enough IP addresses to run or install those pods.
For reference, all the AWS instance IP limits are found
[in this AWS repository on GitHub](https://github.com/aws/amazon-vpc-cni-k8s/blob/master/pkg/awsutils/vpc_ip_resource_limit.go) (search for `InstanceENIsAvailable`).
### Unable to install Prometheus
Prometheus installation can fail with the following error:
```shell
# kubectl -n gitlab-managed-apps logs install-prometheus
...
Error: Could not get apiVersions from Kubernetes: unable to retrieve the complete list of server APIs: admission.certmanager.k8s.io/v1beta1: the server is currently unable to handle the request
```
This is a bug that was introduced in Helm `2.15` and fixed in `3.0.2`. As a workaround,
ensure [`cert-manager`](#cert-manager) is installed successfully prior to installing Prometheus.
### Unable to create a Persistent Volume Claim with DigitalOcean
Trying to create additional block storage volumes might lead to the following error when using DigitalOcean:
```plaintext
Server requested
[Warning] pod has unbound immediate PersistentVolumeClaims (repeated 2 times)
[Normal] pod didn't trigger scale-up (it wouldn't fit if a new node is added):
Spawn failed: Timeout
```
This is due to DigitalOcean imposing limits on the creation of additional block storage volumes.
[Learn more about DigitalOcean Block Storage Volumes limits.](https://www.digitalocean.com/docs/volumes/#limits)
## Take ownership of your GitLab Managed Apps
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/327803) in GitLab 13.12.
With the removal of the One-click install method in GitLab 14.0,
the **Applications** tab (under your project's **Operations > Kubernetes**)
will no longer be displayed:
@ -1664,10 +1117,10 @@ If you choose to keep using Helm v2 (B), follow the steps below to manage your a
### Cluster integrations
Some applications were not only installed in your cluster by GitLab through
Managed Apps but were also directly integrated with GitLab. If you had one of
these applications installed before GitLab 14.0, then a corresponding [cluster
integration](integrations.md) has been automatically enabled:
- [Prometheus cluster integration](integrations.md#prometheus-cluster-integration)
- [Elastic Stack cluster integration](integrations.md#elastic-stack-cluster-integration)

View File

@ -6,7 +6,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Crossplane configuration **(FREE)**
After [installing](applications.md#install-crossplane-using-gitlab-cicd) Crossplane, you must configure it for use.
The process of configuring Crossplane includes:
1. [Configure RBAC permissions](#configure-rbac-permissions).

View File

@ -14,6 +14,17 @@ To enable cluster integrations, first add a Kubernetes cluster to a GitLab
[group](../group/clusters/index.md#group-level-kubernetes-clusters) or
[instance](../instance/clusters/index.md).
You can install your applications manually as shown in the following sections, or use the
[Cluster management project template](management_project_template.md) that automates the
installation.
However, the [Cluster management project template](management_project_template.md) still
requires you to manually complete the last step of these sections:
[Enable Prometheus integration for your cluster](#enable-prometheus-integration-for-your-cluster)
or [Enable Elastic Stack integration for your cluster](#enable-elastic-stack-integration-for-your-cluster),
depending on which application you are installing. We plan to automate this step in the future;
see the [open issue](https://gitlab.com/gitlab-org/gitlab/-/issues/326565).
## Prometheus cluster integration
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55244) in GitLab 13.11.
@ -43,10 +54,8 @@ it up using [Helm](https://helm.sh/) as follows:
kubectl create ns gitlab-managed-apps
# Download Helm chart values that are compatible with the requirements above.
# These are included in the Cluster Management project template.
wget https://gitlab.com/gitlab-org/project-templates/cluster-management/-/raw/master/applications/prometheus/values.yaml
# Add the Prometheus community Helm chart repository
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
@ -103,10 +112,8 @@ running:
kubectl create namespace gitlab-managed-apps
# Download Helm chart values that are compatible with the requirements above.
# These are included in the Cluster Management project template.
wget https://gitlab.com/gitlab-org/project-templates/cluster-management/-/raw/master/applications/elastic-stack/values.yaml
# Add the GitLab Helm chart repository
helm repo add gitlab https://charts.gitlab.io

View File

@ -16,7 +16,7 @@ privileges.
This can be useful for:
- Creating pipelines to install cluster-wide applications into your cluster, see [management project template](management_project_template.md) for details.
- Any jobs that require `cluster-admin` privileges.
## Permissions

View File

@ -0,0 +1,86 @@
---
stage: Configure
group: Configure
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Cluster Management Project Template **(FREE)**
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/25318) in GitLab 12.10 with Helmfile support via Helm v2.
> - [Improved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/63577) in GitLab 14.0 with Helmfile support via Helm v3 instead, and a much more flexible usage of Helmfile. This introduces breaking changes that are detailed below.
This [GitLab built-in project template](../project/working_with_projects.md#built-in-templates)
provides a quicker start for users interested in managing cluster
applications via [Helm v3](https://helm.sh/) charts, taking advantage of the
[Helmfile](https://github.com/roboll/helmfile) utility client. The template consists of pre-configured apps that
help you get started quickly with various GitLab features. You still have the flexibility to remove the ones you do not
need, or to add new ones that are not built-in.
## How to use this template
1. Create a new project, choosing "GitLab Cluster Management" from the list of [built-in project templates](../project/working_with_projects.md#built-in-templates).
1. Make this project a [cluster management project](management_project.md).
1. If you used the [GitLab Managed Apps](applications.md), refer to
[Migrating from GitLab Managed Apps](migrating_from_gma_to_project_template.md).
### Components
In the repository of the newly created project, you will find:
- A predefined [`.gitlab-ci.yml`](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/blob/master/.gitlab-ci.yml)
file, with a CI pipeline already configured.
- A main [`helmfile.yaml`](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/blob/master/helmfile.yaml) to toggle which applications you would like to manage.
- An `applications` directory with a `helmfile.yaml` configured for each application GitLab provides.
#### The `.gitlab-ci.yml` file
The base image used in your pipeline is built by the [cluster-applications](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications)
project. This image consists of a set of Bash utility scripts to support [Helm v3 releases](https://helm.sh/docs/intro/using_helm/#three-big-concepts):
- `gl-fail-if-helm2-releases-exist {namespace}`: It tries to detect whether you have apps deployed through Helm v2
releases for a given namespace. If so, it fails the pipeline and asks you to manually
[migrate your Helm v2 releases to Helm v3](https://helm.sh/docs/topics/v2_v3_migration/).
- `gl-ensure-namespace {namespace}`: It creates the given namespace if it does not exist and adds the necessary label
for the [Cilium](https://github.com/cilium/cilium/) app network policies to work.
- `gl-adopt-resource-with-helm-v3 {arguments}`: Used only internally in the [cert-manager's](https://cert-manager.io/) Helmfile to
facilitate the GitLab Managed Apps adoption.
- `gl-adopt-crds-with-helm-v3 {arguments}`: Used only internally in the [cert-manager's](https://cert-manager.io/) Helmfile to
facilitate the GitLab Managed Apps adoption.
- `gl-helmfile {arguments}`: A thin wrapper that triggers the [Helmfile](https://github.com/roboll/helmfile) command.
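For orientation, here's a minimal job sketch that chains these scripts the way the template's pipeline does. The image tag, stage name, and the trailing Helmfile arguments are illustrative, not taken verbatim from the template:

```yaml
apply:
  stage: deploy
  # Illustrative image reference; the template pins a specific cluster-applications tag.
  image: "registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:latest"
  script:
    # Abort if any Helm v2 releases remain in the target namespace
    - gl-fail-if-helm2-releases-exist gitlab-managed-apps
    # Create the namespace (and the Cilium label) if needed
    - gl-ensure-namespace gitlab-managed-apps
    # Apply the Helmfile state to the cluster
    - gl-helmfile --file ./helmfile.yaml apply
```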
#### The main `helmfile.yaml` file
This file has a list of paths to other Helmfiles for each app. They're all commented out by default, so you must uncomment
the paths for the apps that you would like to manage, as in the sketch below.

By default, each `helmfile.yaml` in these sub-paths has the attribute `installed: true`, which means that every time
the pipeline runs, Helmfile tries to either install or update your apps according to the current state of your
cluster and Helm releases. If you change this attribute to `installed: false`, Helmfile tries to uninstall this app
from your cluster. [Read more](https://github.com/roboll/helmfile) about how Helmfile works.
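As an illustrative fragment (the app names here are only examples), the file looks roughly like this, using Helmfile's standard `helmfiles:` list of sub-paths:

```yaml
# helmfile.yaml: uncomment the paths of the apps you want Helmfile to manage
helmfiles:
  - path: applications/cert-manager/helmfile.yaml
  # - path: applications/prometheus/helmfile.yaml
  # - path: applications/vault/helmfile.yaml
```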
Furthermore, each app has an `applications/{app}/values.yaml` file. This is the
place where you can define default values for your app's Helm chart. Some apps already have defaults
predefined by GitLab.
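For example, a minimal `applications/gitlab-runner/values.yaml` sketch might set chart values like the following (the keys shown are standard `gitlab-runner` chart values; the URL and token are placeholders):

```yaml
# Chart values merged into the GitLab Runner release
gitlabUrl: https://gitlab.example.com/
runnerRegistrationToken: "<your-registration-token>"
concurrent: 4
```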
#### Built-in applications
The built-in applications are intended to provide an easy way to get started with various Kubernetes-oriented GitLab features.
The [built-in supported applications](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/tree/master/applications) are:
- Apparmor
- Cert-manager
- Cilium
- Elastic Stack
- Falco
- Fluentd
- GitLab Runner
- Ingress
- Prometheus
- Sentry
- Vault
### Migrating from GitLab Managed Apps
If you used GitLab Managed Apps (either One-Click or CI/CD install), read how to
[migrate from GitLab Managed Apps to the project template](migrating_from_gma_to_project_template.md).

View File

@ -0,0 +1,95 @@
---
stage: Configure
group: Configure
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Migrating from GitLab Managed Apps to a management project template
The [GitLab Managed Apps](applications.md) are deprecated in GitLab 14.0. To migrate to the new way of managing them:
1. Read how the [management project template](management_project_template.md) works, and
create a new project based on the "GitLab Cluster Management" template.
1. Detect apps deployed through Helm v2 releases by using the pre-configured [`.gitlab-ci.yml`](management_project_template.md#the-gitlab-ciyml-file) file:
- If you overwrote the default GitLab Managed Apps namespace, edit `.gitlab-ci.yml`,
and make sure the script receives the correct namespace as an argument:
```yaml
script:
- gl-fail-if-helm2-releases-exist <your_custom_namespace>
```
- If you kept the default name (`gitlab-managed-apps`), then the script is already
set up.
Either way, [run a pipeline manually](../../ci/pipelines/index.md#run-a-pipeline-manually) and read the logs of the
`detect-helm2-releases` job to know if you have any Helm v2 releases and which ones they are.
1. If you have no Helm v2 releases, skip this step. Otherwise, follow the official Helm docs on
[how to migrate from Helm v2 to Helm v3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/),
and clean up the Helm v2 releases after you are confident that they have been successfully migrated.
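As a sketch of that migration, the official [`helm-2to3`](https://github.com/helm/helm-2to3) plugin is typically used (the release name `runner` is just an example):

```shell
# Install the migration plugin
helm plugin install https://github.com/helm/helm-2to3
# Convert one Helm v2 release to a Helm v3 release
helm 2to3 convert runner
# Once everything is verified, remove leftover Helm v2 data
helm 2to3 cleanup
```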
1. At this point, you should have only Helm v3 releases.
Uncomment from the main [`./helmfile.yaml`](management_project_template.md#the-main-helmfileyaml-file) the paths for the
applications that you would like to manage with this project. Although you could uncomment all the ones you want to
manage at once, we recommend you repeat the following steps separately for each app, so you do not get lost during
the process.
1. Edit the associated `applications/{app}/helmfile.yaml` to match the chart version currently deployed
for your app. Take a GitLab Runner Helm v3 release as an example:
The following command lists the releases and their versions:
```shell
helm ls -n gitlab-managed-apps
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
runner gitlab-managed-apps 1 2021-06-09 19:36:55.739141644 +0000 UTC deployed gitlab-runner-0.28.0 13.11.0
```
Take the version from the `CHART` column, which is in the format `{chart_name}-{chart_version}`,
then edit the `version:` attribute in the `./applications/gitlab-runner/helmfile.yaml`, so that it matches the version
you have currently deployed. This is a safe step to avoid upgrading versions during this migration.
Make sure you replace `gitlab-managed-apps` in the above command if your apps are deployed to a different
namespace.
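For illustration, the edited Helmfile fragment could look like this (the release and chart names follow the example above; treat the exact layout as a sketch rather than the template's verbatim contents):

```yaml
# applications/gitlab-runner/helmfile.yaml
releases:
  - name: runner
    namespace: gitlab-managed-apps
    chart: gitlab/gitlab-runner
    version: 0.28.0  # pin to the CHART version reported by `helm ls`
    values:
      - values.yaml
```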
1. Edit the `applications/{app}/values.yaml` associated with your app to match the currently
deployed values. For example, for GitLab Runner:
1. Copy the output of the following command (it might be big):
```shell
helm get values runner -n gitlab-managed-apps -a --output yaml
```
1. Overwrite `applications/gitlab-runner/values.yaml` with the output of the previous command.
This step guarantees that no unexpected default values overwrite your currently deployed values.
For instance, your GitLab Runner could have its `gitlabUrl` or `runnerRegistrationToken` overwritten by mistake.
1. Some apps require special attention:
- Ingress: Due to an existing [chart issue](https://github.com/helm/charts/pull/13646), you might see
`spec.clusterIP: Invalid value` when trying to run the [`./gl-helmfile`](management_project_template.md#the-gitlab-ciyml-file)
command. To work around this, after overwriting the release values in `applications/ingress/values.yaml`,
you might need to overwrite all the occurrences of `omitClusterIP: false`, setting it to `omitClusterIP: true`
(see the sketch after this list).
Another approach could be to collect these IPs by running `kubectl get services -n gitlab-managed-apps`
and then overwriting each `ClusterIP` that it complains about with the value you got from that command.
- Vault: This application introduces a breaking change from the chart we used in Helm v2 to the chart
used in Helm v3. The only way to integrate it with this Cluster Management Project is to uninstall this app and accept the
chart version proposed in `applications/vault/values.yaml`.
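For the Ingress workaround mentioned above, the overridden values could look like the following sketch. The exact keys depend on the chart version, so treat these paths as an assumption to verify against your `values.yaml`:

```yaml
# applications/ingress/values.yaml (hypothetical fragment)
controller:
  service:
    omitClusterIP: true
defaultBackend:
  service:
    omitClusterIP: true
```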
1. After following all the previous steps, [run a pipeline manually](../../ci/pipelines/index.md#run-a-pipeline-manually)
and watch the `apply` job logs to check whether your applications were detected and installed, and whether they got any
unexpected updates.
Some annotation checksums are expected to be updated, as well as this attribute:
```diff
--- heritage: Tiller
+++ heritage: Helm
```
After getting a successful pipeline, repeat these steps for any other deployed apps
you want to manage with the Cluster Management Project.

View File

@ -9,7 +9,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/4752) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 11.0.
> - [Moved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26383) to [GitLab Free](https://about.gitlab.com/pricing/) 12.9.
GitLab makes it easy to view the logs of running pods in
[connected Kubernetes clusters](index.md). By displaying the logs directly in GitLab
in the **Log Explorer**, developers can avoid managing console tools or jumping
to a different interface. The **Log Explorer** interface provides a set of filters
@ -18,10 +18,11 @@ above the log file data, depending on your configuration:
![Pod logs](img/kubernetes_pod_logs_v12_10.png)
- **Namespace** - Select the environment to display. Users with Maintainer or
greater [permissions](../../permissions.md) can also see pods in the
`gitlab-managed-apps` namespace.
- **Search** - Only available if the [Elastic Stack integration](../../clusters/integrations.md#elastic-stack-cluster-integration) is enabled.
- **Select time range** - Select the range of time to display.
Only available if the [Elastic Stack integration](../../clusters/integrations.md#elastic-stack-cluster-integration) is enabled.
- **Scroll to bottom** **{scroll_down}** - Scroll to the end of the displayed logs.
- **Refresh** **{retry}** - Reload the displayed logs.

View File

@ -20,7 +20,6 @@ The following steps are recommended to install and use Container Host Security t
1. Install and configure an Ingress node:
- [Install the Ingress node via CI/CD (Cluster Management Project)](../../../../clusters/applications.md#install-ingress-using-gitlab-cicd).
- [Determine the external endpoint via the manual method](../../../../clusters/applications.md#determining-the-external-endpoint-manually).
- Navigate to the Kubernetes page and enter the [DNS address for the external endpoint](../../index.md#base-domain)
into the **Base domain** field on the **Details** tab. Save the changes to the Kubernetes
cluster.

View File

@ -20,7 +20,6 @@ The following steps are recommended to install and use Container Network Securit
1. Install and configure an Ingress node:
- [Install the Ingress node via CI/CD (Cluster Management Project)](../../../../clusters/applications.md#install-ingress-using-gitlab-cicd).
- [Determine the external endpoint via the manual method](../../../../clusters/applications.md#determining-the-external-endpoint-manually).
- Navigate to the Kubernetes page and enter the [DNS address for the external endpoint](../../index.md#base-domain)
into the **Base domain** field on the **Details** tab. Save the changes to the Kubernetes
cluster.

View File

@ -17,7 +17,7 @@ in the GitLab interface.
There are two ways to set up Prometheus integration, depending on where your apps are running:
- For deployments on Kubernetes, GitLab can be [integrated with an in-cluster Prometheus](#prometheus-cluster-integration).
- For other deployment targets, [specify the Prometheus server](#manual-configuration-of-prometheus).
Once enabled, GitLab detects metrics from known services in the
@ -27,137 +27,13 @@ Once enabled, GitLab detects metrics from known services in the
## Enabling Prometheus Integration
### Prometheus cluster integration
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55244) in GitLab 13.11.
> - [Replaced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/62725) the Prometheus cluster applications in GitLab 14.0.
**Deprecated:** Managed Prometheus on Kubernetes is deprecated, and
scheduled for removal in [GitLab
14.0](https://gitlab.com/groups/gitlab-org/-/epics/4280).
GitLab can seamlessly deploy and manage Prometheus on a
[connected Kubernetes cluster](../clusters/index.md), to help you monitor your apps.
#### Requirements
- A [connected Kubernetes cluster](../clusters/index.md)
#### Getting started
After you have a connected Kubernetes cluster, you can deploy a managed Prometheus with a single click.
1. Go to the **Operations > Kubernetes** page to view your connected clusters
1. Select the cluster you would like to deploy Prometheus to
1. Click the **Install** button to deploy Prometheus to the cluster
![Managed Prometheus Deploy](img/prometheus_deploy.png)
#### About managed Prometheus deployments
Prometheus is deployed into the `gitlab-managed-apps` namespace, using the
[official Helm chart](https://github.com/helm/charts/tree/master/stable/prometheus).
Prometheus is only accessible in the cluster, with GitLab communicating through the
[Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/).
The Prometheus server
[automatically detects and monitors](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config)
nodes, pods, and endpoints. To configure a resource to be monitored by Prometheus,
set the following [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/):
- `prometheus.io/scrape` to `true` to enable monitoring of the resource.
- `prometheus.io/port` to define the port of the metrics endpoint.
- `prometheus.io/path` to define the path of the metrics endpoint. Defaults to `/metrics`.
CPU and Memory consumption is monitored, but requires
[naming conventions](prometheus_library/kubernetes.md#specifying-the-environment)
to determine the environment. If you are using
[Auto DevOps](../../../topics/autodevops/index.md), this is handled automatically.
##### Example of Kubernetes service annotations and labels
As an example, to activate Prometheus monitoring of a service:
1. Add at least this annotation: `prometheus.io/scrape: 'true'`.
1. Add two labels so GitLab can retrieve metrics dynamically for any environment:
- `application: ${CI_ENVIRONMENT_SLUG}`
- `release: ${CI_ENVIRONMENT_SLUG}`
1. Create a dynamic PromQL query. For example, a query like
`temperature{application="{{ci_environment_slug}}",release="{{ci_environment_slug}}"}` to either:
- Add [custom metrics](../../../operations/metrics/index.md#adding-custom-metrics).
- Add [custom dashboards](../../../operations/metrics/dashboards/index.md).
The following is a service definition to accomplish this:
```yaml
---
# Service
apiVersion: v1
kind: Service
metadata:
name: service-${CI_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}
# === Prometheus annotations ===
annotations:
prometheus.io/scrape: 'true'
labels:
application: ${CI_ENVIRONMENT_SLUG}
release: ${CI_ENVIRONMENT_SLUG}
# === End of Prometheus ===
spec:
selector:
app: ${CI_PROJECT_NAME}
ports:
- port: ${EXPOSED_PORT}
targetPort: ${CONTAINER_PORT}
```
#### Access the UI of a Prometheus managed application in Kubernetes
You can connect directly to Prometheus, and view the Prometheus user interface, when
using a Prometheus managed application in Kubernetes:
1. Find the name of the Prometheus pod in the user interface of your Kubernetes
provider, such as GKE, or by running the following `kubectl` command in your
terminal:
```shell
kubectl get pods -n gitlab-managed-apps | grep 'prometheus-prometheus-server'
```
The command should return a result like the following example, where
`prometheus-prometheus-server-55b4bd64c9-dpc6b` is the name of the Prometheus pod:
```plaintext
gitlab-managed-apps prometheus-prometheus-server-55b4bd64c9-dpc6b 2/2 Running 0 71d
```
1. Run a `kubectl port-forward` command. In the following example, `9090` is the
Prometheus server's listening port:
```shell
kubectl port-forward prometheus-prometheus-server-55b4bd64c9-dpc6b 9090:9090 -n gitlab-managed-apps
```
The `port-forward` command forwards all requests sent to your system's `9090` port
to the `9090` port of the Prometheus pod. If the `9090` port on your system is used
by another application, you can change the port number before the colon to your
desired port. For example, to forward port `8080` of your local system, change the
command to:
```shell
kubectl port-forward prometheus-prometheus-server-55b4bd64c9-dpc6b 8080:9090 -n gitlab-managed-apps
```
1. Open `localhost:9090` in your browser to display the Prometheus user interface.
#### Script access to Prometheus
You can script the access to Prometheus, extracting the name of the pod automatically like this:
```shell
POD_INFORMATION=$(kubectl get pods -n gitlab-managed-apps | grep 'prometheus-prometheus-server')
POD_NAME=$(echo $POD_INFORMATION | awk '{print $1;}')
kubectl port-forward $POD_NAME 9090:9090 -n gitlab-managed-apps
```
GitLab can query an in-cluster Prometheus for your metrics.
See [Prometheus cluster integration](../../clusters/integrations.md#prometheus-cluster-integration) for details.
### Manual configuration of Prometheus
@ -219,12 +95,12 @@ to integrate with.
### Precedence with multiple Prometheus configurations
Although you can enable both a [manual configuration](#manual-configuration-of-prometheus)
and [cluster integration](#prometheus-cluster-integration) of Prometheus, you
can use only one:
- If you have enabled a
[Prometheus manual configuration](#manual-configuration-of-prometheus)
and a [Prometheus cluster integration](#prometheus-cluster-integration),
the manual configuration takes precedence and is used to run queries from
[custom dashboards](../../../operations/metrics/dashboards/index.md) and
[custom metrics](../../../operations/metrics/index.md#adding-custom-metrics).

View File

@ -33,7 +33,7 @@ integration services must be enabled.
Prometheus needs to be deployed into the cluster and configured properly in order to gather Kubernetes metrics. GitLab supports two methods for doing so:
- GitLab [integrates with Kubernetes](../../clusters/index.md), and can [query a Prometheus in a connected cluster](../../../clusters/integrations.md#prometheus-cluster-integration). The in-cluster Prometheus can be configured to automatically collect application metrics from your cluster.
- To configure your own Prometheus server, you can follow the [Prometheus documentation](https://prometheus.io/docs/introduction/overview/).
## Specifying the Environment

View File

@ -6901,15 +6901,15 @@ msgstr ""
msgid "ClusterAgent|You have insufficient permissions to delete this cluster agent"
msgstr ""
msgid "ClusterApplicationsRemoved|One-click application management was removed in GitLab 14.0. Your applications are still installed in your cluster, and integrations continue working."
msgstr ""
msgid "ClusterIntegration|%{appList} was successfully installed on your Kubernetes cluster"
msgstr ""
msgid "ClusterIntegration|%{boldStart}Note:%{boldEnd} Requires Ingress to be installed."
msgstr ""
msgid "ClusterIntegration|%{externalIp}.nip.io"
msgstr ""
msgid "ClusterIntegration|%{linkStart}More information%{linkEnd}"
msgstr ""
@ -6973,9 +6973,6 @@ msgstr ""
msgid "ClusterIntegration|Allows GitLab to query a specifically configured in-cluster Prometheus for metrics."
msgstr ""
msgid "ClusterIntegration|Alternatively, "
msgstr ""
msgid "ClusterIntegration|Amazon EKS"
msgstr ""
@ -7861,9 +7858,6 @@ msgstr ""
msgid "ClusterIntegration|access to Google Kubernetes Engine"
msgstr ""
msgid "ClusterIntegration|can be used instead of a custom domain. "
msgstr ""
msgid "ClusterIntegration|installed via %{linkStart}Cloud Run%{linkEnd}"
msgstr ""
@ -25487,9 +25481,6 @@ msgstr ""
msgid "Project path"
msgstr ""
msgid "Project scanning help page"
msgstr ""
msgid "Project security status"
msgstr ""
@ -34818,21 +34809,6 @@ msgstr ""
msgid "UnscannedProjects|60 or more days"
msgstr ""
msgid "UnscannedProjects|Default branch scanning by project"
msgstr ""
msgid "UnscannedProjects|Out of date"
msgstr ""
msgid "UnscannedProjects|Project scanning"
msgstr ""
msgid "UnscannedProjects|Untested"
msgstr ""
msgid "UnscannedProjects|Your projects are up do date! Nice job!"
msgstr ""
msgid "Unschedule job"
msgstr ""
@ -38000,6 +37976,9 @@ msgstr ""
msgid "can't be enabled because signed commits are required for this project"
msgstr ""
msgid "can't be the same as the source project"
msgstr ""
msgid "can't include: %{invalid_storages}"
msgstr ""

View File

@ -110,7 +110,6 @@
"css-loader": "^2.1.1",
"d3": "^5.16.0",
"d3-sankey": "^0.12.3",
"d3-scale": "^2.2.2",
"d3-selection": "^1.2.0",
"dateformat": "^4.5.1",
"deckar01-task_list": "^2.3.1",

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
FactoryBot.define do
factory :ci_job_token_project_scope_link, class: 'Ci::JobToken::ProjectScopeLink' do
association :source_project, factory: :project
association :target_project, factory: :project
association :added_by, factory: :user
end
end

View File

@ -66,6 +66,7 @@ RSpec.describe 'factories' do
# associations must be unique and cannot be reused, or the factory default
# is being mutated.
skip_factory_defaults = %i[
ci_job_token_project_scope_link
evidence
exported_protected_branch
fork_network_member

View File

@ -1,22 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
require_relative '../../../../spec/features/clusters/installing_applications_shared_examples'
RSpec.describe 'Instance-level Cluster Applications', :js do
include GoogleApi::CloudPlatformHelpers
let(:user) { create(:admin) }
before do
sign_in(user)
gitlab_enable_admin_mode_sign_in(user)
end
describe 'Installing applications' do
include_examples "installing applications on a cluster" do
let(:cluster_path) { admin_cluster_path(cluster) }
let(:cluster_factory_args) { [:instance] }
end
end
end

View File

@ -15,10 +15,9 @@ RSpec.describe 'Contributions Calendar', :js do
issue_title = 'Bug in old browser'
issue_params = { title: issue_title }
def get_cell_color_selector(contributions)
activity_colors = ["#ededed", "rgb(172, 213, 242)", "rgb(127, 168, 201)", "rgb(82, 123, 160)", "rgb(37, 78, 119)"]
def get_cell_level_selector(contributions)
# We currently don't actually test the cases with contributions >= 20
activity_colors_index =
activity_level_index =
if contributions > 0 && contributions < 10
1
elsif contributions >= 10 && contributions < 20
@ -31,7 +30,7 @@ RSpec.describe 'Contributions Calendar', :js do
0
end
".user-contrib-cell[fill='#{activity_colors[activity_colors_index]}']"
".user-contrib-cell:not(.contrib-legend)[data-level='#{activity_level_index}']"
end
def get_cell_date_selector(contributions, date)
@ -42,7 +41,7 @@ RSpec.describe 'Contributions Calendar', :js do
"#{contributions} #{'contribution'.pluralize(contributions)}"
end
"#{get_cell_color_selector(contributions)}[title='#{contribution_text}<br /><span class=\"gl-text-gray-300\">#{date}</span>']"
"#{get_cell_level_selector(contributions)}[title='#{contribution_text}<br /><span class=\"gl-text-gray-300\">#{date}</span>']"
end
def push_code_contribution
@ -137,7 +136,7 @@ RSpec.describe 'Contributions Calendar', :js do
include_context 'visit user page'
it 'displays calendar activity square for 1 contribution', :sidekiq_might_not_need_inline do
expect(find('#js-overview')).to have_selector(get_cell_color_selector(contribution_count), count: 1)
expect(find('#js-overview')).to have_selector(get_cell_level_selector(contribution_count), count: 1)
today = Date.today.strftime(date_format)
expect(find('#js-overview')).to have_selector(get_cell_date_selector(contribution_count, today), count: 1)
@ -187,7 +186,7 @@ RSpec.describe 'Contributions Calendar', :js do
include_context 'visit user page'
it 'displays calendar activity squares for both days', :sidekiq_might_not_need_inline do
expect(find('#js-overview')).to have_selector(get_cell_color_selector(1), count: 2)
expect(find('#js-overview')).to have_selector(get_cell_level_selector(1), count: 2)
end
it 'displays calendar activity square for yesterday', :sidekiq_might_not_need_inline do

View File

@ -31,30 +31,6 @@ RSpec.describe 'Clusterable > Show page' do
expect(page).to have_content('Kubernetes cluster was successfully updated.')
end
context 'when there is a cluster with ingress and external ip', :js do
before do
cluster.create_application_ingress!(external_ip: '192.168.1.100')
visit cluster_path
end
it 'shows help text with the domain as an alternative to custom domain', :js do
within '.js-cluster-details-form' do
expect(find(cluster_ingress_help_text_selector).text).to include('192.168.1.100')
end
end
end
context 'when there is no ingress' do
it 'alternative to custom domain is not shown' do
visit cluster_path
within '.js-cluster-details-form' do
expect(page).not_to have_selector(cluster_ingress_help_text_selector)
end
end
end
it 'does not show the environments tab' do
visit cluster_path

View File

@ -27,8 +27,8 @@ RSpec.describe 'Cluster Health board', :js, :kubeclient, :use_clean_rails_memory
expect(page).to have_css('.cluster-health-graphs')
end
context 'no prometheus enabled' do
it 'shows install prometheus message' do
context 'no prometheus available' do
it 'shows enable Prometheus message' do
visit cluster_path
click_link 'Health'
@ -82,12 +82,12 @@ RSpec.describe 'Cluster Health board', :js, :kubeclient, :use_clean_rails_memory
def stub_empty_response
stub_prometheus_request(/prometheus-prometheus-server/, status: 204, body: {})
stub_prometheus_request(/prometheus\/api\/v1/, status: 204, body: {})
stub_prometheus_request(%r{prometheus/api/v1}, status: 204, body: {})
end
def stub_connected
stub_prometheus_request(/prometheus-prometheus-server/, body: prometheus_values_body)
stub_prometheus_request(/prometheus\/api\/v1/, body: prometheus_values_body)
stub_prometheus_request(%r{prometheus/api/v1}, body: prometheus_values_body)
end
end
end

View File

@ -1,252 +0,0 @@
# frozen_string_literal: true
RSpec.shared_examples "installing applications for a cluster" do
before do
# Reduce interval from 10 seconds which is too long for an automated test
stub_const("#{Clusters::ClustersController}::STATUS_POLLING_INTERVAL", 500)
visit cluster_path
end
context 'when cluster is being created' do
let(:cluster) { create(:cluster, :providing_by_gcp, *cluster_factory_args) }
it 'user is unable to install applications' do
expect(page).not_to have_text('Helm')
expect(page).not_to have_text('Install')
end
end
context 'when cluster is created' do
let(:cluster) { create(:cluster, :provided_by_gcp, *cluster_factory_args) }
before do
page.within('.js-edit-cluster-form') do
click_link 'Applications'
end
end
it 'user can install applications' do
wait_for_requests
application_row = '.js-cluster-application-row-ingress'
page.within(application_row) do
expect(page).not_to have_css('.js-cluster-application-install-button[disabled]')
expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Install')
end
end
it 'does not show the Helm application' do
expect(page).not_to have_selector(:css, '.js-cluster-application-row-helm')
end
context 'when user installs Knative' do
context 'on an abac cluster' do
let(:cluster) { create(:cluster, :provided_by_gcp, :rbac_disabled, *cluster_factory_args) }
it 'shows info block and not be installable' do
page.within('.js-cluster-application-row-knative') do
expect(page).to have_css('.rbac-notice')
expect(page.find(:css, '.js-cluster-application-install-button')['disabled']).to eq('true')
end
end
end
context 'on an rbac cluster' do
let(:cluster) { create(:cluster, :provided_by_gcp, *cluster_factory_args) }
it 'does not show callout block and be installable' do
page.within('.js-cluster-application-row-knative') do
expect(page).not_to have_css('p', text: 'You must have an RBAC-enabled cluster', visible: :all)
expect(page).to have_css('.js-cluster-application-install-button:not([disabled])')
end
end
describe 'when user clicks install button' do
before do
allow(ClusterInstallAppWorker).to receive(:perform_async)
allow(ClusterWaitForIngressIpAddressWorker).to receive(:perform_in)
allow(ClusterWaitForIngressIpAddressWorker).to receive(:perform_async)
page.within('.js-cluster-application-row-knative') do
expect(page).to have_css('.js-cluster-application-install-button:not([disabled])')
page.find('.js-knative-domainname').set("domain.example.org")
click_button 'Install'
wait_for_requests
expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Installing')
Clusters::Cluster.last.application_knative.make_installing!
Clusters::Cluster.last.application_knative.make_installed!
Clusters::Cluster.last.application_knative.update_attribute(:external_ip, '127.0.0.1')
end
end
it 'shows status transition' do
page.within('.js-cluster-application-row-knative') do
expect(page).to have_field('Knative Domain Name:', with: 'domain.example.org')
expect(page).to have_css('.js-cluster-application-uninstall-button', exact_text: 'Uninstall')
end
expect(page).to have_content('Knative was successfully installed on your Kubernetes cluster')
expect(page).to have_css('.js-knative-save-domain-button'), exact_text: 'Save changes'
end
it 'can then update the domain' do
page.within('.js-cluster-application-row-knative') do
expect(ClusterPatchAppWorker).to receive(:perform_async)
expect(page).to have_field('Knative Domain Name:', with: 'domain.example.org')
page.find('.js-knative-domainname').set("new.domain.example.org")
click_button 'Save changes'
wait_for_requests
expect(page).to have_field('Knative Domain Name:', with: 'new.domain.example.org')
end
end
end
end
end
context 'when user installs Cert Manager' do
before do
allow(ClusterInstallAppWorker).to receive(:perform_async)
allow(ClusterWaitForIngressIpAddressWorker).to receive(:perform_in)
allow(ClusterWaitForIngressIpAddressWorker).to receive(:perform_async)
end
it 'shows status transition' do
page.within('.js-cluster-application-row-cert_manager') do
click_button 'Install'
wait_for_requests
expect(page).to have_field('Issuer Email', with: cluster.user.email)
expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Installing')
Clusters::Cluster.last.application_cert_manager.make_installing!
expect(page).to have_field('Issuer Email', with: cluster.user.email)
expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Installing')
Clusters::Cluster.last.application_cert_manager.make_installed!
expect(page).to have_field('Issuer Email', with: cluster.user.email)
expect(page).to have_css('.js-cluster-application-uninstall-button', exact_text: 'Uninstall')
end
expect(page).to have_content('Cert-Manager was successfully installed on your Kubernetes cluster')
end
it 'installs with custom email' do
custom_email = 'new_email@example.org'
page.within('.js-cluster-application-row-cert_manager') do
# Wait for the polling to finish
wait_for_requests
page.find('.js-email').set(custom_email)
click_button 'Install'
wait_for_requests
expect(page).to have_field('Issuer Email', with: custom_email)
expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Installing')
Clusters::Cluster.last.application_cert_manager.make_installing!
expect(page).to have_field('Issuer Email', with: custom_email)
expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Installing')
Clusters::Cluster.last.application_cert_manager.make_installed!
expect(page).to have_field('Issuer Email', with: custom_email)
expect(page).to have_css('.js-cluster-application-uninstall-button', exact_text: 'Uninstall')
end
end
end
context 'when user installs Elastic Stack' do
before do
allow(ClusterInstallAppWorker).to receive(:perform_async)
page.within('.js-cluster-application-row-elastic_stack') do
click_button 'Install'
end
wait_for_requests
end
it 'shows status transition' do
page.within('.js-cluster-application-row-elastic_stack') do
expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Installing')
Clusters::Cluster.last.application_elastic_stack.make_installing!
expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Installing')
Clusters::Cluster.last.application_elastic_stack.make_installed!
expect(page).to have_css('.js-cluster-application-uninstall-button', exact_text: 'Uninstall')
end
expect(page).to have_content('Elastic Stack was successfully installed on your Kubernetes cluster')
end
end
context 'when user installs Ingress' do
before do
allow(ClusterInstallAppWorker).to receive(:perform_async)
allow(ClusterWaitForIngressIpAddressWorker).to receive(:perform_in)
allow(ClusterWaitForIngressIpAddressWorker).to receive(:perform_async)
page.within('.js-cluster-application-row-ingress') do
expect(page).to have_css('.js-cluster-application-install-button:not([disabled])')
page.find(:css, '.js-cluster-application-install-button').click
wait_for_requests
end
end
it 'shows the status transition' do
page.within('.js-cluster-application-row-ingress') do
# FE sends request and gets the response, then the buttons is "Installing"
expect(page).to have_css('.js-cluster-application-install-button[disabled]', exact_text: 'Installing')
Clusters::Cluster.last.application_ingress.make_installing!
# FE starts polling and update the buttons to "Installing"
expect(page).to have_css('.js-cluster-application-install-button[disabled]', exact_text: 'Installing')
# The application becomes installed but we keep waiting for external IP address
Clusters::Cluster.last.application_ingress.make_installed!
expect(page).to have_css('.js-cluster-application-install-button[disabled]', exact_text: 'Installed')
expect(page).to have_selector('.js-no-endpoint-message')
expect(page).to have_selector('.js-ingress-ip-loading-icon')
# We receive the external IP address and display
Clusters::Cluster.last.application_ingress.update!(external_ip: '192.168.1.100')
expect(page).not_to have_css('button', exact_text: 'Install', visible: :all)
expect(page).not_to have_css('button', exact_text: 'Installing', visible: :all)
expect(page).to have_css('.js-cluster-application-uninstall-button:not([disabled])', exact_text: 'Uninstall')
expect(page).not_to have_css('p', text: 'The endpoint is in the process of being assigned', visible: :all)
expect(page.find('.js-endpoint').value).to eq('192.168.1.100')
end
expect(page).to have_content('Ingress was successfully installed on your Kubernetes cluster')
end
end
end
end
RSpec.shared_examples "installing applications on a cluster" do
it_behaves_like "installing applications for a cluster", false
it_behaves_like "installing applications for a cluster", true
end

View File

@ -1,23 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
require_relative '../../../../spec/features/clusters/installing_applications_shared_examples'
RSpec.describe 'Group-level Cluster Applications', :js do
include GoogleApi::CloudPlatformHelpers
let(:group) { create(:group) }
let(:user) { create(:user) }
before do
group.add_maintainer(user)
sign_in(user)
end
describe 'Installing applications' do
include_examples "installing applications on a cluster" do
let(:cluster_path) { group_cluster_path(group, cluster) }
let(:cluster_factory_args) { [:group, groups: [group]] }
end
end
end

View File

@ -1,23 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
require_relative '../../../../spec/features/clusters/installing_applications_shared_examples'
RSpec.describe 'Project-level Cluster Applications', :js do
include GoogleApi::CloudPlatformHelpers
let(:project) { create(:project) }
let(:user) { create(:user) }
before do
project.add_maintainer(user)
sign_in(user)
end
describe 'Installing applications' do
include_examples "installing applications on a cluster" do
let(:cluster_path) { project_cluster_path(project, cluster) }
let(:cluster_factory_args) { [projects: [project]] }
end
end
end

View File

@ -2,7 +2,8 @@
require 'spec_helper'
RSpec.describe Ci::AuthJobFinder do
let_it_be(:job, reload: true) { create(:ci_build, status: :running) }
let_it_be(:user, reload: true) { create(:user) }
let_it_be(:job, reload: true) { create(:ci_build, status: :running, user: user) }
let(:token) { job.token }
@ -55,10 +56,31 @@ RSpec.describe Ci::AuthJobFinder do
describe '#execute' do
subject(:execute) { finder.execute }
before do
job.success!
context 'when job is not running' do
before do
job.success!
end
it { is_expected.to be_nil }
end
it { is_expected.to be_nil }
context 'when job is running', :request_store do
it 'sets ci_job_token_scope on the job user', :aggregate_failures do
expect(subject).to eq(job)
expect(subject.user).to be_from_ci_job_token
expect(subject.user.ci_job_token_scope.source_project).to eq(job.project)
end
context 'when feature flag ci_scoped_job_token is disabled' do
before do
stub_feature_flags(ci_scoped_job_token: false)
end
it 'does not set ci_job_token_scope on the job user' do
expect(subject).to eq(job)
expect(subject.user).not_to be_from_ci_job_token
end
end
end
end
end

View File

@ -15,7 +15,6 @@ describe('ClusterIntegrationForm', () => {
editable: true,
environmentScope: '*',
baseDomain: 'testDomain',
applicationIngressExternalIp: null,
};
const createWrapper = (storeValues = defaultStoreValues) => {
@ -72,18 +71,6 @@ describe('ClusterIntegrationForm', () => {
expect(findSubmitButton().exists()).toBe(false);
});
});
it('does not render external IP block if applicationIngressExternalIp was not passed', () => {
createWrapper({ ...defaultStoreValues });
expect(wrapper.find('.js-ingress-domain-help-text').exists()).toBe(false);
});
it('renders external IP block if applicationIngressExternalIp was passed', () => {
createWrapper({ ...defaultStoreValues, applicationIngressExternalIp: '127.0.0.1' });
expect(wrapper.find('.js-ingress-domain-help-text').exists()).toBe(true);
});
});
describe('reactivity', () => {

View File

@ -0,0 +1,16 @@
import { getLevelFromContributions } from '~/pages/users/activity_calendar';
describe('getLevelFromContributions', () => {
it.each([
[0, 0],
[1, 1],
[9, 1],
[10, 2],
[19, 2],
[20, 3],
[30, 4],
[99, 4],
])('.getLevelFromContributions(%i, %i)', (count, expected) => {
expect(getLevelFromContributions(count)).toBe(expected);
});
});

View File

@ -50,7 +50,7 @@ RSpec.describe Gitlab::Auth::AuthFinders do
end
shared_examples 'find user from job token' do |without_job_token_allowed|
context 'when route is allowed to be authenticated' do
context 'when route is allowed to be authenticated', :request_store do
let(:route_authentication_setting) { { job_token_allowed: true } }
context 'for an invalid token' do
@ -68,6 +68,8 @@ RSpec.describe Gitlab::Auth::AuthFinders do
it 'returns the user' do
expect(subject).to eq(user)
expect(@current_authenticated_job).to eq job
expect(subject).to be_from_ci_job_token
expect(subject.ci_job_token_scope.source_project).to eq(job.project)
end
end
@ -81,7 +83,7 @@ RSpec.describe Gitlab::Auth::AuthFinders do
end
end
context 'when route is not allowed to be authenticated' do
context 'when route is not allowed to be authenticated', :request_store do
let(:route_authentication_setting) { { job_token_allowed: false } }
context 'with a running job' do
@ -96,6 +98,8 @@ RSpec.describe Gitlab::Auth::AuthFinders do
it 'returns the user' do
expect(subject).to eq(user)
expect(@current_authenticated_job).to eq job
expect(subject).to be_from_ci_job_token
expect(subject.ci_job_token_scope.source_project).to eq(job.project)
end
else
it 'returns nil' do

View File

@ -0,0 +1,68 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobToken::ProjectScopeLink do
it { is_expected.to belong_to(:source_project) }
it { is_expected.to belong_to(:target_project) }
it { is_expected.to belong_to(:added_by) }
let_it_be(:project) { create(:project) }
describe 'unique index' do
let!(:link) { create(:ci_job_token_project_scope_link) }
it 'raises an error' do
expect do
create(:ci_job_token_project_scope_link,
source_project: link.source_project,
target_project: link.target_project)
end.to raise_error(ActiveRecord::RecordNotUnique)
end
end
describe 'validations' do
it 'must have a source project', :aggregate_failures do
link = build(:ci_job_token_project_scope_link, source_project: nil)
expect(link).not_to be_valid
expect(link.errors[:source_project]).to contain_exactly("can't be blank")
end
it 'must have a target project', :aggregate_failures do
link = build(:ci_job_token_project_scope_link, target_project: nil)
expect(link).not_to be_valid
expect(link.errors[:target_project]).to contain_exactly("can't be blank")
end
it 'must have a target project different than source project', :aggregate_failures do
link = build(:ci_job_token_project_scope_link, target_project: project, source_project: project)
expect(link).not_to be_valid
expect(link.errors[:target_project]).to contain_exactly("can't be the same as the source project")
end
end
describe '.from_project' do
subject { described_class.from_project(project) }
let!(:source_link) { create(:ci_job_token_project_scope_link, source_project: project) }
let!(:target_link) { create(:ci_job_token_project_scope_link, target_project: project) }
it 'returns only the links having the given source project' do
expect(subject).to contain_exactly(source_link)
end
end
describe '.to_project' do
subject { described_class.to_project(project) }
let!(:source_link) { create(:ci_job_token_project_scope_link, source_project: project) }
let!(:target_link) { create(:ci_job_token_project_scope_link, target_project: project) }
it 'returns only the links having the given target project' do
expect(subject).to contain_exactly(target_link)
end
end
end
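
Taken together, these examples imply a model shape along the following lines (a sketch; the real model and migration define the association options, the uniqueness index, and the table name):

# Sketch of the model under test; class_name choices are assumptions.
class Ci::JobToken::ProjectScopeLink < ApplicationRecord
  belongs_to :source_project, class_name: 'Project'
  belongs_to :target_project, class_name: 'Project'
  belongs_to :added_by, class_name: 'User'

  # Presence validations here yield the "can't be blank" messages asserted above.
  validates :source_project, :target_project, presence: true
  validate :source_and_target_differ

  scope :from_project, ->(project) { where(source_project: project) }
  scope :to_project,   ->(project) { where(target_project: project) }

  private

  def source_and_target_differ
    errors.add(:target_project, "can't be the same as the source project") if source_project == target_project
  end
end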

View File

@ -0,0 +1,55 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobToken::Scope do
let_it_be(:project) { create(:project) }
let(:scope) { described_class.new(project) }
describe '#all_projects' do
subject(:all_projects) { scope.all_projects }
context 'when no projects are added to the scope' do
it 'returns the project defining the scope' do
expect(all_projects).to contain_exactly(project)
end
end
context 'when other projects are added to the scope' do
let_it_be(:scoped_project) { create(:project) }
let_it_be(:unscoped_project) { create(:project) }
let!(:link_in_scope) { create(:ci_job_token_project_scope_link, source_project: project, target_project: scoped_project) }
let!(:link_out_of_scope) { create(:ci_job_token_project_scope_link, target_project: unscoped_project) }
it 'returns all projects that can be accessed from a given scope' do
expect(subject).to contain_exactly(project, scoped_project)
end
end
end
describe '#includes?' do
subject { scope.includes?(target_project) }
context 'when param is the project defining the scope' do
let(:target_project) { project }
it { is_expected.to be_truthy }
end
context 'when param is a project in scope' do
let(:target_link) { create(:ci_job_token_project_scope_link, source_project: project) }
let(:target_project) { target_link.target_project }
it { is_expected.to be_truthy }
end
context 'when param is a project in another scope' do
let(:scope_link) { create(:ci_job_token_project_scope_link) }
let(:target_project) { scope_link.target_project }
it { is_expected.to be_falsey }
end
end
end
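
These examples fix the scope semantics: the defining project is always in scope, plus every target of a link whose source is that project. A sketch consistent with them (the production query may well differ):

# Sketch only.
class Ci::JobToken::Scope
  attr_reader :source_project

  def initialize(project)
    @source_project = project
  end

  def all_projects
    target_ids = Ci::JobToken::ProjectScopeLink.from_project(source_project).pluck(:target_project_id)
    Project.where(id: [source_project.id, *target_ids])
  end

  def includes?(project)
    all_projects.include?(project)
  end
end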

View File

@ -1,52 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe WebHookLogArchived do
let(:source_table) { WebHookLog }
let(:destination_table) { described_class }
it 'has the same columns as the source table' do
column_names_from_source_table = column_names(source_table)
column_names_from_destination_table = column_names(destination_table)
expect(column_names_from_destination_table).to match_array(column_names_from_source_table)
end
it 'has the same null constraints as the source table' do
constraints_from_source_table = null_constraints(source_table)
constraints_from_destination_table = null_constraints(destination_table)
expect(constraints_from_destination_table.to_a).to match_array(constraints_from_source_table.to_a)
end
it 'inserts the same record as the one in the source table', :aggregate_failures do
expect { create(:web_hook_log) }.to change { destination_table.count }.by(1)
event_from_source_table = source_table.connection.select_one(
"SELECT * FROM #{source_table.table_name} ORDER BY created_at desc LIMIT 1"
)
event_from_destination_table = destination_table.connection.select_one(
"SELECT * FROM #{destination_table.table_name} ORDER BY created_at desc LIMIT 1"
)
expect(event_from_destination_table).to eq(event_from_source_table)
end
def column_names(table)
table.connection.select_all(<<~SQL)
SELECT c.column_name
FROM information_schema.columns c
WHERE c.table_name = '#{table.table_name}'
SQL
end
def null_constraints(table)
table.connection.select_all(<<~SQL)
SELECT c.column_name, c.is_nullable
FROM information_schema.columns c
WHERE c.table_name = '#{table.table_name}'
AND c.column_name != 'created_at'
SQL
end
end

View File

@ -1385,4 +1385,53 @@ RSpec.describe ProjectPolicy do
end
end
end
describe 'when user is authenticated via CI_JOB_TOKEN', :request_store do
let(:current_user) { developer }
let(:job) { build_stubbed(:ci_build, project: scope_project, user: current_user) }
before do
current_user.set_ci_job_token_scope!(job)
end
context 'when accessing a private project' do
let(:project) { private_project }
context 'when the job token comes from the same project' do
let(:scope_project) { project }
it { is_expected.to be_allowed(:developer_access) }
end
context 'when the job token comes from another project' do
let(:scope_project) { create(:project, :private) }
before do
scope_project.add_developer(current_user)
end
it { is_expected.to be_disallowed(:guest_access) }
end
end
context 'when accessing a public project' do
let(:project) { public_project }
context 'when the job token comes from the same project' do
let(:scope_project) { project }
it { is_expected.to be_allowed(:developer_access) }
end
context 'when the job token comes from another project' do
let(:scope_project) { create(:project, :private) }
before do
scope_project.add_developer(current_user)
end
it { is_expected.to be_disallowed(:public_access) }
end
end
end
end
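
The policy examples encode the enforcement side: once a user is flagged as coming from a CI job token, a project outside the token's scope grants nothing, not even public access on a public project. In DeclarativePolicy terms this is roughly (a sketch, not the shipped policy; condition and rule names are assumptions):

# Sketch of the guard these examples describe.
condition(:project_allowed_for_job_token) do
  !@user&.from_ci_job_token? || @user.ci_job_token_scope.includes?(project)
end

rule { ~project_allowed_for_job_token }.prevent_all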

View File

@ -18,7 +18,7 @@ RSpec.describe API::GenericPackages do
let_it_be(:project_deploy_token_wo) { create(:project_deploy_token, deploy_token: deploy_token_wo, project: project) }
let(:user) { personal_access_token.user }
let(:ci_build) { create(:ci_build, :running, user: user) }
let(:ci_build) { create(:ci_build, :running, user: user, project: project) }
def auth_header
return {} if user_role == :anonymous

View File

@ -11,7 +11,7 @@ RSpec.describe API::GoProxy do
let_it_be(:base) { "#{Settings.build_gitlab_go_url}/#{project.full_path}" }
let_it_be(:oauth) { create :oauth_access_token, scopes: 'api', resource_owner: user }
let_it_be(:job) { create :ci_build, user: user, status: :running }
let_it_be(:job) { create :ci_build, user: user, status: :running, project: project }
let_it_be(:pa_token) { create :personal_access_token, user: user }
let_it_be(:modules) do

View File

@ -15,7 +15,7 @@ RSpec.describe API::MavenPackages do
let_it_be(:package_file) { package.package_files.with_file_name_like('%.xml').first }
let_it_be(:jar_file) { package.package_files.with_file_name_like('%.jar').first }
let_it_be(:personal_access_token) { create(:personal_access_token, user: user) }
let_it_be(:job, reload: true) { create(:ci_build, user: user, status: :running) }
let_it_be(:job, reload: true) { create(:ci_build, user: user, status: :running, project: project) }
let_it_be(:deploy_token) { create(:deploy_token, read_package_registry: true, write_package_registry: true) }
let_it_be(:project_deploy_token) { create(:project_deploy_token, deploy_token: deploy_token, project: project) }
let_it_be(:deploy_token_for_group) { create(:deploy_token, :group, read_package_registry: true, write_package_registry: true) }

View File

@ -78,7 +78,7 @@ RSpec.describe API::NpmProjectPackages do
context 'with a job token for a different user' do
let_it_be(:other_user) { create(:user) }
let_it_be_with_reload(:other_job) { create(:ci_build, :running, user: other_user) }
let_it_be_with_reload(:other_job) { create(:ci_build, :running, user: other_user, project: project) }
let(:headers) { build_token_auth_header(other_job.token) }

View File

@ -192,7 +192,7 @@ RSpec.describe API::NugetProjectPackages do
it_behaves_like 'deploy token for package uploads'
it_behaves_like 'job token for package uploads', authorize_endpoint: true do
let_it_be(:job) { create(:ci_build, :running, user: user) }
let_it_be(:job) { create(:ci_build, :running, user: user, project: project) }
end
it_behaves_like 'rejects nuget access with unknown target id'
@ -260,7 +260,7 @@ RSpec.describe API::NugetProjectPackages do
it_behaves_like 'deploy token for package uploads'
it_behaves_like 'job token for package uploads' do
let_it_be(:job) { create(:ci_build, :running, user: user) }
let_it_be(:job) { create(:ci_build, :running, user: user, project: project) }
end
it_behaves_like 'rejects nuget access with unknown target id'

View File

@ -13,7 +13,7 @@ RSpec.describe API::PypiPackages do
let_it_be(:personal_access_token) { create(:personal_access_token, user: user) }
let_it_be(:deploy_token) { create(:deploy_token, read_package_registry: true, write_package_registry: true) }
let_it_be(:project_deploy_token) { create(:project_deploy_token, deploy_token: deploy_token, project: project) }
let_it_be(:job) { create(:ci_build, :running, user: user) }
let_it_be(:job) { create(:ci_build, :running, user: user, project: project) }
let(:headers) { {} }
context 'simple API endpoint' do

View File

@ -775,7 +775,7 @@ RSpec.describe API::Releases do
end
context 'when using JOB-TOKEN auth' do
let(:job) { create(:ci_build, user: maintainer) }
let(:job) { create(:ci_build, user: maintainer, project: project) }
let(:params) do
{
name: 'Another release',

View File

@ -10,7 +10,7 @@ RSpec.describe API::RubygemPackages do
let_it_be_with_reload(:project) { create(:project) }
let_it_be(:personal_access_token) { create(:personal_access_token) }
let_it_be(:user) { personal_access_token.user }
let_it_be(:job) { create(:ci_build, :running, user: user) }
let_it_be(:job) { create(:ci_build, :running, user: user, project: project) }
let_it_be(:deploy_token) { create(:deploy_token, read_package_registry: true, write_package_registry: true) }
let_it_be(:project_deploy_token) { create(:project_deploy_token, deploy_token: deploy_token, project: project) }
let_it_be(:headers) { {} }

View File

@ -12,7 +12,7 @@ RSpec.describe API::Terraform::Modules::V1::Packages do
let_it_be(:package) { create(:terraform_module_package, project: project) }
let_it_be(:personal_access_token) { create(:personal_access_token) }
let_it_be(:user) { personal_access_token.user }
let_it_be(:job) { create(:ci_build, :running, user: user) }
let_it_be(:job) { create(:ci_build, :running, user: user, project: project) }
let_it_be(:deploy_token) { create(:deploy_token, read_package_registry: true, write_package_registry: true) }
let_it_be(:project_deploy_token) { create(:project_deploy_token, deploy_token: deploy_token, project: project) }

View File

@ -883,10 +883,10 @@ RSpec.describe 'Git HTTP requests' do
context 'when admin mode is enabled', :enable_admin_mode do
it_behaves_like 'can download code only'
it 'downloads from other project and gets status 403' do
it 'downloads from other project and gets status 404' do
clone_get "#{other_project.full_path}.git", user: 'gitlab-ci-token', password: build.token
expect(response).to have_gitlab_http_status(:forbidden)
expect(response).to have_gitlab_http_status(:not_found)
end
end

View File

@ -569,7 +569,7 @@ RSpec.describe 'Git LFS API and storage' do
let(:pipeline) { create(:ci_empty_pipeline, project: other_project) }
# I'm not sure what this tests that is different from the previous test
it_behaves_like 'LFS http 403 response'
it_behaves_like 'LFS http 404 response'
end
end
@ -1043,7 +1043,7 @@ RSpec.describe 'Git LFS API and storage' do
let(:pipeline) { create(:ci_empty_pipeline, project: other_project) }
# I'm not sure what this tests that is different from the previous test
it_behaves_like 'LFS http 403 response'
it_behaves_like 'LFS http 404 response'
end
end

View File

@ -8,11 +8,11 @@ RSpec.shared_context 'conan api setup' do
let_it_be(:personal_access_token) { create(:personal_access_token) }
let_it_be(:user) { personal_access_token.user }
let_it_be(:base_secret) { SecureRandom.base64(64) }
let_it_be(:job) { create(:ci_build, :running, user: user) }
let_it_be(:job_token) { job.token }
let_it_be(:deploy_token) { create(:deploy_token, read_package_registry: true, write_package_registry: true) }
let(:project) { package.project }
let(:job) { create(:ci_build, :running, user: user, project: project) }
let(:job_token) { job.token }
let(:auth_token) { personal_access_token.token }
let(:project_deploy_token) { create(:project_deploy_token, deploy_token: deploy_token, project: project) }

View File

@ -11,7 +11,7 @@ RSpec.shared_context 'npm api setup' do
let_it_be(:package, reload: true) { create(:npm_package, project: project, name: "@#{group.path}/scoped_package") }
let_it_be(:token) { create(:oauth_access_token, scopes: 'api', resource_owner: user) }
let_it_be(:personal_access_token) { create(:personal_access_token, user: user) }
let_it_be(:job, reload: true) { create(:ci_build, user: user, status: :running) }
let_it_be(:job, reload: true) { create(:ci_build, user: user, status: :running, project: project) }
let_it_be(:deploy_token) { create(:deploy_token, read_package_registry: true, write_package_registry: true) }
let_it_be(:project_deploy_token) { create(:project_deploy_token, deploy_token: deploy_token, project: project) }

View File

@ -4014,7 +4014,7 @@ d3-scale-chromatic@1:
d3-color "1"
d3-interpolate "1"
d3-scale@2, d3-scale@^2.2.2:
d3-scale@2:
version "2.2.2"
resolved "https://registry.yarnpkg.com/d3-scale/-/d3-scale-2.2.2.tgz#4e880e0b2745acaaddd3ede26a9e908a9e17b81f"
integrity sha512-LbeEvGgIb8UMcAa0EATLNX0lelKWGYDQiPdHj+gLblGVhGLyNbaCn3EvrJf0A3Y/uOOU5aD6MTh5ZFCdEwGiCw==