Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2020-11-21 00:09:36 +00:00
parent 8585e4137f
commit 707bf4370d
5 changed files with 41 additions and 115 deletions

View file

@@ -2,6 +2,8 @@
 module Pages
   class LookupPath
+    include Gitlab::Utils::StrongMemoize
+
     def initialize(project, trim_prefix: nil, domain: nil)
       @project = project
       @domain = domain
@@ -37,37 +39,28 @@ module Pages
     attr_reader :project, :trim_prefix, :domain

-    def artifacts_archive
-      return unless Feature.enabled?(:pages_serve_from_artifacts_archive, project)
-
-      project.pages_metadatum.artifacts_archive
-    end
-
     def deployment
-      return unless Feature.enabled?(:pages_serve_from_deployments, project)
+      strong_memoize(:deployment) do
+        next unless Feature.enabled?(:pages_serve_from_deployments, project)

-      project.pages_metadatum.pages_deployment
+        project.pages_metadatum.pages_deployment
+      end
     end

     def zip_source
-      source = deployment || artifacts_archive
-
-      return unless source&.file
-      return if source.file.file_storage? && !Feature.enabled?(:pages_serve_with_zip_file_protocol, project)
-
-      # artifacts archive doesn't support this
-      file_count = source.file_count if source.respond_to?(:file_count)
-
-      global_id = ::Gitlab::GlobalId.build(source, id: source.id).to_s
+      return unless deployment&.file
+      return if deployment.file.file_storage? && !Feature.enabled?(:pages_serve_with_zip_file_protocol, project)
+
+      global_id = ::Gitlab::GlobalId.build(deployment, id: deployment.id).to_s

       {
         type: 'zip',
-        path: source.file.url_or_file_path(expire_at: 1.day.from_now),
+        path: deployment.file.url_or_file_path(expire_at: 1.day.from_now),
         global_id: global_id,
-        sha256: source.file_sha256,
-        file_size: source.size,
-        file_count: file_count
+        sha256: deployment.file_sha256,
+        file_size: deployment.size,
+        file_count: deployment.file_count
       }
     end
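The switch to `strong_memoize` matters here because `deployment` can legitimately resolve to `nil` (feature flag off, or no deployment), and a plain `@deployment ||= ...` would re-run the lookup on every call in that case. A minimal sketch of the memoization semantics relied on above (an illustration, not the actual `Gitlab::Utils::StrongMemoize` source):

```ruby
# Illustrative reimplementation: cache the block's result in an instance
# variable, treating nil and false as valid cached values.
module StrongMemoizeSketch
  def strong_memoize(name)
    ivar = :"@#{name}"
    if instance_variable_defined?(ivar)
      instance_variable_get(ivar)
    else
      instance_variable_set(ivar, yield)
    end
  end
end

class LookupPathExample
  include StrongMemoizeSketch

  def deployment
    strong_memoize(:deployment) do
      puts 'expensive lookup'
      nil # a nil result is cached too, so the lookup runs only once
    end
  end
end

path = LookupPathExample.new
path.deployment # prints 'expensive lookup'
path.deployment # cached: prints nothing
```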

View file

@@ -1,8 +0,0 @@
----
-name: pages_serve_from_artifacts_archive
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/46320
-rollout_issue_url:
-group: group::release management
-milestone: '13.4'
-type: development
-default_enabled: false

View file

@@ -820,7 +820,7 @@ If you did not change the default location of the configuration file, run:
 sudo gitlab-ctl registry-garbage-collect
 ```

-This command will take some time to complete, depending on the amount of
+This command takes some time to complete, depending on the amount of
 layers you have stored.

 If you changed the location of the Container Registry `config.yml`:
@@ -867,7 +867,7 @@ You can perform garbage collection without stopping the Container Registry by pu
 it in read-only mode and by not using the built-in command. On large instances
 this could require Container Registry to be in read-only mode for a while.
 During this time,
-you will be able to pull from the Container Registry, but you will not be able to
+you are able to pull from the Container Registry, but you are not able to
 push.

 By default, the [registry storage path](#configure-storage-for-the-container-registry)
@@ -896,7 +896,7 @@ To enable the read-only mode:
    sudo gitlab-ctl reconfigure
    ```

-   This will set the Container Registry into the read only mode.
+   This command sets the Container Registry into read-only mode.

1. Next, trigger one of the garbage collect commands:
@@ -908,7 +908,7 @@ To enable the read-only mode:
    sudo /opt/gitlab/embedded/bin/registry garbage-collect -m /var/opt/gitlab/registry/config.yml
    ```

-   This will start the garbage collection, which might take some time to complete.
+   This command starts the garbage collection, which might take some time to complete.

1. Once done, in `/etc/gitlab/gitlab.rb` change it back to read-write mode:
@@ -935,7 +935,7 @@ To enable the read-only mode:
 Ideally, you want to run the garbage collection of the registry regularly on a
 weekly basis at a time when the registry is not in use.
-The simplest way is to add a new crontab job that it will run periodically
+The simplest way is to add a new crontab job that runs periodically
 once a week.

 Create a file under `/etc/cron.d/registry-garbage-collect`:
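The file's contents sit beyond this hunk; as a sketch, such an entry could look like the following (the schedule and environment lines are assumptions to adapt per instance):

```shell
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin

# Hypothetical schedule: run registry garbage collection every Sunday at 04:05
5 4 * * 0  root gitlab-ctl registry-garbage-collect
```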
@@ -1155,7 +1155,7 @@ curl localhost:5001/debug/vars
 ### Advanced Troubleshooting

-We will use a concrete example in the past to illustrate how to
+We use a concrete example to illustrate how to
 diagnose a problem with the S3 setup.

 #### Unexpected 403 error during push
@@ -1227,14 +1227,14 @@ To verify that the certificates are properly installed, run:
 mitmproxy --port 9000
 ```

-This will run mitmproxy on port `9000`. In another window, run:
+This command runs mitmproxy on port `9000`. In another window, run:

 ```shell
 curl --proxy http://localhost:9000 https://httpbin.org/status/200
 ```

-If everything is set up correctly, you will see information on the mitmproxy window and
-no errors from the curl commands.
+If everything is set up correctly, information is displayed on the mitmproxy window and
+no errors are generated by the curl commands.

 #### Running the Docker daemon with a proxy
@@ -1248,7 +1248,7 @@ export HTTPS_PROXY="https://localhost:9000"
 docker daemon --debug
 ```

-This will launch the Docker daemon and proxy all connections through mitmproxy.
+This command launches the Docker daemon and proxies all connections through mitmproxy.

 #### Running the Docker client
@@ -1273,4 +1273,4 @@ The above image shows:
 What does this mean? This strongly suggests that the S3 user does not have the right
 [permissions to perform a HEAD request](https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html).
 The solution: check the [IAM permissions again](https://docs.docker.com/registry/storage-drivers/s3/).
-Once the right permissions were set, the error will go away.
+Once the right permissions are set, the error goes away.
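For reference, an IAM policy granting the permissions the linked S3 storage driver documentation calls for might look like this (the bucket name is a placeholder; verify the required actions against the Docker docs, noting that a HEAD request is covered by `s3:GetObject`):

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["s3:ListBucket", "s3:GetBucketLocation", "s3:ListBucketMultipartUploads"],
      "Resource": "arn:aws:s3:::my-registry-bucket"
    },
    {
      "Effect": "Allow",
      "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject", "s3:ListMultipartUploadParts", "s3:AbortMultipartUpload"],
      "Resource": "arn:aws:s3:::my-registry-bucket/*"
    }
  ]
}
```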

View file

@@ -6,7 +6,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
 # Packages

-This document will guide you through adding another [package management system](../administration/packages/index.md) support to GitLab.
+This document guides you through adding support for another [package management system](../administration/packages/index.md) to GitLab.

 See already supported package types in [Packages documentation](../administration/packages/index.md)
@@ -56,12 +56,12 @@ Group-level and instance-level endpoints are good to have but are optional.
 Packages are scoped within various levels of access, which is generally configured by setting your remote. A
 remote endpoint may be set at the project level, meaning when installing packages, only packages belonging to that
-project will be visible. Alternatively, a group-level endpoint may be used to allow visibility to all packages
+project are visible. Alternatively, a group-level endpoint may be used to allow visibility to all packages
 within a given group. Lastly, an instance-level endpoint can be used to allow visibility to all packages within an
 entire GitLab instance.
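To illustrate these scopes (hypothetical instance URL, project ID, and scope name; exact endpoint shapes vary by package format), an NPM remote can be pointed at a single project or at the whole instance:

```shell
# Project-level remote: only that project's packages are visible.
npm config set @myscope:registry https://gitlab.example.com/api/v4/projects/1234/packages/npm/

# Instance-level remote: visibility across the instance, which is why
# stricter naming conventions are needed at this scope.
npm config set @myscope:registry https://gitlab.example.com/api/v4/packages/npm/
```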
-Using group and project level endpoints will allow for more flexibility in package naming, however, more remotes
-will have to be managed. Using instance level endpoints requires [stricter naming conventions](#naming-conventions).
+Using group and project level endpoints allows for more flexibility in package naming; however, more remotes
+have to be managed. Using instance level endpoints requires [stricter naming conventions](#naming-conventions).

 The current state of existing package registries availability is:
@@ -86,12 +86,12 @@ Composer package naming scope is Instance Level.
 ### Naming conventions

-To avoid name conflict for instance-level endpoints you will need to define a package naming convention
+To avoid name conflicts for instance-level endpoints you must define a package naming convention
 that gives a way to identify the project that the package belongs to. This generally involves using the project
 ID or full project path in the package name. See
 [Conan's naming convention](../user/packages/conan_repository/index.md#package-recipe-naming-convention-for-instance-remotes) as an example.
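For example (hypothetical names), under Conan's instance-level convention a package in the project `acme/widgets` is published as `mylib/1.0.0@acme+widgets/stable`, encoding the full project path (with `/` replaced by `+`) in the user field so the registry can resolve the owning project.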
-For group and project-level endpoints, naming can be less constrained and it will be up to the group and project
+For group and project-level endpoints, naming can be less constrained and it is up to the group and project
 members to be certain that there is no conflict between two package names. However, the system should prevent
 a user from reusing an existing name within a given scope.
@@ -121,7 +121,7 @@ The way new package systems are integrated in GitLab is using an [MVC](https://a
 - Pulling a package
 - Required actions

-Required actions are all the additional requests that GitLab will need to handle so the corresponding package manager CLI can work properly. It could be a search feature or an endpoint providing meta information about a package. For example:
+Required actions are all the additional requests that GitLab needs to handle so the corresponding package manager CLI can work properly. It could be a search feature or an endpoint providing meta information about a package. For example:

 - For NuGet, the search request was implemented during the first MVC iteration, to support Visual Studio.
 - For NPM, there is a metadata endpoint used by `npm` to get the tarball URL.
@@ -146,22 +146,22 @@ process.
 During this phase, the idea is to collect as much information as possible about the API used by the package system. Here are some aspects that can be useful to include:

 - **Authentication**: What authentication mechanisms are available (OAuth, Basic
-  Authorization, other). Keep in mind that GitLab users will often want to use their
+  Authorization, other). Keep in mind that GitLab users often want to use their
   [Personal Access Tokens](../user/profile/personal_access_tokens.md).
   Although not needed for the MVC first iteration, the [CI job tokens](../user/project/new_ci_build_permissions_model.md#job-token)
   have to be supported at some point in the future.
 - **Requests**: Which requests are needed to have a working MVC. Ideally, produce
   a list of all the requests needed for the MVC (including required actions). Further
   investigation could provide an example for each request with the request and the response bodies.
-- **Upload**: Carefully analyze how the upload process works. This will probably be the most
+- **Upload**: Carefully analyze how the upload process works. This is likely the most
   complex request to implement. A detailed analysis is desired here as uploads can be
   encoded in different ways (body or multipart) and can even be in a totally different
   format (for example, a JSON structure where the package file is a Base64 value of
   a particular field). These different encodings lead to slightly different implementations
   on GitLab and GitLab Workhorse. For more detailed information, review [file uploads](#file-uploads).
-- **Endpoints**: Suggest a list of endpoint URLs that will be implemented in GitLab.
+- **Endpoints**: Suggest a list of endpoint URLs to implement in GitLab.
 - **Split work**: Suggest a list of changes to do to incrementally build the MVC.
-  This will give a good idea of how much work there is to be done. Here is an example
+  This gives a good idea of how much work there is to be done. Here is an example
   list that would need to be adapted on a case by case basis:
   1. Empty file structure (API file, base service for this package)
   1. Authentication system for "logging in" to the package manager
@@ -177,7 +177,7 @@ In particular, the upload request can have some [requirements in the GitLab Work
 ### Implementation

-The implementation of the different Merge Requests will vary between different package system integrations. Contributors should take into account some important aspects of the implementation phase.
+The implementation of the different Merge Requests varies between different package system integrations. Contributors should take into account some important aspects of the implementation phase.

 #### Authentication
@@ -217,23 +217,23 @@ If there is package specific behavior for a given package manager, add those met
 delegate from the package model.

 Note that the existing package UI only displays information within the `packages_packages` and `packages_package_files`
-tables. If the data stored in the metadata tables need to be displayed, a ~frontend change will be required.
+tables. If the data stored in the metadata tables needs to be displayed, a ~frontend change is required.

 #### File uploads

 File uploads should be handled by GitLab Workhorse using object accelerated uploads. What this means is that
-the workhorse proxy that checks all incoming requests to GitLab will intercept the upload request,
+the workhorse proxy that checks all incoming requests to GitLab intercepts the upload request,
 uploads the file, and forwards a request to the main GitLab codebase only containing the metadata
 and file location rather than the file itself. An overview of this process can be found in the
 [development documentation](uploads.md#direct-upload).

-In terms of code, this means a route will need to be added to the
+In terms of code, this means a route must be added to the
 [GitLab Workhorse project](https://gitlab.com/gitlab-org/gitlab-workhorse) for each upload endpoint being added
 (instance, group, project). [This merge request](https://gitlab.com/gitlab-org/gitlab-workhorse/-/merge_requests/412/diffs)
 demonstrates adding an instance-level endpoint for Conan to workhorse. You can also see the Maven project level endpoint
 implemented in the same file.

-Once the route has been added, you will need to add an additional `/authorize` version of the upload endpoint to your API file.
+Once the route has been added, you must add an additional `/authorize` version of the upload endpoint to your API file.
 [Here is an example](https://gitlab.com/gitlab-org/gitlab/blob/398fef1ca26ae2b2c3dc89750f6b20455a1e5507/ee/lib/api/maven_packages.rb#L164)
 of the additional endpoint added for Maven. The `/authorize` endpoint verifies and authorizes the request from workhorse,
 then the normal upload endpoint is implemented below, consuming the metadata that workhorse provides in order to
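A rough sketch of that `/authorize` plus upload pair (Grape-style, with illustrative route and payload names rather than GitLab's actual helpers):

```ruby
require 'grape'

class ExamplePackagesApi < Grape::API
  format :json

  # Step 1: Workhorse asks Rails to authorize the upload before touching
  # the file; the response tells Workhorse where to store the data.
  put ':id/packages/example/:file_name/authorize' do
    { TempPath: '/tmp/uploads' } # stand-in for the real authorization payload
  end

  # Step 2: Workhorse uploads the file itself, then forwards only the
  # metadata (location, size, checksums) here, so the Rails process
  # never buffers the package file.
  put ':id/packages/example/:file_name' do
    { created: params[:file_name] }
  end
end
```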
@@ -244,7 +244,7 @@ in your local development environment.
 ### Future Work

-While working on the MVC, contributors will probably find features that are not mandatory for the MVC but can provide a better user experience. It's generally a good idea to keep an eye on those and open issues.
+While working on the MVC, contributors might find features that are not mandatory for the MVC but can provide a better user experience. It's generally a good idea to keep an eye on those and open issues.

 Here are some examples

View file

@@ -9,7 +9,6 @@ RSpec.describe Pages::LookupPath do
   before do
     stub_pages_setting(access_control: true, external_https: ["1.1.1.1:443"])
-    stub_artifacts_object_storage
     stub_pages_object_storage(::Pages::DeploymentUploader)
   end
@@ -117,64 +116,6 @@ RSpec.describe Pages::LookupPath do
         include_examples 'uses disk storage'
       end
     end
-
-    context 'when artifact_id from build job is present in pages metadata' do
-      let(:artifacts_archive) { create(:ci_job_artifact, :zip, :remote_store, project: project) }
-
-      before do
-        project.mark_pages_as_deployed(artifacts_archive: artifacts_archive)
-      end
-
-      it 'uses artifacts object storage' do
-        Timecop.freeze do
-          expect(source).to(
-            eq({
-                 type: 'zip',
-                 path: artifacts_archive.file.url(expire_at: 1.day.from_now),
-                 global_id: "gid://gitlab/Ci::JobArtifact/#{artifacts_archive.id}",
-                 sha256: artifacts_archive.file_sha256,
-                 file_size: artifacts_archive.size,
-                 file_count: nil
-               })
-          )
-        end
-      end
-
-      context 'when artifact is not uploaded to object storage' do
-        let(:artifacts_archive) { create(:ci_job_artifact, :zip) }
-
-        it 'uses file protocol', :aggregate_failures do
-          Timecop.freeze do
-            expect(source).to(
-              eq({
-                   type: 'zip',
-                   path: 'file://' + artifacts_archive.file.path,
-                   global_id: "gid://gitlab/Ci::JobArtifact/#{artifacts_archive.id}",
-                   sha256: artifacts_archive.file_sha256,
-                   file_size: artifacts_archive.size,
-                   file_count: nil
-                 })
-            )
-          end
-        end
-
-        context 'when pages_serve_with_zip_file_protocol feature flag is disabled' do
-          before do
-            stub_feature_flags(pages_serve_with_zip_file_protocol: false)
-          end
-
-          include_examples 'uses disk storage'
-        end
-      end
-
-      context 'when feature flag is disabled' do
-        before do
-          stub_feature_flags(pages_serve_from_artifacts_archive: false)
-        end
-
-        include_examples 'uses disk storage'
-      end
-    end
   end

   describe '#prefix' do