expectedResults
}{
{name: "RepositoriesMax50", requestURL: "/api/v1/repos/search?limit=50&private=false", expectedResults: expectedResults{
- nil: {count: 27},
- user: {count: 27},
- user2: {count: 27}},
+ nil: {count: 28},
+ user: {count: 28},
+ user2: {count: 28}},
},
{name: "RepositoriesMax10", requestURL: "/api/v1/repos/search?limit=10&private=false", expectedResults: expectedResults{
nil: {count: 10},
--- /dev/null
+ref: refs/heads/master
--- /dev/null
+[core]
+ repositoryformatversion = 0
+ filemode = false
+ bare = true
+ symlinks = false
+ ignorecase = true
--- /dev/null
+Unnamed repository; edit this file 'description' to name the repository.
--- /dev/null
+#!/bin/sh
+#
+# An example hook script to check the commit log message taken by
+# applypatch from an e-mail message.
+#
+# The hook should exit with non-zero status after issuing an
+# appropriate message if it wants to stop the commit. The hook is
+# allowed to edit the commit message file.
+#
+# To enable this hook, rename this file to "applypatch-msg".
+
+. git-sh-setup
+commitmsg="$(git rev-parse --git-path hooks/commit-msg)"
+test -x "$commitmsg" && exec "$commitmsg" ${1+"$@"}
+:
--- /dev/null
+#!/bin/sh
+#
+# An example hook script to check the commit log message.
+# Called by "git commit" with one argument, the name of the file
+# that has the commit message. The hook should exit with non-zero
+# status after issuing an appropriate message if it wants to stop the
+# commit. The hook is allowed to edit the commit message file.
+#
+# To enable this hook, rename this file to "commit-msg".
+
+# Uncomment the below to add a Signed-off-by line to the message.
+# Doing this in a hook is a bad idea in general, but the prepare-commit-msg
+# hook is more suited to it.
+#
+# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
+# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1"
+
+# This example catches duplicate Signed-off-by lines.
+
+test "" = "$(grep '^Signed-off-by: ' "$1" |
+ sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || {
+ echo >&2 Duplicate Signed-off-by lines.
+ exit 1
+}
--- /dev/null
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use IPC::Open2;
+
+# An example hook script to integrate Watchman
+# (https://facebook.github.io/watchman/) with git to speed up detecting
+# new and modified files.
+#
+# The hook is passed a version (currently 1) and a time in nanoseconds
+# formatted as a string and outputs to stdout all files that have been
+# modified since the given time. Paths must be relative to the root of
+# the working tree and separated by a single NUL.
+#
+# To enable this hook, rename this file to "query-watchman" and set
+# 'git config core.fsmonitor .git/hooks/query-watchman'
+#
+my ($version, $time) = @ARGV;
+
+# Check the hook interface version
+
+if ($version == 1) {
+ # convert nanoseconds to seconds
+ $time = int $time / 1000000000;
+} else {
+ die "Unsupported query-fsmonitor hook version '$version'.\n" .
+ "Falling back to scanning...\n";
+}
+
+my $git_work_tree;
+if ($^O =~ 'msys' || $^O =~ 'cygwin') {
+ $git_work_tree = Win32::GetCwd();
+ $git_work_tree =~ tr/\\/\//;
+} else {
+ require Cwd;
+ $git_work_tree = Cwd::cwd();
+}
+
+my $retry = 1;
+
+launch_watchman();
+
+sub launch_watchman {
+
+ my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty')
+ or die "open2() failed: $!\n" .
+ "Falling back to scanning...\n";
+
+ # In the query expression below we're asking for names of files that
+ # changed since $time but were not transient (ie created after
+ # $time but no longer exist).
+ #
+ # To accomplish this, we're using the "since" generator to use the
+ # recency index to select candidate nodes and "fields" to limit the
+ # output to file names only. Then we're using the "expression" term to
+ # further constrain the results.
+ #
+ # The category of transient files that we want to ignore will have a
+ # creation clock (cclock) newer than $time_t value and will also not
+ # currently exist.
+
+ my $query = <<" END";
+ ["query", "$git_work_tree", {
+ "since": $time,
+ "fields": ["name"],
+ "expression": ["not", ["allof", ["since", $time, "cclock"], ["not", "exists"]]]
+ }]
+ END
+
+ print CHLD_IN $query;
+ close CHLD_IN;
+ my $response = do {local $/; <CHLD_OUT>};
+
+ die "Watchman: command returned no output.\n" .
+ "Falling back to scanning...\n" if $response eq "";
+ die "Watchman: command returned invalid output: $response\n" .
+ "Falling back to scanning...\n" unless $response =~ /^\{/;
+
+ my $json_pkg;
+ eval {
+ require JSON::XS;
+ $json_pkg = "JSON::XS";
+ 1;
+ } or do {
+ require JSON::PP;
+ $json_pkg = "JSON::PP";
+ };
+
+ my $o = $json_pkg->new->utf8->decode($response);
+
+ if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) {
+ print STDERR "Adding '$git_work_tree' to watchman's watch list.\n";
+ $retry--;
+ qx/watchman watch "$git_work_tree"/;
+ die "Failed to make watchman watch '$git_work_tree'.\n" .
+ "Falling back to scanning...\n" if $? != 0;
+
+ # Watchman will always return all files on the first query so
+ # return the fast "everything is dirty" flag to git and do the
+ # Watchman query just to get it over with now so we won't pay
+ # the cost in git to look up each individual file.
+ print "/\0";
+ eval { launch_watchman() };
+ exit 0;
+ }
+
+ die "Watchman: $o->{error}.\n" .
+ "Falling back to scanning...\n" if $o->{error};
+
+ binmode STDOUT, ":utf8";
+ local $, = "\0";
+ print @{$o->{files}};
+}
--- /dev/null
+#!/usr/bin/env bash
+data=$(cat)
+exitcodes=""
+hookname=$(basename $0)
+GIT_DIR=${GIT_DIR:-$(dirname $0)}
+
+for hook in ${GIT_DIR}/hooks/${hookname}.d/*; do
+test -x "${hook}" && test -f "${hook}" || continue
+echo "${data}" | "${hook}"
+exitcodes="${exitcodes} $?"
+done
+
+for i in ${exitcodes}; do
+[ ${i} -eq 0 ] || exit ${i}
+done
--- /dev/null
+#!/usr/bin/env bash
+"$GITEA_ROOT/gitea" hook --config="$GITEA_ROOT/$GITEA_CONF" post-receive
--- /dev/null
+#!/bin/sh
+#
+# An example hook script to prepare a packed repository for use over
+# dumb transports.
+#
+# To enable this hook, rename this file to "post-update".
+
+exec git update-server-info
--- /dev/null
+#!/bin/sh
+#
+# An example hook script to verify what is about to be committed
+# by applypatch from an e-mail message.
+#
+# The hook should exit with non-zero status after issuing an
+# appropriate message if it wants to stop the commit.
+#
+# To enable this hook, rename this file to "pre-applypatch".
+
+. git-sh-setup
+precommit="$(git rev-parse --git-path hooks/pre-commit)"
+test -x "$precommit" && exec "$precommit" ${1+"$@"}
+:
--- /dev/null
+#!/bin/sh
+#
+# An example hook script to verify what is about to be committed.
+# Called by "git commit" with no arguments. The hook should
+# exit with non-zero status after issuing an appropriate message if
+# it wants to stop the commit.
+#
+# To enable this hook, rename this file to "pre-commit".
+
+if git rev-parse --verify HEAD >/dev/null 2>&1
+then
+ against=HEAD
+else
+ # Initial commit: diff against an empty tree object
+ against=$(git hash-object -t tree /dev/null)
+fi
+
+# If you want to allow non-ASCII filenames set this variable to true.
+allownonascii=$(git config --bool hooks.allownonascii)
+
+# Redirect output to stderr.
+exec 1>&2
+
+# Cross platform projects tend to avoid non-ASCII filenames; prevent
+# them from being added to the repository. We exploit the fact that the
+# printable range starts at the space character and ends with tilde.
+if [ "$allownonascii" != "true" ] &&
+ # Note that the use of brackets around a tr range is ok here, (it's
+ # even required, for portability to Solaris 10's /usr/bin/tr), since
+ # the square bracket bytes happen to fall in the designated range.
+ test $(git diff --cached --name-only --diff-filter=A -z $against |
+ LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0
+then
+ cat <<\EOF
+Error: Attempt to add a non-ASCII file name.
+
+This can cause problems if you want to work with people on other platforms.
+
+To be portable it is advisable to rename the file.
+
+If you know what you are doing you can disable this check using:
+
+ git config hooks.allownonascii true
+EOF
+ exit 1
+fi
+
+# If there are whitespace errors, print the offending file names and fail.
+exec git diff-index --check --cached $against --
--- /dev/null
+#!/bin/sh
+
+# An example hook script to verify what is about to be pushed. Called by "git
+# push" after it has checked the remote status, but before anything has been
+# pushed. If this script exits with a non-zero status nothing will be pushed.
+#
+# This hook is called with the following parameters:
+#
+# $1 -- Name of the remote to which the push is being done
+# $2 -- URL to which the push is being done
+#
+# If pushing without using a named remote those arguments will be equal.
+#
+# Information about the commits which are being pushed is supplied as lines to
+# the standard input in the form:
+#
+# <local ref> <local sha1> <remote ref> <remote sha1>
+#
+# This sample shows how to prevent push of commits where the log message starts
+# with "WIP" (work in progress).
+
+remote="$1"
+url="$2"
+
+z40=0000000000000000000000000000000000000000
+
+while read local_ref local_sha remote_ref remote_sha
+do
+ if [ "$local_sha" = $z40 ]
+ then
+ # Handle delete
+ :
+ else
+ if [ "$remote_sha" = $z40 ]
+ then
+ # New branch, examine all commits
+ range="$local_sha"
+ else
+ # Update to existing branch, examine new commits
+ range="$remote_sha..$local_sha"
+ fi
+
+ # Check for WIP commit
+ commit=`git rev-list -n 1 --grep '^WIP' "$range"`
+ if [ -n "$commit" ]
+ then
+ echo >&2 "Found WIP commit in $local_ref, not pushing"
+ exit 1
+ fi
+ fi
+done
+
+exit 0
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2006, 2008 Junio C Hamano
+#
+# The "pre-rebase" hook is run just before "git rebase" starts doing
+# its job, and can prevent the command from running by exiting with
+# non-zero status.
+#
+# The hook is called with the following parameters:
+#
+# $1 -- the upstream the series was forked from.
+# $2 -- the branch being rebased (or empty when rebasing the current branch).
+#
+# This sample shows how to prevent topic branches that are already
+# merged to 'next' branch from getting rebased, because allowing it
+# would result in rebasing already published history.
+
+publish=next
+basebranch="$1"
+if test "$#" = 2
+then
+ topic="refs/heads/$2"
+else
+ topic=`git symbolic-ref HEAD` ||
+ exit 0 ;# we do not interrupt rebasing detached HEAD
+fi
+
+case "$topic" in
+refs/heads/??/*)
+ ;;
+*)
+ exit 0 ;# we do not interrupt others.
+ ;;
+esac
+
+# Now we are dealing with a topic branch being rebased
+# on top of master. Is it OK to rebase it?
+
+# Does the topic really exist?
+git show-ref -q "$topic" || {
+ echo >&2 "No such branch $topic"
+ exit 1
+}
+
+# Is topic fully merged to master?
+not_in_master=`git rev-list --pretty=oneline ^master "$topic"`
+if test -z "$not_in_master"
+then
+ echo >&2 "$topic is fully merged to master; better remove it."
+ exit 1 ;# we could allow it, but there is no point.
+fi
+
+# Is topic ever merged to next? If so you should not be rebasing it.
+only_next_1=`git rev-list ^master "^$topic" ${publish} | sort`
+only_next_2=`git rev-list ^master ${publish} | sort`
+if test "$only_next_1" = "$only_next_2"
+then
+ not_in_topic=`git rev-list "^$topic" master`
+ if test -z "$not_in_topic"
+ then
+ echo >&2 "$topic is already up to date with master"
+ exit 1 ;# we could allow it, but there is no point.
+ else
+ exit 0
+ fi
+else
+ not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"`
+ /usr/bin/perl -e '
+ my $topic = $ARGV[0];
+ my $msg = "* $topic has commits already merged to public branch:\n";
+ my (%not_in_next) = map {
+ /^([0-9a-f]+) /;
+ ($1 => 1);
+ } split(/\n/, $ARGV[1]);
+ for my $elem (map {
+ /^([0-9a-f]+) (.*)$/;
+ [$1 => $2];
+ } split(/\n/, $ARGV[2])) {
+ if (!exists $not_in_next{$elem->[0]}) {
+ if ($msg) {
+ print STDERR $msg;
+ undef $msg;
+ }
+ print STDERR " $elem->[1]\n";
+ }
+ }
+ ' "$topic" "$not_in_next" "$not_in_master"
+ exit 1
+fi
+
+<<\DOC_END
+
+This sample hook safeguards topic branches that have been
+published from being rewound.
+
+The workflow assumed here is:
+
+ * Once a topic branch forks from "master", "master" is never
+ merged into it again (either directly or indirectly).
+
+ * Once a topic branch is fully cooked and merged into "master",
+ it is deleted. If you need to build on top of it to correct
+ earlier mistakes, a new topic branch is created by forking at
+ the tip of the "master". This is not strictly necessary, but
+ it makes it easier to keep your history simple.
+
+ * Whenever you need to test or publish your changes to topic
+ branches, merge them into "next" branch.
+
+The script, being an example, hardcodes the publish branch name
+to be "next", but it is trivial to make it configurable via
+$GIT_DIR/config mechanism.
+
+With this workflow, you would want to know:
+
+(1) ... if a topic branch has ever been merged to "next". Young
+ topic branches can have stupid mistakes you would rather
+ clean up before publishing, and things that have not been
+ merged into other branches can be easily rebased without
+ affecting other people. But once it is published, you would
+ not want to rewind it.
+
+(2) ... if a topic branch has been fully merged to "master".
+ Then you can delete it. More importantly, you should not
+ build on top of it -- other people may already want to
+ change things related to the topic as patches against your
+ "master", so if you need further changes, it is better to
+ fork the topic (perhaps with the same name) afresh from the
+ tip of "master".
+
+Let's look at this example:
+
+ o---o---o---o---o---o---o---o---o---o "next"
+ / / / /
+ / a---a---b A / /
+ / / / /
+ / / c---c---c---c B /
+ / / / \ /
+ / / / b---b C \ /
+ / / / / \ /
+ ---o---o---o---o---o---o---o---o---o---o---o "master"
+
+
+A, B and C are topic branches.
+
+ * A has one fix since it was merged up to "next".
+
+ * B has finished. It has been fully merged up to "master" and "next",
+ and is ready to be deleted.
+
+ * C has not merged to "next" at all.
+
+We would want to allow C to be rebased, refuse A, and encourage
+B to be deleted.
+
+To compute (1):
+
+ git rev-list ^master ^topic next
+ git rev-list ^master next
+
+ if these match, topic has not merged in next at all.
+
+To compute (2):
+
+ git rev-list master..topic
+
+ if this is empty, it is fully merged to "master".
+
+DOC_END
--- /dev/null
+#!/usr/bin/env bash
+data=$(cat)
+exitcodes=""
+hookname=$(basename $0)
+GIT_DIR=${GIT_DIR:-$(dirname $0)}
+
+for hook in ${GIT_DIR}/hooks/${hookname}.d/*; do
+test -x "${hook}" && test -f "${hook}" || continue
+echo "${data}" | "${hook}"
+exitcodes="${exitcodes} $?"
+done
+
+for i in ${exitcodes}; do
+[ ${i} -eq 0 ] || exit ${i}
+done
--- /dev/null
+#!/usr/bin/env bash
+"$GITEA_ROOT/gitea" hook --config="$GITEA_ROOT/$GITEA_CONF" pre-receive
--- /dev/null
+#!/bin/sh
+#
+# An example hook script to make use of push options.
+# The example simply echoes all push options that start with 'echoback='
+# and rejects all pushes when the "reject" push option is used.
+#
+# To enable this hook, rename this file to "pre-receive".
+
+if test -n "$GIT_PUSH_OPTION_COUNT"
+then
+ i=0
+ while test "$i" -lt "$GIT_PUSH_OPTION_COUNT"
+ do
+ eval "value=\$GIT_PUSH_OPTION_$i"
+ case "$value" in
+ echoback=*)
+ echo "echo from the pre-receive-hook: ${value#*=}" >&2
+ ;;
+ reject)
+ exit 1
+ esac
+ i=$((i + 1))
+ done
+fi
--- /dev/null
+#!/bin/sh
+#
+# An example hook script to prepare the commit log message.
+# Called by "git commit" with the name of the file that has the
+# commit message, followed by the description of the commit
+# message's source. The hook's purpose is to edit the commit
+# message file. If the hook fails with a non-zero status,
+# the commit is aborted.
+#
+# To enable this hook, rename this file to "prepare-commit-msg".
+
+# This hook includes three examples. The first one removes the
+# "# Please enter the commit message..." help message.
+#
+# The second includes the output of "git diff --name-status -r"
+# into the message, just before the "git status" output. It is
+# commented because it doesn't cope with --amend or with squashed
+# commits.
+#
+# The third example adds a Signed-off-by line to the message, that can
+# still be edited. This is rarely a good idea.
+
+COMMIT_MSG_FILE=$1
+COMMIT_SOURCE=$2
+SHA1=$3
+
+/usr/bin/perl -i.bak -ne 'print unless(m/^. Please enter the commit message/..m/^#$/)' "$COMMIT_MSG_FILE"
+
+# case "$COMMIT_SOURCE,$SHA1" in
+# ,|template,)
+# /usr/bin/perl -i.bak -pe '
+# print "\n" . `git diff --cached --name-status -r`
+# if /^#/ && $first++ == 0' "$COMMIT_MSG_FILE" ;;
+# *) ;;
+# esac
+
+# SOB=$(git var GIT_COMMITTER_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
+# git interpret-trailers --in-place --trailer "$SOB" "$COMMIT_MSG_FILE"
+# if test -z "$COMMIT_SOURCE"
+# then
+# /usr/bin/perl -i.bak -pe 'print "\n" if !$first_line++' "$COMMIT_MSG_FILE"
+# fi
--- /dev/null
+#!/usr/bin/env bash
+exitcodes=""
+hookname=$(basename $0)
+GIT_DIR=${GIT_DIR:-$(dirname $0)}
+
+for hook in ${GIT_DIR}/hooks/${hookname}.d/*; do
+test -x "${hook}" && test -f "${hook}" || continue
+"${hook}" $1 $2 $3
+exitcodes="${exitcodes} $?"
+done
+
+for i in ${exitcodes}; do
+[ ${i} -eq 0 ] || exit ${i}
+done
--- /dev/null
+#!/usr/bin/env bash
+"$GITEA_ROOT/gitea" hook --config="$GITEA_ROOT/$GITEA_CONF" update $1 $2 $3
--- /dev/null
+#!/bin/sh
+#
+# An example hook script to block unannotated tags from entering.
+# Called by "git receive-pack" with arguments: refname sha1-old sha1-new
+#
+# To enable this hook, rename this file to "update".
+#
+# Config
+# ------
+# hooks.allowunannotated
+# This boolean sets whether unannotated tags will be allowed into the
+# repository. By default they won't be.
+# hooks.allowdeletetag
+# This boolean sets whether deleting tags will be allowed in the
+# repository. By default they won't be.
+# hooks.allowmodifytag
+# This boolean sets whether a tag may be modified after creation. By default
+# it won't be.
+# hooks.allowdeletebranch
+# This boolean sets whether deleting branches will be allowed in the
+# repository. By default they won't be.
+# hooks.denycreatebranch
+# This boolean sets whether remotely creating branches will be denied
+# in the repository. By default this is allowed.
+#
+
+# --- Command line
+refname="$1"
+oldrev="$2"
+newrev="$3"
+
+# --- Safety check
+if [ -z "$GIT_DIR" ]; then
+ echo "Don't run this script from the command line." >&2
+ echo " (if you want, you could supply GIT_DIR then run" >&2
+ echo " $0 <ref> <oldrev> <newrev>)" >&2
+ exit 1
+fi
+
+if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then
+ echo "usage: $0 <ref> <oldrev> <newrev>" >&2
+ exit 1
+fi
+
+# --- Config
+allowunannotated=$(git config --bool hooks.allowunannotated)
+allowdeletebranch=$(git config --bool hooks.allowdeletebranch)
+denycreatebranch=$(git config --bool hooks.denycreatebranch)
+allowdeletetag=$(git config --bool hooks.allowdeletetag)
+allowmodifytag=$(git config --bool hooks.allowmodifytag)
+
+# check for no description
+projectdesc=$(sed -e '1q' "$GIT_DIR/description")
+case "$projectdesc" in
+"Unnamed repository"* | "")
+ echo "*** Project description file hasn't been set" >&2
+ exit 1
+ ;;
+esac
+
+# --- Check types
+# if $newrev is 0000...0000, it's a commit to delete a ref.
+zero="0000000000000000000000000000000000000000"
+if [ "$newrev" = "$zero" ]; then
+ newrev_type=delete
+else
+ newrev_type=$(git cat-file -t $newrev)
+fi
+
+case "$refname","$newrev_type" in
+ refs/tags/*,commit)
+ # un-annotated tag
+ short_refname=${refname##refs/tags/}
+ if [ "$allowunannotated" != "true" ]; then
+ echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2
+ echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate." >&2
+ exit 1
+ fi
+ ;;
+ refs/tags/*,delete)
+ # delete tag
+ if [ "$allowdeletetag" != "true" ]; then
+ echo "*** Deleting a tag is not allowed in this repository" >&2
+ exit 1
+ fi
+ ;;
+ refs/tags/*,tag)
+ # annotated tag
+ if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1
+ then
+ echo "*** Tag '$refname' already exists." >&2
+ echo "*** Modifying a tag is not allowed in this repository." >&2
+ exit 1
+ fi
+ ;;
+ refs/heads/*,commit)
+ # branch
+ if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then
+ echo "*** Creating a branch is not allowed in this repository" >&2
+ exit 1
+ fi
+ ;;
+ refs/heads/*,delete)
+ # delete branch
+ if [ "$allowdeletebranch" != "true" ]; then
+ echo "*** Deleting a branch is not allowed in this repository" >&2
+ exit 1
+ fi
+ ;;
+ refs/remotes/*,commit)
+ # tracking branch
+ ;;
+ refs/remotes/*,delete)
+ # delete tracking branch
+ if [ "$allowdeletebranch" != "true" ]; then
+ echo "*** Deleting a tracking branch is not allowed in this repository" >&2
+ exit 1
+ fi
+ ;;
+ *)
+ # Anything else (is there anything else?)
+ echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2
+ exit 1
+ ;;
+esac
+
+# --- Finished
+exit 0
--- /dev/null
+# git ls-files --others --exclude-from=.git/info/exclude
+# Lines that start with '#' are comments.
+# For a project mostly in C, the following would be a good set of
+# exclude patterns (uncomment them if you want to use them):
+# *.[oa]
+# *~
--- /dev/null
+aacbdfe9e1c4b47f60abe81849045fa4e96f1d75 refs/heads/master
--- /dev/null
+aacbdfe9e1c4b47f60abe81849045fa4e96f1d75
num_pulls: 1
is_mirror: false
status: 0
+
+-
+ id: 49
+ owner_id: 27
+ owner_name: user27
+ lower_name: repo49
+ name: repo49
+ is_private: false
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ is_mirror: false
+ status: 0
is_admin: false
avatar: avatar27
avatar_email: user27@example.com
- num_repos: 2
+ num_repos: 3
-
id: 28
count: 14},
{name: "AllPublic/PublicRepositoriesOfUserIncludingCollaborative",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, AllPublic: true, Template: util.OptionalBoolFalse},
- count: 25},
+ count: 26},
{name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, AllPublic: true, AllLimited: true, Template: util.OptionalBoolFalse},
- count: 30},
+ count: 31},
{name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborativeByName",
opts: &SearchRepoOptions{Keyword: "test", ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, AllPublic: true},
count: 15},
count: 13},
{name: "AllPublic/PublicRepositoriesOfOrganization",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, AllPublic: true, Collaborate: util.OptionalBoolFalse, Template: util.OptionalBoolFalse},
- count: 25},
+ count: 26},
{name: "AllTemplates",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, Template: util.OptionalBoolTrue},
count: 2},
import (
"fmt"
"net/url"
- "os"
- "path"
"strings"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
- "code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/migrations"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/task"
"code.gitea.io/gitea/modules/util"
+ archiver_service "code.gitea.io/gitea/services/archiver"
repo_service "code.gitea.io/gitea/services/repository"
-
- "github.com/unknwon/com"
)
const (
ctx.Error(404)
}
-// Download download an archive of a repository
-func Download(ctx *context.Context) {
- var (
- uri = ctx.Params("*")
- refName string
- ext string
- archivePath string
- archiveType git.ArchiveType
- )
+// DownloadStatus checks the status of a download, because archiving may take a
+// while. It does so by creating an archive request from the archiver service,
+// then just examining the completion status.
+func DownloadStatus(ctx *context.Context) {
+ uri := ctx.Params("*")
+ aReq := archiver_service.DeriveRequestFrom(ctx, uri)
- switch {
- case strings.HasSuffix(uri, ".zip"):
- ext = ".zip"
- archivePath = path.Join(ctx.Repo.GitRepo.Path, "archives/zip")
- archiveType = git.ZIP
- case strings.HasSuffix(uri, ".tar.gz"):
- ext = ".tar.gz"
- archivePath = path.Join(ctx.Repo.GitRepo.Path, "archives/targz")
- archiveType = git.TARGZ
- default:
- log.Trace("Unknown format: %s", uri)
+ if aReq == nil {
ctx.Error(404)
return
}
- refName = strings.TrimSuffix(uri, ext)
- if !com.IsDir(archivePath) {
- if err := os.MkdirAll(archivePath, os.ModePerm); err != nil {
- ctx.ServerError("Download -> os.MkdirAll(archivePath)", err)
- return
- }
- }
+ complete := aReq.IsComplete()
+ ctx.JSON(200, map[string]interface{}{
+ "archiving": !complete,
+ "complete": complete,
+ })
+}
- // Get corresponding commit.
- var (
- commit *git.Commit
- err error
- )
- gitRepo := ctx.Repo.GitRepo
- if gitRepo.IsBranchExist(refName) {
- commit, err = gitRepo.GetBranchCommit(refName)
- if err != nil {
- ctx.ServerError("GetBranchCommit", err)
- return
- }
- } else if gitRepo.IsTagExist(refName) {
- commit, err = gitRepo.GetTagCommit(refName)
- if err != nil {
- ctx.ServerError("GetTagCommit", err)
- return
- }
- } else if len(refName) >= 4 && len(refName) <= 40 {
- commit, err = gitRepo.GetCommit(refName)
- if err != nil {
- ctx.NotFound("GetCommit", nil)
- return
- }
+// Download an archive of a repository
+func Download(ctx *context.Context) {
+ uri := ctx.Params("*")
+ aReq := archiver_service.DeriveRequestFrom(ctx, uri)
+
+ if aReq.IsComplete() {
+ ctx.ServeFile(aReq.GetArchivePath(), ctx.Repo.Repository.Name+"-"+aReq.GetArchiveName())
} else {
- ctx.NotFound("Download", nil)
+ ctx.Error(404)
+ }
+}
+
+// InitiateDownload will enqueue an archival request, as needed. It may submit
+// a request that's already in-progress, but the archiver service will just
+// kind of drop it on the floor if this is the case.
+func InitiateDownload(ctx *context.Context) {
+ uri := ctx.Params("*")
+ aReq := archiver_service.DeriveRequestFrom(ctx, uri)
+
+ if aReq == nil {
return
}
- archivePath = path.Join(archivePath, base.ShortSha(commit.ID.String())+ext)
- if !com.IsFile(archivePath) {
- if err := commit.CreateArchive(archivePath, git.CreateArchiveOpts{
- Format: archiveType,
- Prefix: setting.Repository.PrefixArchiveFiles,
- }); err != nil {
- ctx.ServerError("Download -> CreateArchive "+archivePath, err)
- return
- }
+ complete := aReq.IsComplete()
+ if !complete {
+ archiver_service.ArchiveRepository(aReq)
}
- ctx.ServeFile(archivePath, ctx.Repo.Repository.Name+"-"+refName+ext)
+ ctx.JSON(200, map[string]interface{}{
+ "archiving": !complete,
+ "complete": complete,
+ })
}
// Status returns repository's status
m.Get("/:period", repo.ActivityAuthors)
}, context.RepoRef(), repo.MustBeNotEmpty, context.RequireRepoReaderOr(models.UnitTypeCode))
- m.Get("/archive/*", repo.MustBeNotEmpty, reqRepoCodeReader, repo.Download)
+ m.Group("/archive", func() {
+ m.Post("/status/*", repo.DownloadStatus)
+ m.Get("/*", repo.Download)
+ m.Post("/*", repo.InitiateDownload)
+ }, repo.MustBeNotEmpty, reqRepoCodeReader)
m.Get("/status", reqRepoCodeReader, repo.Status)
--- /dev/null
+// Copyright 2020 The Gitea Authors.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package archiver
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+ "sync"
+
+ "code.gitea.io/gitea/modules/base"
+ "code.gitea.io/gitea/modules/context"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/unknwon/com"
+)
+
+// ArchiveRequest defines the parameters of an archive request, which notably
+// includes the specific repository being archived as well as the commit, the
+// name by which it was requested, and the kind of archive being requested.
+// This is entirely opaque to external entities, though, and mostly used as a
+// handle elsewhere.
+type ArchiveRequest struct {
+ uri string
+ repo *git.Repository
+ refName string
+ ext string
+ archivePath string
+ archiveType git.ArchiveType
+ archiveComplete bool
+ commit *git.Commit
+}
+
+var archiveInProgress []*ArchiveRequest
+var archiveMutex sync.Mutex
+
+// GetArchivePath returns the path from which we can serve this archive.
+func (aReq *ArchiveRequest) GetArchivePath() string {
+ return aReq.archivePath
+}
+
+// GetArchiveName returns the name of the caller, based on the ref used by the
+// caller to create this request.
+func (aReq *ArchiveRequest) GetArchiveName() string {
+ return aReq.refName + aReq.ext
+}
+
+// IsComplete returns the completion status of this request.
+func (aReq *ArchiveRequest) IsComplete() bool {
+ return aReq.archiveComplete
+}
+
+// The caller must hold the archiveMutex across calls to getArchiveRequest.
+func getArchiveRequest(repo *git.Repository, commit *git.Commit, archiveType git.ArchiveType) *ArchiveRequest {
+ for _, r := range archiveInProgress {
+ // Need to be referring to the same repository.
+ if r.repo.Path == repo.Path && r.commit.ID == commit.ID && r.archiveType == archiveType {
+ return r
+ }
+ }
+ return nil
+}
+
+// DeriveRequestFrom creates an archival request, based on the URI. The
+// resulting ArchiveRequest is suitable for being passed to ArchiveRepository()
+// if it's determined that the request still needs to be satisfied.
+func DeriveRequestFrom(ctx *context.Context, uri string) *ArchiveRequest {
+ if ctx.Repo == nil || ctx.Repo.GitRepo == nil {
+ log.Trace("Repo not initialized")
+ ctx.Error(404)
+ return nil
+ }
+ r := &ArchiveRequest{
+ uri: uri,
+ repo: ctx.Repo.GitRepo,
+ }
+
+ switch {
+ case strings.HasSuffix(uri, ".zip"):
+ r.ext = ".zip"
+ r.archivePath = path.Join(r.repo.Path, "archives/zip")
+ r.archiveType = git.ZIP
+ case strings.HasSuffix(uri, ".tar.gz"):
+ r.ext = ".tar.gz"
+ r.archivePath = path.Join(r.repo.Path, "archives/targz")
+ r.archiveType = git.TARGZ
+ default:
+ log.Trace("Unknown format: %s", uri)
+ ctx.Error(404)
+ return nil
+ }
+
+ r.refName = strings.TrimSuffix(r.uri, r.ext)
+ if !com.IsDir(r.archivePath) {
+ if err := os.MkdirAll(r.archivePath, os.ModePerm); err != nil {
+ ctx.ServerError("Download -> os.MkdirAll(archivePath)", err)
+ return nil
+ }
+ }
+
+ // Get corresponding commit.
+ var (
+ err error
+ )
+ if r.repo.IsBranchExist(r.refName) {
+ r.commit, err = r.repo.GetBranchCommit(r.refName)
+ if err != nil {
+ ctx.ServerError("GetBranchCommit", err)
+ return nil
+ }
+ } else if r.repo.IsTagExist(r.refName) {
+ r.commit, err = r.repo.GetTagCommit(r.refName)
+ if err != nil {
+ ctx.ServerError("GetTagCommit", err)
+ return nil
+ }
+ } else if len(r.refName) >= 4 && len(r.refName) <= 40 {
+ r.commit, err = r.repo.GetCommit(r.refName)
+ if err != nil {
+ ctx.NotFound("GetCommit", nil)
+ return nil
+ }
+ } else {
+ ctx.NotFound("DeriveRequestFrom", nil)
+ return nil
+ }
+
+ archiveMutex.Lock()
+ defer archiveMutex.Unlock()
+ if rExisting := getArchiveRequest(r.repo, r.commit, r.archiveType); rExisting != nil {
+ return rExisting
+ }
+
+ r.archivePath = path.Join(r.archivePath, base.ShortSha(r.commit.ID.String())+r.ext)
+ r.archiveComplete = com.IsFile(r.archivePath)
+ return r
+}
+
+func doArchive(r *ArchiveRequest) {
+ var (
+ err error
+ tmpArchive *os.File
+ destArchive *os.File
+ )
+
+ // It could have happened that we enqueued two archival requests, due to
+ // race conditions and difficulties in locking. Do one last check that
+ // the archive we're referring to doesn't already exist. If it does exist,
+ // then just mark the request as complete and move on.
+ if com.IsFile(r.archivePath) {
+ r.archiveComplete = true
+ return
+ }
+
+ // Create a temporary file to use while the archive is being built. We
+ // will then copy it into place (r.archivePath) once it's fully
+ // constructed.
+ tmpArchive, err = ioutil.TempFile("", "archive")
+ if err != nil {
+ log.Error("Unable to create a temporary archive file! Error: %v", err)
+ return
+ }
+ defer func() {
+ tmpArchive.Close()
+ os.Remove(tmpArchive.Name())
+ }()
+
+ if err = r.commit.CreateArchive(tmpArchive.Name(), git.CreateArchiveOpts{
+ Format: r.archiveType,
+ Prefix: setting.Repository.PrefixArchiveFiles,
+ }); err != nil {
+		log.Error("Download -> CreateArchive %s: %v", tmpArchive.Name(), err)
+ return
+ }
+
+ // Now we copy it into place
+ if destArchive, err = os.Create(r.archivePath); err != nil {
+ log.Error("Unable to open archive " + r.archivePath)
+ return
+ }
+ _, err = io.Copy(destArchive, tmpArchive)
+ destArchive.Close()
+	if err != nil {
+		log.Error("Unable to write archive %s: %v", r.archivePath, err)
+		return
+	}
+
+ r.archiveComplete = true
+}
+
+// ArchiveRepository satisfies the ArchiveRequest being passed in. Processing
+// will occur in a separate goroutine, as this phase may take a while to
+// complete. If the archive already exists, ArchiveRepository will not do
+// anything.
+func ArchiveRepository(request *ArchiveRequest) {
+ if request.archiveComplete {
+ return
+ }
+ go func() {
+ // We'll take some liberties here, in that the caller may not assume that the
+ // specific request they submitted is the one getting enqueued. We'll just drop
+ // it if it turns out we've already enqueued an identical request, as they'll keep
+ // checking back for the status anyways.
+ archiveMutex.Lock()
+ if rExisting := getArchiveRequest(request.repo, request.commit, request.archiveType); rExisting != nil {
+ archiveMutex.Unlock()
+ return
+ }
+ archiveInProgress = append(archiveInProgress, request)
+ archiveMutex.Unlock()
+
+ // Drop the mutex while we process the request. This may take a long
+		// time, and it's not necessary now that we've added the request to
+ // archiveInProgress.
+ doArchive(request)
+
+ // Purge this request from the list. To do so, we'll just take the
+ // index at which we ended up at and swap the final element into that
+ // position, then chop off the now-redundant final element. The slice
+		// may have changed in between these two segments and we may have moved,
+ // so we search for it here. We could perhaps avoid this search
+ // entirely if len(archiveInProgress) == 1, but we should verify
+ // correctness.
+ archiveMutex.Lock()
+ defer archiveMutex.Unlock()
+ idx := -1
+ for _idx, req := range archiveInProgress {
+ if req == request {
+ idx = _idx
+ break
+ }
+ }
+ if idx == -1 {
+ log.Error("ArchiveRepository: Failed to find request for removal.")
+ return
+ }
+ lastidx := len(archiveInProgress) - 1
+ if idx != lastidx {
+ archiveInProgress[idx] = archiveInProgress[lastidx]
+ }
+ archiveInProgress = archiveInProgress[:lastidx]
+ }()
+}
--- /dev/null
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package archiver
+
+import (
+ "path/filepath"
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/modules/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/unknwon/com"
+)
+
+func TestMain(m *testing.M) {
+ models.MainTest(m, filepath.Join("..", ".."))
+}
+
+func TestArchive_Basic(t *testing.T) {
+ assert.NoError(t, models.PrepareTestDatabase())
+
+ ctx := test.MockContext(t, "user27/repo49")
+ firstCommit, secondCommit := "51f84af23134", "aacbdfe9e1c4"
+
+ bogusReq := DeriveRequestFrom(ctx, firstCommit+".zip")
+ assert.Nil(t, bogusReq)
+
+ test.LoadRepo(t, ctx, 49)
+ bogusReq = DeriveRequestFrom(ctx, firstCommit+".zip")
+ assert.Nil(t, bogusReq)
+
+ test.LoadGitRepo(t, ctx)
+ defer ctx.Repo.GitRepo.Close()
+
+ // Check a series of bogus requests.
+ // Step 1, valid commit with a bad extension.
+ bogusReq = DeriveRequestFrom(ctx, firstCommit+".dilbert")
+ assert.Nil(t, bogusReq)
+
+ // Step 2, missing commit.
+ bogusReq = DeriveRequestFrom(ctx, "dbffff.zip")
+ assert.Nil(t, bogusReq)
+
+ // Step 3, doesn't look like branch/tag/commit.
+ bogusReq = DeriveRequestFrom(ctx, "db.zip")
+ assert.Nil(t, bogusReq)
+
+ // Now two valid requests, firstCommit with valid extensions.
+ zipReq := DeriveRequestFrom(ctx, firstCommit+".zip")
+ assert.NotNil(t, zipReq)
+
+ tgzReq := DeriveRequestFrom(ctx, firstCommit+".tar.gz")
+ assert.NotNil(t, tgzReq)
+
+ secondReq := DeriveRequestFrom(ctx, secondCommit+".zip")
+ assert.NotNil(t, secondReq)
+
+ ArchiveRepository(zipReq)
+ ArchiveRepository(tgzReq)
+ ArchiveRepository(secondReq)
+
+	// Wait for those requests to complete; poll at a modest interval rather
+	// than busy-spinning, and time out after 8 seconds.
+	timeout := time.Now().Add(8 * time.Second)
+	for {
+		if zipReq.IsComplete() && tgzReq.IsComplete() && secondReq.IsComplete() {
+			break
+		} else if time.Now().After(timeout) {
+			break
+		}
+		time.Sleep(200 * time.Millisecond)
+	}
+
+ assert.True(t, zipReq.IsComplete())
+ assert.True(t, tgzReq.IsComplete())
+ assert.True(t, secondReq.IsComplete())
+ assert.True(t, com.IsExist(zipReq.GetArchivePath()))
+ assert.True(t, com.IsExist(tgzReq.GetArchivePath()))
+ assert.True(t, com.IsExist(secondReq.GetArchivePath()))
+
+ // The queue should also be drained, if all requests have completed.
+ assert.Equal(t, len(archiveInProgress), 0)
+
+ zipReq2 := DeriveRequestFrom(ctx, firstCommit+".zip")
+ // After completion, zipReq should have dropped out of the queue. Make sure
+ // we didn't get it handed back to us, but they should otherwise be
+ // equivalent requests.
+ assert.Equal(t, zipReq, zipReq2)
+ assert.False(t, zipReq == zipReq2)
+
+ // Make sure we can submit this follow-up request with no side-effects, to
+ // the extent that we can.
+ ArchiveRepository(zipReq2)
+ assert.Equal(t, zipReq, zipReq2)
+ assert.Equal(t, len(archiveInProgress), 0)
+
+ // Same commit, different compression formats should have different names.
+ // Ideally, the extension would match what we originally requested.
+ assert.NotEqual(t, zipReq.GetArchiveName(), tgzReq.GetArchiveName())
+ assert.NotEqual(t, zipReq.GetArchiveName(), secondReq.GetArchiveName())
+}
<div class="ui basic jump dropdown icon button poping up" data-content="{{$.i18n.Tr "repo.branch.download" ($.DefaultBranch)}}" data-variation="tiny inverted" data-position="top right">
<i class="download icon"></i>
<div class="menu">
- <a class="item" href="{{$.RepoLink}}/archive/{{EscapePound $.DefaultBranch}}.zip">{{svg "octicon-file-zip" 16}} ZIP</a>
- <a class="item" href="{{$.RepoLink}}/archive/{{EscapePound $.DefaultBranch}}.tar.gz">{{svg "octicon-file-zip" 16}} TAR.GZ</a>
+ <a class="item archive-link" data-url="{{$.RepoLink}}/archive/{{EscapePound $.DefaultBranch}}.zip" data-status="{{$.RepoLink}}/archive/status/{{EscapePound $.DefaultBranch}}.zip">{{svg "octicon-file-zip" 16}} ZIP</a>
+ <a class="item archive-link" data-url="{{$.RepoLink}}/archive/{{EscapePound $.DefaultBranch}}.tar.gz" data-status="{{$.RepoLink}}/archive/status/{{EscapePound $.DefaultBranch}}.tar.gz">{{svg "octicon-file-zip" 16}} TAR.GZ</a>
</div>
</div>
</td>
<div class="ui basic jump dropdown icon button poping up" data-content="{{$.i18n.Tr "repo.branch.download" (.Name)}}" data-variation="tiny inverted" data-position="top right">
<i class="download icon"></i>
<div class="menu">
- <a class="item" href="{{$.RepoLink}}/archive/{{EscapePound .Name}}.zip">{{svg "octicon-file-zip" 16}} ZIP</a>
- <a class="item" href="{{$.RepoLink}}/archive/{{EscapePound .Name}}.tar.gz">{{svg "octicon-file-zip" 16}} TAR.GZ</a>
+ <a class="item archive-link" data-url="{{$.RepoLink}}/archive/{{EscapePound .Name}}.zip" data-status="{{$.RepoLink}}/archive/status/{{EscapePound .Name}}.zip">{{svg "octicon-file-zip" 16}} ZIP</a>
+ <a class="item archive-link" data-url="{{$.RepoLink}}/archive/{{EscapePound .Name}}.tar.gz" data-status="{{$.RepoLink}}/archive/status/{{EscapePound .Name}}.tar.gz">{{svg "octicon-file-zip" 16}} TAR.GZ</a>
</div>
</div>
{{end}}
<div class="ui basic jump dropdown icon button poping up" data-content="{{.i18n.Tr "repo.download_archive"}}" data-variation="tiny inverted" data-position="top right">
<i class="download icon"></i>
<div class="menu">
- <a class="item" href="{{$.RepoLink}}/archive/{{EscapePound $.BranchName}}.zip">{{svg "octicon-file-zip" 16}} ZIP</a>
- <a class="item" href="{{$.RepoLink}}/archive/{{EscapePound $.BranchName}}.tar.gz">{{svg "octicon-file-zip" 16}} TAR.GZ</a>
+							<a class="item archive-link" data-url="{{$.RepoLink}}/archive/{{EscapePound $.BranchName}}.zip" data-status="{{$.RepoLink}}/archive/status/{{EscapePound $.BranchName}}.zip">{{svg "octicon-file-zip" 16}} ZIP</a>
+							<a class="item archive-link" data-url="{{$.RepoLink}}/archive/{{EscapePound $.BranchName}}.tar.gz" data-status="{{$.RepoLink}}/archive/status/{{EscapePound $.BranchName}}.tar.gz">{{svg "octicon-file-zip" 16}} TAR.GZ</a>
</div>
</div>
</div>
<div class="download">
{{if $.Permission.CanRead $.UnitTypeCode}}
<a href="{{$.RepoLink}}/src/commit/{{.Sha1}}" rel="nofollow"><i class="code icon"></i> {{ShortSha .Sha1}}</a>
- <a href="{{$.RepoLink}}/archive/{{.TagName | EscapePound}}.zip" rel="nofollow">{{svg "octicon-file-zip" 16}} ZIP</a>
- <a href="{{$.RepoLink}}/archive/{{.TagName | EscapePound}}.tar.gz">{{svg "octicon-file-zip" 16}} TAR.GZ</a>
+ <a class="archive-link" data-url="{{$.RepoLink}}/archive/{{.TagName | EscapePound}}.zip" data-status="{{$.RepoLink}}/archive/status/{{.TagName | EscapePound}}.zip" rel="nofollow">{{svg "octicon-file-zip" 16}} ZIP</a>
+ <a class="archive-link" data-url="{{$.RepoLink}}/archive/{{.TagName | EscapePound}}.tar.gz" data-status="{{$.RepoLink}}/archive/status/{{.TagName | EscapePound}}.tar.gz">{{svg "octicon-file-zip" 16}} TAR.GZ</a>
{{end}}
</div>
{{else}}
<ul class="list">
{{if $.Permission.CanRead $.UnitTypeCode}}
<li>
- <a href="{{$.RepoLink}}/archive/{{.TagName | EscapePound}}.zip" rel="nofollow"><strong>{{svg "octicon-file-zip" 16}} {{$.i18n.Tr "repo.release.source_code"}} (ZIP)</strong></a>
+ <a class="archive-link" data-url="{{$.RepoLink}}/archive/{{.TagName | EscapePound}}.zip" rel="nofollow" data-status="{{$.RepoLink}}/archive/status/{{.TagName | EscapePound}}.zip"><strong>{{svg "octicon-file-zip" 16}} {{$.i18n.Tr "repo.release.source_code"}} (ZIP)</strong></a>
</li>
<li>
- <a href="{{$.RepoLink}}/archive/{{.TagName | EscapePound}}.tar.gz"><strong>{{svg "octicon-file-zip" 16}} {{$.i18n.Tr "repo.release.source_code"}} (TAR.GZ)</strong></a>
+ <a class="archive-link" data-url="{{$.RepoLink}}/archive/{{.TagName | EscapePound}}.tar.gz" data-status="{{$.RepoLink}}/archive/status/{{.TagName | EscapePound}}.tar.gz"><strong>{{svg "octicon-file-zip" 16}} {{$.i18n.Tr "repo.release.source_code"}} (TAR.GZ)</strong></a>
</li>
{{end}}
{{if .Attachments}}
});
}
+function initArchiveStatusChecker($target, url, statusUrl) {
+ $.ajax({
+ url: statusUrl,
+ type: 'POST',
+ data: {
+ _csrf: csrf,
+ },
+ complete(xhr) {
+ if (xhr.status === 200) {
+ if (!xhr.responseJSON) {
+ $target.closest('.dropdown').children('i').removeClass('loading');
+ return;
+ }
+
+ if (xhr.responseJSON.complete) {
+ // Null out the status URL. We don't need to query status again.
+ // getArchive() will clear the loading indicator here, as needed.
+ getArchive($target, url, null);
+ return;
+ }
+
+ setTimeout(() => {
+ initArchiveStatusChecker($target, url, statusUrl);
+ }, 2000);
+ } else {
+ $target.closest('.dropdown').children('i').removeClass('loading');
+ }
+ }
+ });
+}
+
+function getArchive($target, url, statusUrl) {
+ $.ajax({
+ url,
+ type: 'POST',
+ data: {
+ _csrf: csrf,
+ },
+ complete(xhr) {
+ if (xhr.status === 200) {
+ if (!xhr.responseJSON) {
+ // XXX Shouldn't happen?
+ $target.closest('.dropdown').children('i').removeClass('loading');
+ return;
+ }
+
+ if (!xhr.responseJSON.complete && statusUrl !== null) {
+ $target.closest('.dropdown').children('i').addClass('loading');
+ setTimeout(() => {
+ initArchiveStatusChecker($target, url, statusUrl);
+ }, 2000);
+ } else {
+ // We don't need to continue checking.
+ $target.closest('.dropdown').children('i').removeClass('loading');
+ window.location.href = url;
+ }
+ }
+ }
+ });
+}
+
+function initArchiveLinks() {
+ if ($('.archive-link').length === 0) {
+ return;
+ }
+
+ $('.archive-link').on('click', function (event) {
+ const url = $(this).data('url');
+ if (typeof url === 'undefined') {
+ return;
+ }
+ const statusUrl = $(this).data('status');
+ if (typeof statusUrl === 'undefined') {
+ return;
+ }
+
+ getArchive($(event.target), url, statusUrl);
+ });
+}
+
async function initRepository() {
if ($('.repository').length === 0) {
return;
initCommentForm();
initInstall();
+ initArchiveLinks();
initRepository();
initMigration();
initWikiForm();