Przeglądaj źródła

[Vendor] added gig and gin-dex

cgars 7 lat temu
rodzic
commit
fd7dd33295

+ 29 - 0
vendor/github.com/G-Node/gig/LICENSE

@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2017, German Neuroinformatics Node
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 2 - 0
vendor/github.com/G-Node/gig/README.md

@@ -0,0 +1,2 @@
+# gig
+gig  is (some) Git in Go

+ 368 - 0
vendor/github.com/G-Node/gig/delta.go

@@ -0,0 +1,368 @@
+package gig
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+)
+
+//Delta represents a git delta representation. Either BaseRef
+//or BaseOff are valid fields, depending on its Type():
+//an ObjRefDelta names its base via BaseRef (a SHA1), an
+//ObjOFSDelta locates it via BaseOff (an offset into the pack).
+type Delta struct {
+	gitObject
+
+	BaseRef    SHA1  // id of the base object (ObjRefDelta only)
+	BaseOff    int64 // pack offset of the base (ObjOFSDelta only)
+	SizeSource int64 // size of the base object, per delta header
+	SizeTarget int64 // size of the patch result, per delta header
+
+	pf  *PackFile // pack file this delta was read from
+	op  DeltaOp   // operation read by the last NextOp call
+	err error     // sticky error state; see Err()
+}
+
+//readByte reads a single byte from r, retrying short (0, nil)
+//reads. The read error is returned only if no byte was obtained.
+func readByte(r io.Reader) (byte, error) {
+	var err error
+	n := 0
+	var b [1]byte
+
+	for n != 1 && err == nil {
+		n, err = r.Read(b[:])
+	}
+
+	if n != 1 {
+		return 0, err
+	}
+
+	return b[0], nil
+}
+
+//parseDelta reads the delta header of obj and returns the Delta.
+//The data source of obj must be a *packReader, i.e. the object
+//must have come from a pack file. After the call the source is
+//positioned (zlib-inflated) at the delta instruction stream.
+func parseDelta(obj gitObject) (*Delta, error) {
+	delta := Delta{gitObject: obj}
+
+	//all delta objects come from a PackFile and
+	//therefore delta.source is always a *packReader
+	source := delta.source.(*packReader)
+	delta.pf = source.fd
+
+	var err error
+	if obj.otype == ObjRefDelta {
+		//ref-delta: the base is named by a full 20-byte SHA1
+		_, err = source.Read(delta.BaseRef[:])
+		//TODO: check n?
+
+		if err != nil {
+			return nil, err
+		}
+
+	} else {
+		//ofs-delta: the base lies `off` bytes before the start
+		//of this object in the same pack file
+		off, err := readVarint(source)
+		if err != nil {
+			return nil, err
+		}
+
+		delta.BaseOff = source.start - off
+	}
+
+	//the remainder of the object payload is zlib compressed
+	err = delta.wrapSourceWithDeflate()
+	if err != nil {
+		return nil, err
+	}
+
+	delta.SizeSource, err = readVarSize(delta.source, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	delta.SizeTarget, err = readVarSize(delta.source, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	return &delta, nil
+}
+
+//readVarSize decodes the little-endian variable-length size
+//encoding used in delta headers: 7 payload bits per byte, the
+//MSB of each byte is the continuation flag. offset is the bit
+//position at which the first byte's payload lands (non-zero when
+//leading bits were already consumed, see readRawObject).
+func readVarSize(r io.Reader, offset uint) (size int64, err error) {
+	size = int64(0)
+	b := byte(0x80)
+
+	// [0111 1111 ... 1111] (int64) is biggest decode-able
+	// value we get by shifting byte b = 0x7F [0111 1111]
+	// left 8*7 = 56 times; the next attempt must overflow.
+	for i := offset; b&0x80 != 0 && i < 57; i += 7 {
+		b, err = readByte(r)
+		if err != nil {
+			return 0, fmt.Errorf("git: io error: %v", err)
+		}
+
+		size |= int64(b&0x7F) << i
+	}
+
+	// means i > 56, would overflow (see above).
+	if b&0x80 != 0 {
+		return 0, fmt.Errorf("int64 overflow")
+	}
+
+	return size, nil
+}
+
+//decodeInt assembles an int64 from up to l bytes of the stream.
+//Bit i of b says whether byte i is actually present; absent
+//bytes count as zero. This is the copy-op offset/size encoding.
+func decodeInt(r io.Reader, b byte, l uint) (size int64, err error) {
+
+	for i := uint(0); i < l; i++ {
+
+		if b&(1<<i) != 0 {
+			var d byte
+			d, err = readByte(r)
+			if err != nil {
+				return
+			}
+
+			size |= int64(d) << (i * 8)
+		}
+	}
+
+	return
+}
+
+//readVarint decodes the big-endian base-128 varint used for
+//ofs-delta base offsets, including git's "+1 per continuation
+//byte" twist (see the pack-format documentation).
+func readVarint(r io.Reader) (int64, error) {
+	b, err := readByte(r)
+	if err != nil {
+		return 0, fmt.Errorf("git: io error: %v", err)
+	}
+
+	size := int64(b & 0x7F)
+
+	for b&0x80 != 0 {
+		b, err = readByte(r)
+		if err != nil {
+			return 0, fmt.Errorf("git: io error: %v", err)
+		}
+
+		size++
+
+		// [0000 0001 ... 0000] (int64)
+		//          ^ bit 0x38 (56)
+		// shifting by 7 will shift the bit into the
+		// sign bit of int64, i.e. we have overflow.
+		if size > (1<<0x38)-1 {
+			return 0, fmt.Errorf("int64 overflow")
+		}
+
+		size = (size << 7) + int64(b&0x7F)
+	}
+
+	return size, nil
+}
+
+//DeltaOpCode is the operation code for delta compression
+//instruction set.
+type DeltaOpCode byte
+
+//DeltaOpCode values.
+const (
+	DeltaOpInsert = 1 //insert data from the delta data into dest
+	DeltaOpCopy   = 2 //copy data from the original source into dest
+)
+
+//DeltaOp represents the delta compression operation. Offset is
+//only valid for DeltaOpCopy operations.
+type DeltaOp struct {
+	Op     DeltaOpCode
+	Size   int64
+	Offset int64
+}
+
+//Op returns the current operation, i.e. the one read by the
+//last successful call to NextOp.
+func (d *Delta) Op() DeltaOp {
+	return d.op
+}
+
+//Err retrieves the current error state, if any. A plain io.EOF
+//is the regular end of the instruction stream and is therefore
+//not reported as an error.
+func (d *Delta) Err() error {
+	if err := d.err; err != io.EOF {
+		return err
+	}
+	return nil
+}
+
+//NextOp reads the next DeltaOp from the delta data stream.
+//Returns false when there are no operations left or on error;
+//use Err() to decide between the two cases.
+func (d *Delta) NextOp() bool {
+
+	if d.err != nil {
+		return false
+	}
+
+	b, err := readByte(d.source)
+	if err != nil {
+		//record the error so Err() can report it; previously it
+		//was dropped, making a genuine I/O failure look like a
+		//clean end of stream (io.EOF is filtered out by Err())
+		d.err = err
+		return false
+	}
+
+	if b&0x80 != 0 {
+		//copy operation: the low 7 bits select which offset and
+		//size bytes follow in the stream (see decodeInt)
+		d.op.Op = DeltaOpCopy
+		op := b & 0x7F
+		d.op.Offset, d.err = decodeInt(d.source, op, 4)
+		if d.err != nil {
+			return false
+		}
+
+		d.op.Size, d.err = decodeInt(d.source, op>>4, 3)
+		if d.err != nil {
+			return false
+		}
+
+		if d.op.Size == 0 {
+			//a zero size encodes the default copy size
+			d.op.Size = 0x10000
+		}
+	} else if n := b; n > 0 {
+		//insert operation: the next n bytes are literal data
+		d.op.Op = DeltaOpInsert
+		d.op.Size = int64(n)
+	} else {
+		d.err = fmt.Errorf("git: unknown delta op code")
+		return false
+	}
+
+	return true
+}
+
+//Patch applies the delta data onto r (a seekable view of the
+//base object) and writes the patched result to w.
+//NB: os.SEEK_SET is deprecated in favour of io.SeekStart (same
+//value); it is kept here because it is delta.go's only use of
+//the os package and removing it would orphan the import.
+func (d *Delta) Patch(r io.ReadSeeker, w io.Writer) error {
+
+	for d.NextOp() {
+		op := d.Op()
+		switch op.Op {
+		case DeltaOpCopy:
+			//copy op.Size bytes starting at op.Offset of the base
+			_, err := r.Seek(op.Offset, os.SEEK_SET)
+			if err != nil {
+				return err
+			}
+
+			_, err = io.CopyN(w, r, op.Size)
+			if err != nil {
+				return err
+			}
+		case DeltaOpInsert:
+			//literal data follows inline in the delta stream
+			_, err := io.CopyN(w, d.source, op.Size)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return d.Err()
+}
+
+//SkipOp prepares the delta stream to move to the next operation
+//without actually carrying out the delta operation. Useful for
+//printing the delta stream. Only insert ops consume stream data
+//that needs discarding; copy ops carry no inline payload.
+func (d *Delta) SkipOp() {
+	op := d.Op()
+	if op.Op == DeltaOpInsert {
+		_, d.err = io.CopyN(ioutil.Discard, d.source, op.Size)
+	}
+}
+
+//WriteTo would write the object to disk in the git object
+//representation. It is NOT IMPLEMENTED for the delta object
+//and always returns an error.
+func (d *Delta) WriteTo(w io.Writer) (int64, error) {
+	return 0, fmt.Errorf("WriteTo not implemented for Delta")
+}
+
+//deltaChain is the result of following a chain of deltas down
+//to a standard (non-delta) base object.
+type deltaChain struct {
+	baseObj gitObject // the resolved non-delta base object
+	baseOff int64     // pack offset of the base object
+
+	links []Delta // deltas to apply, outermost first
+}
+
+//Len returns the number of delta links in the chain.
+func (c *deltaChain) Len() int {
+	return len(c.links)
+}
+
+//objectSource can look up a raw git object by its id; used by
+//buildDeltaChain to resolve ref-delta bases.
+type objectSource interface {
+	openRawObject(id SHA1) (gitObject, error)
+}
+
+//buildDeltaChain follows the chain of deltas starting at d until
+//a standard (non-delta) object is reached. The delta links are
+//recorded in traversal order; the terminating base object and
+//its offset are stored on the returned deltaChain.
+func buildDeltaChain(d *Delta, s objectSource) (*deltaChain, error) {
+	var chain deltaChain
+	var err error
+
+	for err == nil {
+
+		chain.links = append(chain.links, *d)
+
+		//resolve the base of the current delta: by id for
+		//ref-deltas, by pack offset for ofs-deltas
+		var obj gitObject
+		if d.otype == ObjRefDelta {
+			obj, err = s.openRawObject(d.BaseRef)
+		} else {
+			obj, err = d.pf.readRawObject(d.BaseOff)
+		}
+
+		if err != nil {
+			break
+		}
+
+		if IsStandardObject(obj.otype) {
+			//found the non-delta base: chain is complete
+			chain.baseObj = obj
+			chain.baseOff = d.BaseOff
+			break
+		} else if !IsDeltaObject(obj.otype) {
+			err = fmt.Errorf("git: unexpected object type in delta chain")
+			break
+		}
+
+		//base is itself a delta; keep walking
+		d, err = parseDelta(obj)
+	}
+
+	if err != nil {
+		//cleanup
+		return nil, err
+	}
+
+	return &chain, nil
+}
+
+//resolve un-deltifies the chain: the base object is read fully
+//into memory and the delta links are applied in reverse order
+//(innermost first), ping-ponging between two buffers. The final
+//patched data is parsed as an object of the base object's type.
+func (c *deltaChain) resolve() (Object, error) {
+
+	ibuf := bytes.NewBuffer(make([]byte, 0, c.baseObj.Size()))
+	n, err := io.Copy(ibuf, c.baseObj.source)
+	if err != nil {
+		return nil, err
+	}
+
+	if n != c.baseObj.Size() {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	obuf := bytes.NewBuffer(make([]byte, 0, c.baseObj.Size()))
+
+	for i := len(c.links); i > 0; i-- {
+		lk := c.links[i-1]
+
+		//obuf.Grow below takes an int; guard the conversion so
+		//SizeTarget cannot be truncated on 32-bit platforms
+		if lk.SizeTarget > int64(^uint(0)>>1) {
+			return nil, fmt.Errorf("git: target to large for delta unpatching")
+		}
+
+		obuf.Grow(int(lk.SizeTarget))
+		obuf.Truncate(0)
+
+		err = lk.Patch(bytes.NewReader(ibuf.Bytes()), obuf)
+
+		if err != nil {
+			return nil, err
+		}
+
+		if lk.SizeTarget != int64(obuf.Len()) {
+			return nil, fmt.Errorf("git: size mismatch while patching delta object")
+		}
+
+		//the output becomes the input of the next (outer) link
+		obuf, ibuf = ibuf, obuf
+	}
+
+	//ibuf is holding the data
+	obj := gitObject{c.baseObj.otype, int64(ibuf.Len()), ioutil.NopCloser(ibuf)}
+	return parseObject(obj)
+}

+ 233 - 0
vendor/github.com/G-Node/gig/objects.go

@@ -0,0 +1,233 @@
+package gig
+
+import (
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"time"
+)
+
+//SHA1 is the object identifying checksum of
+// the object data
+type SHA1 [20]byte
+
+//String returns the lower-case hex representation of the sha1.
+func (oid SHA1) String() string {
+	return hex.EncodeToString(oid[:])
+}
+
+//ParseSHA1 expects a string with a hex encoded sha1.
+//It will trim the string of newline and space before
+//parsing. The decoded value must be exactly 20 bytes.
+func ParseSHA1(input string) (sha SHA1, err error) {
+	data, err := hex.DecodeString(strings.Trim(input, " \n"))
+	if err != nil {
+		return
+	} else if len(data) != 20 {
+		err = fmt.Errorf("git: sha1 must be 20 bytes")
+		return
+	}
+
+	copy(sha[:], data)
+	return
+}
+
+//Signature is a combination of who (Name, Email) and when (Date, Offset).
+//Used by Commit, Tag to link an action (committer, author, tagger, ...)
+//with a person in a point in time.
+type Signature struct {
+	Name   string
+	Email  string
+	Date   time.Time
+	Offset *time.Location
+}
+
+//String formats the signature in git's canonical
+//"Name <email> <unix-timestamp> <zone>" form.
+func (s Signature) String() string {
+	return fmt.Sprintf("%s <%s> %d %s", s.Name, s.Email, s.Date.Unix(), s.Offset)
+}
+
+//ObjectType is to the git object type
+type ObjectType byte
+
+//The defined bits match the ones used in
+//the git pack file format.
+const (
+	_         = iota
+	ObjCommit = ObjectType(iota)
+	ObjTree
+	ObjBlob
+	ObjTag
+
+	ObjOFSDelta = ObjectType(0x6)
+	ObjRefDelta = ObjectType(0x7)
+)
+
+//ParseObjectType takes a string and converts it
+//to the corresponding ObjectType or error if
+//the string doesn't match any type. Leading and
+//trailing whitespace/newlines are trimmed first.
+func ParseObjectType(s string) (ObjectType, error) {
+	s = strings.Trim(s, "\n ")
+	switch s {
+	case "commit":
+		return ObjCommit, nil
+	case "tree":
+		return ObjTree, nil
+	case "blob":
+		return ObjBlob, nil
+	case "tag":
+		return ObjTag, nil
+	}
+
+	return ObjectType(0), fmt.Errorf("git: unknown object: %q", s)
+}
+
+//String returns the canonical name of the object type
+//("commit", "tree", ...); "unknown" for undefined values.
+func (ot ObjectType) String() string {
+	switch ot {
+	case ObjCommit:
+		return "commit"
+	case ObjTree:
+		return "tree"
+	case ObjBlob:
+		return "blob"
+	case ObjTag:
+		return "tag"
+	case ObjOFSDelta:
+		return "delta-ofs"
+	case ObjRefDelta:
+		return "delta-ref"
+	}
+	return "unknown"
+}
+
+//IsStandardObject checks if an object is
+//one of the four common objects such as
+//commit, tree, blob, tag (i.e. not a delta).
+func IsStandardObject(ot ObjectType) bool {
+	return ot > 0 && ot < 5
+}
+
+//IsDeltaObject checks if an object is a
+//delta object, i.e. OFSDelta or RefDelta
+func IsDeltaObject(ot ObjectType) bool {
+	return ot == ObjOFSDelta || ot == ObjRefDelta
+}
+
+//Object represents a git object. It has
+//information common to all git objects,
+//like their type and their size. Also,
+//all git objects should be closed via
+//Close().
+type Object interface {
+	Type() ObjectType
+	Size() int64
+
+	io.WriterTo
+	io.Closer
+}
+
+//gitObject is the common backing for the concrete object types:
+//the type tag, the (inflated) payload size, and the data source
+//the payload is read from.
+type gitObject struct {
+	otype ObjectType
+	size  int64
+
+	source io.ReadCloser
+}
+
+//Type returns the git type of the object.
+func (o *gitObject) Type() ObjectType {
+	return o.otype
+}
+
+//Size returns the (uncompressed) size of the object payload.
+func (o *gitObject) Size() int64 {
+	return o.size
+}
+
+//Close closes the underlying data source, if any.
+func (o *gitObject) Close() error {
+	if o.source == nil {
+		return nil
+	}
+	return o.source.Close()
+}
+
+//Commit represents one git commit.
+type Commit struct {
+	gitObject
+
+	Tree      SHA1
+	Parent    []SHA1
+	Author    Signature
+	Committer Signature
+	Message   string
+	GPGSig    string
+}
+
+//Date returns the commit timestamps (with the correct location).
+func (c *Commit) Date() time.Time {
+	return c.Committer.Date.In(c.Committer.Offset)
+}
+
+//Tree represents the git tree object. Entries are consumed
+//iterator-style via Next()/Entry()/Err().
+type Tree struct {
+	gitObject
+
+	entry *TreeEntry // entry the iterator currently points at
+	err   error      // last error from parseTreeEntry (io.EOF at end)
+}
+
+//TreeEntry holds information about a single
+//entry in the git Tree object.
+type TreeEntry struct {
+	Mode os.FileMode
+	Type ObjectType
+	ID   SHA1
+	Name string
+}
+
+//Next advances the pointer to the next TreeEntry
+//within the Tree object. Returns false if it was
+//pointing to the last element (EOF condition), or
+//if there was an error while advancing. Use Err()
+//to resolve between the two conditions.
+func (tree *Tree) Next() bool {
+	tree.entry, tree.err = parseTreeEntry(tree.source)
+	return tree.err == nil
+}
+
+//Err returns the last error non-EOF error encountered.
+func (tree *Tree) Err() error {
+	if err := tree.err; err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+//Entry returns the current TreeEntry.
+func (tree *Tree) Entry() *TreeEntry {
+	return tree.entry
+}
+
+//Blob represents a git blob object.
+type Blob struct {
+	gitObject
+}
+
+//Read reads from the blob's payload, satisfying io.Reader.
+func (b *Blob) Read(data []byte) (n int, err error) {
+	n, err = b.source.Read(data)
+	return
+}
+
+//MakeAnnexBlob wraps an already-open file as a Blob
+//object with the given size.
+func MakeAnnexBlob(fp *os.File, size int64) *Blob {
+	return &Blob{gitObject{otype: ObjBlob, size: size, source: fp}}
+}
+
+//Tag represents a git tag object.
+type Tag struct {
+	gitObject
+
+	Object  SHA1
+	ObjType ObjectType
+	Tag     string
+	Tagger  Signature
+	Message string
+	GPGSig  string
+}

+ 382 - 0
vendor/github.com/G-Node/gig/pack.go

@@ -0,0 +1,382 @@
+package gig
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"os"
+	"strings"
+)
+
+// Resources:
+//  https://github.com/git/git/blob/master/Documentation/technical/pack-format.txt
+//  http://schacon.github.io/gitbook/7_the_packfile.html
+
+//PackHeader stores version and number of objects in the packfile
+// all data is in network-byte order (big-endian)
+type PackHeader struct {
+	Sig     [4]byte
+	Version uint32
+	Objects uint32
+}
+
+//FanOut table where the "N-th entry of this table records the
+// number of objects in the corresponding pack, the first
+// byte of whose object name is less than or equal to N.
+type FanOut [256]uint32
+
+//Bounds returns the how many objects whose first byte
+//has a value of b-1 (in s) and b (returned in e)
+//are contained in the fanout table
+func (fo FanOut) Bounds(b byte) (s, e int) {
+	e = int(fo[b])
+	if b > 0 {
+		s = int(fo[b-1])
+	}
+	return
+}
+
+//PackIndex represents the git pack file
+//index. It is the main object to use for
+//opening objects contained in packfiles
+//via OpenObject
+type PackIndex struct {
+	*os.File
+
+	Version uint32
+	FO      FanOut
+
+	shaBase int64 // file offset where the sorted sha1 table starts
+}
+
+//PackFile is git pack file with the actual
+//data in it. It should normally not be used
+//directly.
+type PackFile struct {
+	*os.File
+
+	Version  uint32
+	ObjCount uint32
+}
+
+//PackIndexOpen opens the git pack file index with the given
+//path. The ".idx" if missing will be appended.
+//Index versions 1 and 2 are recognized; anything newer is
+//rejected. The caller owns the returned *PackIndex and its
+//underlying file descriptor.
+func PackIndexOpen(path string) (*PackIndex, error) {
+
+	if !strings.HasSuffix(path, ".idx") {
+		path += ".idx"
+	}
+
+	fd, err := os.Open(path)
+
+	if err != nil {
+		return nil, fmt.Errorf("git: could not read pack index: %v", err)
+	}
+
+	idx := &PackIndex{File: fd, Version: 1}
+
+	//v2+ index files start with the magic "\377tOc" followed by
+	//a 4-byte version; v1 files start directly with the fanout
+	var peek [4]byte
+	err = binary.Read(fd, binary.BigEndian, &peek)
+	if err != nil {
+		fd.Close()
+		return nil, fmt.Errorf("git: could not read pack index: %v", err)
+	}
+
+	if bytes.Equal(peek[:], []byte("\377tOc")) {
+		//check this read's error: it was silently ignored
+		//before, which could leave idx.Version bogus on a
+		//truncated index file
+		err = binary.Read(fd, binary.BigEndian, &idx.Version)
+		if err != nil {
+			fd.Close()
+			return nil, fmt.Errorf("git: could not read pack index version: %v", err)
+		}
+	}
+
+	if idx.Version == 1 {
+		//no magic, no version field: rewind, the fanout
+		//table starts at the beginning of the file
+		_, err = idx.Seek(0, 0)
+		if err != nil {
+			fd.Close()
+			return nil, fmt.Errorf("git: io error: %v", err)
+		}
+	} else if idx.Version > 2 {
+		fd.Close()
+		return nil, fmt.Errorf("git: unsupported pack index version: %d", idx.Version)
+	}
+
+	err = binary.Read(idx, binary.BigEndian, &idx.FO)
+	if err != nil {
+		idx.Close()
+		return nil, fmt.Errorf("git: io error: %v", err)
+	}
+
+	idx.shaBase = int64((idx.Version-1)*8) + int64(binary.Size(idx.FO))
+
+	return idx, nil
+}
+
+//ReadSHA1 reads the SHA1 stored at position pos (in the FanOut table).
+//Only version 2 index files are supported.
+func (pi *PackIndex) ReadSHA1(chksum *SHA1, pos int) error {
+	if version := pi.Version; version != 2 {
+		return fmt.Errorf("git: v%d version support incomplete", version)
+	}
+
+	start := pi.shaBase
+	_, err := pi.ReadAt(chksum[0:20], start+int64(pos)*int64(20))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+//ReadOffset returns the offset in the pack file of the object
+//at position pos in the FanOut table.
+func (pi *PackIndex) ReadOffset(pos int) (int64, error) {
+	if version := pi.Version; version != 2 {
+		return -1, fmt.Errorf("git: v%d version incomplete", version)
+	}
+
+	//header[2*4] + FanOut[256*4] + n * (sha1[20]+crc[4])
+	//NOTE(review): pi.FO[255]*24 is computed in uint32 and could
+	//overflow for extremely large packs — confirm acceptable.
+	start := int64(2*4+256*4) + int64(pi.FO[255]*24) + int64(pos*4)
+
+	var offset uint32
+
+	_, err := pi.Seek(start, 0)
+	if err != nil {
+		return -1, fmt.Errorf("git: io error: %v", err)
+	}
+
+	err = binary.Read(pi, binary.BigEndian, &offset)
+	if err != nil {
+		return -1, err
+	}
+
+	//see if msb is set, if so this is an
+	// offset into the 64b_offset table
+	if val := uint32(1<<31) & offset; val != 0 {
+		return -1, fmt.Errorf("git: > 31 bit offests not implemented. Meh")
+	}
+
+	return int64(offset), nil
+}
+
+//findSHA1 binary-searches the index's sorted sha1 table for
+//target and returns its (zero-based) position, or an error if
+//the object is not present.
+func (pi *PackIndex) findSHA1(target SHA1) (int, error) {
+
+	//s, e and midpoint are one-based indices,
+	//where s is the index before interval and
+	//e is the index of the last element in it
+	//-> search interval is: (s | 1, 2, ... e]
+	s, e := pi.FO.Bounds(target[0])
+
+	//invariant: object is, if present, in the interval, (s, e]
+	for s < e {
+		midpoint := s + (e-s+1)/2
+
+		var sha SHA1
+		err := pi.ReadSHA1(&sha, midpoint-1)
+		if err != nil {
+			return 0, fmt.Errorf("git: io error: %v", err)
+		}
+
+		switch bytes.Compare(target[:], sha[:]) {
+		case -1: // target < sha1, new interval (s, m-1]
+			e = midpoint - 1
+		case +1: //target > sha1, new interval (m, e]
+			s = midpoint
+		default:
+			return midpoint - 1, nil
+		}
+	}
+
+	return 0, fmt.Errorf("git: sha1 not found in index")
+}
+
+//FindOffset tries to find the object with the id target and if
+//found returns the offset of the object in the pack file.
+//Returns an error that can be detected by os.IsNotExist if
+//the object could not be found.
+func (pi *PackIndex) FindOffset(target SHA1) (int64, error) {
+
+	pos, err := pi.findSHA1(target)
+	if err != nil {
+		return 0, err
+	}
+
+	off, err := pi.ReadOffset(pos)
+	if err != nil {
+		return 0, err
+	}
+
+	return off, nil
+}
+
+//OpenPackFile opens the corresponding pack file, derived from
+//the index's filename by swapping ".idx" for ".pack".
+func (pi *PackIndex) OpenPackFile() (*PackFile, error) {
+	f := pi.Name()
+	pf, err := OpenPackFile(f[:len(f)-4] + ".pack")
+	if err != nil {
+		return nil, err
+	}
+
+	return pf, nil
+}
+
+//OpenObject will try to find the object with the given id
+//in it is index and then reach out to its corresponding
+//pack file to open the actual git Object.
+//If the object cannot be found it will return an error
+//that can be detected via os.IsNotExist()
+//Delta objects will returned as such and not be resolved.
+//NOTE(review): a fresh PackFile is opened per call and never
+//explicitly closed here — confirm ownership/lifetime.
+func (pi *PackIndex) OpenObject(id SHA1) (Object, error) {
+
+	off, err := pi.FindOffset(id)
+
+	if err != nil {
+		return nil, err
+	}
+
+	pf, err := pi.OpenPackFile()
+	if err != nil {
+		return nil, err
+	}
+
+	obj, err := pf.readRawObject(off)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if IsStandardObject(obj.otype) {
+		return parseObject(obj)
+	}
+
+	if !IsDeltaObject(obj.otype) {
+		return nil, fmt.Errorf("git: unsupported object")
+	}
+
+	//This is a delta object
+	delta, err := parseDelta(obj)
+
+	return delta, err
+}
+
+//OpenPackFile opens the git pack file at the given path
+//It will check the pack file header and version.
+//Currently only version 2 is supported.
+//NB: This is low-level API and should most likely
+//not be used directly.
+func OpenPackFile(path string) (*PackFile, error) {
+	osfd, err := os.Open(path)
+
+	if err != nil {
+		return nil, err
+	}
+
+	var header PackHeader
+	err = binary.Read(osfd, binary.BigEndian, &header)
+	if err != nil {
+		//close the descriptor on every error path below:
+		//it was leaked before
+		osfd.Close()
+		return nil, fmt.Errorf("git: could not read header: %v", err)
+	}
+
+	if string(header.Sig[:]) != "PACK" {
+		osfd.Close()
+		return nil, fmt.Errorf("git: packfile signature error")
+	}
+
+	if header.Version != 2 {
+		osfd.Close()
+		return nil, fmt.Errorf("git: unsupported packfile version")
+	}
+
+	fd := &PackFile{File: osfd,
+		Version:  header.Version,
+		ObjCount: header.Objects}
+
+	return fd, nil
+}
+
+//readRawObject decodes the pack object header at offset and
+//returns a gitObject whose source is positioned at the payload.
+//Standard objects get their source wrapped with a zlib reader;
+//delta payloads are left compressed for parseDelta to handle.
+func (pf *PackFile) readRawObject(offset int64) (gitObject, error) {
+	r := newPackReader(pf, offset)
+
+	b, err := r.ReadByte()
+	if err != nil {
+		return gitObject{}, fmt.Errorf("git: io error: %v", err)
+	}
+
+	//object header format:
+	//[mxxx tttt] (byte)
+	//      tttt -> type [4 bit]
+	otype := ObjectType((b & 0x70) >> 4)
+
+	//  xxx      -> size [3 bit]
+	size := int64(b & 0xF)
+
+	// m         -> 1, if size > 2^3 (n-byte encoding)
+	if b&0x80 != 0 {
+		//the remaining size bits continue at bit position 4
+		s, err := readVarSize(r, 4)
+		if err != nil {
+			return gitObject{}, err
+		}
+
+		size += s
+	}
+	obj := gitObject{otype, size, r}
+
+	if IsStandardObject(otype) {
+		err = obj.wrapSourceWithDeflate()
+		if err != nil {
+			return gitObject{}, err
+		}
+	}
+
+	return obj, nil
+}
+
+//OpenObject reads the git object header at offset and
+//then parses the data as the corresponding object type.
+func (pf *PackFile) OpenObject(offset int64) (Object, error) {
+
+	obj, err := pf.readRawObject(offset)
+
+	if err != nil {
+		return nil, err
+	}
+
+	switch obj.otype {
+	case ObjCommit:
+		return parseCommit(obj)
+	case ObjTree:
+		return parseTree(obj)
+	case ObjBlob:
+		return parseBlob(obj)
+	case ObjTag:
+		return parseTag(obj)
+
+	case ObjOFSDelta:
+		fallthrough
+	case ObjRefDelta:
+		return parseDelta(obj)
+
+	default:
+		return nil, fmt.Errorf("git: unknown object type")
+	}
+}
+
+//packReader is an io.ReadCloser view into a PackFile starting
+//at a fixed offset; reads advance off while start remembers the
+//object's position (needed for ofs-delta resolution).
+type packReader struct {
+	fd    *PackFile
+	start int64 // offset the object starts at; never changes
+	off   int64 // current read position
+}
+
+//newPackReader creates a packReader positioned at offset.
+func newPackReader(fd *PackFile, offset int64) *packReader {
+	return &packReader{fd: fd, start: offset, off: offset}
+}
+
+//Read satisfies io.Reader via positional ReadAt on the pack file.
+func (p *packReader) Read(d []byte) (n int, err error) {
+	n, err = p.fd.ReadAt(d, p.off)
+	p.off += int64(n)
+	return
+}
+
+//ReadByte reads a single byte, satisfying io.ByteReader.
+func (p *packReader) ReadByte() (c byte, err error) {
+	var b [1]byte
+	_, err = p.Read(b[:])
+	c = b[0]
+	return
+}
+
+//Close is a no-op: the underlying PackFile owns the descriptor.
+func (p *packReader) Close() (err error) {
+	return //noop
+}

+ 315 - 0
vendor/github.com/G-Node/gig/parse.go

@@ -0,0 +1,315 @@
+package gig
+
+import (
+	"bufio"
+	"bytes"
+	"compress/zlib"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"os"
+	"strconv"
+	"strings"
+	"time"
+)
+
+//parseSignature parses a git signature line into a Signature.
+//Format: "<name> <email> <unix timestamp> <time zone offset>"
+//i.e. "A U Thor <author@example.com> 1462210432 +0200"
+func parseSignature(line string) (Signature, error) {
+
+	u := Signature{}
+
+	//<name>: everything before the first " <"
+	start := strings.Index(line, " <")
+	if start == -1 {
+		return u, fmt.Errorf("invalid signature format")
+	}
+	u.Name = line[:start]
+
+	//<email>: between "<" and the first "> "
+	end := strings.Index(line, "> ")
+	if end == -1 {
+		return u, fmt.Errorf("invalid signature format")
+	}
+	u.Email = line[start+2: end]
+
+	//<unix timestamp>
+	tstr, off := split2(line[end+2:], " ")
+	i, err := strconv.ParseInt(tstr, 10, 64)
+
+	//offset must look like "+hhmm"/"-hhmm" (5 chars)
+	if err != nil || len(off) != 5 {
+		return u, fmt.Errorf("invalid signature time format")
+	}
+	u.Date = time.Unix(i, 0)
+
+	//<time zone offset>
+	h, herr := strconv.Atoi(off[1:3])
+	m, merr := strconv.Atoi(off[3:])
+
+	if herr != nil || merr != nil {
+		return u, fmt.Errorf("invalid signature offset format")
+	}
+
+	o := (h*60 + m) * 60
+
+	if off[0] == '-' {
+		o *= -1
+	}
+
+	u.Offset = time.FixedZone(off, o)
+
+	return u, nil
+}
+
+//parseCommitGPGSig reads the continuation lines of a commit's
+//"gpgsig" header from r and appends them (de-indented) to w.
+//Returns when the blank line ending the header block is seen.
+func parseCommitGPGSig(r *bufio.Reader, w *bytes.Buffer) error {
+	for {
+		l, err := r.ReadString('\n')
+		if err != nil {
+			return nil
+		} else if l[0] == ' ' {
+			//continuation lines are indented by one space
+			_, err = w.WriteString(fmt.Sprintf("\n%s", strings.Trim(l, " \n")))
+			if err != nil {
+				return err
+			}
+			continue
+		} else if l[0] == '\n' {
+			//blank line: end of the header block; push it back
+			return r.UnreadByte()
+		}
+
+		return fmt.Errorf("Unexpected end of gpg signature")
+	}
+}
+
+//parseTagGPGSig reads a tag's inline PGP signature from r into w,
+//stopping after the "-----END PGP SIGNATURE-----" line.
+//!Tag signatures do not have trailing whitespaces
+func parseTagGPGSig(r *bufio.Reader, w *bytes.Buffer) error {
+	for {
+		l, err := r.ReadString('\n')
+		if err != nil {
+			return nil
+		}
+		_, err = w.WriteString(fmt.Sprintf("\n%s", strings.Trim(l, " \n")))
+		if err != nil {
+			return err
+		}
+		if !strings.Contains(l, "-----END PGP SIGNATURE-----") {
+			continue
+		} else {
+			return nil
+		}
+	}
+}
+
+//openRawObject opens a loose (zlib-compressed) object file and
+//parses its "<type> <size>\0" header, returning a gitObject
+//positioned at the payload.
+func openRawObject(path string) (gitObject, error) {
+	fd, err := os.Open(path)
+	if err != nil {
+		return gitObject{}, err
+	}
+
+	// we wrap the zlib reader below, so it will be
+	// properly closed
+	r, err := zlib.NewReader(fd)
+	if err != nil {
+		return gitObject{}, fmt.Errorf("git: could not create zlib reader: %v", err)
+	}
+
+	// general object format is
+	// [type][space][length {ASCII}][\0]
+
+	line, err := readUntilNul(r)
+	if err != nil {
+		return gitObject{}, err
+	}
+
+	tstr, lstr := split2(line, " ")
+	size, err := strconv.ParseInt(lstr, 10, 64)
+
+	if err != nil {
+		return gitObject{}, fmt.Errorf("git: object parse error: %v", err)
+	}
+
+	otype, err := ParseObjectType(tstr)
+	if err != nil {
+		return gitObject{}, err
+	}
+
+	obj := gitObject{otype, size, r}
+	obj.wrapSource(r)
+
+	return obj, nil
+}
+
+//parseObject dispatches a raw standard object to the parser for
+//its type; the object is closed and an error returned for
+//non-standard (e.g. delta) types.
+func parseObject(obj gitObject) (Object, error) {
+	switch obj.otype {
+	case ObjCommit:
+		return parseCommit(obj)
+
+	case ObjTree:
+		return parseTree(obj)
+
+	case ObjBlob:
+		return parseBlob(obj)
+
+	case ObjTag:
+		return parseTag(obj)
+	}
+
+	obj.Close()
+	return nil, fmt.Errorf("git: unsupported object")
+}
+
+//parseCommit parses a commit object's headers (tree, parent(s),
+//author, committer, gpgsig) up to the blank line, and takes the
+//rest of the payload as the commit message.
+func parseCommit(obj gitObject) (*Commit, error) {
+	c := &Commit{gitObject: obj}
+
+	lr := &io.LimitedReader{R: obj.source, N: obj.size}
+	br := bufio.NewReader(lr)
+
+	var err error
+	for {
+		var l string
+		l, err = br.ReadString('\n')
+		head, tail := split2(l, " ")
+
+		switch head {
+		case "tree":
+			c.Tree, err = ParseSHA1(tail)
+		case "parent":
+			//NOTE(review): err is shadowed here, so a malformed
+			//parent line is silently skipped — confirm intended.
+			parent, err := ParseSHA1(tail)
+			if err == nil {
+				c.Parent = append(c.Parent, parent)
+			}
+		case "author":
+			c.Author, err = parseSignature(strings.Trim(tail, "\n"))
+		case "committer":
+			c.Committer, err = parseSignature(strings.Trim(tail, "\n"))
+		case "gpgsig":
+			sw := bytes.NewBufferString(strings.Trim(tail, "\n"))
+			err = parseCommitGPGSig(br, sw)
+			c.GPGSig = sw.String()
+		}
+
+		//blank line separates headers from the message
+		if err != nil || head == "\n" {
+			break
+		}
+	}
+
+	if err != nil && err != io.EOF {
+		return nil, err
+	}
+
+	data, err := ioutil.ReadAll(br)
+
+	if err != nil {
+		return nil, err
+	}
+
+	c.Message = string(data)
+	return c, nil
+}
+
+//parseTree wraps the raw object as a Tree; entries are parsed
+//lazily via Tree.Next().
+func parseTree(obj gitObject) (*Tree, error) {
+	tree := Tree{obj, nil, nil}
+	return &tree, nil
+}
+
+//parseTreeEntry reads one tree entry from r.
+//format is: [mode{ASCII, octal}][space][name][\0][SHA1]
+func parseTreeEntry(r io.Reader) (*TreeEntry, error) {
+	entry := &TreeEntry{}
+
+	l, err := readUntilNul(r) // read until \0
+
+	if err != nil {
+		return nil, err
+	}
+
+	mstr, name := split2(l, " ")
+	mode, err := strconv.ParseUint(mstr, 8, 32)
+	if err != nil {
+		return nil, err
+	}
+
+	//TODO: this is not correct because
+	// we need to shift the "st_mode" file
+	// info bits by 16
+	entry.Mode = os.FileMode(mode)
+
+	//040000 is the git mode for a directory entry
+	if entry.Mode == 040000 {
+		entry.Type = ObjTree
+	} else {
+		entry.Type = ObjBlob
+	}
+
+	entry.Name = name
+
+	n, err := r.Read(entry.ID[:])
+
+	if err != nil && err != io.EOF {
+		return nil, err
+	} else if err == io.EOF && n != 20 {
+		return nil, fmt.Errorf("git: unexpected EOF")
+	}
+
+	return entry, nil
+}
+
+//parseBlob wraps the raw object as a Blob; the payload stays in
+//the source and is consumed via Blob.Read.
+func parseBlob(obj gitObject) (*Blob, error) {
+	blob := &Blob{obj}
+	return blob, nil
+}
+
+//parseTag parses a tag object's headers (object, type, tag,
+//tagger) plus an optional trailing PGP signature; all other
+//lines accumulate into the tag message.
+func parseTag(obj gitObject) (*Tag, error) {
+	c := &Tag{gitObject: obj}
+
+	lr := &io.LimitedReader{R: c.source, N: c.size}
+	br := bufio.NewReader(lr)
+	var mess bytes.Buffer
+
+	var err error
+	for {
+		var l string
+		l, err = br.ReadString('\n')
+		head, tail := split2(l, " ")
+
+		switch head {
+		case "object":
+			c.Object, err = ParseSHA1(tail)
+		case "type":
+			c.ObjType, err = ParseObjectType(tail)
+		case "tag":
+			c.Tag = strings.Trim(tail, "\n")
+		case "tagger":
+			c.Tagger, err = parseSignature(strings.Trim(tail, "\n"))
+		case "-----BEGIN":
+			//with signed tags (in difference to signed commits) the
+			// signatures do not start with "gpgsig" but just with
+			//"-----BEGIN PGP SIGNATURE-----"
+			//(tbd)
+			sw := bytes.NewBufferString(strings.Trim(
+				fmt.Sprintf("%s %s", head, tail),
+				"\n"))
+			err = parseTagGPGSig(br, sw)
+			c.GPGSig = sw.String()
+		default:
+			//Capture descriptions for tags here.The old way works for unsigned
+			//tags but not for signed ones.
+			// Be Aware! The message comes before the gpg signature
+			// not after as with commits
+			mess.WriteString(l)
+		}
+
+		if err != nil {
+			//For tags gpg signatures can come after the tag description
+			// which might start and also contain a single newline.
+			// therefore the ||head=="\n" part
+			// has been removed. i guess this wont break anything as err will
+			// eventually become EOF for tags and hence the loop will break
+			// (tbd)
+			break
+		}
+	}
+	if err != nil && err != io.EOF {
+		return nil, err
+	}
+	//NOTE(review): [1:] strips the leading newline before the
+	//message, but panics if mess is empty — confirm every tag
+	//payload yields at least one message byte.
+	c.Message = mess.String()[1:]
+	return c, nil
+}

+ 246 - 0
vendor/github.com/G-Node/gig/refs.go

@@ -0,0 +1,246 @@
+package gig
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strings"
+)
+
//Ref is a named reference to a git object id (SHA1).
type Ref interface {
	Repo() *Repository
	Name() string
	Fullname() string
	Namespace() string
	Resolve() (SHA1, error)
}

//ref holds the state shared by all reference implementations.
type ref struct {
	repo *Repository
	name string
	ns   string // #special, #branch, or the name like 'remote', 'tags'
}

//Name returns the short name of the reference.
func (r *ref) Name() string {
	return r.name
}

//Fullname returns the namespace-qualified name (e.g. "tags/v1");
//for '#'-prefixed namespaces only the short name is returned.
func (r *ref) Fullname() string {
	fullname := r.name
	if !strings.HasPrefix(r.ns, "#") {
		fullname = path.Join(r.ns, r.name)
	}
	return fullname
}

//Repo returns the repository this reference belongs to.
func (r *ref) Repo() *Repository {
	return r.repo
}

//Namespace returns the reference's namespace ("#special",
//"#branch", or a literal component such as "tags").
func (r *ref) Namespace() string {
	return r.ns
}

//IsBranchRef reports whether r is a local branch head.
func IsBranchRef(r Ref) bool {
	return r.Namespace() == "#branch"
}
+
//IDRef is a reference that points via
//a sha1 directly to a git object
type IDRef struct {
	ref
	id SHA1
}

//Resolve for IDRef returns the stored object
//id (SHA1)
func (r *IDRef) Resolve() (SHA1, error) {
	return r.id, nil
}

//SymbolicRef is a reference that points
//to another reference
type SymbolicRef struct {
	ref
	Symbol string
}

//Resolve will resolve the symbolic reference into
//an object id. It shells out to "git rev-parse" instead of
//following the chain manually.
func (r *SymbolicRef) Resolve() (SHA1, error) {
	gdir := fmt.Sprintf("--git-dir=%s", r.repo.Path)

	cmd := exec.Command("git", gdir, "rev-parse", r.Fullname())
	body, err := cmd.Output()

	if err != nil {
		//return the zero SHA1 together with the error
		var id SHA1
		return id, err
	}

	return ParseSHA1(string(body))
}
+
//parseRefName parses a ref file name (e.g. "HEAD" or
//"refs/heads/master") into its short name and namespace.
//The namespace is "#special" for top-level refs like HEAD,
//"#branch" for local heads, and the literal middle component
//(e.g. "tags", "remotes") otherwise.
func parseRefName(filename string) (name, ns string, err error) {
	comps := strings.Split(filename, "/")
	n := len(comps)

	//valid shapes: a single component, or "refs/<ns>/<name...>"
	if n < 1 || n == 2 || (n > 2 && comps[0] != "refs") {
		err = fmt.Errorf("git: unexpected ref name: %v", filename)
		return
	}

	// NOTE: the original had a duplicate "if n == 1" assignment here,
	// shadowed by the identical first case of the switch — removed.

	// 'man gitrepository-layout' is really helpful
	// 'man git-check-ref-format' too
	// [HEAD|ORIG_HEAD] -> special head
	// [0|refs][1|<ns>][2+|name]
	// <ns> == "heads" -> local branch"
	switch {
	case n == 1:
		name = comps[0]
		ns = "#special"
	case comps[1] == "heads":
		name = path.Join(comps[2:]...)
		ns = "#branch"
	default:
		name = path.Join(comps[2:]...)
		ns = comps[1]
	}
	return
}
+
//parseRef reads the ref with the given filename (relative to the
//git dir) and returns a SymbolicRef for "ref: ..." contents or an
//IDRef for plain SHA1 contents. When the loose ref file is absent,
//the packed-refs file is consulted instead.
func (repo *Repository) parseRef(filename string) (Ref, error) {

	name, ns, err := parseRefName(filename)
	if err != nil {
		return nil, err
	}

	base := ref{repo, name, ns}

	//now to the actual contents of the ref
	data, err := ioutil.ReadFile(filepath.Join(repo.Path, filename))
	if err != nil {
		if os.IsNotExist(err) {
			//no loose ref file; the ref may still be packed
			return repo.findPackedRef(base.Fullname())
		}
		return nil, err
	}

	b := string(data)
	if strings.HasPrefix(b, "ref:") {
		trimmed := strings.Trim(b[4:], " \n")
		return &SymbolicRef{base, trimmed}, nil
	}

	id, err := ParseSHA1(b)
	if err == nil {
		return &IDRef{base, id}, nil
	}

	return nil, fmt.Errorf("git: unknown ref type: %q", b)
}
+
+func (repo *Repository) listRefWithName(name string) (res []Ref) {
+	gdir := fmt.Sprintf("--git-dir=%s", repo.Path)
+	cmd := exec.Command("git", gdir, "show-ref", name)
+	body, err := cmd.Output()
+
+	if err != nil {
+		return
+	}
+
+	r := bytes.NewBuffer(body)
+
+	for {
+		var l string
+		l, err = r.ReadString('\n')
+		if err != nil {
+			break
+		}
+
+		_, name := split2(l[:len(l)-1], " ")
+		r, err := repo.parseRef(name)
+
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "git: could not parse ref with name %q: %v", name, err)
+			continue
+		}
+
+		res = append(res, r)
+	}
+
+	return
+}
+
//loadPackedRefs parses the repository's packed-refs file and
//returns all direct (ID) refs it contains. Peeled ids and lines
//that fail to parse are skipped.
func (repo *Repository) loadPackedRefs() ([]Ref, error) {

	fd, err := os.Open(filepath.Join(repo.Path, "packed-refs"))
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	r := bufio.NewReader(fd)

	var refs []Ref
	for {
		var l string
		l, err = r.ReadString('\n')
		if err != nil {
			break
		}

		//lines look like "<sha1> <fullname>\n"
		head, tail := split2(l, " ")
		if tail == "" {
			//probably a peeled id (i.e. "^SHA1")
			//TODO: do something with it
			continue
		}

		//the := below intentionally shadows the outer err:
		//per-line parse failures are skipped, not fatal
		name, ns, err := parseRefName(tail[:len(tail)-1])
		if err != nil {
			//TODO: log error, panic?
			continue
		}

		id, err := ParseSHA1(head)
		if err != nil {
			//TODO: same as above
			continue
		}

		refs = append(refs, &IDRef{ref{repo, name, ns}, id})
	}

	if err != nil && err != io.EOF {
		return nil, err
	}

	return refs, nil
}
+
+func (repo *Repository) findPackedRef(name string) (Ref, error) {
+	refs, err := repo.loadPackedRefs()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, ref := range refs {
+		if ref.Fullname() == name {
+			return ref, nil
+		}
+	}
+	return nil, fmt.Errorf("ref with name %q not found", name)
+}

+ 463 - 0
vendor/github.com/G-Node/gig/repo.go

@@ -0,0 +1,463 @@
+package gig
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strings"
+)
+
//Repository represents an on disk git repository.
type Repository struct {
	Path string // path to the git directory (the bare repo itself)
}
+
+//InitBareRepository creates a bare git repository at path.
+func InitBareRepository(path string) (*Repository, error) {
+
+	path, err := filepath.Abs(path)
+	if err != nil {
+		return nil, fmt.Errorf("Could not determine absolute path: %v", err)
+	}
+
+	cmd := exec.Command("git", "init", "--bare", path)
+	err = cmd.Run()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &Repository{Path: path}, nil
+}
+
//IsBareRepository checks if path is a bare git repository.
func IsBareRepository(path string) bool {

	gdir := fmt.Sprintf("--git-dir=%s", path)
	out, err := exec.Command("git", gdir, "rev-parse", "--is-bare-repository").Output()
	if err != nil {
		return false
	}

	return strings.Trim(string(out), "\n ") == "true"
}
+
+//OpenRepository opens the repository at path. Currently
+//verifies that it is a (bare) repository and returns an
+//error if the check fails.
+func OpenRepository(path string) (*Repository, error) {
+
+	path, err := filepath.Abs(path)
+	if err != nil {
+		return nil, fmt.Errorf("git: could not determine absolute path")
+	}
+
+	if !IsBareRepository(path) {
+		return nil, fmt.Errorf("git: not a bare repository")
+	}
+
+	return &Repository{Path: path}, nil
+}
+
+//DiscoverRepository returns the git repository that contains the
+//current working directory, or and error if the current working
+//dir does not lie inside one.
+func DiscoverRepository() (*Repository, error) {
+	cmd := exec.Command("git", "rev-parse", "--git-dir")
+	data, err := cmd.Output()
+	if err != nil {
+		return nil, err
+	}
+
+	path := strings.Trim(string(data), "\n ")
+	return &Repository{Path: path}, nil
+}
+
+//ReadDescription returns the contents of the description file.
+func (repo *Repository) ReadDescription() string {
+	path := filepath.Join(repo.Path, "description")
+
+	dat, err := ioutil.ReadFile(path)
+	if err != nil {
+		return ""
+	}
+
+	return string(dat)
+}
+
//WriteDescription writes the contents of the description file.
//The file is replaced wholesale with mode 0666.
func (repo *Repository) WriteDescription(description string) error {
	path := filepath.Join(repo.Path, "description")

	// not atomic, fine for now
	return ioutil.WriteFile(path, []byte(description), 0666)
}
+
// DeleteCollaborator removes a collaborator file from the repositories sharing folder.
// The file lives at <repo>/gin/sharing/<username>.
func (repo *Repository) DeleteCollaborator(username string) error {
	filePath := filepath.Join(repo.Path, "gin", "sharing", username)

	return os.Remove(filePath)
}
+
//OpenObject returns the git object for a give id (SHA1).
//Standard objects (commit, tree, blob, tag) are parsed directly;
//delta objects are resolved against their base chain first.
func (repo *Repository) OpenObject(id SHA1) (Object, error) {
	obj, err := repo.openRawObject(id)

	if err != nil {
		return nil, err
	}

	if IsStandardObject(obj.otype) {
		return parseObject(obj)
	}

	//not a standard object, *must* be a delta object,
	// we know of no other types
	if !IsDeltaObject(obj.otype) {
		return nil, fmt.Errorf("git: unsupported object")
	}

	delta, err := parseDelta(obj)
	if err != nil {
		return nil, err
	}

	chain, err := buildDeltaChain(delta, repo)

	if err != nil {
		return nil, err
	}

	//TODO: check depth, and especially expected memory usage
	// beofre actually patching it

	return chain.resolve()
}
+
//openRawObject locates the raw object for id: first as a loose
//object under objects/xx/xxxx..., then by searching the pack
//index files.
func (repo *Repository) openRawObject(id SHA1) (gitObject, error) {
	idstr := id.String()
	opath := filepath.Join(repo.Path, "objects", idstr[:2], idstr[2:])

	obj, err := openRawObject(opath)

	if err == nil {
		return obj, nil
	} else if err != nil && !os.IsNotExist(err) {
		return obj, err
	}

	//no loose object; fall back to the pack files
	indicies := repo.loadPackIndices()

	for _, f := range indicies {

		idx, err := PackIndexOpen(f)
		if err != nil {
			continue
		}

		//TODO: we should leave index files open,
		// NOTE(review): defer inside this loop only fires when the
		// function returns, so every opened index stays open until then
		defer idx.Close()

		off, err := idx.FindOffset(id)

		if err != nil {
			continue
		}

		pf, err := idx.OpenPackFile()
		if err != nil {
			return gitObject{}, err
		}

		obj, err := pf.readRawObject(off)

		if err != nil {
			return gitObject{}, err
		}

		return obj, nil
	}

	// from inspecting the os.isNotExist source it
	// seems that if we have "not found" in the message
	// os.IsNotExist() report true, which is what we want
	return gitObject{}, fmt.Errorf("git: object not found")
}
+
+func (repo *Repository) loadPackIndices() []string {
+	target := filepath.Join(repo.Path, "objects", "pack", "*.idx")
+	files, err := filepath.Glob(target)
+
+	if err != nil {
+		panic(err)
+	}
+
+	return files
+}
+
//OpenRef returns the Ref with the given name or an error
//if either no maching could be found or in case the match
//was not unique. "HEAD" is parsed directly; other names are
//matched via "git show-ref", preferring local branch heads.
func (repo *Repository) OpenRef(name string) (Ref, error) {

	if name == "HEAD" {
		return repo.parseRef("HEAD")
	}

	matches := repo.listRefWithName(name)

	//first search in local heads
	var locals []Ref
	for _, v := range matches {
		if IsBranchRef(v) {
			//an exact full-name match wins immediately
			if name == v.Fullname() {
				return v, nil
			}
			locals = append(locals, v)
		}
	}

	// if we find a single local match
	// we return it directly
	if len(locals) == 1 {
		return locals[0], nil
	}

	switch len(matches) {
	case 0:
		return nil, fmt.Errorf("git: ref matching %q not found", name)
	case 1:
		return matches[0], nil
	}
	return nil, fmt.Errorf("git: ambiguous ref name, multiple matches")
}
+
//Readlink returns the destination of a symbilc link blob object:
//the blob's full contents interpreted as the link target path.
func (repo *Repository) Readlink(id SHA1) (string, error) {

	b, err := repo.OpenObject(id)
	if err != nil {
		return "", err
	}

	if b.Type() != ObjBlob {
		return "", fmt.Errorf("id must point to a blob")
	}

	blob := b.(*Blob)

	//TODO: check size and don't read unreasonable large blobs
	data, err := ioutil.ReadAll(blob)

	if err != nil {
		return "", err
	}

	return string(data), nil
}
+
//ObjectForPath will resolve the path to an object
//for the file tree starting in the node root.
//The root object can be either a Commit, Tree or Tag.
//Each path component must name an entry in the current tree;
//failures are reported as *os.PathError with the partial path.
func (repo *Repository) ObjectForPath(root Object, pathstr string) (Object, error) {

	var node Object
	var err error

	//normalize the starting point to a tree object
	switch o := root.(type) {
	case *Tree:
		node = root
	case *Commit:
		node, err = repo.OpenObject(o.Tree)
	case *Tag:
		node, err = repo.OpenObject(o.Object)
	default:
		return nil, fmt.Errorf("unsupported root object type")
	}

	if err != nil {
		return nil, fmt.Errorf("could not root tree object: %v", err)
	}

	cleaned := path.Clean(strings.Trim(pathstr, " /"))
	comps := strings.Split(cleaned, "/")

	var i int
	for i = 0; i < len(comps); i++ {

		//the current node must be a tree to descend further
		tree, ok := node.(*Tree)
		if !ok {
			cwd := strings.Join(comps[:i+1], "/")
			err := &os.PathError{
				Op:   "convert git.Object to git.Tree",
				Path: cwd,
				Err:  fmt.Errorf("expected tree object, got %s", node.Type()),
			}
			return nil, err
		}

		//Since we call path.Clean(), this should really
		//only happen at the root, but it is safe to
		//have here anyway
		if comps[i] == "." || comps[i] == "/" {
			continue
		}

		//scan the tree entries for the current component
		var id *SHA1
		for tree.Next() {
			entry := tree.Entry()
			if entry.Name == comps[i] {
				id = &entry.ID
				break
			}
		}

		if err = tree.Err(); err != nil {
			cwd := strings.Join(comps[:i+1], "/")
			return nil, &os.PathError{
				Op:   "find object",
				Path: cwd,
				Err:  err}
		} else if id == nil {
			cwd := strings.Join(comps[:i+1], "/")
			return nil, &os.PathError{
				Op:   "find object",
				Path: cwd,
				Err:  os.ErrNotExist}
		}

		node, err = repo.OpenObject(*id)
		if err != nil {
			cwd := strings.Join(comps[:i+1], "/")
			return nil, &os.PathError{
				Op:   "open object",
				Path: cwd,
				Err:  err,
			}
		}
	}

	return node, nil
}
+
// usefmt is the option string used by CommitsForRef to return a formatted git commit log.
// Each field is emitted as "<Key>:=<value>" on its own line.
const usefmt = `--pretty=format:
Commit:=%H%n
Committer:=%cn%n
Author:=%an%n
Date-iso:=%ai%n
Date-rel:=%ar%n
Subject:=%s%n
Changes:=`

// CommitSummary represents a subset of information from a git commit.
type CommitSummary struct {
	Commit       string   // full commit hash (%H)
	Committer    string   // committer name (%cn)
	Author       string   // author name (%an)
	DateIso      string   // author date, ISO format (%ai)
	DateRelative string   // author date, relative (%ar)
	Subject      string   // commit subject line (%s)
	Changes      []string // raw --name-status lines of the commit
}
+
+// CommitsForRef executes a custom git log command for the specified ref of the
+// associated git repository and returns the resulting byte array.
+func (repo *Repository) CommitsForRef(ref string) ([]CommitSummary, error) {
+
+	raw, err := commitsForRef(repo.Path, ref, usefmt)
+	if err != nil {
+		return nil, err
+	}
+
+	sep := ":="
+	var comList []CommitSummary
+	r := bytes.NewReader(raw)
+	br := bufio.NewReader(r)
+
+	var changesFlag bool
+	for {
+		// Consume line until newline character
+		l, err := br.ReadString('\n')
+
+		if strings.Contains(l, sep) {
+			splitList := strings.SplitN(l, sep, 2)
+
+			key := splitList[0]
+			val := splitList[1]
+			switch key {
+			case "Commit":
+				// reset non key line flags
+				changesFlag = false
+				newCommit := CommitSummary{Commit: val}
+				comList = append(comList, newCommit)
+			case "Committer":
+				comList[len(comList)-1].Committer = val
+			case "Author":
+				comList[len(comList)-1].Author = val
+			case "Date-iso":
+				comList[len(comList)-1].DateIso = val
+			case "Date-rel":
+				comList[len(comList)-1].DateRelative = val
+			case "Subject":
+				comList[len(comList)-1].Subject = val
+			case "Changes":
+				// Setting changes flag so we know, that the next lines are probably file change notification lines.
+				changesFlag = true
+			default:
+				fmt.Printf("[W] commits: unexpected key %q, value %q\n", key, strings.Trim(val, "\n"))
+			}
+		} else if changesFlag && strings.Contains(l, "\t") {
+			comList[len(comList)-1].Changes = append(comList[len(comList)-1].Changes, l)
+		}
+
+		// Breaks at the latest when EOF err is raised
+		if err != nil {
+			break
+		}
+	}
+	if err != io.EOF && err != nil {
+		return nil, err
+	}
+
+	return comList, nil
+}
+
// commitsForRef executes a custom git log command for the specified ref of the
// given git repository with the specified log format string and returns the resulting byte array.
// Function is kept private to force handling of the []byte inside the package.
func commitsForRef(repoPath, ref, usefmt string) ([]byte, error) {
	gdir := fmt.Sprintf("--git-dir=%s", repoPath)

	cmd := exec.Command("git", gdir, "log", ref, usefmt, "--name-status")
	body, err := cmd.Output()
	if err != nil {
		// fixed: the previous message carried a stray trailing newline
		return nil, fmt.Errorf("failed running git log: %v", err)
	}
	return body, nil
}
+
+// BranchExists runs the "git branch <branchname> --list" command.
+// It will return an error, if the command fails, true, if the result is not empty and false otherwise.
+func (repo *Repository) BranchExists(branch string) (bool, error) {
+	gdir := fmt.Sprintf("--git-dir=%s", repo.Path)
+
+	cmd := exec.Command("git", gdir, "branch", branch, "--list")
+	body, err := cmd.Output()
+	if err != nil {
+		return false, err
+	} else if len(body) == 0 {
+		return false, nil
+	}
+
+	return true, nil
+}

+ 71 - 0
vendor/github.com/G-Node/gig/util.go

@@ -0,0 +1,71 @@
+package gig
+
+import (
+	"io"
+	"bytes"
+	"strings"
+	"compress/zlib"
+)
+
+func readUntilNul(r io.Reader) (string, error) {
+	buf := bytes.NewBuffer(make([]byte, 0))
+	for {
+		var b [1]byte
+		_, err := r.Read(b[:])
+		if err != nil {
+			return "", err
+		} else if b[0] == 0 {
+			break
+		}
+		buf.WriteByte(b[0])
+	}
+
+	return buf.String(), nil
+}
+
//split2 splits s around the first occurrence of sep. head is the
//part before sep; tail is the part after, or the empty string when
//sep does not occur.
func split2(s, sep string) (head, tail string) {
	parts := strings.SplitN(s, sep, 2)
	if len(parts) == 2 {
		return parts[0], parts[1]
	}
	return parts[0], ""
}
+
+type zlibReadCloser struct {
+	io.LimitedReader     //R of io.LimitedReader is the zlib reader
+	source io.ReadCloser //the underlying source
+}
+
+func (r *zlibReadCloser) Close() error {
+	var e1, e2 error
+
+	// this shouldn't fail ever actually, since the wrapped
+	//  object should have been an io.ReadCloser
+	if rc, ok := r.LimitedReader.R.(io.Closer); ok {
+		e1 = rc.Close()
+	}
+
+	e2 = r.source.Close()
+
+	if e1 == nil && e2 == nil {
+		return nil
+	} else if e2 != nil {
+		return e2
+	}
+	return e1
+}
+
//wrapSourceWithDeflate replaces the object's source with a zlib
//decompression reader limited to the object's size; the previous
//source is retained so it can still be closed.
func (o *gitObject) wrapSourceWithDeflate() error {
	r, err := zlib.NewReader(o.source)
	if err != nil {
		return err
	}

	o.source = &zlibReadCloser{io.LimitedReader{R: r, N: o.size}, o.source}
	return nil
}

//wrapSource wraps the given reader (limited to the object's size)
//around the object, keeping the previous source for closing.
func (o *gitObject) wrapSource(rc io.ReadCloser) {
	o.source = &zlibReadCloser{io.LimitedReader{R: rc, N: o.size}, o.source}
}

+ 80 - 0
vendor/github.com/G-Node/gig/walk.go

@@ -0,0 +1,80 @@
+package gig
+
+import "fmt"
+
+func (repo *Repository) WalkRef(refname string, goOn func(SHA1) bool) (map[SHA1]*Commit, error) {
+	head, err := repo.OpenRef(refname)
+	if err != nil {
+		return nil, err
+	}
+
+	HId, err := head.Resolve()
+	if err != nil {
+		return nil, err
+	}
+
+	commits := make(map[SHA1]*Commit)
+	repo.walkCommitTree(commits, HId, goOn)
+	return commits, nil
+}
+
+func (repo *Repository) walkCommitTree(commits map[SHA1]*Commit, commitId SHA1,
+	goOn func(SHA1) bool) error {
+	commit, err := repo.OpenObject(commitId)
+	commit.Close()
+	if err != nil {
+		return err
+	}
+
+	if _, ok := commits[commitId]; !ok && goOn(commitId) {
+		commits[commitId] = commit.(*Commit)
+		for _, parent := range commit.(*Commit).Parent {
+			repo.walkCommitTree(commits, parent, goOn)
+		}
+		return nil
+	} else {
+		return nil
+	}
+}
+
+func (repo *Repository) GetBlobsForCommit(commit *Commit, blobs map[SHA1]*Blob) error {
+	treeOb, err := repo.OpenObject(commit.Tree)
+	if err != nil {
+		return err
+	}
+	defer treeOb.Close()
+
+	tree, ok := treeOb.(*Tree)
+	if !ok {
+		return fmt.Errorf("Could not assert a tree")
+	}
+
+	err = repo.GetBlobsForTree(tree, blobs)
+	return err
+}
+
+func (repo *Repository) GetBlobsForTree(tree *Tree, blobs map[SHA1]*Blob) error {
+	for tree.Next() {
+		trEntry := tree.Entry()
+		switch trEntry.Type {
+		case ObjBlob:
+			if blobOb, err := repo.OpenObject(trEntry.ID); err != nil {
+				return err
+			} else {
+				blobs[trEntry.ID] = blobOb.(*Blob)
+				blobOb.Close()
+			}
+		case ObjTree:
+			if treeOb, err := repo.OpenObject(trEntry.ID); err != nil {
+				return err
+			} else {
+				if err = repo.GetBlobsForTree(treeOb.(*Tree), blobs); err != nil {
+					treeOb.Close()
+					return err
+				}
+
+			}
+		}
+	}
+	return tree.Err()
+}

+ 206 - 0
vendor/github.com/G-Node/gig/write.go

@@ -0,0 +1,206 @@
+package gig
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+)
+
+func writeHeader(o Object, w *bufio.Writer) (n int64, err error) {
+
+	x, err := w.WriteString(o.Type().String())
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	x, err = w.WriteString(" ")
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	x, err = w.WriteString(fmt.Sprintf("%d", o.Size()))
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	err = w.WriteByte(0)
+	if err != nil {
+		return n, err
+	}
+
+	return n + 1, nil
+}
+
+//WriteTo writes the commit object to the writer in the on-disk format
+//i.e. as it would be stored in the git objects dir (although uncompressed).
+func (c *Commit) WriteTo(writer io.Writer) (int64, error) {
+	w := bufio.NewWriter(writer)
+
+	n, err := writeHeader(c, w)
+	if err != nil {
+		return n, err
+	}
+
+	x, err := w.WriteString(fmt.Sprintf("tree %s\n", c.Tree))
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	for _, p := range c.Parent {
+		x, err = w.WriteString(fmt.Sprintf("parent %s\n", p))
+		n += int64(x)
+		if err != nil {
+			return n, err
+		}
+	}
+
+	x, err = w.WriteString(fmt.Sprintf("author %s\n", c.Author))
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	x, err = w.WriteString(fmt.Sprintf("committer %s\n", c.Committer))
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	if c.GPGSig != "" {
+		s := strings.Replace(c.GPGSig, "\n", "\n ", -1)
+		x, err = w.WriteString(fmt.Sprintf("gpgsig %s\n", s))
+		n += int64(x)
+		if err != nil {
+			return n, err
+		}
+
+	}
+
+	x, err = w.WriteString(fmt.Sprintf("\n%s", c.Message))
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	err = w.Flush()
+	return n, err
+}
+
+//WriteTo writes the tree object to the writer in the on-disk format
+//i.e. as it would be stored in the git objects dir (although uncompressed).
+func (t *Tree) WriteTo(writer io.Writer) (int64, error) {
+
+	w := bufio.NewWriter(writer)
+
+	n, err := writeHeader(t, w)
+	if err != nil {
+		return n, err
+	}
+
+	for t.Next() {
+		//format is: [mode{ASCII, octal}][space][name][\0][SHA1]
+		entry := t.Entry()
+		line := fmt.Sprintf("%o %s", entry.Mode, entry.Name)
+		x, err := w.WriteString(line)
+		n += int64(x)
+		if err != nil {
+			return n, err
+		}
+
+		err = w.WriteByte(0)
+		if err != nil {
+			return n, err
+		}
+		n++
+
+		x, err = w.Write(entry.ID[:])
+		n += int64(x)
+		if err != nil {
+			return n, err
+		}
+	}
+
+	if err = t.Err(); err != nil {
+		return n, err
+	}
+
+	err = w.Flush()
+	return n, err
+}
+
+//WriteTo writes the blob object to the writer in the on-disk format
+//i.e. as it would be stored in the git objects dir (although uncompressed).
+func (b *Blob) WriteTo(writer io.Writer) (int64, error) {
+	w := bufio.NewWriter(writer)
+
+	n, err := writeHeader(b, w)
+	if err != nil {
+		return n, err
+	}
+
+	x, err := io.Copy(w, b.source)
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	err = w.Flush()
+	return n, err
+}
+
+//WriteTo writes the tag object to the writer in the on-disk format
+//i.e. as it would be stored in the git objects dir (although uncompressed).
+func (t *Tag) WriteTo(writer io.Writer) (int64, error) {
+	w := bufio.NewWriter(writer)
+
+	n, err := writeHeader(t, w)
+	if err != nil {
+		return n, err
+	}
+
+	x, err := w.WriteString(fmt.Sprintf("object %s\n", t.Object))
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	x, err = w.WriteString(fmt.Sprintf("type %s\n", t.ObjType))
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	x, err = w.WriteString(fmt.Sprintf("tag %s\n", t.Tag))
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	x, err = w.WriteString(fmt.Sprintf("tagger %s\n\n", t.Tagger))
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+
+	x, err = w.WriteString(t.Message)
+	n += int64(x)
+	if err != nil {
+		return n, err
+	}
+	if t.GPGSig != "" {
+		x, err = w.WriteString(fmt.Sprintf("%s\n", t.GPGSig))
+		n += int64(x)
+		if err != nil {
+			return n, err
+		}
+
+	}
+
+	err = w.Flush()
+	return n, err
+}

+ 12 - 0
vendor/github.com/G-Node/gin-dex/Dockerfile

@@ -0,0 +1,12 @@
# Build image for the gin-dex indexing service.
FROM golang:1.8

WORKDIR /go/src/github.com/G-Node/gin-dex
COPY . .

# Fetch dependencies and build the service binary.
RUN  go get ./...
RUN go build

# The service listens on 8099; repositories are mounted at /repos.
EXPOSE 8099
VOLUME /repos

ENTRYPOINT  ./gin-dex --debug

+ 29 - 0
vendor/github.com/G-Node/gin-dex/LICENSE

@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2017, German Neuroinformatics Node
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 2 - 0
vendor/github.com/G-Node/gin-dex/README.md

@@ -0,0 +1,2 @@
+# gin-dex
+Indexing Service for gin

BIN
vendor/github.com/G-Node/gin-dex/gin-dex


+ 54 - 0
vendor/github.com/G-Node/gin-dex/gindex/determine.go

@@ -0,0 +1,54 @@
+package gindex
+
+import (
+	"bufio"
+
+	"net/http"
+	"strings"
+
+	"github.com/G-Node/gogs/pkg/tool"
+	"github.com/Sirupsen/logrus"
+)
+
// File type classes returned by DetermineFileType/BlobFileType.
const (
	UKKNOWN  = iota // unrecognized content (sic: name kept as-is for compatibility)
	ANNEX           // annexed file (detected via tool.IsAnnexedFile)
	ODML_XML        // text content containing "ODML"
	TEXT            // any other text content
)
+
+func DetermineFileType(peekData []byte) (int64, error) {
+	if tool.IsAnnexedFile(peekData){
+		logrus.Debugf("Found an annex file")
+		return ANNEX,nil
+	}
+	typeStr := http.DetectContentType(peekData)
+	if strings.Contains(typeStr, "text") {
+		if strings.Contains(string(peekData), "ODML") {
+			return ODML_XML, nil
+		}
+		logrus.Debugf("Found a text file")
+		return TEXT, nil
+	}
+	return UKKNOWN, nil
+
+}
+func BlobFileType(blob *IndexBlob) (int64, *bufio.Reader, error) {
+	blobBuffer := bufio.NewReader(blob.Blob)
+	if blob.Size() > 1024 {
+		peekData, err := blobBuffer.Peek(1024)
+		if err != nil {
+			return UKKNOWN,nil, err
+		}
+		fType, err := DetermineFileType(peekData)
+		return fType, blobBuffer, err
+	} else {
+		peekData, err := blobBuffer.Peek(int(blob.Size())) // conversion should be fine(<1024)
+		if err != nil {
+			return UKKNOWN, nil, err
+		}
+		fType, err := DetermineFileType(peekData)
+		return fType, blobBuffer, err
+	}
+
+}

+ 184 - 0
vendor/github.com/G-Node/gin-dex/gindex/elastic.go

@@ -0,0 +1,184 @@
+package gindex
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+
+	"encoding/json"
+
+	"io/ioutil"
+
+	"github.com/G-Node/gig"
+	log "github.com/Sirupsen/logrus"
+)
+
//ElServer holds the address and optional basic-auth credentials
//of an elasticsearch server.
type ElServer struct {
	adress   string // NOTE(review): misspelling of "address"; unexported, kept as-is
	uname    *string
	password *string
}

// Names of the elasticsearch indices used by this service.
const (
	BLOB_INDEX   = "blobs"
	COMMIT_INDEX = "commits"
)

//NewElServer creates a handle for the elasticsearch server at
//adress; uname and password may be nil to disable basic auth.
func NewElServer(adress string, uname, password *string) *ElServer {
	return &ElServer{adress: adress, uname: uname, password: password}
}
+
//Index stores data as a document under /<index>/<doctype>/<id>.
func (el *ElServer) Index(index, doctype string, data []byte, id gig.SHA1) (*http.Response, error) {
	adrr := fmt.Sprintf("%s/%s/%s/%s", el.adress, index, doctype, id.String())
	req, err := http.NewRequest("POST", adrr, bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	return el.elasticRequest(req)
}

//elasticRequest decorates req with basic auth (when configured)
//and a JSON content type, then executes it.
// NOTE(review): a non-nil uname with a nil password would
// dereference nil here — assumes both are set together; TODO confirm.
func (el *ElServer) elasticRequest(req *http.Request) (*http.Response, error) {
	if el.uname != nil {
		req.SetBasicAuth(*el.uname, *el.password)
	}
	req.Header.Set("Content-Type", "application/json")
	cl := http.Client{}
	return cl.Do(req)
}
+
//HasCommit reports whether the commit with commitId is indexed.
// NOTE(review): the index parameter is unused; the "commits"
// index is hard-coded in the URL.
func (el *ElServer) HasCommit(index string, commitId gig.SHA1) (bool, error) {
	adrr := fmt.Sprintf("%s/commits/commit/%s", el.adress, commitId)
	return el.Has(adrr)
}

//HasBlob reports whether the blob with blobId is indexed.
// NOTE(review): the index parameter is unused here as well.
func (el *ElServer) HasBlob(index string, blobId gig.SHA1) (bool, error) {
	adrr := fmt.Sprintf("%s/blobs/blob/%s", el.adress, blobId)
	return el.Has(adrr)
}
+
+func (el *ElServer) Has(adr string) (bool, error) {
+	req, err := http.NewRequest("GET", adr, nil)
+	if err != nil {
+		return false, err
+	}
+	resp, err := el.elasticRequest(req)
+	if err != nil {
+		return false, err
+	}
+	bdy, err := ioutil.ReadAll(resp.Body)
+	resp.Body.Close()
+	var res struct{ Found bool }
+	err = json.Unmarshal(bdy, &res)
+	if err != nil {
+		log.WithError(err)
+		return false, err
+	}
+	return res.Found, nil
+}
+
//search POSTs the (already formatted) querry body to the given
//elasticsearch search endpoint address.
func (el *ElServer) search(querry, adrr string) (*http.Response, error) {
	req, err := http.NewRequest("POST", adrr, bytes.NewReader([]byte(querry)))
	if err != nil {
		log.Errorf("Could not form search query:%+v", err)
		log.Errorf("Formatted query was:%s", querry)
		return nil, err
	}
	return el.elasticRequest(req)
}
+
+func (el *ElServer) SearchBlobs(querry string, okRepos []string) (*http.Response, error) {
+	//implement the passing of the repo ids
+	repos, err := json.Marshal(okRepos)
+	if err != nil {
+		log.Errorf("Could not marshal okRepos: %+v", err)
+		return nil, err
+	}
+	formatted_querry := fmt.Sprintf(BLOB_QUERRY, querry, string(repos))
+	adrr := fmt.Sprintf("%s/%s/_search", el.adress, BLOB_INDEX)
+	return el.search(formatted_querry, adrr)
+}
+
+func (el *ElServer) SearchCommits(querry string, okRepos []string) (*http.Response, error) {
+	//implement the passing of the repo ids
+	repos, err := json.Marshal(okRepos)
+	if err != nil {
+		log.Errorf("Could not marshal okRepos: %+v", err)
+		return nil, err
+	}
+	formatted_querry := fmt.Sprintf(COMMIT_QUERRY, querry, string(repos))
+	adrr := fmt.Sprintf("%s/%s/_search", el.adress, COMMIT_INDEX)
+	return el.search(formatted_querry, adrr)
+}
+
// BLOB_QUERRY is the elasticsearch query template for blob searches.
// fmt verbs: 1) the search term, 2) a JSON array of allowed repo ids.
var BLOB_QUERRY = `{
	"from" : 0, "size" : 20,
	  "_source": ["Oid","GinRepoName","FirstCommit","Path"],
	  "query": {
		"bool": {
		  "must": {
			"match": {
			  "_all": "%s"
			}
		  },
		  "filter": {
			"terms": {
			  "GinRepoId" : %s
			}
		  }
		}
	},
	"highlight" : {
		"fields" : [
			{"Content" : {
				"fragment_size" : 100,
				"number_of_fragments" : 10,
				"fragmenter": "span",
				"require_field_match":false,
				"pre_tags" : ["<b>"],
				"post_tags" : ["</b>"]
				}
			}
		]
	}
}`

// COMMIT_QUERRY is the elasticsearch query template for commit searches.
// fmt verbs: 1) the search term, 2) a JSON array of allowed repo ids.
var COMMIT_QUERRY = `{
	"from" : 0, "size" : 20,
	  "_source": ["Oid","GinRepoName","FirstCommit","Path"],
	  "query": {
		"bool": {
		  "must": {
			"match": {
			  "_all": "%s"
			}
		  },
		  "filter": {
			"terms": {
			  "GinRepoId" : %s
			}
		  }
		}
	},
	"highlight" : {
		"fields" : [
			{"Message" : {
				"fragment_size" : 50,
				"number_of_fragments" : 3,
				"fragmenter": "span",
				"require_field_match":false,
				"pre_tags" : ["<b>"],
				"post_tags" : ["</b>"]
				}
			},
			{"GinRepoName" : {
				"fragment_size" : 50,
				"number_of_fragments" : 3,
				"fragmenter": "span",
				"require_field_match":false,
				"pre_tags" : ["<b>"],
				"post_tags" : ["</b>"]
				}
			}
		]
	}
}`

+ 41 - 0
vendor/github.com/G-Node/gin-dex/gindex/gindex.go

@@ -0,0 +1,41 @@
+package gindex
+
//SearchRequest is the payload of a search call; Token and CsrfT
//authenticate the requesting user against the gin server.
type SearchRequest struct {
	Token  string
	CsrfT  string
	UserID int64
	Querry string
}

//IndexRequest asks for the repository at RepoPath to be indexed.
type IndexRequest struct {
	UserID   int
	RepoPath string
	RepoID   string
}

//ReIndexRequest is an IndexRequest that carries user credentials.
type ReIndexRequest struct {
	*IndexRequest
	Token string
	CsrfT string
}

//GinServer describes the gin web instance used to resolve which
//repositories a user may access.
type GinServer struct {
	URL     string
	GetRepo string
}

//BlobSResult is a single blob hit as returned by elasticsearch.
type BlobSResult struct {
	Source    *IndexBlob  `json:"_source"`
	Score     float64     `json:"_score"`
	Highlight interface{} `json:"highlight"`
}

//CommitSResult is a single commit hit as returned by elasticsearch.
type CommitSResult struct {
	Source    *IndexCommit `json:"_source"`
	Score     float64      `json:"_score"`
	Highlight interface{}  `json:"highlight"`
}

//SearchResults bundles the blob and commit hits of one search.
type SearchResults struct {
	Blobs   []BlobSResult
	Commits []CommitSResult
}

+ 179 - 0
vendor/github.com/G-Node/gin-dex/gindex/handler.go

@@ -0,0 +1,179 @@
+package gindex
+
+import (
+	"net/http"
+	"fmt"
+	log "github.com/Sirupsen/logrus"
+	"github.com/gogits/go-gogs-client"
+
+	"encoding/json"
+	"bytes"
+	"net/http/httptest"
+	"strings"
+)
+
+// Handler for Index requests
+func IndexH(w http.ResponseWriter, r *http.Request, els *ElServer, rpath *string) {
+	rbd := IndexRequest{}
+	err := getParsedBody(r, &rbd)
+	log.Debugf("got a indexing request:%+v", rbd)
+	if err != nil {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	err = IndexRepoWithPath(fmt.Sprintf("%s/%s", *rpath, strings.ToLower(rbd.RepoPath)+".git"),
+		"master", els, rbd.RepoID, rbd.RepoPath)
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+	return
+}
+
+// Handler for SearchBlobs requests
+func SearchH(w http.ResponseWriter, r *http.Request, els *ElServer, gins *GinServer) {
+	rbd := SearchRequest{}
+	err := getParsedBody(r, &rbd)
+	log.Debugf("got a search request:%+v", rbd)
+	if err != nil {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	// Get repo ids from the gin server to which the user has access
+	// we need to limit results to those
+	repos := []gogs.Repository{}
+	err = getParsedHttpCall(http.MethodGet, fmt.Sprintf("%s/api/v1/user/repos", gins.URL),
+		nil, rbd.Token, rbd.CsrfT, &repos)
+	if err != nil {
+		log.Errorf("could not querry repos: %+v", err)
+		w.WriteHeader(http.StatusUnauthorized)
+		return
+	}
+	// Get repos ids for public repos
+	prepos := struct{ Data []gogs.Repository }{}
+	err = getParsedHttpCall(http.MethodGet, fmt.Sprintf("%s/api/v1/repos/search/?limit=10000", gins.URL),
+		nil, rbd.Token, rbd.CsrfT, &prepos)
+	if err != nil {
+		log.Errorf("could not querry public repos: %+v", err)
+		w.WriteHeader(http.StatusUnauthorized)
+		return
+	}
+	repos = append(repos, prepos.Data...)
+
+	repids := make([]string, len(repos))
+	for c, repo := range repos {
+		repids[c] = fmt.Sprintf("%d", repo.ID)
+	}
+	log.Debugf("Repod to search in:%+v", repids)
+	// Lets search now
+	rBlobs := [] BlobSResult{}
+	err = searchBlobs(rbd.Querry, repids, els, &rBlobs)
+	if err != nil {
+		log.Warnf("could not search blobs:%+v", err)
+	}
+	rCommits := [] CommitSResult{}
+	err = searchCommits(rbd.Querry, repids, els, &rCommits)
+	if err != nil {
+		log.Warnf("could not search commits:%+v", err)
+	}
+	data, err := json.Marshal(SearchResults{Blobs: rBlobs, Commits: rCommits})
+	if err != nil {
+		log.Debugf("Could not Masrschal search results")
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+	w.Write(data)
+}
+
+// Handler for Index requests
+func ReIndexRepo(w http.ResponseWriter, r *http.Request, els *ElServer, rpath *string) {
+	rbd := IndexRequest{}
+	err := getParsedBody(r, &rbd)
+	log.Debugf("got a indexing request:%+v", rbd)
+	if err != nil {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	err = ReIndexRepoWithPath(fmt.Sprintf("%s/%s", *rpath, strings.ToLower(rbd.RepoPath)+".git"),
+		"master", els, rbd.RepoID, rbd.RepoPath)
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+	return
+}
+func ReindexH(w http.ResponseWriter, r *http.Request, els *ElServer, gins *GinServer, rpath *string) {
+	rbd := ReIndexRequest{}
+	getParsedBody(r, &rbd)
+	log.Debugf("got a reindex request:%+v", rbd)
+	repos, err := findRepos(*rpath, &rbd, gins)
+	if err != nil {
+		log.Debugf("failed listing repositories: %+v", err)
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	if err != nil {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	for _, repo := range repos {
+		rec := httptest.NewRecorder()
+		ireq := IndexRequest{rbd.UserID, repo.FullName,
+			fmt.Sprintf("%d", repo.ID)}
+		data, _ := json.Marshal(ireq)
+		req, _ := http.NewRequest(http.MethodPost, "/index", bytes.NewReader(data))
+		ReIndexRepo(rec, req, els, rpath)
+		if rec.Code != http.StatusOK {
+			log.Debugf("Could not index %s,%d", repo.FullName, rec.Code)
+		}
+	}
+	w.WriteHeader(http.StatusOK)
+}
+
+func searchCommits(querry string, okRepids []string, els *ElServer,
+	result interface{}) error {
+	commS, err := els.SearchCommits(querry, okRepids)
+	if err != nil {
+		return err
+	}
+	err = parseElResult(commS, &result)
+	commS.Body.Close()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func searchBlobs(querry string, okRepids []string, els *ElServer,
+	result interface{}) error {
+	blobS, err := els.SearchBlobs(querry, okRepids)
+	if err != nil {
+		return err
+	}
+	err = parseElResult(blobS, &result)
+	blobS.Body.Close()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func parseElResult(comS *http.Response, pRes interface{}) error {
+	var res interface{}
+	err := getParsedResponse(comS, &res)
+	if err != nil {
+		return err
+	}
+	// extract the somewhat nested search rersult
+	if x, ok := res.(map[string](interface{})); ok {
+		if y, ok := x["hits"].(map[string](interface{})); ok {
+			err = map2struct(y["hits"], &pRes)
+			return err
+		}
+	}
+	return fmt.Errorf("could not extract elastic result")
+}

+ 136 - 0
vendor/github.com/G-Node/gin-dex/gindex/indexObjects.go

@@ -0,0 +1,136 @@
+package gindex
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"time"
+
+	"github.com/G-Node/gig"
+	log "github.com/Sirupsen/logrus"
+	"github.com/G-Node/go-annex"
+	"fmt"
+)
+
// IndexBlob is the document stored in the blob index: the gig blob plus
// repository metadata and, for text-like files, the inlined content.
type IndexBlob struct {
	*gig.Blob
	GinRepoName  string
	GinRepoId    string
	FirstCommit  string // id of the commit under which this blob was indexed
	Id           int64
	Oid          gig.SHA1
	IndexingTime time.Time
	Content      string // file content for text-like blobs; empty otherwise
	Path         string // path of the blob within the repository tree
}
+
+func NewCommitFromGig(gCommit *gig.Commit, repoid string, reponame string, oid gig.SHA1) *IndexCommit {
+	commit := &IndexCommit{gCommit, repoid, oid,
+		reponame, time.Now()}
+	return commit
+}
+
+func NewBlobFromGig(gBlob *gig.Blob, repoid string, oid gig.SHA1, commit string, path string, reponame string) *IndexBlob {
+	// Remember keeping the id
+	blob := IndexBlob{Blob: gBlob, GinRepoId: repoid, Oid: oid, FirstCommit: commit, Path: path, GinRepoName: reponame}
+	return &blob
+}
+
// IndexCommit is the document stored in the commit index: the gig commit
// plus repository metadata.
type IndexCommit struct {
	*gig.Commit
	GinRepoId    string
	Oid          gig.SHA1
	GinRepoName  string
	IndexingTime time.Time
}
+
+func BlobFromJson(data []byte) (*IndexBlob, error) {
+	bl := &IndexBlob{}
+	err := json.Unmarshal(data, bl)
+	return bl, err
+}
+
// ToJson serializes the commit document for storage in elasticsearch.
func (c *IndexCommit) ToJson() ([]byte, error) {
	return json.Marshal(c)
}
+
+func (c *IndexCommit) AddToIndex(server *ElServer, index string, id gig.SHA1) error {
+	data, err := c.ToJson()
+	if err != nil {
+		return err
+	}
+	indexid := GetIndexCommitId(id.String(), c.GinRepoId)
+	err = AddToIndex(data, server, index, "commit", indexid)
+	return err
+}
+
// ToJson serializes the blob document for storage in elasticsearch.
func (bl *IndexBlob) ToJson() ([]byte, error) {
	return json.Marshal(bl)
}
+
+func (bl *IndexBlob) AddToIndex(server *ElServer, index, repopath string, id gig.SHA1) error {
+	indexid := GetIndexCommitId(id.String(), bl.GinRepoId)
+	if bl.Size() > gannex.MEGABYTE*10 {
+		return fmt.Errorf("File to big")
+	}
+	f_type, blobBuffer, err := BlobFileType(bl)
+	if err != nil {
+		log.Errorf("Could not determine file type: %+v", err)
+		return nil
+	}
+	switch f_type {
+	case ANNEX:
+		fallthrough // deactivated fort the time being
+		/*		APFileC, err := ioutil.ReadAll(blobBuffer)
+				log.Debugf("Annex file:%s", APFileC)
+				if err != nil {
+					log.Errorf("Could not open annex pointer file: %+v", err)
+					return err
+				}
+				Afile, err := gannex.NewAFile(repopath, "", "", APFileC)
+				if err != nil {
+					log.Errorf("Could not get annex file%+v", err)
+					return err
+				}
+				fp, err := Afile.Open()
+				if err != nil {
+					log.Errorf("Could not open annex file: %+v", err)
+					return err
+				}
+				defer fp.Close()
+				bl.Blob = gig.MakeAnnexBlob(fp, Afile.Info.Size())
+				return bl.AddToIndex(server, index, repopath, id)*/
+
+	case TEXT:
+		ct, err := ioutil.ReadAll(blobBuffer)
+		if err != nil {
+			log.Errorf("Could not read text file content:%+v", err)
+			return err
+		}
+		bl.Content = string(ct)
+	case ODML_XML:
+		ct, err := ioutil.ReadAll(blobBuffer)
+		if err != nil {
+			return err
+		}
+		bl.Content = string(ct)
+	}
+	data, err := bl.ToJson()
+	if err != nil {
+		return err
+	}
+	err = AddToIndex(data, server, index, "blob", indexid)
+	return err
+}
+
// IsInIndex reports whether the blob is already present in the index.
// NOTE(review): stub — always returns (false, nil), so callers treat every
// blob as not yet indexed.
func (bl *IndexBlob) IsInIndex() (bool, error) {
	return false, nil
}
+
+func AddToIndex(data []byte, server *ElServer, index, doctype string, id gig.SHA1) error {
+	resp, err := server.Index(index, doctype, data, id)
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return err
+}

+ 88 - 0
vendor/github.com/G-Node/gin-dex/gindex/repo.go

@@ -0,0 +1,88 @@
+package gindex
+
+import (
+	"github.com/G-Node/gig"
+	log "github.com/Sirupsen/logrus"
+)
+
+func IndexRepoWithPath(path, ref string, serv *ElServer, repoid string, reponame string) error {
+	log.Info("Start indexing repository with path: %s", path)
+	rep, err := gig.OpenRepository(path)
+	if err != nil {
+		log.Errorf("Could not open repository: %+v", err)
+		return err
+	}
+	log.Debugf("Opened repository")
+	commits, err := rep.WalkRef(ref,
+		func(comitID gig.SHA1) bool {
+			res, err := serv.HasCommit("commits", GetIndexCommitId(comitID.String(), repoid))
+			if err != nil {
+				log.Errorf("Could not querry commit index: %v", err)
+				return false
+			}
+			return !res
+		})
+	log.Infof("Found %d commits", len(commits))
+
+	for commitid, commit := range commits {
+		err = indexCommit(commit, repoid, commitid, rep, path, reponame, serv, serv.HasBlob)
+	}
+	return nil
+}
+
+func ReIndexRepoWithPath(path, ref string, serv *ElServer, repoid string, reponame string) error {
+	log.Info("Start indexing repository with path: %s", path)
+	rep, err := gig.OpenRepository(path)
+	if err != nil {
+		log.Errorf("Could not open repository: %+v", err)
+		return err
+	}
+	log.Debugf("Opened repository")
+	commits, err := rep.WalkRef(ref,
+		func(comitID gig.SHA1) bool {
+			return true
+		})
+	log.Infof("Found %d commits", len(commits))
+
+	blobs := make(map[gig.SHA1]bool)
+	for commitid, commit := range commits {
+		err = indexCommit(commit, repoid, commitid, rep, path, reponame, serv,
+			func(indexName string, id gig.SHA1) (bool, error) {
+				if !blobs[id] {
+					blobs[id] = true
+					return false, nil
+				}
+				return true, nil
+			})
+	}
+	return nil
+}
+
+func indexCommit(commit *gig.Commit, repoid string, commitid gig.SHA1, rep *gig.Repository,
+	path string, reponame string, serv *ElServer,
+	indexBlob func(string, gig.SHA1) (bool, error)) error {
+	err := NewCommitFromGig(commit, repoid, reponame, commitid).AddToIndex(serv, "commits", commitid)
+	if err != nil {
+		log.Printf("Indexing commit failed:%+v", err)
+	}
+	blobs := make(map[gig.SHA1]*gig.Blob)
+	rep.GetBlobsForCommit(commit, blobs)
+	for blid, blob := range blobs {
+		log.Debugf("Blob %s has Size:%d", blid, blob.Size())
+		hasBlob, err := indexBlob("blobs", GetIndexBlobId(blid.String(), repoid))
+		if err != nil {
+			log.Errorf("Could not querry for blob: %+v", err)
+			return err
+		}
+		if !hasBlob {
+			bpath, _ := GetBlobPath(blid.String(), commitid.String(), path)
+			err = NewBlobFromGig(blob, repoid, blid, commitid.String(), bpath, reponame).AddToIndex(serv, "blobs", path, blid)
+			if err != nil {
+				log.Debugf("Indexing blob failed: %+v", err)
+			}
+		} else {
+			log.Debugf("Blob there :%s", blid)
+		}
+	}
+	return nil
+}

+ 8 - 0
vendor/github.com/G-Node/gin-dex/gindex/search.go

@@ -0,0 +1,8 @@
package gindex

// Result is a placeholder for a generic search result.
// NOTE(review): unused stub.
type Result struct {
}

// Search is a placeholder search entry point.
// NOTE(review): stub — always returns (nil, nil); real searching happens
// via the elastic server queries.
func Search(querry string, repos []int64) ([]Result, error) {
	return nil, nil
}

+ 128 - 0
vendor/github.com/G-Node/gin-dex/gindex/util.go

@@ -0,0 +1,128 @@
+package gindex
+
+import (
+	"net/http"
+	"io/ioutil"
+	"encoding/json"
+	"io"
+	"fmt"
+	log "github.com/Sirupsen/logrus"
+	"github.com/gogits/go-gogs-client"
+	"github.com/G-Node/gig"
+	"os"
+	"path/filepath"
+	"strings"
+	"crypto/sha1"
+	"regexp"
+	"github.com/G-Node/git-module"
+)
+
+func getParsedBody(r *http.Request, obj interface{}) error {
+	data, err := ioutil.ReadAll(r.Body)
+	r.Body.Close()
+	if err != nil {
+		log.Debugf("Could not read request body: %+v", err)
+		return err
+	}
+	err = json.Unmarshal(data, obj)
+	if err != nil {
+		log.Debugf("Could not unmarshal request: %+v, %s", err, string(data))
+		return err
+	}
+	return nil
+}
+
+func getParsedResponse(resp *http.Response, obj interface{}) error {
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, obj)
+}
+
+func getParsedHttpCall(method, path string, body io.Reader, token, csrfT string, obj interface{}) error {
+	client := &http.Client{}
+	req, _ := http.NewRequest(method, path, body)
+	req.Header.Set("Cookie", fmt.Sprintf("i_like_gogits=%s; _csrf=%s", token, csrfT))
+	resp, err := client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if (resp.StatusCode != http.StatusOK) {
+		return fmt.Errorf("%s: %d, %s", resp.Status, resp.StatusCode, req.URL)
+	}
+	return getParsedResponse(resp, obj)
+}
+
// map2struct converts a generic value (typically a map produced by JSON
// decoding) into the typed value pointed to by out. It lazily round-trips
// through the json package instead of reflecting directly.
func map2struct(in interface{}, out interface{}) error {
	encoded, marshalErr := json.Marshal(in)
	if marshalErr != nil {
		return marshalErr
	}
	return json.Unmarshal(encoded, out)
}
+
+// Find gin repos under a certain directory, to which the authenticated user has access
+func findRepos(rpath string, rbd *ReIndexRequest, gins *GinServer) ([]*gogs.Repository, error) {
+	var repos [] *gogs.Repository
+	err := filepath.Walk(rpath, func(path string, info os.FileInfo, err error) error {
+		if ! info.IsDir() {
+			return nil
+		}
+		repo, err := gig.OpenRepository(path)
+		if err != nil {
+			return nil
+		}
+		gRepo, err := hasRepoAccess(repo, rbd, gins)
+		if err != nil {
+			log.Debugf("no acces to repo:%+v", err)
+			return filepath.SkipDir
+		}
+		repos = append(repos, gRepo)
+		return filepath.SkipDir
+	})
+	return repos, err
+}
+
+func hasRepoAccess(repository *gig.Repository, rbd *ReIndexRequest, gins *GinServer) (*gogs.Repository, error) {
+	splPath := strings.Split(repository.Path, string(filepath.Separator))
+	if ! (len(splPath) > 2) {
+		return nil, fmt.Errorf("not a repo path %s", repository.Path)
+	}
+	owner := splPath[len(splPath)-2]
+	name := strings.TrimSuffix(splPath[len(splPath)-1], ".git")
+	gRepo := gogs.Repository{}
+	err := getParsedHttpCall(http.MethodGet, fmt.Sprintf("%s/api/v1/repos/%s/%s",
+		gins.URL, owner, name), nil, rbd.Token, rbd.CsrfT, &gRepo)
+	if err != nil {
+		return nil, err
+	}
+	return &gRepo, nil
+}
+
// GetIndexCommitId derives the elasticsearch document id for a commit as
// SHA1(repoid + commit id).
func GetIndexCommitId(id, repoid string) gig.SHA1 {
	return sha1.Sum([]byte(repoid + id))
}

// GetIndexBlobId derives the elasticsearch document id for a blob as
// SHA1(repoid + blob id) — the same scheme as GetIndexCommitId.
func GetIndexBlobId(id, repoid string) gig.SHA1 {
	return sha1.Sum([]byte(repoid + id))
}
+
+func GetBlobPath(blid, cid, path string) (string, error) {
+	cmd := git.NewCommand("ls-tree", "-r", cid)
+	res, err := cmd.RunInDirBytes(path)
+	if err != nil {
+		return "", err
+	}
+	pattern := fmt.Sprintf("%s\\s+(.+)", blid)
+	re := regexp.MustCompile(pattern)
+	line_match := re.FindStringSubmatch(string(res))
+	if len(line_match) > 1 {
+		return line_match[1], nil
+	} else {
+		return "", fmt.Errorf("Not found")
+	}
+}

+ 54 - 0
vendor/github.com/G-Node/gin-dex/main.go

@@ -0,0 +1,54 @@
+package main
+
+import (
+	"github.com/docopt/docopt-go"
+	"os"
+	"github.com/G-Node/gin-dex/gindex"
+	"net/http"
+	log  "github.com/Sirupsen/logrus"
+)
+
+func main() {
+	usage := `gin-dex.
+Usage:
+  gin-dex [--eladress=<eladress> --eluser=<eluser> --elpw=<elpw> --rpath=<rpath> --gin=<gin> --port=<port> --debug ]
+
+Options:
+  --eladress=<eladress>           Adress of the elastic server [default: http://localhost:9200]
+  --eluser=<eluser>               Elastic user [default: elastic]
+  --elpw=<elpw>                   Elastic password [default: changeme]
+  --port=<port>                   Server port [default: 8099]
+  --gin=<gin>                     Gin Server Adress [default: https://gin.g-node.org]
+  --rpath=<rpath>                 Path to the repositories [default: /repos]
+  --debug                         Whether debug messages shall be printed
+ `
+	args, err := docopt.Parse(usage, nil, true, "gin-dex0.1a", false)
+	if err != nil {
+		log.Printf("Error while parsing command line: %+v", err)
+		os.Exit(-1)
+	}
+	uname := args["--eluser"].(string)
+	pw := args["--elpw"].(string)
+	els := gindex.NewElServer(args["--eladress"].(string), &uname, &pw)
+	gin := &gindex.GinServer{URL: args["--gin"].(string)}
+	rpath := args["--rpath"].(string)
+
+	http.HandleFunc("/index", func(w http.ResponseWriter, r *http.Request) {
+		gindex.IndexH(w, r, els, &rpath)
+	})
+
+	http.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) {
+		gindex.SearchH(w, r, els, gin)
+	})
+
+	http.HandleFunc("/reindex", func(w http.ResponseWriter, r *http.Request) {
+		gindex.ReindexH(w, r, els, gin, &rpath)
+	})
+
+
+	if args["--debug"].(bool) {
+		log.SetLevel(log.DebugLevel)
+		log.SetFormatter(&log.TextFormatter{ForceColors: true})
+	}
+	log.Fatal(http.ListenAndServe(":"+args["--port"].(string), nil))
+}