Initial commit
commit 12f82e106e

8  .idea/.gitignore  vendored  Normal file
@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

8  .idea/modules.xml  Normal file
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/send2gokapi.iml" filepath="$PROJECT_DIR$/.idea/send2gokapi.iml" />
    </modules>
  </component>
</project>

9  .idea/send2gokapi.iml  Normal file
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
  <component name="Go" enabled="true" />
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>

137  chunker.go  Normal file
@@ -0,0 +1,137 @@
package main

import (
    "bytes"
    "context"
    "fmt"
    gonanoid "github.com/matoous/go-nanoid/v2"
    "golang.org/x/sync/errgroup"
    "io"
    "mime"
    "os"
    "path/filepath"
    "sync"
)

type chunker struct {
    gc             *gokapiClient
    parallelChunks int
    chunkSize      int
}

func newChunker(gc *gokapiClient, parallelChunks, chunkSize int) *chunker {
    return &chunker{
        gc:             gc,
        parallelChunks: parallelChunks,
        chunkSize:      chunkSize,
    }
}

func (c *chunker) UploadFile(ctx context.Context, filename string, progress func(ChunkReport)) (UploadResponse, error) {
    f, err := os.Open(filename)
    if err != nil {
        return UploadResponse{}, err
    }
    defer f.Close()

    fstat, err := f.Stat()
    if err != nil {
        return UploadResponse{}, err
    }
    fname := fstat.Name()

    fi := uploadInfo{
        chunkID:          gonanoid.Must(12),
        filename:         fname,
        totalSize:        fstat.Size(),
        contentType:      mime.TypeByExtension(filepath.Ext(fname)),
        allowedDownloads: 5,
        expiryDays:       7,
        password:         "",
    }

    return c.upload(ctx, fi, f, progress)
}

func (c *chunker) upload(ctx context.Context, fi uploadInfo, r io.ReaderAt, progress func(ChunkReport)) (UploadResponse, error) {
    bufPool := sync.Pool{
        New: func() interface{} {
            return make([]byte, c.chunkSize)
        },
    }

    chunks := int(fi.totalSize/int64(c.chunkSize) + 1)

    chunkUploaded := make(chan uploadedChunk)
    doneChunkReport := make(chan struct{})
    go func() {
        defer close(doneChunkReport)

        uploadedChunks := 0
        uploadedBytes := 0

        progress(ChunkReport{
            UploadedChunks: 0,
            UploadedBytes:  0,
            TotalChunks:    chunks,
            TotalSize:      fi.totalSize,
        })

        for r := range chunkUploaded {
            uploadedChunks += 1
            uploadedBytes += r.ChunkSize

            progress(ChunkReport{
                UploadedChunks: uploadedChunks,
                UploadedBytes:  int64(uploadedBytes),
                TotalChunks:    chunks,
                TotalSize:      fi.totalSize,
            })
        }
    }()

    errGroup, egctx := errgroup.WithContext(ctx)
    errGroup.SetLimit(c.parallelChunks)

    for i := 0; i < chunks; i++ {
        errGroup.Go(func() error {
            offset := int64(i * c.chunkSize)

            buf := bufPool.Get().([]byte)
            defer bufPool.Put(buf)

            thisBuf := buf
            if offset+int64(c.chunkSize) > fi.totalSize {
                thisBuf = buf[:fi.totalSize-offset]
            }

            n, err := r.ReadAt(thisBuf, offset)
            if err != nil {
                return err
            } else if n != len(thisBuf) {
                return fmt.Errorf("chunk %d: expected %d bytes but only read %d", i, len(thisBuf), n)
            }

            if err := c.gc.uploadChunk(egctx, fi, offset, bytes.NewReader(thisBuf)); err != nil {
                return err
            }

            chunkUploaded <- uploadedChunk{ChunkSize: len(thisBuf)}

            return nil
        })
    }

    if err := errGroup.Wait(); err != nil {
        return UploadResponse{}, err
    }

    close(chunkUploaded)
    <-doneChunkReport

    return c.gc.finalizeChunk(ctx, fi)
}

type uploadedChunk struct {
    ChunkSize int
}
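
A standalone sketch (illustrative only, not part of this commit) of the chunk arithmetic in (*chunker).upload: it prints the chunk layout for a hypothetical 250 KiB file, using the same +1 formula and the 100 KiB chunk size that main.go passes in further down.

    // Standalone illustrative program; the 250 KiB file size is made up.
    package main

    import "fmt"

    func main() {
        const chunkSize = 1024 * 100       // default ChunkSize from main.go
        const totalSize int64 = 250 * 1024 // hypothetical file size

        chunks := int(totalSize/int64(chunkSize) + 1) // same formula as upload()
        for i := 0; i < chunks; i++ {
            offset := int64(i * chunkSize)
            length := int64(chunkSize)
            if offset+length > totalSize {
                length = totalSize - offset // the final chunk is trimmed
            }
            fmt.Printf("chunk %d: offset=%d length=%d\n", i, offset, length)
        }
    }

This prints two full 102400-byte chunks followed by a final 51200-byte chunk. Note that when the file size is an exact multiple of the chunk size, the +1 in the formula yields a trailing zero-length chunk.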

116  client.go  Normal file
@@ -0,0 +1,116 @@
package main

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "mime/multipart"
    "net/http"
    "net/url"
    "strconv"
    "strings"
)

type gokapiClient struct {
    httpClient *http.Client
    host       *url.URL
    apiKey     string
}

func newGokapiClient(host *url.URL, apiKey string) *gokapiClient {
    return &gokapiClient{
        httpClient: &http.Client{},
        host:       host,
        apiKey:     apiKey,
    }
}

func (gc *gokapiClient) uploadChunk(ctx context.Context, fi uploadInfo, offset int64, data io.Reader) error {
    var body bytes.Buffer

    boundary, err := gc.prepUploadChunkBody(&body, fi, offset, data)
    if err != nil {
        return err
    }

    actionURL := gc.host.ResolveReference(&url.URL{Path: "/api/chunk/add"})

    req, err := http.NewRequestWithContext(ctx, "POST", actionURL.String(), &body)
    if err != nil {
        return err
    }
    req.Header.Set("Accept", "application/json")
    req.Header.Set("Apikey", gc.apiKey)
    req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)

    resp, err := gc.httpClient.Do(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("upload chunk failed with status code %d", resp.StatusCode)
    }
    return nil
}

func (gc *gokapiClient) finalizeChunk(ctx context.Context, fi uploadInfo) (UploadResponse, error) {
    actionURL := gc.host.ResolveReference(&url.URL{Path: "/api/chunk/complete"})

    formData := url.Values{}
    formData.Set("uuid", fi.chunkID)
    formData.Set("filename", fi.filename)
    formData.Set("filesize", strconv.FormatInt(fi.totalSize, 10))
    formData.Set("contenttype", fi.contentType)
    formData.Set("allowedDownloads", strconv.Itoa(fi.allowedDownloads))
    formData.Set("expiryDays", strconv.Itoa(fi.expiryDays))
    formData.Set("password", fi.password)

    req, err := http.NewRequestWithContext(ctx, "POST", actionURL.String(), strings.NewReader(formData.Encode()))
    if err != nil {
        return UploadResponse{}, err
    }
    req.Header.Set("Accept", "application/json")
    req.Header.Set("Apikey", gc.apiKey)
    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

    resp, err := gc.httpClient.Do(req)
    if err != nil {
        return UploadResponse{}, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return UploadResponse{}, fmt.Errorf("upload chunk finalization failed with status code %d", resp.StatusCode)
    }

    var r UploadResponse
    if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
        return UploadResponse{}, err
    }

    return r, nil
}

func (gc *gokapiClient) prepUploadChunkBody(w io.Writer, fi uploadInfo, offset int64, data io.Reader) (string, error) {
    mw := multipart.NewWriter(w)
    defer mw.Close()

    mw.WriteField("uuid", fi.chunkID)
    mw.WriteField("filesize", strconv.FormatInt(fi.totalSize, 10))
    mw.WriteField("offset", strconv.FormatInt(offset, 10))

    fileWriter, err := mw.CreateFormFile("file", fi.filename)
    if err != nil {
        return "", err
    }
    _, err = io.Copy(fileWriter, data)
    if err != nil {
        return "", err
    }

    return mw.Boundary(), nil
}
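
A test sketch (illustrative only, not part of this commit) showing one way gokapiClient could be exercised against a stub server. The paths, headers, and response shape come from client.go and models.go in this commit; the stub handler behaviour, the test name, and the literal values are assumptions made for the example.

    // Illustrative test sketch in package main; the stub responses are assumptions.
    package main

    import (
        "context"
        "net/http"
        "net/http/httptest"
        "net/url"
        "strings"
        "testing"
    )

    func TestGokapiClientSketch(t *testing.T) {
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            switch r.URL.Path {
            case "/api/chunk/add":
                w.WriteHeader(http.StatusOK) // accept any chunk body
            case "/api/chunk/complete":
                w.Header().Set("Content-Type", "application/json")
                w.Write([]byte(`{"Result":"OK","FileInfo":{"Id":"abc123"},"IncludeFilename":true}`))
            default:
                http.NotFound(w, r)
            }
        }))
        defer srv.Close()

        host, err := url.Parse(srv.URL)
        if err != nil {
            t.Fatal(err)
        }
        gc := newGokapiClient(host, "test-api-key")

        fi := uploadInfo{chunkID: "sketch-uuid", filename: "hello.txt", totalSize: 5}
        if err := gc.uploadChunk(context.Background(), fi, 0, strings.NewReader("hello")); err != nil {
            t.Fatal(err)
        }

        resp, err := gc.finalizeChunk(context.Background(), fi)
        if err != nil {
            t.Fatal(err)
        }
        if resp.FileInfo.ID != "abc123" {
            t.Fatalf("unexpected file ID: %q", resp.FileInfo.ID)
        }
    }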

8  config.go  Normal file
@@ -0,0 +1,8 @@
package main

type Config struct {
    Hostname       string
    APIKey         string
    ParallelChunks int
    ChunkSize      int
}

13  go.mod  Normal file
@@ -0,0 +1,13 @@
module lmika.dev/cmd/send2gokapi

go 1.23.3

require (
    github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
    github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
    github.com/rivo/uniseg v0.4.7 // indirect
    github.com/schollz/progressbar/v3 v3.17.1 // indirect
    golang.org/x/sync v0.10.0 // indirect
    golang.org/x/sys v0.28.0 // indirect
    golang.org/x/term v0.27.0 // indirect
)

14  go.sum  Normal file
@@ -0,0 +1,14 @@
github.com/matoous/go-nanoid/v2 v2.1.0 h1:P64+dmq21hhWdtvZfEAofnvJULaRR1Yib0+PnU669bE=
github.com/matoous/go-nanoid/v2 v2.1.0/go.mod h1:KlbGNQ+FhrUNIHUxZdL63t7tl4LaPkZNpUULS8H4uVM=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/schollz/progressbar/v3 v3.17.1 h1:bI1MTaoQO+v5kzklBjYNRQLoVpe0zbyRZNK6DFkVC5U=
github.com/schollz/progressbar/v3 v3.17.1/go.mod h1:RzqpnsPQNjUyIgdglUjRLgD7sVnxN1wpmBMV+UiEbL4=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=

50  main.go  Normal file
@@ -0,0 +1,50 @@
package main

import (
    "context"
    "flag"
    "github.com/schollz/progressbar/v3"
    "log"
    "net/url"
    "os"
    "path/filepath"
)

func main() {
    config := Config{
        Hostname:       os.Getenv("GOKAPI_HOSTNAME"),
        APIKey:         os.Getenv("GOKAPI_API_KEY"),
        ParallelChunks: 4,
        ChunkSize:      1024 * 100,
    }
    flag.Parse()

    hostUrl, err := url.Parse(config.Hostname)
    if err != nil {
        log.Fatal(err)
    }

    ctx := context.Background()

    client := newGokapiClient(hostUrl, config.APIKey)
    cnkr := newChunker(client, config.ParallelChunks, config.ChunkSize)

    for _, file := range flag.Args() {
        _, err := cnkr.UploadFile(ctx, file, progressBarReport(file))
        if err != nil {
            log.Fatal(err)
        }
    }

}

func progressBarReport(filename string) func(cr ChunkReport) {
    var pr *progressbar.ProgressBar

    return func(cr ChunkReport) {
        if cr.UploadedChunks == 0 && pr == nil {
            pr = progressbar.DefaultBytes(cr.TotalSize, filepath.Base(filename))
        }
        pr.Set(int(cr.UploadedBytes))
    }
}
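
For reference, any callback with the func(ChunkReport) shape can be passed to UploadFile in place of progressBarReport. A minimal dependency-free sketch (illustrative only, not part of this commit; textProgress is a made-up name):

    // Illustrative sketch only; not part of this commit.
    package main

    import "fmt"

    // textProgress returns a progress callback compatible with UploadFile,
    // printing one line per report instead of drawing a bar.
    func textProgress(filename string) func(ChunkReport) {
        return func(cr ChunkReport) {
            fmt.Printf("%s: %d/%d chunks, %d of %d bytes\n",
                filename, cr.UploadedChunks, cr.TotalChunks, cr.UploadedBytes, cr.TotalSize)
        }
    }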

46  models.go  Normal file
@@ -0,0 +1,46 @@
package main

type ChunkReport struct {
    UploadedChunks int
    UploadedBytes  int64
    TotalChunks    int
    TotalSize      int64
}

type uploadInfo struct {
    chunkID          string
    filename         string
    totalSize        int64
    contentType      string
    allowedDownloads int
    expiryDays       int
    password         string
}

type UploadResponse struct {
    Result          string   `json:"Result"`
    FileInfo        FileInfo `json:"FileInfo"`
    IncludeFilename bool     `json:"IncludeFilename"`
}

type FileInfo struct {
    ID                           string `json:"Id"`
    Name                         string `json:"Name"`
    Size                         string `json:"Size"`
    HotlinkId                    string `json:"HotlinkId"`
    ContentType                  string `json:"ContentType"`
    ExpireAtString               string `json:"ExpireAtString"`
    UrlDownload                  string `json:"UrlDownload"`
    UrlHotlink                   string `json:"UrlHotlink"`
    ExpireAt                     int64  `json:"ExpireAt"`
    SizeBytes                    int64  `json:"SizeBytes"`
    DownloadsRemaining           int    `json:"DownloadsRemaining"`
    DownloadsCount               int    `json:"DownloadsCount"`
    UnlimitedDownloads           bool   `json:"UnlimitedDownloads"`
    UnlimitedTime                bool   `json:"UnlimitedTime"`
    RequiresClientSideDecryption bool   `json:"RequiresClientSideDecryption"`
    IsEncrypted                  bool   `json:"IsEncrypted"`
    IsEndToEndEncrypted          bool   `json:"IsEndToEndEncrypted"`
    IsPasswordProtected          bool   `json:"IsPasswordProtected"`
    IsSavedOnLocalStorage        bool   `json:"IsSavedOnLocalStorage"`
}