package mega

import (
	"context"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/pquerna/otp/totp"
	"github.com/rclone/rclone/lib/readers"
	log "github.com/sirupsen/logrus"
	"github.com/t3rm1n4l/go-mega"
)
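
// Mega is the MEGA cloud storage driver, built on top of the
// t3rm1n4l/go-mega client.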
type Mega struct {
	model.Storage
	Addition
	c *mega.Mega
}

func (d *Mega) Config() driver.Config {
	return config
}

func (d *Mega) GetAddition() driver.Additional {
	return &d.Addition
}
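
// Init logs in to MEGA. When a TwoFASecret is configured, a fresh TOTP code is
// generated from it; otherwise the statically configured TwoFACode is sent with
// the multi-factor login.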
func (d *Mega) Init(ctx context.Context) error {
	var twoFACode = d.TwoFACode
	d.c = mega.New()
	if d.TwoFASecret != "" {
		code, err := totp.GenerateCode(d.TwoFASecret, time.Now())
		if err != nil {
			return fmt.Errorf("generate totp code failed: %w", err)
		}
		twoFACode = code
	}
	return d.c.MultiFactorLogin(d.Email, d.Password, twoFACode)
}

func (d *Mega) Drop(ctx context.Context) error {
	return nil
}
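
// List returns the immediate children of dir, keeping only regular files and
// folders.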
func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	if node, ok := dir.(*MegaNode); ok {
		nodes, err := d.c.FS.GetChildren(node.n)
		if err != nil {
			return nil, err
		}
		res := make([]model.Obj, 0)
		for i := range nodes {
			n := nodes[i]
			if n.GetType() == mega.FILE || n.GetType() == mega.FOLDER {
				res = append(res, &MegaNode{n})
			}
		}
		return res, nil
	}
	log.Errorf("can't convert: %+v", dir)
	return nil, fmt.Errorf("unable to convert dir to mega node")
}
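
// GetRoot exposes the MEGA cloud root as the driver's root object.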
func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
	n := d.c.FS.GetRoot()
	log.Debugf("mega root: %+v", *n)
	return &MegaNode{n}, nil
}
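
// Link serves the file through a range reader: every requested range opens a
// fresh MEGA download (retried up to 3 times), skips to the range start, and is
// wrapped in a limited reader; when the range runs to the end of the file the
// length is set to -1 (read to EOF).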
func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if node, ok := file.(*MegaNode); ok {
		//down, err := d.c.NewDownload(n.Node)
		//if err != nil {
		//	return nil, fmt.Errorf("open download file failed: %w", err)
		//}
		size := file.GetSize()
		var finalClosers utils.Closers
		resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
			length := httpRange.Length
			if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
				length = -1
			}
			var down *mega.Download
			err := utils.Retry(3, time.Second, func() (err error) {
				down, err = d.c.NewDownload(node.n)
				return err
			})
			if err != nil {
				return nil, fmt.Errorf("open download file failed: %w", err)
			}
			oo := &openObject{
				ctx:  ctx,
				d:    down,
				skip: httpRange.Start,
			}
			finalClosers.Add(oo)
			return readers.NewLimitedReadCloser(oo, length), nil
		}
		resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers}
		resultLink := &model.Link{
			RangeReadCloser: resultRangeReadCloser,
		}
		return resultLink, nil
	}
	return nil, fmt.Errorf("unable to convert file to mega node")
}

func (d *Mega) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	if parentNode, ok := parentDir.(*MegaNode); ok {
		_, err := d.c.CreateDir(dirName, parentNode.n)
		return err
	}
	return fmt.Errorf("unable to convert dir to mega node")
}

func (d *Mega) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	if srcNode, ok := srcObj.(*MegaNode); ok {
		if dstNode, ok := dstDir.(*MegaNode); ok {
			return d.c.Move(srcNode.n, dstNode.n)
		}
	}
	return fmt.Errorf("unable to convert obj to mega node")
}

func (d *Mega) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	if srcNode, ok := srcObj.(*MegaNode); ok {
		return d.c.Rename(srcNode.n, newName)
	}
	return fmt.Errorf("unable to convert obj to mega node")
}

func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	return errs.NotImplement
}

func (d *Mega) Remove(ctx context.Context, obj model.Obj) error {
	if node, ok := obj.(*MegaNode); ok {
		return d.c.Delete(node.n, false)
	}
	return fmt.Errorf("unable to convert obj to mega node")
}
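
// Put uploads the stream chunk by chunk, following the chunk layout reported by
// go-mega, and reports progress after each chunk.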
func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	if dstNode, ok := dstDir.(*MegaNode); ok {
		u, err := d.c.NewUpload(dstNode.n, stream.GetName(), stream.GetSize())
		if err != nil {
			return err
		}
		for id := 0; id < u.Chunks(); id++ {
			if utils.IsCanceled(ctx) {
				return ctx.Err()
			}
			_, chkSize, err := u.ChunkLocation(id)
			if err != nil {
				return err
			}
			chunk := make([]byte, chkSize)
			// io.ReadFull returns io.EOF only when nothing was read; a short read
			// comes back as io.ErrUnexpectedEOF and fails the error check, so the
			// length check below only catches an empty final read.
			n, err := io.ReadFull(stream, chunk)
			if err != nil && err != io.EOF {
				return err
			}
			if n != len(chunk) {
				return errors.New("chunk too short")
			}
			err = u.UploadChunk(id, chunk)
			if err != nil {
				return err
			}
			up(float64(id) * 100 / float64(u.Chunks()))
		}
		_, err = u.Finish()
		return err
	}
	return fmt.Errorf("unable to convert dir to mega node")
}

//func (d *Mega) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Mega)(nil)