Skip to content

Commit 4366e8d

Browse files
authored
Merge branch 'main' into feat-obj-ctx
2 parents 078583d + b50ffb5 commit 4366e8d

File tree

7 files changed

+29
-21
lines changed

7 files changed

+29
-21
lines changed

pkg/meta/utils.go

Lines changed: 1 addition & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -370,11 +370,7 @@ func (m *baseMeta) GetSummary(ctx Context, inode Ino, summary *Summary, recursiv
370370
return st
371371
}
372372
if attr.Typ != TypeDirectory {
373-
if attr.Typ == TypeDirectory {
374-
summary.Dirs++
375-
} else {
376-
summary.Files++
377-
}
373+
summary.Files++
378374
summary.Size += uint64(align4K(attr.Length))
379375
if attr.Typ == TypeFile {
380376
summary.Length += attr.Length

pkg/object/interface.go

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -94,7 +94,7 @@ type ObjectStorage interface {
9494
Head(ctx context.Context, key string) (Object, error)
9595
// List returns a list of objects using ListObjectV2.
9696
List(ctx context.Context, prefix, startAfter, token, delimiter string, limit int64, followLink bool) ([]Object, bool, string, error)
97-
// ListAll returns all the objects as an channel.
97+
// ListAll returns all the objects as a channel.
9898
ListAll(ctx context.Context, prefix, marker string, followLink bool) (<-chan Object, error)
9999

100100
// CreateMultipartUpload starts to upload a large object part by part.
@@ -105,7 +105,7 @@ type ObjectStorage interface {
105105
UploadPartCopy(ctx context.Context, key string, uploadID string, num int, srcKey string, off, size int64) (*Part, error)
106106
// AbortUpload abort a multipart upload.
107107
AbortUpload(ctx context.Context, key string, uploadID string)
108-
// CompleteUpload finish an multipart upload.
108+
// CompleteUpload finish a multipart upload.
109109
CompleteUpload(ctx context.Context, key string, uploadID string, parts []*Part) error
110110
// ListUploads lists existing multipart uploads.
111111
ListUploads(ctx context.Context, marker string) ([]*PendingPart, string, error)

pkg/object/minio.go

Lines changed: 9 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -35,11 +35,17 @@ import (
3535

3636
type minio struct {
3737
s3client
38-
endpoint string
3938
}
4039

4140
func (m *minio) String() string {
42-
return fmt.Sprintf("minio://%s/%s/", m.endpoint, m.s3client.bucket)
41+
if m.s3.Options().BaseEndpoint != nil {
42+
endpoint := *m.s3.Options().BaseEndpoint
43+
if idx := strings.Index(endpoint, "://"); idx >= 0 {
44+
endpoint = endpoint[idx+3:]
45+
}
46+
return fmt.Sprintf("minio://%s/%s/", endpoint, m.bucket)
47+
}
48+
return fmt.Sprintf("minio://%s/", m.bucket)
4349
}
4450

4551
func (m *minio) Limits() Limits {
@@ -103,7 +109,7 @@ func newMinio(endpoint, accessKey, secretKey, token string) (ObjectStorage, erro
103109
bucket = bucket[len("minio/"):]
104110
}
105111
bucket = strings.Split(bucket, "/")[0]
106-
return &minio{s3client{bucket: bucket, s3: client, region: region}, endpoint}, nil
112+
return &minio{s3client{bucket: bucket, s3: client, region: region}}, nil
107113
}
108114

109115
func init() {

pkg/object/oss.go

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -83,7 +83,7 @@ func (o *ossClient) Head(ctx context.Context, key string) (Object, error) {
8383
})
8484
if err != nil {
8585
var svcErr *oss.ServiceError
86-
if errors.As(err, &svcErr); svcErr.StatusCode == http.StatusNotFound {
86+
if errors.As(err, &svcErr) && svcErr.StatusCode == http.StatusNotFound {
8787
err = os.ErrNotExist
8888
}
8989
return nil, err

pkg/vfs/reader.go

Lines changed: 7 additions & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -51,7 +51,7 @@ const (
5151

5252
const readSessions = 2
5353

54-
var readBufferUsed int64
54+
var readBufferUsed atomic.Int64
5555

5656
type sstate uint8
5757

@@ -267,7 +267,7 @@ func (s *sliceReader) delete() {
267267
} else {
268268
s.file.last = s.prev
269269
}
270-
atomic.AddInt64(&readBufferUsed, -int64(cap(s.page.Data)))
270+
readBufferUsed.Add(-int64(cap(s.page.Data)))
271271
s.page.Release()
272272
}
273273

@@ -325,7 +325,7 @@ func (f *fileReader) newSlice(block *frange) *sliceReader {
325325
*(f.last) = s
326326
f.last = &(s.next)
327327
go s.run()
328-
atomic.AddInt64(&readBufferUsed, int64(cap(s.page.Data)))
328+
readBufferUsed.Add(int64(cap(s.page.Data)))
329329
return s
330330
}
331331

@@ -417,7 +417,7 @@ func (f *fileReader) checkReadahead(block *frange) int {
417417
ses := &f.sessions[idx]
418418
seqdata := ses.total
419419
readahead := ses.readahead
420-
used := uint64(atomic.LoadInt64(&readBufferUsed))
420+
used := uint64(readBufferUsed.Load())
421421
if readahead == 0 && f.r.blockSize <= f.r.readAheadMax && (block.off == 0 || seqdata > block.len) { // begin with read-ahead turned on
422422
ses.readahead = f.r.blockSize
423423
} else if readahead < f.r.readAheadMax && seqdata >= readahead && f.r.readAheadTotal > used+readahead*4 {
@@ -482,7 +482,7 @@ func (f *fileReader) releaseIdleBuffer() {
482482
defer f.Unlock()
483483
now := time.Now()
484484
var idle = time.Minute
485-
used := atomic.LoadInt64(&readBufferUsed)
485+
used := readBufferUsed.Load()
486486
if used > int64(f.r.readAheadTotal) {
487487
idle /= time.Duration(used / int64(f.r.readAheadTotal))
488488
}
@@ -538,7 +538,7 @@ func (f *fileReader) readAhead(block *frange) {
538538
}
539539
return true
540540
})
541-
if block.len > 0 && block.off < f.length && uint64(atomic.LoadInt64(&readBufferUsed)) < f.r.readAheadTotal {
541+
if block.len > 0 && block.off < f.length && uint64(readBufferUsed.Load()) < f.r.readAheadTotal {
542542
if block.len < f.r.blockSize {
543543
block.len += f.r.blockSize - block.end()%f.r.blockSize // align to end of a block
544544
}
@@ -724,7 +724,7 @@ func NewDataReader(conf *Config, m meta.Meta, store chunk.ChunkStore) DataReader
724724
}
725725

726726
func (r *dataReader) readBufferUsed() int64 {
727-
used := atomic.LoadInt64(&readBufferUsed)
727+
used := readBufferUsed.Load()
728728
return used
729729
}
730730

pkg/vfs/writer.go

Lines changed: 2 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -183,7 +183,7 @@ func (c *chunkWriter) commitThread() {
183183
f := c.file
184184
defer f.w.free(f)
185185
f.Lock()
186-
defer f.Unlock()
186+
187187
// the slices should be committed in the order that are created
188188
for len(c.slices) > 0 {
189189
s := c.slices[0]
@@ -218,6 +218,7 @@ func (c *chunkWriter) commitThread() {
218218
c.slices = c.slices[1:]
219219
}
220220
f.freeChunk(c)
221+
f.Unlock()
221222
}
222223

223224
type fileWriter struct {

sdk/python/juicefs/juicefs/juicefs.py

Lines changed: 7 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -35,7 +35,12 @@
3535
XATTR_REPLACE = 2
3636

3737
def check_error(r, fn, args):
38-
if r < 0:
38+
if fn.__name__ == "jfs_init" and r == 0:
39+
name = args[0].decode()
40+
e = OSError(f'JuiceFS initialized failed for {name}')
41+
e.errno = 1
42+
raise e
43+
elif r < 0:
3944
formatted_args = []
4045
for arg in args[2:]:
4146
if isinstance(arg, (bytes, bytearray)) and len(arg) > 1024:
@@ -350,7 +355,7 @@ def removexattr(self, path, name):
350355
self.lib.jfs_removeXattr(c_int64(_tid()), c_int64(self.h), _bin(path), _bin(name))
351356

352357
def clone(self, src, dst, preserve=False):
353-
"""Clone a file."""
358+
"""Clone a file or directory."""
354359
self.lib.jfs_clone(c_int64(_tid()), c_int64(self.h), _bin(src), _bin(dst), c_bool(preserve))
355360

356361
def set_quota(self, path, capacity=0, inodes=0, create=False, strict=False):

0 commit comments

Comments (0)