diff --git a/pkg/meta/base.go b/pkg/meta/base.go
index 3210c144e241..212148df1389 100644
--- a/pkg/meta/base.go
+++ b/pkg/meta/base.go
@@ -115,6 +115,7 @@ type engine interface {
 	doLink(ctx Context, inode, parent Ino, name string, attr *Attr) syscall.Errno
 	doUnlink(ctx Context, parent Ino, name string, attr *Attr, skipCheckTrash ...bool) syscall.Errno
 	doRmdir(ctx Context, parent Ino, name string, inode *Ino, attr *Attr, skipCheckTrash ...bool) syscall.Errno
+	doBatchUnlink(ctx Context, parent Ino, entries []Entry, length *int64, space *int64, inodes *int64, userGroupQuotas *[]UserGroupQuotaDelta, skipCheckTrash ...bool) (errno syscall.Errno)
 	doReadlink(ctx Context, inode Ino, noatime bool) (int64, []byte, error)
 	doReaddir(ctx Context, inode Ino, plus uint8, entries *[]*Entry, limit int) syscall.Errno
 	doRename(ctx Context, parentSrc Ino, nameSrc string, parentDst Ino, nameDst string, flags uint32, inode, tinode *Ino, attr, tattr *Attr) syscall.Errno
@@ -1461,6 +1462,44 @@ func (m *baseMeta) Rmdir(ctx Context, parent Ino, name string, skipCheckTrash ..
 	return st
 }
 
+func (m *baseMeta) BatchUnlink(ctx Context, parent Ino, entries []Entry, count *uint64, skipCheckTrash bool) syscall.Errno {
+	var length int64
+	var space int64
+	var inodes int64
+	userGroupQuotas := make([]UserGroupQuotaDelta, 0, len(entries))
+	st := m.en.doBatchUnlink(ctx, parent, entries, &length, &space, &inodes, &userGroupQuotas, skipCheckTrash)
+	if st == 0 {
+		m.updateDirStat(ctx, parent, length, space, inodes)
+		if !parent.IsTrash() {
+			m.updateDirQuota(ctx, parent, space, inodes)
+			for _, quota := range userGroupQuotas {
+				m.updateUserGroupQuota(ctx, quota.Uid, quota.Gid, quota.Space, quota.Inodes)
+			}
+		}
+		if count != nil && len(entries) > 0 {
+			atomic.AddUint64(count, uint64(len(entries)))
+		}
+	} else if st == syscall.ENOTSUP {
+		for _, e := range entries {
+			if e.Attr.Typ == TypeDirectory {
+				continue
+			}
+			if ctx.Canceled() {
+				return syscall.EINTR
+			}
+			if st := m.Unlink(ctx, parent, string(e.Name), skipCheckTrash); st != 0 && st != syscall.ENOENT {
+				return st
+			}
+			if count != nil {
+				atomic.AddUint64(count, 1)
+			}
+		}
+	} else if st != 0 {
+		return st
+	}
+	return 0
+}
+
 func (m *baseMeta) Rename(ctx Context, parentSrc Ino, nameSrc string, parentDst Ino, nameDst string, flags uint32, inode *Ino, attr *Attr) syscall.Errno {
 	if parentSrc == RootInode && nameSrc == TrashName || parentDst == RootInode && nameDst == TrashName {
 		return syscall.EPERM
diff --git a/pkg/meta/base_test.go b/pkg/meta/base_test.go
index de30a96543d6..fd8e99a42d36 100644
--- a/pkg/meta/base_test.go
+++ b/pkg/meta/base_test.go
@@ -4513,6 +4513,10 @@ func testUserGroupQuota(t *testing.T, m Meta) {
 		testHardlinkQuota(t, m, ctx, parent, uid, gid)
 	})
 
+	t.Run("BatchUnlinkWithUserGroupQuota", func(t *testing.T) {
+		testBatchUnlinkWithUserGroupQuota(t, m, ctx, parent, uid, gid)
+	})
+
 	cleanupQuotaTest(ctx, m, parent, uid, gid)
 }
 
@@ -4680,3 +4684,203 @@ func testHardlinkQuota(t *testing.T, m Meta, ctx Context, parent Ino, uid, gid u
 	m.HandleQuota(ctx, QuotaDel, "", uid, gid, nil, false, false, false)
 	m.HandleQuota(ctx, QuotaDel, parentPath, 0, 0, nil, false, false, false)
 }
+
+func testBatchUnlinkWithUserGroupQuota(t *testing.T, m Meta, ctx Context, parent Ino, uid, gid uint32) {
+	if err := m.HandleQuota(ctx, QuotaSet, "", uid, gid, map[string]*Quota{UGQuotaKey: {MaxSpace: 100 << 20, MaxInodes: 100}}, false, false, false); err != nil {
+		t.Fatalf("Set user group quota: %s", err)
+	}
+	m.getBase().loadQuotas()
+
+	var fileInodes []Ino
+	var fileAttrs []Attr
+	fileNames := []string{"batch_file1", "batch_file2", "batch_file3"}
+	fileSize := uint64(4096) // 4KB per file
+
+	for _, fileName := range fileNames {
+		var inode Ino
+		var attr Attr
+		if st := m.Create(ctx, parent, fileName, 0644, 0, 0, &inode, &attr); st != 0 {
+			t.Fatalf("Create %s: %s", fileName, st)
+		}
+		if st := m.SetAttr(ctx, inode, SetAttrUID|SetAttrGID, 0, &Attr{Uid: uid, Gid: gid}); st != 0 {
+			t.Fatalf("SetAttr UID and GID for %s: %s", fileName, st)
+		}
+		var sliceId uint64
+		if st := m.NewSlice(ctx, &sliceId); st != 0 {
+			t.Fatalf("NewSlice for %s: %s", fileName, st)
+		}
+		slice := Slice{Id: sliceId, Size: uint32(fileSize), Len: uint32(fileSize)}
+		if st := m.Write(ctx, inode, 0, 0, slice, time.Now()); st != 0 {
+			t.Fatalf("Write data to %s: %s", fileName, st)
+		}
+		fileInodes = append(fileInodes, inode)
+		fileAttrs = append(fileAttrs, attr)
+	}
+
+	m.getBase().doFlushQuotas()
+	time.Sleep(200 * time.Millisecond)
+
+	qs := make(map[string]*Quota)
+	if err := m.HandleQuota(ctx, QuotaGet, "", uid, gid, qs, false, false, false); err != nil {
+		t.Fatalf("Get user group quota before batch unlink: %s", err)
+	}
+	ugQuotaBefore := qs[UGQuotaKey]
+	if ugQuotaBefore == nil {
+		t.Fatalf("User group quota not found before batch unlink")
+	}
+
+	var entries []Entry
+	for i, fileName := range fileNames {
+		var attr Attr
+		if st := m.GetAttr(ctx, fileInodes[i], &attr); st != 0 {
+			t.Fatalf("GetAttr for %s: %s", fileName, st)
+		}
+		entries = append(entries, Entry{
+			Inode: fileInodes[i],
+			Name:  []byte(fileName),
+			Attr:  &attr,
+		})
+	}
+
+	var count uint64
+	if st := m.getBase().BatchUnlink(ctx, parent, entries, &count, false); st != 0 {
+		t.Fatalf("BatchUnlink failed: %s", st)
+	}
+
+	if count != uint64(len(fileNames)) {
+		t.Fatalf("BatchUnlink count mismatch: expected %d, got %d", len(fileNames), count)
+	}
+
+	m.getBase().doFlushQuotas()
+	time.Sleep(200 * time.Millisecond)
+
+	qs = make(map[string]*Quota)
+	if err := m.HandleQuota(ctx, QuotaGet, "", uid, gid, qs, false, false, false); err != nil {
+		t.Fatalf("Get user group quota after batch unlink: %s", err)
+	}
+	ugQuotaAfter := qs[UGQuotaKey]
+	if ugQuotaAfter == nil {
+		t.Fatalf("User group quota not found after batch unlink")
+	}
+
+	expectedInodeDecrease := int64(len(fileNames))
+	actualInodeDecrease := ugQuotaBefore.UsedInodes - ugQuotaAfter.UsedInodes
+
+	if actualInodeDecrease != expectedInodeDecrease {
+		t.Fatalf("User group quota inode decrease mismatch: expected %d, got %d", expectedInodeDecrease, actualInodeDecrease)
+	}
+
+	expectedSpaceDecrease := align4K(fileSize) * int64(len(fileNames))
+	actualSpaceDecrease := ugQuotaBefore.UsedSpace - ugQuotaAfter.UsedSpace
+
+	if actualSpaceDecrease != expectedSpaceDecrease {
+		t.Fatalf("User group quota space decrease mismatch: expected %d, got %d", expectedSpaceDecrease, actualSpaceDecrease)
+	}
+
+	var originalInode Ino
+	var originalAttr Attr
+	hardlinkFileSize := uint64(8192) // 8KB
+	hardlinkFileName := "hardlink_original"
+	if st := m.Create(ctx, parent, hardlinkFileName, 0644, 0, 0, &originalInode, &originalAttr); st != 0 {
+		t.Fatalf("Create original file for hardlink test: %s", st)
+	}
+	if st := m.SetAttr(ctx, originalInode, SetAttrUID|SetAttrGID, 0, &Attr{Uid: uid, Gid: gid}); st != 0 {
+		t.Fatalf("SetAttr UID and GID for original file: %s", st)
+	}
+	var sliceId uint64
+	if st := m.NewSlice(ctx, &sliceId); st != 0 {
+		t.Fatalf("NewSlice for original file: %s", st)
+	}
+	slice := Slice{Id: sliceId, Size: uint32(hardlinkFileSize), Len: uint32(hardlinkFileSize)}
+	if st := m.Write(ctx, originalInode, 0, 0, slice, time.Now()); st != 0 {
+		t.Fatalf("Write data to original file: %s", st)
+	}
+
+	hardlinkFileName2 := "hardlink_link"
+	if st := m.Link(ctx, originalInode, parent, hardlinkFileName2, &originalAttr); st != 0 {
+		t.Fatalf("Create hardlink: %s", st)
+	}
+
+	m.getBase().doFlushQuotas()
+	time.Sleep(200 * time.Millisecond)
+
+	qs = make(map[string]*Quota)
+	if err := m.HandleQuota(ctx, QuotaGet, "", uid, gid, qs, false, false, false); err != nil {
+		t.Fatalf("Get user group quota before hardlink unlink: %s", err)
+	}
+	ugQuotaBeforeHardlink := qs[UGQuotaKey]
+	if ugQuotaBeforeHardlink == nil {
+		t.Fatalf("User group quota not found before hardlink unlink")
+	}
+
+	var hardlinkAttr Attr
+	var hardlinkInode Ino
+	if st := m.Lookup(ctx, parent, hardlinkFileName2, &hardlinkInode, &hardlinkAttr, false); st != 0 {
+		t.Fatalf("Lookup hardlink file: %s", st)
+	}
+	if hardlinkInode != originalInode {
+		t.Fatalf("Hardlink inode mismatch: expected %d, got %d", originalInode, hardlinkInode)
+	}
+	if hardlinkAttr.Nlink < 2 {
+		t.Fatalf("Expected Nlink >= 2 for hardlink, got %d", hardlinkAttr.Nlink)
+	}
+
+	var hardlinkEntry Attr
+	if st := m.GetAttr(ctx, hardlinkInode, &hardlinkEntry); st != 0 {
+		t.Fatalf("GetAttr for hardlink: %s", st)
+	}
+	hardlinkEntries := []Entry{
+		{
+			Inode: hardlinkInode,
+			Name:  []byte(hardlinkFileName2),
+			Attr:  &hardlinkEntry,
+		},
+	}
+
+	count = 0
+	if st := m.getBase().BatchUnlink(ctx, parent, hardlinkEntries, &count, false); st != 0 {
+		t.Fatalf("BatchUnlink hardlink failed: %s", st)
+	}
+
+	if count != 1 {
+		t.Fatalf("BatchUnlink hardlink count mismatch: expected 1, got %d", count)
+	}
+
+	m.getBase().doFlushQuotas()
+	time.Sleep(200 * time.Millisecond)
+
+	qs = make(map[string]*Quota)
+	if err := m.HandleQuota(ctx, QuotaGet, "", uid, gid, qs, false, false, false); err != nil {
+		t.Fatalf("Get user group quota after hardlink unlink: %s", err)
+	}
+	ugQuotaAfterHardlink := qs[UGQuotaKey]
+	if ugQuotaAfterHardlink == nil {
+		t.Fatalf("User group quota not found after hardlink unlink")
+	}
+
+	expectedHardlinkInodeDecrease := int64(1)
+	expectedHardlinkSpaceDecrease := int64(0)
+
+	actualHardlinkInodeDecrease := ugQuotaBeforeHardlink.UsedInodes - ugQuotaAfterHardlink.UsedInodes
+	actualHardlinkSpaceDecrease := ugQuotaBeforeHardlink.UsedSpace - ugQuotaAfterHardlink.UsedSpace
+
+	if actualHardlinkInodeDecrease != expectedHardlinkInodeDecrease {
+		t.Fatalf("Hardlink unlink: user group quota inode decrease mismatch: expected %d, got %d", expectedHardlinkInodeDecrease, actualHardlinkInodeDecrease)
+	}
+	if actualHardlinkSpaceDecrease != expectedHardlinkSpaceDecrease {
+		t.Fatalf("Hardlink unlink: user group quota space decrease mismatch: expected %d, got %d (should be 0 for hardlink deletion)", expectedHardlinkSpaceDecrease, actualHardlinkSpaceDecrease)
+	}
+
+	var checkAttr Attr
+	if st := m.GetAttr(ctx, originalInode, &checkAttr); st != 0 {
+		t.Fatalf("Original file should still exist after hardlink deletion: %s", st)
+	}
+	if checkAttr.Nlink != hardlinkAttr.Nlink-1 {
+		t.Fatalf("Original file Nlink should decrease by 1: expected %d, got %d", hardlinkAttr.Nlink-1, checkAttr.Nlink)
+	}
+
+	m.Unlink(ctx, parent, hardlinkFileName)
+	if err := m.HandleQuota(ctx, QuotaDel, "", uid, gid, nil, false, false, false); err != nil {
+		t.Fatalf("Delete user group quota: %s", err)
+	}
+}
diff --git a/pkg/meta/interface.go b/pkg/meta/interface.go
index 2fc58eb0443c..d93bd4fc448e 100644
--- a/pkg/meta/interface.go
+++ b/pkg/meta/interface.go
@@ -327,6 +327,14 @@ type Summary struct {
 	Dirs   uint64
 }
 
+// UserGroupQuotaDelta represents quota changes for a specific user and group.
+type UserGroupQuotaDelta struct {
+	Uid    uint32
+	Gid    uint32
+	Space  int64
+	Inodes int64
+}
+
 type TreeSummary struct {
 	Inode Ino
 	Path  string
diff --git a/pkg/meta/redis.go b/pkg/meta/redis.go
index 335ab4d574b2..0ba6a3ea108b 100644
--- a/pkg/meta/redis.go
+++ b/pkg/meta/redis.go
@@ -1666,6 +1666,10 @@ func (m *redisMeta) doUnlink(ctx Context, parent Ino, name string, attr *Attr, s
 	return errno(err)
 }
 
+func (m *redisMeta) doBatchUnlink(ctx Context, parent Ino, entries []Entry, length *int64, space *int64, inodes *int64, userGroupQuotas *[]UserGroupQuotaDelta, skipCheckTrash ...bool) syscall.Errno {
+	return syscall.ENOTSUP
+}
+
 func (m *redisMeta) doRmdir(ctx Context, parent Ino, name string, pinode *Ino, oldAttr *Attr, skipCheckTrash ...bool) syscall.Errno {
 	var trash Ino
 	if !(len(skipCheckTrash) == 1 && skipCheckTrash[0]) {
diff --git a/pkg/meta/sql.go b/pkg/meta/sql.go
index cb6ca664b4d6..d2a9a157496b 100644
--- a/pkg/meta/sql.go
+++ b/pkg/meta/sql.go
@@ -2586,6 +2586,286 @@ func (m *dbMeta) doReaddir(ctx Context, inode Ino, plus uint8, entries *[]*Entry
 	}))
 }
 
+func recordDeletionStats(
+	n *node,
+	entrySpace int64,
+	ugSpace int64,
+	totalLength *int64,
+	totalSpace *int64,
+	totalInodes *int64,
+	userGroupQuotas *[]UserGroupQuotaDelta,
+	isTrash bool,
+) {
+	*totalLength -= int64(n.Length)
+	*totalSpace -= entrySpace
+	*totalInodes--
+
+	if userGroupQuotas != nil && !isTrash {
+		*userGroupQuotas = append(*userGroupQuotas, UserGroupQuotaDelta{
+			Uid:    n.Uid,
+			Gid:    n.Gid,
+			Space:  -ugSpace,
+			Inodes: -1,
+		})
+	}
+}
+
+func (m *dbMeta) doBatchUnlink(ctx Context, parent Ino, entries []Entry, length *int64, space *int64, inodes *int64, userGroupQuotas *[]UserGroupQuotaDelta, skipCheckTrash ...bool) syscall.Errno {
+	if len(entries) == 0 {
+		return 0
+	}
+
+	var trash Ino
+	if len(skipCheckTrash) == 0 || !skipCheckTrash[0] {
+		if st := m.checkTrash(parent, &trash); st != 0 {
+			return st
+		}
+	}
+
+	type entryInfo struct {
+		e         edge
+		n         node
+		opened    bool
+		trash     Ino
+		trashName string
+		lastLink  bool
+	}
+	var entryInfos []entryInfo
+	var totalLength, totalSpace, totalInodes int64
+	if userGroupQuotas != nil {
+		*userGroupQuotas = make([]UserGroupQuotaDelta, 0, len(entries))
+	}
+	err := m.txn(func(s *xorm.Session) error {
+		pn := node{Inode: parent}
+		ok, err := s.Get(&pn)
+		if err != nil {
+			return err
+		}
+		if !ok {
+			return syscall.ENOENT
+		}
+		if pn.Type != TypeDirectory {
+			return syscall.ENOTDIR
+		}
+		var pattr Attr
+		m.parseAttr(&pn, &pattr)
+		if st := m.Access(ctx, parent, MODE_MASK_W|MODE_MASK_X, &pattr); st != 0 {
+			return st
+		}
+		if (pn.Flags&FlagAppend != 0) || (pn.Flags&FlagImmutable) != 0 {
+			return syscall.EPERM
+		}
+
+		entryInfos = make([]entryInfo, 0, len(entries))
+		now := time.Now().UnixNano()
+
+		inodes := make([]Ino, 0, len(entries))
+		inodeM := make(map[Ino]struct{}) // filter hardlinks
+		for _, entry := range entries {
+			e := edge{Parent: parent, Name: entry.Name, Inode: entry.Inode}
+			if entry.Attr != nil {
+				e.Type = entry.Attr.Typ
+			}
+			entryInfos = append(entryInfos, entryInfo{e: e, trash: trash})
+			if _, exists := inodeM[entry.Inode]; !exists {
+				inodeM[entry.Inode] = struct{}{}
+				inodes = append(inodes, entry.Inode)
+			}
+		}
+
+		if len(inodes) > 0 {
+			var nodes []node
+			if err := s.ForUpdate().In("inode", inodes).Find(&nodes); err != nil {
+				return err
+			}
+			// some inodes may not exist
+			nodeMap := make(map[Ino]*node, len(nodes))
+			for i := range nodes {
+				nodeMap[nodes[i].Inode] = &nodes[i]
+			}
+
+			for i := range entryInfos {
+				info := &entryInfos[i]
+				n, ok := nodeMap[info.e.Inode]
+				if !ok {
+					entryInfos[i].e.Inode = 0
+					continue
+				}
+				if ctx.Uid() != 0 && pn.Mode&01000 != 0 && ctx.Uid() != pn.Uid && ctx.Uid() != n.Uid {
+					return syscall.EACCES
+				}
+				if (n.Flags&FlagAppend) != 0 || (n.Flags&FlagImmutable) != 0 {
+					return syscall.EPERM
+				}
+				if (n.Flags & FlagSkipTrash) != 0 {
+					info.trash = 0
+				}
+				info.n = *n
+			}
+
+			filteredInfos := entryInfos[:0]
+			for i := range entryInfos {
+				if entryInfos[i].e.Inode != 0 {
+					filteredInfos = append(filteredInfos, entryInfos[i])
+				}
+			}
+			entryInfos = filteredInfos
+		}
+
+		for i := range entryInfos {
+			info := &entryInfos[i]
+			if info.trash > 0 && info.n.Nlink > 1 {
+				info.trashName = m.trashEntry(parent, info.e.Inode, string(info.e.Name))
+				te := edge{
+					Parent: info.trash,
+					Name:   []byte(info.trashName),
+					Inode:  info.e.Inode,
+					Type:   info.e.Type,
+				}
+				if ok, err := s.Get(&te); err == nil && ok {
+					info.trash = 0
+				}
+			}
+		}
+
+		for i := range entryInfos {
+			info := &entryInfos[i]
+			info.n.setCtime(now)
+			if info.trash != 0 && info.n.Parent > 0 {
+				info.n.Parent = info.trash
+			}
+		}
+
+		seen := make(map[Ino]uint32)
+		for i := range entryInfos {
+			info := &entryInfos[i]
+			if info.e.Type == TypeDirectory {
+				continue
+			}
+			processed := seen[info.e.Inode] + 1
+			var finalNlink int64
+			if info.trash == 0 {
+				finalNlink = int64(info.n.Nlink) - int64(processed)
+				if finalNlink < 0 {
+					finalNlink = 0
+				}
+			} else {
+				finalNlink = int64(info.n.Nlink)
+			}
+			info.lastLink = (info.trash == 0 && finalNlink == 0)
+			if info.lastLink && info.e.Type == TypeFile && m.sid > 0 {
+				info.opened = m.of.IsOpen(info.e.Inode)
+			}
+			info.n.Nlink = uint32(finalNlink)
+			seen[info.e.Inode] = processed
+		}
+
+		trashInserted := make(map[Ino]bool)
+		nowUnix := time.Now().Unix()
+
+		for _, info := range entryInfos {
+			if info.e.Type == TypeDirectory {
+				continue
+			}
+			e := edge{Parent: parent, Name: info.e.Name}
+			if _, err := s.Delete(&e); err != nil {
+				return err
+			}
+
+			if info.n.Nlink > 0 {
+				if _, err := s.Cols("nlink", "ctime", "ctimensec", "parent").Update(&info.n, &node{Inode: info.n.Inode}); err != nil {
+					return err
+				}
+				if info.trash > 0 && !trashInserted[info.e.Inode] {
+					if info.trashName == "" {
+						info.trashName = m.trashEntry(parent, info.e.Inode, string(info.e.Name))
+					}
+					te := edge{
+						Parent: info.trash,
+						Name:   []byte(info.trashName),
+						Inode:  info.e.Inode,
+						Type:   info.e.Type,
+					}
+					if err := mustInsert(s, &te); err != nil {
+						return err
+					}
+					trashInserted[info.e.Inode] = true
+				}
+				recordDeletionStats(&info.n, align4K(info.n.Length), 0, &totalLength, &totalSpace, &totalInodes, userGroupQuotas, parent.IsTrash())
+			} else {
+				var entrySpace int64
+				needRecordStats := false
+				switch info.e.Type {
+				case TypeFile:
+					entrySpace = align4K(info.n.Length)
+					needRecordStats = true
+					if info.opened {
+						if err := mustInsert(s, &sustained{Sid: m.sid, Inode: info.e.Inode}); err != nil {
+							return err
+						}
+						if _, err := s.Cols("nlink", "ctime", "ctimensec").Update(&info.n, &node{Inode: info.n.Inode}); err != nil {
+							return err
+						}
+					} else {
+						if err := mustInsert(s, &delfile{info.e.Inode, info.n.Length, nowUnix}); err != nil {
+							return err
+						}
+						if _, err := s.Delete(&node{Inode: info.e.Inode}); err != nil {
+							return err
+						}
+					}
+				case TypeSymlink:
+					if _, err := s.Delete(&symlink{Inode: info.e.Inode}); err != nil {
+						return err
+					}
+					fallthrough
+				default:
+					if _, err := s.Delete(&node{Inode: info.e.Inode}); err != nil {
+						return err
+					}
+					if info.e.Type != TypeFile {
+						entrySpace = align4K(0)
+						needRecordStats = true
+					}
+				}
+				if needRecordStats {
+					recordDeletionStats(&info.n, entrySpace, entrySpace, &totalLength, &totalSpace, &totalInodes, userGroupQuotas, parent.IsTrash())
+				}
+				if _, err := s.Delete(&xattr{Inode: info.e.Inode}); err != nil {
+					return err
+				}
+			}
+		}
+
+		for _, info := range entryInfos {
+			if info.e.Type != TypeDirectory {
+				m.of.InvalidateChunk(info.e.Inode, invalidateAttrOnly)
+			}
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return errno(err)
+	}
+
+	if trash == 0 {
+		for _, info := range entryInfos {
+			if info.n.Type == TypeFile && info.lastLink {
+				isTrash := parent.IsTrash()
+				m.fileDeleted(info.opened, isTrash, info.e.Inode, info.n.Length)
+			}
+		}
+		m.updateStats(totalSpace, totalInodes)
+	}
+
+	*length = totalLength
+	*space = totalSpace
+	*inodes = totalInodes
+	return 0
+}
+
 func (m *dbMeta) doCleanStaleSession(sid uint64) error {
 	var fail bool
 	// release locks
diff --git a/pkg/meta/tkv.go b/pkg/meta/tkv.go
index cbafbd6d924d..c3b80efcc442 100644
--- a/pkg/meta/tkv.go
+++ b/pkg/meta/tkv.go
@@ -1427,6 +1427,10 @@ func (m *kvMeta) doUnlink(ctx Context, parent Ino, name string, attr *Attr, skip
 	return errno(err)
 }
 
+func (m *kvMeta) doBatchUnlink(ctx Context, parent Ino, entries []Entry, length *int64, space *int64, inodes *int64, userGroupQuotas *[]UserGroupQuotaDelta, skipCheckTrash ...bool) syscall.Errno {
+	return syscall.ENOTSUP
+}
+
 func (m *kvMeta) doRmdir(ctx Context, parent Ino, name string, pinode *Ino, oldAttr *Attr, skipCheckTrash ...bool) syscall.Errno {
 	var trash Ino
 	if !(len(skipCheckTrash) == 1 && skipCheckTrash[0]) {
diff --git a/pkg/meta/utils.go b/pkg/meta/utils.go
index 833ed0c96275..fc717e0233d5 100644
--- a/pkg/meta/utils.go
+++ b/pkg/meta/utils.go
@@ -277,14 +277,7 @@ func (m *baseMeta) emptyDir(ctx Context, inode Ino, skipCheckTrash bool, count *
 	}
 	var wg sync.WaitGroup
 	var status syscall.Errno
-	// try directories first to increase parallel
-	var dirs int
-	for i, e := range entries {
-		if e.Attr.Typ == TypeDirectory {
-			entries[dirs], entries[i] = entries[i], entries[dirs]
-			dirs++
-		}
-	}
+	var nonDirEntries []Entry
 	for i, e := range entries {
 		if e.Attr.Typ == TypeDirectory {
 			select {
@@ -305,13 +298,7 @@ func (m *baseMeta) emptyDir(ctx Context, inode Ino, skipCheckTrash bool, count *
 			}
 		} else {
-			if count != nil {
-				atomic.AddUint64(count, 1)
-			}
-			if st := m.Unlink(ctx, inode, string(e.Name), skipCheckTrash); st != 0 && st != syscall.ENOENT {
-				ctx.Cancel()
-				return st
-			}
+			nonDirEntries = append(nonDirEntries, *e)
 		}
 		if ctx.Canceled() {
 			return syscall.EINTR
@@ -319,6 +306,11 @@ func (m *baseMeta) emptyDir(ctx Context, inode Ino, skipCheckTrash bool, count *
 		entries[i] = nil // release memory
 	}
 	wg.Wait()
+
+	if status == 0 {
+		status = m.BatchUnlink(ctx, inode, nonDirEntries, count, skipCheckTrash)
+	}
+
 	if status != 0 || inode == TrashInode { // try only once for .trash
 		return status
 	}