Compare commits

..

746 Commits

Author SHA1 Message Date
Aaron Liu
5b823305d5 fix: typo in issue contact links 2026-02-24 16:46:50 +08:00
Aaron Liu
edf16a9ed8 misc: issue template syntax issue 2026-02-24 16:45:19 +08:00
Aaron Liu
5d915f11ff misc: add more issue templates 2026-02-24 16:40:38 +08:00
Aaron Liu
d9baa74c81 misc: update issue template 2026-02-24 16:26:56 +08:00
Darren Yu
3180c72b53 fix(dbfs): folder summary type error in cache (#3332)
* fix(dbfs): folder summary type error in cache

* Update dbfs.go
2026-02-24 14:37:57 +08:00
Aaron Liu
95865add54 fix(slave node): panic when transfer files in slave node 2026-02-15 09:18:24 +08:00
Aaron Liu
9a59c8348e fix(fts): increase default timeout for media meta queue 2026-02-15 09:16:50 +08:00
Aaron Liu
846366d223 update submodule 2026-02-14 10:08:53 +08:00
Aaron Liu
5d45691e43 chore: add v4 tag for docker images (close #3284) 2026-02-14 10:00:53 +08:00
Aaron Liu
2a59407916 fix(oauth): openid scope is not added by default / add scope name and list in dashboard (close #3274) 2026-02-14 10:00:14 +08:00
Aaron Liu
a8a625e967 update submodule 2026-02-13 10:11:32 +08:00
Aaron Liu
153a00ecd5 feat(fts): start background task to force build index for existing files (close #2895) 2026-02-12 13:40:11 +08:00
Aaron Liu
1e3b851e19 feat: full-text search and RAG powered search 2026-02-11 16:05:09 +08:00
Aaron Liu
ec9fdd33bc fix(direct link): cannot access direct link for files without blobs (close #3239) 2026-02-05 19:36:04 +08:00
Aaron Liu
6322a9e951 fix(s3): nil pointer when uploading files to SeaweedFS (close #3265) 2026-02-05 19:25:43 +08:00
Aaron Liu
57239e81af security: use crypto/rand for secret keys 2026-02-05 19:19:39 +08:00
Aaron Liu
9dcc82ead8 fix(router): login consent should be for authenticated audience only 2026-02-05 19:16:13 +08:00
Aaron Liu
b913b4683f fix(eventhub): mark as offline if keepalive fails to send 2026-02-05 19:12:42 +08:00
Copilot
1f580f0d8a Adjust OAuth grant validation limits (no code changes yet) (#3261)
* Initial plan

* Increase OAuth state limit

Co-authored-by: HFO4 <16058869+HFO4@users.noreply.github.com>

* Default PKCE method when missing

Co-authored-by: HFO4 <16058869+HFO4@users.noreply.github.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: HFO4 <16058869+HFO4@users.noreply.github.com>
2026-02-03 14:55:00 +08:00
Aaron Liu
87d48ac4a7 Merge remote-tracking branch 'origin/master' 2026-01-28 15:15:01 +08:00
Aaron Liu
5d9cfaa973 fix(eventhub): nil pointer trying get owner 2026-01-28 13:46:27 +08:00
Aaron Liu
2241a9e2c8 feat(dashboard): add setting option for event push 2026-01-28 12:54:42 +08:00
Aaron Liu
1c5eefdc6a feat(devices): add landing page for desktop client 2026-01-28 12:54:23 +08:00
Aaron Liu
c99a4ece90 feat(oauth): user can manage existing OAuth grant 2026-01-28 12:53:21 +08:00
Aaron Liu
43d77d2319 feat(dashboard): manage OAuth apps 2026-01-28 12:52:13 +08:00
WittF
e4e6beb52d feat(share): add Open Graph preview for social media crawlers (#3234)
* feat(share): add Open Graph preview for social media crawlers

Add middleware to intercept social media bot requests and return
OG meta tags for share links, enabling rich previews on platforms
like Facebook, Twitter, Discord, etc.

* feat(og): reuse existing share service and show thumbnail if possible

* resolve comments

---------

Co-authored-by: Aaron Liu <abslant.liu@gmail.com>
2026-01-25 12:48:14 +08:00
Darren Yu
47218607ff docs: correct links (#3213) 2026-01-23 15:32:10 +08:00
Darren Yu
5b214beadc feat(thumb): change image type to NRGBA when do resize to keep transparency (#3207) 2026-01-23 15:31:42 +08:00
Darren Yu
2ecc7f4f59 fix(cos): missing response-content-disposition header when enable not sign for CDN url (#2546) (#3202)
* fix(cos): missing response-content-disposition header for public-read bucket

* fix(cos): anonymous GET req not support response header
2026-01-23 15:30:35 +08:00
Darren Yu
2725bd47b5 fix(share): download on folder share wrongly not counted (#3196) 2026-01-23 15:25:54 +08:00
Aaron Liu
864332f2e5 fix(route): force CORS header for content route with correct header parameters (close #3192) 2026-01-23 15:23:36 +08:00
Aaron Liu
a84c5d8e97 feat(oauth): OAuth for 3rd party apps 2026-01-23 15:22:29 +08:00
Aaron Liu
a908ec462f chore(readme): update readme badges 2026-01-14 17:36:42 +08:00
Aaron Liu
bc6845bd74 fix(ci): disable docker provenance on self-hosted agent 2026-01-14 15:54:25 +08:00
Aaron Liu
7039fa801d chore(ado): switch to self-hosted vsts pool 2026-01-14 15:00:43 +08:00
Aaron Liu
6f8aecd35a update submodule 2026-01-14 12:44:50 +08:00
Aaron Liu
722abb81c5 fix(nodepool): exclusive lock should be held for weight LB 2026-01-14 12:41:26 +08:00
Aaron Liu
e8f965e980 fix(security): resolve multiple vulnerability.
Vulnerability identified and fix provided by Kolega.dev (https://kolega.dev)
2026-01-14 12:39:42 +08:00
Aaron Liu
f01ed64bdb feat(perf): improve memory usage for importing task / add configurable Pprof endpoint (fix #3059) 2026-01-14 11:32:22 +08:00
Aaron Liu
736414fa10 fix(dbfs): setting version does not change file size 2026-01-14 10:37:05 +08:00
Aaron Liu
5924e406ab chore(golang): upgrade to 1.25 2026-01-14 10:36:18 +08:00
dependabot[bot]
87b1020c4a chore(deps): bump github.com/quic-go/quic-go from 0.55.0 to 0.57.0 (#3133)
Bumps [github.com/quic-go/quic-go](https://github.com/quic-go/quic-go) from 0.55.0 to 0.57.0.
- [Release notes](https://github.com/quic-go/quic-go/releases)
- [Commits](https://github.com/quic-go/quic-go/compare/v0.55.0...v0.57.0)

---
updated-dependencies:
- dependency-name: github.com/quic-go/quic-go
  dependency-version: 0.57.0
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-01-03 08:37:07 +08:00
Aaron Liu
32632db36f feat(fs): change event debounce before emitting to subscriber 2025-12-13 14:48:01 +08:00
Aaron Liu
c01b748dfc feat(fs): fs change event notification via SSE / show panic stack trace in task queue 2025-12-13 14:48:01 +08:00
Darren Yu
05c68b4062 fix(thumb blob path): separators be wrongly modified (#3062) (#3116)
* fix(thumb blob path): separators be wrongly modified

* Update common.go
2025-12-05 15:57:58 +08:00
Darren Yu
a08c796e3f fix(ks3): fix content disposition format for download filename (#3040) (#3057) 2025-12-05 15:33:18 +08:00
Aaron Liu
fec4dec3ac feat(upload): etag check in client-side upload / support empty policy ID 2025-12-05 15:17:07 +08:00
Aaron Liu
67c6f937c9 fix(oss): disable RSA min key size check for OSS callback (#3038) 2025-11-15 11:59:09 +08:00
Aaron Liu
6ad72e07f4 update submodule 2025-11-14 11:18:39 +08:00
Aaron Liu
994ef7af81 fix(search): multiple metadata search does not work (#3027) 2025-11-12 13:57:38 +08:00
Darren Yu
b507c1b893 docs: update feature description (#3023)
* docs: update feature description

* Apply suggestion from @HFO4

---------

Co-authored-by: AaronLiu <abslant.liu@gmail.com>
2025-11-12 13:55:38 +08:00
Darren Yu
deecc5c20b feat(thumb blob path): support magic variables in thumb blob path (#3030) 2025-11-12 13:49:32 +08:00
dependabot[bot]
6085f2090f chore(deps): bump golang.org/x/image (#2093)
Bumps [golang.org/x/image](https://github.com/golang/image) from 0.0.0-20211028202545-6944b10bf410 to 0.18.0.
- [Commits](https://github.com/golang/image/commits/v0.18.0)

---
updated-dependencies:
- dependency-name: golang.org/x/image
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-28 10:56:01 +08:00
dependabot[bot]
670b79eef3 chore(deps): bump github.com/gin-contrib/cors from 1.3.0 to 1.6.0 (#2097)
Bumps [github.com/gin-contrib/cors](https://github.com/gin-contrib/cors) from 1.3.0 to 1.6.0.
- [Release notes](https://github.com/gin-contrib/cors/releases)
- [Changelog](https://github.com/gin-contrib/cors/blob/master/.goreleaser.yaml)
- [Commits](https://github.com/gin-contrib/cors/compare/v1.3.0...v1.6.0)

---
updated-dependencies:
- dependency-name: github.com/gin-contrib/cors
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-28 10:53:40 +08:00
dependabot[bot]
4785be81c2 chore(deps): bump github.com/wneessen/go-mail from 0.6.2 to 0.7.1 (#2939)
Bumps [github.com/wneessen/go-mail](https://github.com/wneessen/go-mail) from 0.6.2 to 0.7.1.
- [Release notes](https://github.com/wneessen/go-mail/releases)
- [Commits](https://github.com/wneessen/go-mail/compare/v0.6.2...v0.7.1)

---
updated-dependencies:
- dependency-name: github.com/wneessen/go-mail
  dependency-version: 0.7.1
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-28 10:50:54 +08:00
Aaron Liu
f27969d74f chore: update required golang version and gzip middleware 2025-10-24 15:07:12 +08:00
Aaron Liu
e3580d9351 feat(encryption): add UI and settings for file encryption 2025-10-24 15:04:54 +08:00
Aaron Liu
16b02b1fb3 feat: file blob encryption 2025-10-21 14:54:13 +08:00
Darren Yu
6bd30a8af7 fix(oss): change default expire ttl and sign param to adapt SDK v2 (#2979)
* fix(oss): change default expire ttl and sign param to adapt SDK v2

* fix(oss): add expire ttl limit
2025-10-16 11:49:21 +08:00
Aaron Liu
21cdafb2af fix(oss): traffic limit should be in query instead of headers (#2977) 2025-10-16 07:46:22 +08:00
Aaron Liu
e29237d593 fix(webdav): error code for missing parent in mkcol should be 409 instead of 404 (#2953) 2025-10-15 10:28:31 +08:00
Aaron Liu
46897e2880 fix(oss): presigned multipart upload mismatch 2025-10-14 10:21:43 +08:00
Aaron Liu
213eaa54dd update submodule 2025-10-14 09:29:24 +08:00
Aaron Liu
e7d6fb25e4 feat(oss): upgrade to SDK v2 (#2963) 2025-10-14 08:49:45 +08:00
Darren Yu
e3e08a9b75 feat(share): adapt to keep specified path in V3 sharing link (#2958) 2025-10-12 10:28:40 +08:00
酸柠檬猹Char
78f7ec8b08 fix: Some containers won't auto restart in the current Docker Compose (#2932)
Add "restart: unless-stopped" to the database and redis container.
2025-09-27 22:04:38 +08:00
Aaron Liu
3d41e00384 feat(media meta): add Mapbox as a map provider option (#2922) 2025-09-27 10:19:22 +08:00
Aaron Liu
5e5dca40c4 feat(media meta): reverse geocoding from mapbox (#2922) 2025-09-26 11:27:46 +08:00
Mason Liu
668b542c59 feat: update reset thumbnail feature (#2854)
* update reset thumbnail feature

* consolidate supported thumbnail extensions into site config; remove dedicated API

* allow patching thumb ; remove Reset Thumbnail API

* fix code formatting

---------

Co-authored-by: Aaron Liu <abslant.liu@gmail.com>
2025-09-23 11:24:38 +08:00
Aaron Liu
440ab775b8 chore(compose): add aria2 port mapping 2025-09-23 09:53:31 +08:00
Darren Yu
678593f30d fix(thumb blob path): remove extra randomkey in thumb blob path (#2893)
* fix(thumb blob path): remove extra randomkey in thumb blob path

* Update upload.go

Refactor SavePath assignment for clarity.

* Update thumbnail.go
2025-09-16 11:44:22 +08:00
Darren Yu
58ceae9708 fix(uploader): failed to generate upload token for some file types (#2847) (#2900)
* fix(mime): `mimeType` not assigned to new value when is empty

* fix(mime): add fallback mime type
2025-09-16 10:35:30 +08:00
Darren Yu
3b8110b648 fix(cos): traffic limit wrongly given in bytes, should be bits (#2899) 2025-09-16 10:33:41 +08:00
Darren Yu
f0c5b08428 feat(extract): preserve last modified when extract archive file (#2897) 2025-09-16 10:31:09 +08:00
Darren Yu
9434c2f29b fix(upgrade v3): validation on unique magic var in either blob name or path (#2890)
* fix(upgrade v3): validation on unique magic var in either blob name or path

* Update policy.go
2025-09-13 16:18:18 +08:00
Aaron Liu
7d97237593 feat(archive viewer): option to select text encoding for zip files (#2867) 2025-09-12 15:41:43 +08:00
Aaron Liu
a581851f84 feat(webdav): option to disable system file uploads (#2871) 2025-09-12 14:04:51 +08:00
Darren Yu
fe7cf5d0d8 feat(thumb): enhance native thumbnail generator with encoding format and quality (#2868)
* feat(thumb): enhance native thumbnail generator with encoding format and quality

* Update thumbnail.go

* Update obs.go
2025-09-05 11:40:30 +08:00
Aaron Liu
cec2b55e1e update submodule 2025-09-02 13:06:56 +08:00
Darren Yu
af43746ba2 feat(email): migrate magic variables to email templates title in patches (#2862) 2025-09-02 11:57:49 +08:00
Aaron Liu
9f1cb52cfb feat(explorer): preview archive file content and extract selected files (#2852) 2025-09-02 11:54:04 +08:00
Aaron Liu
4acf9401b8 feat(uploader): concurrent chunk uploads for local/remote storage policy 2025-08-30 10:37:08 +08:00
Aaron Liu
c3ed4f5839 feat(uploader): concurrent chunk uploads 2025-08-30 10:36:20 +08:00
Aaron Liu
9b40e0146f fix(dbfs): remove recursive limit for deleting files 2025-08-28 11:26:55 +08:00
Aaron Liu
a16b491f65 fix(entitysource): rate limiter applied to nil reader (#2834) 2025-08-26 11:30:55 +08:00
Darren Yu
a095117061 feat(email): support magic variables in email title, add init email template for multiple languages (#2814)
* feat(email): add init email template for multiple languages

* Update setting.go

* Update setting.go

* feat(email): support magic variables in email title
2025-08-26 11:02:38 +08:00
Aaron Liu
acc660f112 update submodule 2025-08-22 09:19:35 +08:00
Aaron Liu
a677e23394 feat(dashboard): filter file by shared link, direct link, uploading status (#2782) 2025-08-21 14:12:30 +08:00
Aaron Liu
13e774f27d feat(dashboard): filter file by shared link, direct link, uploading status (#2667) 2025-08-21 13:14:11 +08:00
Aaron Liu
91717b7c49 feat(archive): add support for 7z and bz2 / extract rar and 7zip files protected with password (#2668) 2025-08-21 10:20:13 +08:00
Aaron Liu
a1ce16bd5e fix(smtp): SMTP reset error should be ignored for non-standard SMTP server implementation (#2791) 2025-08-19 09:43:23 +08:00
Aaron Liu
872b08e5da fix(smtp): force enabling SSL does not work (#2777) 2025-08-13 18:54:56 +08:00
Aaron Liu
f73583b370 update submodule 2025-08-12 13:27:33 +08:00
Aaron Liu
c0132a10cb feat(dashboard): upgrade promotion 2025-08-12 13:27:07 +08:00
Aaron Liu
927c3bff00 fix(dep): remove undefined dependency 2025-08-12 13:12:54 +08:00
Aaron Liu
bb9b42eb10 feat(audit): flush audit logs into DB in a standalone goroutine 2025-08-12 13:10:55 +08:00
Aaron Liu
5f18d277c8 fix(conf): ProxyHeader should be optional (#2760) 2025-08-12 09:53:15 +08:00
Aaron Liu
b0057fe92f feat(profile): options to select which kind of share links to show in user's profile (#2453) 2025-08-12 09:52:47 +08:00
Darren Yu
bb3db2e326 fix(middleware): left default ProxyHeader config item as blank to reduce risk of fake xff (#2760) 2025-08-12 09:35:36 +08:00
Aaron Liu
8deeadb1e5 fix(middleware): only select first client IP from X-Forwarded-For (#2748) 2025-08-10 10:47:29 +08:00
Aaron Liu
8688069fac refactor(mail): migrate to wneessen/go-mail (#2738) 2025-08-10 10:40:21 +08:00
Aaron Liu
4c08644b05 fix(dbfs): generate thumbnail blob should not update file modification date 2025-08-10 09:38:27 +08:00
Aaron Liu
4c976b8627 feat(blob path): disable {path} magic var for blob path 2025-08-07 11:35:28 +08:00
Aaron Liu
b0375f5a24 fix(recycle): nil pointer if failed to found files in trash (#2750) 2025-08-07 11:03:02 +08:00
Aaron Liu
48e9719336 fix(dbfs): deadlock in SQLite while creating upload session 2025-08-07 10:30:44 +08:00
Darren Yu
7654ce889c fix(blob path): Random variables in blob save path be wrongly fixed (#2741)
* fix(blob path): Random variables in blob save path be wrongly fixed

* feat(blob path): Use regex to match all magic variables
2025-08-05 20:29:14 +08:00
Aaron Liu
80b25e88ee fix(dbfs): file modified_at should not be updated by ent 2025-08-05 15:11:32 +08:00
Aaron Liu
e31a6cbcb3 fix(workflow): concurrent read&write to progress map while transfer files in batch (#2737) 2025-08-05 12:02:17 +08:00
Curious
51d9e06f21 chore(docker compose): pin postgres to major version (#2723) 2025-08-04 14:52:21 +08:00
Git'Fellow
36be9b7a19 Fix typos on README (#2693) 2025-07-31 11:18:48 +08:00
Aaron Liu
c8c2a60adb feat(storage policy): set deny/allow list for file extension and custom regexp (#2695) 2025-07-25 11:32:04 +08:00
Aaron Liu
60bf0e02b3 fix(qbittorrent): download task option not working (#2666) 2025-07-25 10:15:55 +08:00
omiku
488f32512d Add Kingsoft Cloud object storage policy to solve the cross-domain and friendly file name incompatibility problem of s3 compatible storage policy. (#2665)
* 新增金山云对象存储策略,解决s3兼容存储策略的跨域及友好文件名不兼容问题

* fix bug&add download Expire time args

* Handling of expiration times when they may be empty
2025-07-21 16:08:22 +08:00
Aaron Liu
1cdccf5fc9 feat(thumb): adding option to define custom input argument for FFmpeg (#2657) 2025-07-15 14:11:42 +08:00
Aaron Liu
15762cb393 feat(thumb): support output webp thumbnails for vips generator (#2657) 2025-07-15 13:51:23 +08:00
Aaron Liu
e96b595622 feat(direct link): add option to get direct link with download enforced (#2651) 2025-07-15 13:22:04 +08:00
Aaron Liu
d19fc0e75c feat(remote download): sanitize file names with special characters (#2648) 2025-07-15 12:00:39 +08:00
Aaron Liu
195d68c535 chore(docker): add LibRAW into docker image (#2645) 2025-07-15 11:01:44 +08:00
Aaron Liu
000124f6c7 feat(ui): custom HTML content in predefined locations (#2621) 2025-07-15 10:45:32 +08:00
Aaron Liu
ca57ca1ba0 feat(custom): custom sidebar items 2025-07-15 10:41:13 +08:00
Aaron Liu
3cda4d1ef7 feat(fs): custom properties for files (#2407) 2025-07-12 11:15:33 +08:00
Aaron Liu
b13490357b feat(dashboard): cleanup tasks and events (#2368) 2025-07-05 11:52:15 +08:00
Aaron Liu
617d3a4262 feat(qiniu): use accelerated upload domain (#2497) 2025-07-05 10:50:51 +08:00
Aaron Liu
75a03aa708 fix(auth): unified empty path for sign content (#2616) 2025-07-05 10:05:09 +08:00
Aaron Liu
fe2ccb4d4e feat(share): add option to automatically render and show README file (#2382) 2025-07-04 14:40:32 +08:00
Aaron Liu
aada3aab02 feat(storage): load balance storage policy (#2436) 2025-07-04 10:05:15 +08:00
Samler
a0aefef691 feat: platform self-adaptation for file viewer application (#2603) 2025-07-03 14:04:14 +08:00
Aaron Liu
17fc598fb3 doc: duplicated OneDrive in README 2025-06-30 19:46:22 +08:00
Samler
19a65b065c fix: new user group error in without replication (#2596) 2025-06-30 19:34:18 +08:00
Anye
e0b2b4649e fix(db): map MariaDB type to MySQL (#2587)
* fix(db): 将MariaDB数据库类型映射到MySQL类型

* Update client.go
2025-06-30 19:32:21 +08:00
Aaron Liu
642c32c6cc chore: update fatih/color (#2591) 2025-06-29 10:48:25 +08:00
WittF
6106b57bc7 feat(captcha): update static asset source option (#2589)
* feat(captcha): Add captcha_cap_asset_server configuration option to support static asset server settings (#2584)

* fix(captcha): Backend default: cdn → jsdelivr
2025-06-29 10:14:26 +08:00
Aaron Liu
f38f32f9f5 fix(db): sslmode prefer not supported in some pg version (?) related: #2540 2025-06-27 13:54:10 +08:00
Aaron Liu
d382bd8f8d fix(dashboard): cannot change storage policy for groups (#2577) 2025-06-27 12:53:07 +08:00
Aaron Liu
02abeaed2e update submodule 2025-06-27 09:21:19 +08:00
Aaron Liu
6c9a72af14 update submodule 2025-06-26 18:51:44 +08:00
Aaron Liu
4562042b8d fix(dashboard): cannot save settings for anonymous group 2025-06-26 18:48:07 +08:00
Aaron Liu
dc611bcb0d feat(explorer): manage created direct links / option to enable unique redirected direct links 2025-06-26 18:45:54 +08:00
WittF
2500ebc6a4 refactor(captcha): update Cap to 2.0.0 (#2573)
* refactor(captcha): update Cap backend to 2.0.0 API format

* feat(captcha): add Cap version config for 1.x/2.x compatibility

* fix(captcha): change Cap default version to 1.x for backward compatibility

* refactor(captcha): remove Cap 1.x compatibility, keep only 2.x support

* feat(captcha): update field names to Cap 2.0 standard - Site Key and Secret Key

* fix(captcha): update Cap field names in defaults configuration
2025-06-26 14:58:58 +08:00
Aaron Liu
3db522609e feat(thumb): support generating thumbnails using simple_dcraw from LibRAW 2025-06-24 10:47:36 +08:00
Anye
d1bbfd4bc4 feat(db): add mariadb alias (#2560) 2025-06-23 17:17:18 +08:00
WittF
b11188fa50 feat(file): add support for more file extensions (#2557)
- Add aac audio format support
- Add ini, env, json, log, yml text file extensions
- Add iso archive format support
- Add ico, icns thumbnail generation support
2025-06-23 17:16:29 +08:00
charlieJ107
1bd62e8feb [Feature](database): Add Support for SSL Connections and Database URL Configuration (#2540)
* feat(database): add support for SSL connections and database URL configuration

* feat(config): update Redis configuration to use TLS in configurre name instead of SSL

* fix(database): remove default values for DatabaseURL and SSLMode in DatabaseConfig

* chore(.gitignore): add cloudreve built binary to ignore list
2025-06-23 17:12:20 +08:00
Aaron Liu
fec549f5ec feat(ent): migrate DB settings in patches 2025-06-22 10:31:33 +08:00
Aaron Liu
8fe2889772 feat(file apps): add excalidraw (#2317) 2025-06-21 12:03:08 +08:00
Aaron Liu
bdc0aafab0 fix(remote download): file path slashes incorrectly formatted for remote download transfer if master and slave node use different path style (#2532) 2025-06-21 09:51:12 +08:00
Aaron Liu
3de33aeb10 update submodule 2025-06-20 14:07:03 +08:00
WittF
9f9796f2f3 Add Cap Captcha support (#2511)
* Add Cap Captcha support

- Add CaptchaCap type constant in types.go
- Add Cap struct with InstanceURL, KeyID, and KeySecret fields
- Add CapCaptcha method in provider.go to return Cap settings
- Add default settings for Cap captcha in setting.go
- Implement Cap captcha verification logic in middleware
- Expose Cap captcha settings in site API

This adds support for Cap captcha service as an alternative
captcha option alongside existing reCAPTCHA, Turnstile and
built-in captcha options.

* update cap json tags
2025-06-19 11:31:17 +08:00
Aaron Liu
9a216cd09e feat(share): improve UI for share link result (follow up #https://github.com/cloudreve/frontend/pull/259) 2025-06-17 11:37:30 +08:00
WintBit
41eb010698 fix: allow empty password for ShareCreateService (#2498) 2025-06-17 11:03:29 +08:00
WintBit
9d28fde00c feat: fileshare custom password support (#2493)
* feat: fileshare custom password support

* fix: blank password check

* feat: backend share link password check

* Revert "feat: backend share link password check"

This reverts commit 22c7bb0b35.

* feat: use go-playground/validator binding rule
2025-06-16 16:34:47 +08:00
Aaron Liu
40644f5234 chore: refresh README 2025-06-14 11:06:42 +08:00
Aaron Liu
d6d615e689 fix(docker compose): redis failed to persist due to permission error (#2476) 2025-06-14 10:07:32 +08:00
Aaron Liu
95d2b5804e fix(router): remove unused get all group route 2025-06-13 13:30:13 +08:00
Aaron Liu
a517f41ab1 Revert "chore(deps): bump github.com/mojocn/base64Captcha (#2266)"
This reverts commit e57e11a30e.
2025-06-12 11:59:50 +08:00
dependabot[bot]
e57e11a30e chore(deps): bump github.com/mojocn/base64Captcha (#2266)
Bumps [github.com/mojocn/base64Captcha](https://github.com/mojocn/base64Captcha) from 0.0.0-20190801020520-752b1cd608b2 to 1.3.6.
- [Release notes](https://github.com/mojocn/base64Captcha/releases)
- [Commits](https://github.com/mojocn/base64Captcha/commits/v1.3.6)

---
updated-dependencies:
- dependency-name: github.com/mojocn/base64Captcha
  dependency-version: 1.3.6
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-12 11:56:41 +08:00
dependabot[bot]
5f1b3a2bed chore(deps): bump golang.org/x/net (#2244)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.0.0-20220630215102-69896b714898 to 0.38.0.
- [Commits](https://github.com/golang/net/commits/v0.38.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-version: 0.38.0
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-12 11:54:40 +08:00
dependabot[bot]
b5136fc5e4 chore(deps): bump github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 (#2267)
Bumps [github.com/golang-jwt/jwt/v5](https://github.com/golang-jwt/jwt) from 5.2.1 to 5.2.2.
- [Release notes](https://github.com/golang-jwt/jwt/releases)
- [Changelog](https://github.com/golang-jwt/jwt/blob/main/VERSION_HISTORY.md)
- [Commits](https://github.com/golang-jwt/jwt/compare/v5.2.1...v5.2.2)

---
updated-dependencies:
- dependency-name: github.com/golang-jwt/jwt/v5
  dependency-version: 5.2.2
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-12 11:52:08 +08:00
Aaron Liu
633ea479d7 fix(es): redact non-ASCII file name in filename segment (#2473) 2025-06-12 11:48:41 +08:00
Aaron Liu
8d6d188c3f fix(docker): cannot generate thumbs for heic/heif images if boot from docker image 2025-06-12 11:27:38 +08:00
Aaron Liu
6561e3075f fix(qiniu): cannot sign URL for file blob with % (#2463) 2025-06-12 10:48:39 +08:00
Aaron Liu
e750cbfb77 fix(cron): add missing tasks to collect expired items in mem KV (#2466) 2025-06-12 09:55:05 +08:00
Aaron Liu
3ab86e9b1d fix: lock conflict while changing view / fix: sign out not blocking refresh tokens 2025-06-12 09:47:01 +08:00
Aaron Liu
e2dbb0404a Update submodule 2025-06-05 18:16:52 +08:00
Aaron Liu
2a7b46437f fix(onedrive): cannot list folder with more than 500 direct children (#2446) 2025-06-05 16:51:48 +08:00
Aaron Liu
fe309b234c chore: add new build target: freebsd_amd64, freebsd_arm64, linux_loong64 (#2442) 2025-06-05 16:36:33 +08:00
Aaron Liu
522fcca6af feat(explorer): save user's view setting to server / optionally share view setting via share link (#2232) 2025-06-05 10:00:37 +08:00
Aaron Liu
c13b7365b0 fix(s3): avoid performing URL encoding for proxied path again if possible (#2413) 2025-05-29 10:19:06 +08:00
Aaron Liu
51fa9f66a5 feat(dashboard): traverse file URI from file ID (#2412) 2025-05-29 09:44:11 +08:00
Aaron Liu
65095855c1 fix(directlink): direct link should not be accessible if the parent file is in trash bin (#2415) 2025-05-29 09:42:22 +08:00
Aaron Liu
ec53769e33 fix(session): kv is not initialized 2025-05-23 20:01:10 +08:00
Aaron Liu
9a96a88243 chore(dockerfile): fix syntax error 2025-05-23 19:00:59 +08:00
Aaron Liu
e0b193427c chore(dockerfile): improve layer cache hit rate 2025-05-23 18:23:39 +08:00
Aaron Liu
1fa70dc699 fix(dashboard): remove default shortcut setting 2025-05-23 18:20:05 +08:00
Aaron Liu
db7b54c5d7 feat(session): sign out and revoke root token 2025-05-23 16:49:01 +08:00
Aaron Liu
c6ee3e5dcd feat(dbfs): set default share shortcut for new users 2025-05-23 15:39:57 +08:00
Aaron Liu
9f5ebe11b6 feat(wopi): put relative file 2025-05-23 15:37:02 +08:00
Aaron Liu
1a3c3311e6 fix(aria2): cannot select file back after unselected (#2386) 2025-05-23 15:35:01 +08:00
Aaron Liu
acffd984c1 fix(dbfs): file uri name_or op should be compatible with older version 2025-05-20 11:59:40 +08:00
Aaron Liu
7ddb611d6c fix(upload): metadata in upload session request is not created 2025-05-20 11:59:24 +08:00
Aaron Liu
7bace40a4d fix(import): import from slave not working / fix(recycle): skip sending delete request if no blob needs to be deleted in this batch 2025-05-20 10:46:27 +08:00
Aaron Liu
2fac086127 feat(dashboard): admin can now view task detailed steps 2025-05-20 10:45:50 +08:00
Aaron Liu
a10a008ed7 feat(workflow): import files from external storage 2025-05-20 10:45:16 +08:00
Aaron Liu
5d72faf688 fix(thumb): thumb queue retry setting not working (#2367) 2025-05-16 13:52:54 +08:00
Aaron Liu
0a28bf1689 refactor(download): handle stream saver download outside of driver implementation (fix #2366) 2025-05-16 13:52:31 +08:00
Aaron Liu
bdaf091aca fix(wopi): community edition cannot open WOPI viewer from shared file 2025-05-16 13:44:48 +08:00
Aaron Liu
d60c3e6bf4 update submodule 2025-05-13 17:36:44 +08:00
Aaron Liu
1e2cfe0061 fix: undefined types 2025-05-13 16:07:57 +08:00
Aaron Liu
71a624c10e enhance(dbfs): admin should be able to access user's files even if it's inactive (#2327) 2025-05-13 15:08:00 +08:00
Aaron Liu
1b8beb3390 feat(directlink): allow admin create direct link for users (#2340) 2025-05-13 15:07:36 +08:00
Aaron Liu
762811d50f fix(share): share link should be marked as expired if the file is in trash bin (#2347) 2025-05-13 15:06:54 +08:00
Aaron Liu
edd50147e7 fix(qbittorrent): unknown state after seeding complete 2025-05-13 15:05:14 +08:00
Aaron Liu
006bcabcdb fix(session): increase password length limit to 128 2025-05-13 15:05:01 +08:00
小白-白
10e3854082 fix(thumb): libreoffice warning (#2358) 2025-05-13 14:38:17 +08:00
Aaron Liu
fbf1d1d42c fix(uri): hash tag is not properly handled in CrUri 2025-05-08 19:20:19 +08:00
Aaron Liu
c5467f228a fix(onedrive): show detailed error for oauth callback 2025-05-08 19:20:19 +08:00
AaronLiu
2a6a43d242 Update SECURITY.md 2025-05-08 17:03:23 +08:00
AaronLiu
6e82ce2a9d Create SECURITY.md 2025-05-08 16:50:10 +08:00
Aaron Liu
a0b4c97db0 fix(remote download): slave canceled task error is not passed to master in non-debug mode (#2301) 2025-04-27 10:38:35 +08:00
Aaron Liu
2333ed3501 fix(chunk): omit error for teeing file to chunk buffer (#2303) 2025-04-27 10:38:04 +08:00
Aaron Liu
67d3b25c87 fix(dbfs): set current version is not executed from existing transaction 2025-04-27 10:37:33 +08:00
Aaron Liu
ca47f79ecb feat(aria2): patching select file operation is not filtered by existing state (#2294) 2025-04-27 10:36:33 +08:00
Aaron Liu
77ae381474 feat(aria2): show detailed error message from aria2 (#2296) 2025-04-27 10:36:06 +08:00
小白-白
c6eef43590 feat(video player): add flv to default video file viewers map (#2308) 2025-04-26 10:42:46 +08:00
北野 - Michael
d8fc81d0eb feat(embed fs): enable conditional cache for go:embed files (#2299)
* feat(embed fs): add static assets 304 cache policy support

* optimize embed fs for cache policy support logic
2025-04-25 18:26:15 +08:00
Aaron Liu
d195002bf7 fix: compile error 2025-04-24 15:56:10 +08:00
Aaron Liu
d6496ee9a0 feat(video player): add m3u8 to default video file icon map 2025-04-24 15:28:34 +08:00
Aaron Liu
55a3669a9e fix(dashboard): user avatar not shown in details popup (fix #2289) 2025-04-24 15:27:19 +08:00
Aaron Liu
969e35192a fix(remote download): improve file pre-upload validation (fix #2286) 2025-04-24 15:26:58 +08:00
Aaron Liu
224ac28ffe fix(defaults): use utf-8 in Mimetype for txt file by default 2025-04-24 15:26:29 +08:00
Aaron Liu
cc69178310 feat: upgrade from community to pro / remove unused edges in storage policy 2025-04-24 15:25:40 +08:00
Aaron Liu
9226d0c8ec chore: update dockerhub service connection 2025-04-21 20:24:56 +08:00
Aaron Liu
7b5e0e8581 fix(dbfs): enforce root protection for single file share 2025-04-21 19:43:09 +08:00
Aaron Liu
d60e400f83 fix(dashboard): incorrectly calculate user total storage 2025-04-20 18:55:01 +08:00
AaronLiu
21d158db07 Init V4 community edition (#2265)
* Init V4 community edition

* Init V4 community edition
2025-04-20 17:31:25 +08:00
AaronLiu
da4e44b77a Create giscus.json 2025-02-25 18:06:11 +08:00
AaronLiu
3373b9dc02 Update README.md 2024-10-25 13:31:21 +08:00
sam
12e3f10ad7 缩略图生成器支持 LibRaw (#2109)
* feat: 增加 LibRaw 缩略图生成器。

* feat: 生成 RAW 图像的缩略图时,旋转缩略图的方向。

* update: RAW 缩略图支持镜像方向。
2024-07-31 12:11:33 +08:00
sam
23d009d611 fix: 缩略图生成器中 exts 被多次解析(#2105) (#2106) 2024-07-30 19:52:36 +08:00
EpicMo
3edb00a648 fit: gmail (#1949) 2024-02-24 21:36:59 +08:00
Aaron Liu
88409cc1f0 release: 3.8.3 2023-10-07 20:08:38 +08:00
Darren Yu
cd6eee0b60 Fix: doc preview src of local storage policy starts with ":/" (#1842) (#1844)
* Fix: doc preview src of local storage policy starts with ":/"

* Fix: doc preview src of local storage policy starts with ":/"
2023-10-06 12:34:49 +08:00
Sam
3ffce1e356 fix(admin): Able to change default user status (#1811) 2023-08-14 13:24:58 +08:00
Aaron Liu
ce832bf13d release: 3.8.2 2023-08-07 20:09:23 +08:00
Aaron Liu
5642dd3b66 feat(webdav): support setting download proxy 2023-07-29 08:58:14 +08:00
Aaron Liu
a1747073df feat(webdav): support setting download proxy 2023-07-29 08:53:26 +08:00
WeidiDeng
ad6c6bcd93 feat(webdav): support rename in copy and move (#1774) 2023-07-18 15:27:56 +08:00
WeidiDeng
f4a04ce3c3 fix webdav proppatch (#1771) 2023-07-18 15:25:43 +08:00
Aaron Liu
247e31079c fix(thumb): cannot generate thumb using ffmpeg for specific format (#1756) 2023-07-18 15:18:54 +08:00
Darren Yu
a26893aabc Add: thumb quality for 3rd storage policy (#1763)
Add thumb quality for third party storage policy.
2023-07-05 22:13:24 +08:00
初雪
ce759c02b1 feat(redis): support configuring username (#1752)
替换Golang Redis依赖: redigo的版本至当前最新版1.8.9
(v2.0.0被标记为已撤回,且长期未更新)

Redis 6 及以上版本均可配置为使用username+password认证的ACL,故作此变更。
2023-07-05 22:12:33 +08:00
Aaron Liu
9f6f9adc89 Merge remote-tracking branch 'origin/master' 2023-06-25 18:53:54 +08:00
Aaron Liu
91025b9f24 fix(thumb): cannot generate thumbnails in slave mode 2023-06-25 18:53:37 +08:00
hallucination
a9bee3e638 Update docker-compose.yml (#1727)
add  Redis retains login sessions after restarting
2023-06-19 12:38:29 +08:00
Aaron Liu
243c312066 fix: failed UT 2023-06-11 09:50:57 +08:00
Aaron Liu
1d52ddd93a release: 3.8.0 2023-06-11 09:45:27 +08:00
Aaron Liu
cbc549229b fix(wopi): anonymous users cannot preview files 2023-06-11 09:45:06 +08:00
Aaron Liu
173ca6cdf8 fix(preview): use absolute URL for local storage policy 2023-06-11 09:44:43 +08:00
Aaron Liu
fb166fb3e4 release: 3.8.0-beta1 2023-05-27 14:06:19 +08:00
Aaron Liu
b1344616b8 test: fix failed ut 2023-05-27 10:44:28 +08:00
Aaron Liu
89ee147961 feat(upload): detect and specify mime type for files uploaded to S3 and OSS (fix #1681) 2023-05-25 19:51:51 +08:00
Aaron Liu
4aafe1dc7a enhance(download): Use just-in-time host in download URL, instead of SiteURL in site settings 2023-05-25 19:49:32 +08:00
Aaron Liu
4c834e75fa adhoc: commit todo changes related to google drive 2023-05-25 19:46:05 +08:00
Aaron Liu
31d4a3445d fix(cache): panic if redis connection fails 2023-05-25 19:44:59 +08:00
Aaron Liu
37926e3133 feat(policy): add Google Drive Oauth client 2023-05-24 14:39:54 +08:00
WeidiDeng
4c18e5acd1 webdav兼容nextcloud propset设置修改时间 (#1710) 2023-05-24 12:10:03 +08:00
Arkylin
6358740cc9 modified: models/policy.go (#1718)
modified:   models/policy_test.go
2023-05-24 12:09:24 +08:00
Aaron Liu
00d56d6d07 test: fix failed ut 2023-04-16 09:25:57 +08:00
Aaron Liu
b9143b53f6 chore: update runner to ubuntu-latest 2023-04-16 09:20:30 +08:00
Aaron Liu
b9d9e036c9 feat(kv): persist cache and session into disk before shutdown 2023-04-16 09:17:06 +08:00
Aaron Liu
4d131db504 test(hook): NewWebdavAfterUploadHook 2023-04-15 09:21:29 +08:00
Aaron Liu
c5ffdbfcfb Merge remote-tracking branch 'origin/master' 2023-04-13 19:39:22 +08:00
Aaron Liu
8e2fc1a8f6 test(thumb): new changes in filesystem pkg 2023-04-13 19:39:12 +08:00
AaronLiu
ce579d387a Merge pull request #1690 from cloudreve/webdav-checksum
webdav兼容rclone的nextcloud选项(修改日期和checksum)
2023-04-08 10:10:46 +08:00
AaronLiu
f1e7af67bc Merge branch 'master' into webdav-checksum 2023-04-08 10:09:55 +08:00
AaronLiu
98788dc72b Merge pull request #1679 from xkeyC/master
feat(Webdav): Add overwrite support for moveFiles and copyFiles
2023-04-08 10:08:13 +08:00
Weidi Deng
1b4eff624d webdav兼容rclone的nextcloud选项(修改日期和checksum) 2023-04-07 22:16:11 +08:00
Aaron Liu
408733a974 test(thumb): new changes in models/cache pkg 2023-04-07 20:33:05 +08:00
Aaron Liu
c8b736bd8f fix(dashboard): add missing utils for thumb setting 2023-04-07 19:42:23 +08:00
Aaron Liu
cf03206283 feat(thumb): generator settings and test button 2023-04-07 19:33:02 +08:00
Aaron Liu
ac536408c6 feat(thumb): use libreoffice to generate thumb 2023-04-07 19:31:43 +08:00
Aaron Liu
98b86b37de feat(thumb): use ffmpeg to generate thumb 2023-04-07 19:30:41 +08:00
Aaron Liu
b55344459d feat(thumb): use libvips to generate thumb 2023-04-07 19:30:10 +08:00
Aaron Liu
bde4459519 feat(thumb): add ext whitelist for all policy types 2023-04-07 19:29:43 +08:00
Aaron Liu
f5a21a7e6f feat(thumb): set size limit for original file 2023-04-07 19:28:39 +08:00
Aaron Liu
b910254cc5 feat(thumb): delete generated thumb file
fix(s3): return empty list of file failed to be deleted
2023-04-07 19:27:57 +08:00
Aaron Liu
e115497dfe feat(thumb): generate thumb for OneDrive files 2023-04-07 19:27:31 +08:00
Aaron Liu
62b73b577b feat(thumb): generate and return sidecar thumb 2023-04-07 19:26:39 +08:00
Aaron Liu
7cb5e68b78 refactor(thumb): thumb logic for slave policy 2023-04-07 19:25:29 +08:00
Aaron Liu
ae118c337e refactor(thumb): reset thumb status after renaming a file with no thumb available 2023-04-07 19:09:13 +08:00
Aaron Liu
f36e39991d refactor(thumb): new thumb pipeline model to generate thumb on-demand 2023-04-07 19:08:54 +08:00
Aaron Liu
da1eaf2d1f fix(wopi): cannot set preferred language for LibreOffice online 2023-04-07 19:06:46 +08:00
xkeyC
42f7613bfa moveFiles 修改回无条件 overwrite (Move 或 Rename 都会触发冲突问题) 2022-03-29 20:16:09 +08:00
xkeyC
e8e38029ca fix:error code 2023-03-28 00:19:18 +08:00
xkeyC
cd9e9e25b9 fix:仅在需要移动时 overwrite 2023-03-28 00:06:10 +08:00
xkeyC
ca7b21dc3e feat(Webdav):Add overwrite support for moveFiles and copyFiles 2023-03-27 22:55:20 +08:00
Aaron Liu
f172220825 release: 3.7.1 2023-02-13 19:28:00 +08:00
Aaron Liu
37cb292530 fix(db): SQLite3 dialects return empty rows in HasColumn method 2023-02-12 20:02:35 +08:00
Aaron Liu
835605a5cb chore: keep artifacts naming consistent 2023-02-10 13:02:37 +08:00
Aaron Liu
35c4215c0f chore: update archive config to keep original arch name 2023-02-10 12:13:14 +08:00
Aaron Liu
3db803ed38 chore: update readme and archive config 2023-02-09 20:29:38 +08:00
Aaron Liu
c2d7168c26 release: 3.7.0 2023-02-09 19:03:52 +08:00
Aaron Liu
b441d884f6 chore: fix amd64.v1 inconsistency 2023-02-08 22:10:58 +08:00
Aaron Liu
d4c79cb962 chore: fix amd64.v1 inconsistency 2023-02-08 21:39:04 +08:00
Aaron Liu
e134826bd1 chore: skip git validation before release 2023-02-08 21:10:55 +08:00
Aaron Liu
b78f475df8 chore: use goreleaser to build docker images 2023-02-08 21:06:14 +08:00
Aaron Liu
e7de7e868d chore: use goreleaser to build artifacts 2023-02-08 20:04:45 +08:00
Aaron Liu
a58e3b19ec Revert "chore(build): add go-task support (#1608)"
This reverts commit abe90e4c88.

Revert "chore: fix env in task yaml and test new build action"

This reverts commit 7dfe8fb439.

Revert "remove unused env"

This reverts commit 076aa2c567.

Revert "fix: ci build failed as env in go tasks cannot be overwritten"

This reverts commit 71cc332109.
2023-02-08 15:42:42 +08:00
Aaron Liu
71cc332109 fix: ci build failed as env in go tasks cannot be overwritten 2023-02-08 15:32:34 +08:00
Aaron Liu
076aa2c567 remove unused env 2023-02-08 15:11:53 +08:00
Aaron Liu
7dfe8fb439 chore: fix env in task yaml and test new build action 2023-02-08 15:08:20 +08:00
Aaron Liu
b1b74b7be5 Merge remote-tracking branch 'origin/master' 2023-02-08 13:57:54 +08:00
mritd
abe90e4c88 chore(build): add go-task support (#1608)
* chore(build): add go-task support

add go-task support

Signed-off-by: kovacs <mritd@linux.com>

* chore(docker): build with go-task

build with go-task

Signed-off-by: kovacs <mritd@linux.com>

* chore(task): support cross compile

support cross compile

Signed-off-by: kovacs <mritd@linux.com>

* chore(task): remove GCC build

remove GCC build

Signed-off-by: kovacs <mritd@linux.com>

* docs(task): update README

update README

Signed-off-by: kovacs <mritd@linux.com>

---------

Signed-off-by: kovacs <mritd@linux.com>
2023-02-08 13:57:21 +08:00
Aaron Liu
95027e4f5d refactor(db): move dialects to a standalone pkg 2023-02-08 10:06:24 +08:00
VigorFox
9c58278e08 refactor(db): change SQLite driver from github.com/jinzhu/gorm/dialects/sqlite to github.com/glebarez/go-sqlite (#1626)
* sqlite 驱动从 github.com/jinzhu/gorm/dialects/sqlite 改为 github.com/glebarez/go-sqlite,以移除对 cgo 的依赖

* // 兼容已有配置中的 "sqlite3" 配置项

* Update models/init.go: 修改变量名
2023-02-08 09:53:41 +08:00
Aaron Liu
6d1c44f21b test: fix failed ut 2023-02-07 20:24:21 +08:00
Aaron Liu
489a2bab4f test: delete file while user not found 2023-02-07 20:18:13 +08:00
Aaron Liu
d67d0512f8 feat(explorer): advance delete options for users 2023-02-07 20:08:22 +08:00
Aaron Liu
1c1cd9b342 feat(dashboard): unlink file while not deleting its physical source (#789) 2023-02-07 20:07:05 +08:00
Aaron Liu
2a1e82aede fix(fs): cannot delete file while user is deleted (fix #1586) 2023-02-07 20:04:53 +08:00
WeidiDeng
a93ea2cfa0 feat(webdav): add read-only option (#1629) 2023-02-07 19:43:28 +08:00
HFO4
ffbafca994 Merge remote-tracking branch 'origin/master' 2023-01-10 19:56:59 +08:00
HFO4
99434d7aa5 test(wopi): add tests for wopi client 2023-01-10 19:56:02 +08:00
HFO4
f7fdf10d70 feat(wopi): edit WOPI related settings 2023-01-09 19:38:55 +08:00
HFO4
9ad2c3508f enhancement(upload): keep original file content after failed to update document files 2023-01-09 19:38:31 +08:00
HFO4
5a8c86c72e feat(wopi): adapt libreoffice online 2023-01-09 19:38:12 +08:00
HFO4
1c922ac981 feat(wopi): implement required rest api as a WOPI host 2023-01-09 19:37:46 +08:00
HFO4
4541400755 feat(wopi): change doc preview config based on WOPI discovery results 2023-01-09 19:36:41 +08:00
HFO4
c39daeb0d0 feat(wopi): fetch discover endpoint 2023-01-09 19:34:39 +08:00
5aaee9
8dafb4f40a feat: support connect to mysql with unix socket (#1571) 2022-12-19 19:23:47 +08:00
HFO4
42a31f2fd1 fix: timeout while fetching yarn pkgs in building docker image action 2022-12-19 18:22:18 +08:00
HFO4
ca80051a89 release: 3.6.2 2022-12-19 17:53:11 +08:00
HFO4
bc0c374f00 feat(mobile): only allow request from mobile client to copy session 2022-12-19 17:35:39 +08:00
HFO4
e4c87483d6 feat(session): generate temp URL to copy/refresh user session 2022-12-19 17:34:57 +08:00
HFO4
1227f35d3c doc: change readme link 2022-12-19 17:33:15 +08:00
HFO4
08fa6964a9 doc: change readme link 2022-12-16 21:13:17 +08:00
HFO4
9eafe07f4e doc: add English README 2022-12-16 21:12:09 +08:00
HFO4
73d0f2db9b release: 3.6.1 2022-12-16 17:37:21 +08:00
HFO4
82b4e29a80 enhance: escalate ProxyHeader as a global config 2022-12-16 16:58:06 +08:00
HFO4
9860ebbca9 feat(doc preview): add magic variable for file name 2022-12-16 16:55:47 +08:00
HFO4
435a03dd34 fix: nil reference while trying to shut down DB in slave mode (#1416) 2022-12-16 16:55:28 +08:00
HFO4
4e8ab75211 feat(s3): support setting for force using path style endpoint (#1559) 2022-12-16 16:54:58 +08:00
HFO4
6ceb255512 dep: git mod tidy 2022-12-16 14:01:59 +08:00
AHdark
74e1bd6a43 Added same-site policy for session options (#1381)
* Feat: added same-site policy for session options

* Feat: configurations in conf package to control the `SameSite` mode and `Secure` value of the session.

Co-authored-by: AaronLiu <abslant@126.com>
2022-12-16 13:59:26 +08:00
topjohncian
fd59d1b5ca Enhance(dashboard): optimize get policies request (#1539) 2022-12-16 13:55:52 +08:00
Code
2bb28a9845 fix(s3): use HEAD method to get file info (#1521)
建议更换成更好的 HeadObject 方法因为 HeadObject 方法并不会返回文件 Body 因此不需要 defer res.Body.Close()
2022-12-16 13:54:12 +08:00
vvisionnn
5f4f6bd91a refactor: build docker image using build.sh (#1562) 2022-12-15 22:23:34 +08:00
Nya Candy
053e4352b4 fix: Dockerfile (#1561) 2022-12-14 22:31:36 +08:00
HFO4
08e4d2257a release: 3.6.0 2022-12-14 20:17:14 +08:00
HFO4
f02b6f0286 feat(net): customize socket file permission 2022-12-14 15:28:45 +08:00
HFO4
50a3917a65 feat(cache): set max-age for public accessible static resources 2022-12-14 15:28:19 +08:00
HFO4
8c5ba89f7d feat: mobile app promotion page 2022-12-12 20:35:48 +08:00
HFO4
4519dc025b update(version): 3.6.0-beta1 2022-11-23 20:44:09 +08:00
HFO4
92cbc9f312 i18n: logs in database script 2022-11-23 18:31:43 +08:00
HFO4
756769335f feat(dashboard): edit and remove 2FA secret for users 2022-11-23 17:55:23 +08:00
HFO4
6b63195d28 enhance(session): increase default cookie ttl to 60 days 2022-11-21 19:09:54 +08:00
HFO4
db6681f448 fix(avatar): add default cache max age for avatar response 2022-11-21 19:09:37 +08:00
HFO4
4b85541d73 fix(security): CVE-2022-32167 2022-11-21 19:08:51 +08:00
HFO4
f8ed4b4a5a feat(remote download): show download node in list page 2022-10-30 10:45:25 +08:00
HFO4
7dda81368d test(source link): add unit test 2022-10-30 09:41:14 +08:00
HFO4
1c25232b06 feat(source link): record downloads for redirected source link 2022-10-29 11:08:16 +08:00
HFO4
8d7ecedf47 feat(source link): create perm source link with shorter url 2022-10-29 11:06:07 +08:00
HFO4
1f836a4b8b feat(task): not fail immediately after failed upload in transfer tasks 2022-10-17 19:26:09 +08:00
HFO4
c17cf1946a fix(static): add placeholder empty zip file for go embed 2022-10-15 16:40:00 +08:00
HFO4
392c824a33 feat(OneDrive): support Retry-After throttling control from Graph API (#280) 2022-10-15 16:35:02 +08:00
HFO4
8494bd6eb9 fix(request): deep copy shared header object in request options 2022-10-15 16:16:17 +08:00
HFO4
c7dc143d30 Merge remote-tracking branch 'origin/master' 2022-10-15 10:16:08 +08:00
HFO4
8b30593822 fix: cannot delete mass files (>=333) in SQLite (#622) 2022-10-15 10:12:23 +08:00
HFO4
56fa01ed61 fix: failed UT 2022-10-15 09:55:44 +08:00
HFO4
560097145b fix: metadata mismatch if file name contains % while uploading to OneDrive/SharePoint 2022-10-15 09:20:25 +08:00
topjohncian
8cec65b0a7 Fix: cannot finish callback when uploading an office file using sharepoint.cn (#1503) 2022-10-15 09:06:12 +08:00
WeidiDeng
f89653cea7 feat(static): release static files into memory while startup (#1471)
* 初始化时解压zip文件读取内存中

* update go.mod

* 更新各种go版本
2022-10-15 09:05:05 +08:00
HFO4
6b0b44f6d0 test: fix failed UT 2022-10-15 09:02:28 +08:00
HFO4
63b536e5db Merge remote-tracking branch 'origin/master' 2022-10-08 19:24:57 +08:00
HFO4
19a2f69a19 test: fix failed UT 2022-10-08 19:24:38 +08:00
SuperHgO
2271fcfdef fix: s3(minio) file remove operation hangout (#1491) 2022-10-08 19:21:38 +08:00
HFO4
16b5fc3f60 fix: failed test due to missing error message in filesystem 2022-10-08 19:20:41 +08:00
HFO4
f431eb0cbd fix: metadata mismatch if file name contains % while uploading to OneDrive/SharePoint (#1301) 2022-10-08 19:09:51 +08:00
HFO4
644a326580 i18n: logs in rest pkgs 2022-10-08 18:51:52 +08:00
HFO4
f2c53dda31 Merge remote-tracking branch 'origin/master' 2022-09-29 17:43:06 +08:00
HFO4
28c2ffe72e i18n: logs in filesystem 2022-09-29 17:42:23 +08:00
HFO4
196729bae8 i18n: logs in conf/crontab/email/fs.driver 2022-09-29 17:42:05 +08:00
HFO4
9bb4a5263c i18n: logs in aria2/auth/cache/cluster/serializer 2022-09-29 17:40:56 +08:00
HFO4
7366ff534e i18n: logs in models 2022-09-29 17:40:22 +08:00
HFO4
db23f4061d i18n: logs in bootstrapper and response code in middleware 2022-09-29 17:39:48 +08:00
HFO4
16d17ac1e6 i18n: user setting route 2022-09-29 17:38:52 +08:00
HFO4
9464ee2103 i18n: user route 2022-09-29 17:37:05 +08:00
topjohncian
88e10aeaa2 Fix: unexpected querying all files when deleting an empty folder (#1469) 2022-09-29 09:26:30 +08:00
XYenon
b1685d2863 feat: seeding status for aria2 download tasks (#1422)
* feat: add aria2 seeding

* fix: move RecycleTaskType to the bottom

* refactor: refactor recycle aria2 temp file
2022-09-29 09:24:58 +08:00
WeidiDeng
846438e3af graceful 关闭服务器 (#1416) 2022-08-22 19:49:19 +08:00
HFO4
96daed26b4 i18n: objects / share / slave / tag operations 2022-07-20 20:03:41 +08:00
HFO4
906e9857bc i18n: file operation 2022-07-20 20:01:34 +08:00
HFO4
08104646ba i18n: error codes for aria2 / callback/ directory operation 2022-07-20 19:59:13 +08:00
HFO4
a1880672b1 i18n: error codes for dashboard operations 2022-07-18 20:03:57 +08:00
小白-白
9869671633 fix: incorrect progress count (#1379)
* fix: incorrect progress count

文件中转 已完成文件计数 应在文件成功上传后+1 #1367

* fix failed ut

Co-authored-by: HFO4 <912394456@qq.com>
2022-07-12 19:30:41 +08:00
AHdark
c99b36f788 chore: better way to remove frontend map files (#1380)
* Feat: better way to remove frontend map files

* Feat: Docker use `GENERATE_SOURCEMAP` in the frontend build section to avoid generating map files.
2022-07-12 19:20:14 +08:00
topjohncian
25d56fad6e Fix: admin summary cannot be cached in redis (#1329) 2022-06-14 14:42:02 +08:00
HFO4
f083d52e17 feat: tps limit for OneDrive policy 2022-06-09 16:11:36 +08:00
HFO4
4859ea6ee5 feat: update user storage in calibrating no matter if the actual storage match persisted 2022-06-09 16:11:10 +08:00
HFO4
21d2b817f4 Merge remote-tracking branch 'origin/master' 2022-05-25 20:02:43 +08:00
HFO4
04b0b87082 enhance: remove icp footer 2022-05-25 20:02:13 +08:00
HFO4
2a3759c315 i18n: reading dashboard announcements from custom tag 2022-05-25 20:01:41 +08:00
WeidiDeng
36b310133c fix: IP address is empty in unix socket mode (#1314) 2022-05-24 11:01:00 +08:00
WeidiDeng
3fa1249678 fix: use file extension to search for content-type (#1313) 2022-05-24 10:57:20 +08:00
vvisionnn
fb56b27062 fix: delete socket file before run (fixed #1262) (#1279)
* fix: delete socket file before run (fixed #1262)

* refactor: remove useless logs
2022-05-09 19:24:40 +08:00
HFO4
e705dedc22 Merge remote-tracking branch 'origin/master' 2022-05-09 19:06:21 +08:00
HFO4
7bd5a8e3cd dep: update webauthn client for better compatibility 2022-05-09 14:51:11 +08:00
XYenon
5bd711afc6 fix: catch s3 presign err (#1277) 2022-05-05 14:24:35 +08:00
AaronLiu
eef6c40441 Merge pull request #1259 from xb2016/master
Update how to build
2022-05-05 13:48:26 +08:00
HFO4
a78407d878 i18n: tag management 2022-05-02 10:29:33 +08:00
HFO4
46c6ee9be7 i18n: add error codes related to sign up 2022-05-02 10:27:51 +08:00
HFO4
c9eefcb946 i18n: captcha, reset password 2022-04-30 16:51:24 +08:00
HFO4
4fe79859a9 enhance: generate error message for parameter error
i18n: use explicit error code for login controller
2022-04-30 16:50:59 +08:00
小白-白
4d4a31c250 Update how to build 2022-04-30 14:21:47 +08:00
HFO4
0e5683bc3b test: search file with limited parent ids 2022-04-30 10:02:57 +08:00
HFO4
a31ac2299a update version number 2022-04-29 20:15:20 +08:00
HFO4
3b16d7d77c fix: error code overlap 2022-04-29 20:04:26 +08:00
HFO4
8ab0fe0e2f feat: search file under current folder 2022-04-29 20:03:52 +08:00
HFO4
d51351eebd fix: cannot generate thumbnail for COS policy 2022-04-29 20:02:55 +08:00
HFO4
6af1eeb9fb fix: increase SharePoint size verify tolerance to 1 MB 2022-04-29 20:02:29 +08:00
HFO4
94507fe609 feat: create aria2 task in batch 2022-04-29 20:01:43 +08:00
HFO4
1038bae238 feat: get file source link in batch 2022-04-29 19:59:25 +08:00
HFO4
4a4375a796 fix: recursive .map file is not deleted in build script 2022-04-26 19:57:33 +08:00
HFO4
862c7b2fd8 Merge remote-tracking branch 'origin/master' 2022-04-26 19:48:16 +08:00
HFO4
9ab643a71b fix: zip assets folder path error 2022-04-26 19:47:22 +08:00
AaronLiu
7bdbf3e754 Merge pull request #1239 from vvisionnn/master
Keep updated at column when rename
2022-04-26 19:38:34 +08:00
AaronLiu
da68e8ede4 Merge pull request #1240 from vvisionnn/dockerHubDescription
Automatically update docker hub description at each version released
2022-04-26 19:38:06 +08:00
HFO4
23642d7597 test: fix failed test related to Folder.Create 2022-04-26 19:34:19 +08:00
HFO4
a523fc4e2c test: Folder.Create 2022-04-26 19:22:37 +08:00
HFO4
70b30f8d5f release: 3.5.2 2022-04-26 19:10:03 +08:00
HFO4
7c8e9054ce fix: OneDrive chunk upload time should be 0, avoiding upload timeouts when chunk size is large 2022-04-26 19:08:30 +08:00
HFO4
853bd4c280 fix: duplicate entry in insert transaction 2022-04-26 19:07:42 +08:00
HFO4
d845824bd8 fix: overwrite should be disabled when copy on write 2022-04-26 19:07:23 +08:00
HFO4
ae33e077a3 fix: text too long for some option field 2022-04-26 19:06:51 +08:00
HFO4
11043b43e6 fix: should use CompressSize to check size of files before creating compress task 2022-04-26 19:06:17 +08:00
HFO4
c62e355345 fix: cannot use LAN OSS endpoint for uploading 2022-04-26 19:05:54 +08:00
AaronLiu
a3d0291f41 Merge pull request #1244 from cloudreve/webdav-root
fix: cannot redirect root folder (close #1242)
2022-04-26 16:42:03 +08:00
Weidi Deng
024f09f666 修复webdav上传的根目录 2022-04-26 14:37:03 +08:00
vvisionnn
f46e40f31c fix: remove unused script 2022-04-25 20:50:06 +08:00
vvisionnn
b29bf11748 feat: auto update docker hub description 2022-04-25 20:36:40 +08:00
vvisionnn
2dcf1664a6 fix: keep update at column when rename 2022-04-25 20:23:53 +08:00
AaronLiu
dc69a63217 Merge pull request #1232 from cloudreve/archiver-decompress
Archiver decompress
2022-04-25 18:11:23 +08:00
HFO4
86876a1c11 feat: select encoding for decompressing zip file 2022-04-25 18:07:47 +08:00
HFO4
cb51046305 test: new changes in decompress method 2022-04-25 17:23:42 +08:00
HFO4
ac78e9db02 add empty assets.zip for placeholder 2022-04-25 16:53:57 +08:00
HFO4
d10639fd19 release: 3.5.1 2022-04-24 15:33:29 +08:00
HFO4
ba0e3278e3 fix: signature error when finishing oss upload 2022-04-24 15:16:47 +08:00
HFO4
0fb31f4523 fix: deadlock while creating default user in SQLite 2022-04-24 15:16:25 +08:00
HFO4
d0779f564e release: 3.5.0 2022-04-22 20:29:53 +08:00
HFO4
350954911e fix: add no-cache option to service worker file 2022-04-22 16:18:20 +08:00
HFO4
b8bc5bed13 test: new overwrite param in CreateUploadSession 2022-04-22 16:05:57 +08:00
HFO4
91377f4676 fix: cached folder props should ignore date and policy 2022-04-22 15:58:39 +08:00
HFO4
b1803fa51f fix: cannot overwrite file to slave policy / fix: remove lock system for webdav to resolve Windows Explorer issue. 2022-04-22 15:57:21 +08:00
HFO4
f8b7e086ba fix: database is locked when using sqlite 2022-04-22 15:56:45 +08:00
Weidi Deng
23bd1389bc 使用archiver对压缩文件进行解压 2022-04-21 16:33:10 +08:00
HFO4
ff22f5c8b9 Merge remote-tracking branch 'origin/master' 2022-04-21 14:49:10 +08:00
HFO4
aaf8a793ee test: new changes related to filesystem.CreateDirectory 2022-04-21 14:29:10 +08:00
HFO4
2ab2662fcd fix: cannot upload file to onedrive because file info not match (fix #1215)
Path in onedrive is not case-sensitive while Cloudreve cares, thus file size is not matching when finishing upload.
2022-04-21 13:58:54 +08:00
HFO4
71df067a76 fix: create directory now ignore conflict error, it will return the existed folder 2022-04-21 13:58:22 +08:00
Weidi Deng
7a3d44451b precompress embedded frontend. import mholt/archiver. 2022-04-20 19:56:00 +08:00
AaronLiu
d34cb3e5d3 Update README.md 2022-04-20 19:20:26 +08:00
AaronLiu
b5e8e4843f chore: add codecov to test workflow 2022-04-20 19:05:21 +08:00
HFO4
86877aef4b fix: failed ut 2022-04-20 18:59:09 +08:00
HFO4
3d9b9ae5d6 Merge remote-tracking branch 'origin/master' 2022-04-20 18:52:39 +08:00
HFO4
8741c3cc78 feat: return create date while list files 2022-04-20 18:51:43 +08:00
HFO4
6c93e37777 Update version number 2022-04-20 18:50:46 +08:00
HFO4
841a2e258d fix: ignore folder name conflict while creating upload session 2022-04-20 18:50:07 +08:00
HFO4
da2f6c5b07 chore: upgrade gin to 1.7.7 2022-04-20 11:50:15 +08:00
HFO4
a26183875f fix: in decompress, file stream should be closed after copy it to temp file. 2022-04-20 11:49:01 +08:00
vvisionnn
79913a5dfa fix: Dockerfile build issue (#1217) 2022-04-20 09:14:17 +08:00
HFO4
4f6989f1b8 Update submodule 2022-04-19 21:27:38 +08:00
HFO4
fcc29e31eb Update version number 2022-04-19 20:17:37 +08:00
HFO4
00e2b26294 fix: remove filesystem upload log 2022-04-19 20:05:01 +08:00
HFO4
4f65d0e859 fix: use default chunk size if it is set as 0 2022-04-19 19:41:03 +08:00
HFO4
3804efd792 enhance: use transaction to update site settings 2022-04-19 15:36:29 +08:00
HFO4
0c9383e329 feat: cache dashboard site summary 2022-04-19 15:15:50 +08:00
HFO4
13d36c25d4 test: fix failed test in model/file/deleteFile 2022-04-15 16:03:00 +08:00
HFO4
18f5bffed1 test: fix failed test 2022-04-15 15:53:10 +08:00
HFO4
478d390867 Fix: show modified date instead of creating date in file list 2022-04-13 17:54:10 +08:00
HFO4
febbd0c5a0 Feat: batch download in streaming paradigm
Fix: add cache-control header in API call responses
2022-04-13 17:53:46 +08:00
HFO4
32a655f84e Merge remote-tracking branch 'origin/master' 2022-04-12 19:13:26 +08:00
HFO4
0a18d984ab Fix: embed static file not work (introduced in #1107)
embed file system should be declared in main pkg
2022-04-12 19:11:44 +08:00
AaronLiu
265bc099b2 Update .travis.yml 2022-04-12 17:20:02 +08:00
AaronLiu
90a47c9ec0 chore: trigger build manually 2022-04-12 17:13:49 +08:00
HFO4
6451e4c903 Merge branch 'master' of https://github.com/cloudreve/Cloudreve 2022-04-12 16:39:09 +08:00
vvisionnn
b50756dbcb feat: docker/docker-compose support (#1203)
* Feat: add official Dockerfile

* Feat: add dev docker build actions

* update github actions for docker

* update docker actions

* update docker actions

* update docker actions

* update docker actions

* update docker actions

* update docker actions

* fix: add npm default registry

* fix: remove yarn.lock

* fix: update frontend checksum

* remove set registry

* update Dockerfile

* feat: basic docker-compose solution

* remove old Dockerfile

* fix typo

* fix: frontend version

* fix: remove unused comments
2022-04-11 22:13:33 +08:00
AaronLiu
23dc7e370e Create stale.yml 2022-04-09 21:07:52 +08:00
dependabot[bot]
1f3c1d7ce2 Chore(deps): Bump github.com/gin-gonic/gin from 1.5.0 to 1.7.0 (#1198)
Bumps [github.com/gin-gonic/gin](https://github.com/gin-gonic/gin) from 1.5.0 to 1.7.0.
- [Release notes](https://github.com/gin-gonic/gin/releases)
- [Changelog](https://github.com/gin-gonic/gin/blob/master/CHANGELOG.md)
- [Commits](https://github.com/gin-gonic/gin/compare/v1.5.0...v1.7.0)

---
updated-dependencies:
- dependency-name: github.com/gin-gonic/gin
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-04-09 21:02:14 +08:00
Ink33
84807be1ca use go:embed for static resources (#1107)
* feat: use go:embed to embed static files

* ci: fix broken test

* docs: update readme.md

* chore: remove statik

* feat: simplify code

Co-authored-by: AaronLiu <abslant@126.com>
2022-04-09 20:58:07 +08:00
HFO4
20e90e3963 Improve error message when uploading is conflicted 2022-04-09 09:57:22 +08:00
HFO4
ace398d87b Fix: file size is already dirty when clean upload sessions
After listing to be deleted files, before delete is committed to database, file size might be changed by ongoing upload, causing inconsistent user storage.
2022-04-03 20:39:50 +08:00
HFO4
ec776ac837 Test: new changes in pkg request, serializer, task, xml, router 2022-03-31 20:17:01 +08:00
HFO4
d117080991 Test: new changes pkg filesystem 2022-03-30 20:38:02 +08:00
HFO4
1c0a735df8 Test: new changes pkg remote, fsctx, part of filesystem 2022-03-29 20:13:05 +08:00
HFO4
c6130ab078 Feat: new changes in pkg: chunk, backoff, local, onedrive 2022-03-27 11:14:30 +08:00
HFO4
31315c86ee Feat: support option for cache streamed chunk data into temp file for potential retry. 2022-03-26 15:33:31 +08:00
HFO4
636ac52a3f Test: new changes in pkg: cache, cluster, conf 2022-03-26 15:32:57 +08:00
HFO4
1821923b74 Test: new changes in model pkg 2022-03-24 20:07:56 +08:00
HFO4
a568e5e45a Test: new changes in middleware pkg 2022-03-23 20:05:10 +08:00
HFO4
e51c5cd70d Fix: root folder should not be deleted 2022-03-23 19:32:31 +08:00
HFO4
5a3ea89866 Feat: support {ext} and {uuid} magic variable 2022-03-23 19:26:25 +08:00
HFO4
eaa8c9e12d Refactor: move thumbnail config from ini file to database 2022-03-23 19:02:39 +08:00
HFO4
d54ca151b2 Feat: overwrite database settings in conf.ini for slave node. 2022-03-23 18:58:18 +08:00
HFO4
7eb8173101 Feat: adapt new uploader for s3 like policy
This commit also fix #730, #713, #756, #5
2022-03-20 11:29:50 +08:00
HFO4
d3016b60af Feat: adapt new uploader for upyun policy 2022-03-20 11:27:43 +08:00
HFO4
9e5713b139 Feat: adapt new uploader for COS policy 2022-03-20 11:27:17 +08:00
HFO4
07f13cc350 Refactor: factory method for OSS client
Fix: use HTTPS schema by default in OSS client
Feat: new handler for Qiniu policy
2022-03-20 11:26:26 +08:00
HFO4
0df9529b32 Feat: generating token and callback url for OSS muiltpart upload, support resume upload in sever-side uploading for OSS 2022-03-20 11:23:55 +08:00
HFO4
015ccd5026 Feat: use new ChunkManager for OneDrive API client 2022-03-20 11:20:09 +08:00
HFO4
5802161102 Fix: inherited policy ID didn't pass through second layer in Folder / version verification in Ping router 2022-03-20 11:17:04 +08:00
HFO4
b6efca1878 Feat: uploading OneDrive files in client side 2022-03-20 11:16:25 +08:00
HFO4
15e3e3db5c Fix: unused import and Ping router return wrong version 2022-03-16 11:44:40 +08:00
HFO4
24dfb2c24e Fix: undefined method in transfer task 2022-03-13 19:27:33 +08:00
HFO4
dd4c3e05d3 Feat: show pro flag in ping response 2022-03-13 16:21:32 +08:00
HFO4
5bda037d74 Fix: cannot list multiple pages in async task page 2022-03-13 16:21:09 +08:00
HFO4
c89327631e Fix: panics inside of task was not correctly logged into DB
Feat: slave node use new API to upload file to master
2022-03-13 16:20:50 +08:00
HFO4
9136f3caec Fix: while placeholder file got conflict, original file might be deleted 2022-03-13 16:19:05 +08:00
HFO4
0650684dd9 Feat: cancel upload session in slave node 2022-03-13 16:18:39 +08:00
HFO4
effbc8607e Refactor: use chunk manager to manage resume upload in server side 2022-03-13 16:17:20 +08:00
HFO4
b96019be7c Feat: client method to upload file from master node to slave node 2022-03-13 16:16:58 +08:00
HFO4
081e75146c Fix: add optimism lock when updating file size 2022-03-13 16:15:19 +08:00
HFO4
e0714fdd53 Feat: process upload callback sent from slave node 2022-03-03 19:17:25 +08:00
HFO4
4925a356e3 Enable overwrite for non-first chunk uploading request 2022-03-03 19:15:25 +08:00
HFO4
050a68a359 Chore: update golang version to 1.17.x 2022-03-02 19:29:18 +08:00
HFO4
7214e59c25 Feat: creating upload session and credential from master server 2022-02-28 17:52:59 +08:00
HFO4
118d738797 Feat: support apply append mode and overwrite mode for FileStream 2022-02-28 17:49:00 +08:00
HFO4
285611baf7 Feat: truncate file if uploaded chunk is overlapped 2022-02-28 17:47:57 +08:00
HFO4
521c5c8dc4 Feat: use transactions to manipulate user's used storage 2022-02-27 14:24:17 +08:00
HFO4
285e80ba76 Feat: use database transactions to delete / update file size 2022-02-27 14:23:26 +08:00
HFO4
2811ee3285 Feat: slave policy creating upload session API 2022-02-27 14:22:09 +08:00
HFO4
7dd636da74 Feat: upload session recycle crontab job / API for cleanup all upload session 2022-02-27 14:16:36 +08:00
HFO4
3444b4a75e Feat: chunk upload handling for local policy 2022-02-27 14:13:39 +08:00
HFO4
c301bd6045 Feat: API for receiving chunk data 2022-02-27 14:11:01 +08:00
HFO4
72173bf894 Refactor: create placeholder file and record upload session id in it 2022-02-27 14:07:12 +08:00
HFO4
6fdf77e00e Feat: support setting "last modified" props when creating upload session 2022-02-27 14:05:21 +08:00
HFO4
e37e93a7b6 Feat: create hidden file when creating upload session 2022-02-27 14:04:30 +08:00
HFO4
868a88e5fc Refactor: use universal FileHeader when handling file upload, remove usage of global ctx with FileHeader, SavePath, DisableOverwrite 2022-02-27 14:03:07 +08:00
KAAAsS
8a222e7df4 fix: nil pointer in qiniu and upyun driver (#1146) 2022-02-26 08:39:47 +08:00
HFO4
8443a30fb1 Feat: support chunk size option in policy 2022-02-10 19:31:06 +08:00
HFO4
de9c41082c Feat: create upload session and pre-upload check 2022-02-10 19:30:08 +08:00
HFO4
855c9d92c4 Feat: get policy from directory props / Feat: return source enabled flag in file list 2022-02-10 19:25:38 +08:00
vvisionnn
c84d0114ae Fix: trigger err when move folder into itself (#1128) 2022-02-04 12:07:56 +08:00
HFO4
c31c77a089 Merge remote-tracking branch 'origin/master' 2021-11-30 19:31:48 +08:00
HFO4
6b15cae0b5 Update version number 2021-11-30 19:27:09 +08:00
HFO4
84d81f201f Fix: refresh interval not working 2021-11-30 19:26:35 +08:00
HFO4
af4d9767c2 Fix: slave node cannot transfer files to other slave node 2021-11-30 19:26:07 +08:00
milkice
45597adcd3 Integrate aria2c support & fix unintended behavior for docker image (#1073)
* Update Dockerfile

* Create docker-bootstrap.sh

In addition to spawn cloudreve, this script generates password for aria2 so that users can take advantage of aria2 more conveniently instead of configuring aria2 by themselves.
2021-11-29 17:45:33 +08:00
AaronLiu
762f0f9c68 Update app.go 2021-11-26 15:48:19 +08:00
AaronLiu
c5074df1c7 Update README.md 2021-11-26 11:17:19 +08:00
HFO4
7ea72cf364 Fix: default CORS setting header should be applied with new change 2021-11-26 11:05:12 +08:00
HFO4
4eb7525c51 Fix: cannot transfer tasks with multiple files in slave node 2021-11-26 10:58:01 +08:00
HFO4
3948ee7f3a Fix: use X-Cr- as custom header prefix 2021-11-23 21:22:23 +08:00
HFO4
865a801fa8 Update version number 2021-11-22 21:08:35 +08:00
HFO4
05941616df Fix: node should not be refreshed when editing node with status=inactive 2021-11-22 20:48:16 +08:00
HFO4
51b1e5b854 Merge remote-tracking branch 'origin/master' 2021-11-22 20:38:21 +08:00
HFO4
4dbe867020 Fix: failed unit test due to import cycle 2021-11-22 20:38:03 +08:00
WeidiDeng
8c8ad3e149 Fix: WebDAV cannot move and rename at the same time (#1056) 2021-11-22 20:29:45 +08:00
HFO4
fce38209bc Merge remote-tracking branch 'origin/master' 2021-11-22 20:27:07 +08:00
HFO4
700e13384e Fix: using url escape instead of unescape in remote handler (#1051) 2021-11-22 20:23:34 +08:00
HFO4
7fd984f95d Feat: support custom office preview service (Fix #1050) 2021-11-22 20:16:24 +08:00
HFO4
9fc08292a0 Feat: migration DB support custom upgrade scripts 2021-11-22 19:53:42 +08:00
milkice
8c5445a26d Fix problems in Dockerfile (#1059)
Fix js heap out of memory
Fix can't find cloudreve for cloudreve has been renamed to Cloudreve
2021-11-20 17:43:31 +08:00
HFO4
96b84bb5e5 Test: tasks pkg 2021-11-20 17:14:45 +08:00
HFO4
9056ef9171 Test: new changes in 3.4.0 2021-11-20 16:59:29 +08:00
HFO4
532bff820a Test: new modifications in filesystem pkg 2021-11-16 20:54:21 +08:00
HFO4
fcd9eddc54 Test: pkg/cluster 2021-11-16 20:14:27 +08:00
HFO4
6c9967b120 Test: cluster/node.go and controller.go 2021-11-15 20:30:25 +08:00
HFO4
416f4c1dd2 Test: balancer / auth / controller in pkg 2021-11-11 20:56:16 +08:00
HFO4
f0089045d7 Test: aria2 task monitor 100% cover 2021-11-11 19:49:02 +08:00
WeidiDeng
4b88eacb6a Remove unnecessary import "C" (#1048)
There are no C binding in this file. And for users to compile themselves, this line will cause compilation to fail for those who don't need sqlite support.

移除不必要的c binding,使用CGO=0时,这行会导致编译失败。禁用CGO会导致sqlite无法使用,但可以方便编译(不需要安装gcc,跨平台编译方便),这点可以在文档中说明。
2021-11-11 17:45:51 +08:00
kikoqiu
54ed7e43ca Feat: improve thumbnails proformance and GC for local policy (#1044)
* thumb generating improvement

Replace "github.com/nfnt/resize" with "golang.org/x/image/draw". Add thumb task queue to avoid oom when batch thumb operation

* thumb improvement

* Add some tests for thumbnail generation
2021-11-11 17:45:22 +08:00
HFO4
4d7b8685b9 Test: aria2 task monitor
Fix: tmp file not deleted after transfer task failed to create
2021-11-09 20:53:42 +08:00
HFO4
eeee43d569 Test: newly added sb models 2021-11-09 19:29:56 +08:00
HFO4
3064ed60f3 Test: new database models and middlewares 2021-11-08 20:49:07 +08:00
HFO4
e41ec9defa Refactor: move slave pkg inside of cluster
Test: middleware for node communication
2021-11-08 19:54:26 +08:00
HFO4
eaa0f6be91 Update version number 2021-11-07 10:14:30 +08:00
HFO4
5db476634a Fix: deadlock and sync issue in node pool 2021-11-03 21:27:53 +08:00
HFO4
1f06ee3af6 Fix: node cannot be reloaded when db model changes 2021-11-01 19:23:19 +08:00
HFO4
22bbfe7da1 Merge remote-tracking branch 'origin/master' 2021-10-31 09:50:55 +08:00
HFO4
f1dc4c4758 Chore: update ubuntu image version 2021-10-31 09:50:07 +08:00
HFO4
5f861b963a Update submodule version 2021-10-31 09:48:31 +08:00
AaronLiu
056de22edb Feat: aria2 download and transfer in slave node (#1040)
* Feat: retrieve nodes from data table

* Feat: master node ping slave node in REST API

* Feat: master send scheduled ping request

* Feat: inactive nodes recover loop

* Modify: remove database operations from aria2 RPC caller implementation

* Feat: init aria2 client in master node

* Feat: Round Robin load balancer

* Feat: create and monitor aria2 task in master node

* Feat: salve receive and handle heartbeat

* Fix: Node ID will be 0 in download record generated in older version

* Feat: sign request headers with all `X-` prefix

* Feat: API call to slave node will carry meta data in headers

* Feat: call slave aria2 rpc method from master

* Feat: get slave aria2 task status
Feat: encode slave response data using gob

* Feat: aria2 callback to master node / cancel or select task to slave node

* Fix: use dummy aria2 client when caller initialize failed in master node

* Feat: slave aria2 status event callback / salve RPC auth

* Feat: prototype for slave driven filesystem

* Feat: retry for init aria2 client in master node

* Feat: init request client with global options

* Feat: slave receive async task from master

* Fix: competition write in request header

* Refactor: dependency initialize order

* Feat: generic message queue implementation

* Feat: message queue implementation

* Feat: master waiting slave transfer result

* Feat: slave transfer file in stateless policy

* Feat: slave transfer file in slave policy

* Feat: slave transfer file in local policy

* Feat: slave transfer file in OneDrive policy

* Fix: failed to initialize update checker http client

* Feat: list slave nodes for dashboard

* Feat: test aria2 rpc connection in slave

* Feat: add and save node

* Feat: add and delete node in node pool

* Fix: temp file cannot be removed when aria2 task fails

* Fix: delete node in admin panel

* Feat: edit node and get node info

* Modify: delete unused settings
2021-10-31 09:41:56 +08:00
想出网名啦
a3b4a22dbc bug fix: can't connect to postgres database (#992)
* bug fix: can't connect to postgres database

* remove useless arg

* remove vscode setting
2021-10-29 20:30:26 +08:00
WeidiDeng
9ff1b47646 fix webdav prop get (#1023)
修复了displayname为空,potplayer可以正常使用webdav功能
2021-09-27 22:28:36 +08:00
AaronLiu
65c4367689 Revert "delete $name policy (#831)" (#961)
This reverts commit e6959a5026.
2021-07-30 11:22:18 +08:00
HFO4
db7489fb61 Update version number 2021-07-11 20:10:51 +08:00
HFO4
622b928a90 Feat: support database charset option and more DMBS driver 2021-07-11 14:46:01 +08:00
HFO4
c0158ea224 Merge branch 'master' of https://github.com/cloudreve/Cloudreve 2021-07-11 14:33:42 +08:00
Songtao
e6959a5026 delete $name policy (#831) 2021-07-11 14:32:47 +08:00
HFO4
9d64bdd9f6 Fix: attr field overflow when downloading large torrent (#941) 2021-07-11 14:20:19 +08:00
kleinsea
c85c2da523 feat: append config parameter: registerEnabled (#911) 2021-05-29 09:58:11 +08:00
HFO4
8659bdcf77 Fix: unable to read file from UPYUN (#472, #472, #836) 2021-05-11 21:52:55 +08:00
HFO4
641fe352da Fix: user encryption setting will now overwrite the default one in gomail (#869 #857 #723 #545) 2021-04-25 16:06:22 +08:00
HFO4
96712fb066 Feat: use RFC3339 time format in returned results (#811) 2021-04-03 16:57:13 +08:00
HFO4
a1252c810b Update version number to 3.3.1 2021-03-23 10:46:17 +08:00
HFO4
e781185ad2 Test: captcha verify middleware 2021-03-22 21:19:43 +08:00
HFO4
95802efcec Merge remote-tracking branch 'origin/master' 2021-03-22 18:28:57 +08:00
topjohncian
233648b956 Refactor: captcha (#796) 2021-03-22 02:28:12 -08:00
HFO4
53acadf098 Modify: limit forum threads numbers in admin index 2021-03-22 16:35:58 +08:00
HFO4
c0f7214cdb Revert "Fix: OSS SDK will encode all object key (#694)"
This reverts commit 270f617b and fix #802
2021-03-22 16:22:21 +08:00
HFO4
ccaefdab33 Fix: unable to upload file in WebDAV (#803) 2021-03-22 13:50:43 +08:00
HFO4
6efd8e8183 Fix: file size not match while uploading office docs to SharePoint sites 2021-03-21 21:02:31 +08:00
AaronLiu
144b534486 Update build.yml 2021-03-20 22:46:01 -08:00
AaronLiu
e160154d3b Update test.yml 2021-03-20 22:43:40 -08:00
AaronLiu
2381eca230 Rename build.yaml to build.yml 2021-03-20 22:42:13 -08:00
AaronLiu
adde486a30 Create build.yaml 2021-03-20 22:41:58 -08:00
AaronLiu
a9c0d6ed17 Update and rename build.yml to test.yml 2021-03-20 22:41:28 -08:00
HFO4
595f4a1350 Test: get parament source in OneDrive handler 2021-03-21 14:32:10 +08:00
HFO4
a5f80a4431 Feat: get permanent URL for OneDrive policy 2021-03-20 12:33:39 +08:00
AaronLiu
6fb419d998 Fix: downgrade glibc 2021-03-18 00:48:47 -08:00
AaronLiu
3f0f33b4fc Update build.yml 2021-03-18 00:44:32 -08:00
HFO4
052e6be393 Update submodule version 2021-03-18 11:29:26 +08:00
HFO4
a4b0ad81e9 Feat: database script for resetting admin password 2021-03-17 14:34:12 +08:00
HFO4
8431906b94 Update version number 2021-03-17 14:21:32 +08:00
HFO4
40476953aa Fix: stop listening HTTP port if unix socket is enabled (#729) 2021-03-17 14:19:05 +08:00
ihipop
270f617b9d Fix: OSS SDK will encode all object key (#694)
(cherry picked from commit b9cd82b849065f0d1ad093708f09c8722339bf2a)
2021-03-16 21:56:14 -08:00
HFO4
170f2279c1 Fix: failed to get thumbnails under global OneDrive policy 2021-03-14 11:03:10 +08:00
HFO4
d1377262e3 Fix: ignore requiring SharePoint site ID after edit / nil pointer in user setting routers 2021-03-14 10:26:45 +08:00
HFO4
c9acf7e64e Update submodule 2021-03-12 17:06:10 +08:00
HFO4
4e2f243436 Feat: support using SharePoint site to store files 2021-03-12 17:05:13 +08:00
HFO4
a54acd71c2 Merge remote-tracking branch 'origin/master' 2021-03-11 14:52:27 +08:00
HFO4
fec2fe14f8 Modify: json tag for QueryDate 2021-03-11 14:50:32 +08:00
HFO4
1f1bc056e3 Feat: API for getting object property 2021-03-11 14:50:02 +08:00
AaronLiu
e44ec0e6bf Update issue templates 2021-03-05 15:44:33 +08:00
HFO4
a93b964d8b Modify: OneDrive file URL cache will refreshed after file is updated 2021-03-03 17:07:26 +08:00
HFO4
d9cff24c75 Modify: disable association_autoupdate in model.File.UpdateSourceName 2021-03-03 14:10:08 +08:00
HFO4
e2488841b4 Test: #765 2021-03-02 12:45:54 +08:00
日下部 詩
a276be4098 注册帐号时,如果尚未验证,再发一次验证信 (#765)
* 注册帐号时,如果尚未验证,再发一次验证信

* 修正2个bug。 1:未验证显示密码错误 2:未验证无法重发email

* 小修正,如果已存在user,拿已有user资讯取代掉新user资讯来寄送激活码

* 激活码改成激活邮件

* 忘记密码以后,重设二步验证设定

* Revert "忘记密码以后,重设二步验证设定"

This reverts commit c5ac10b11c.

* 實作 https://github.com/cloudreve/Cloudreve/pull/765#discussion_r584313520
2021-03-02 12:43:14 +08:00
HFO4
4cf6c81534 Fix: failed unit test 2021-03-02 12:32:34 +08:00
HFO4
5a66af3105 Fix: failed unit test 2021-03-02 12:21:43 +08:00
HFO4
fc5c67cc20 Feat: disable overwrite for OneDrive policy 2021-03-01 13:27:18 +08:00
HFO4
5e226efea1 Feat: disable overwrite for non-updating put request, only works under local,slave,OneDrive,OSS policy. (#764) 2021-03-01 13:03:49 +08:00
HFO4
c949d47161 Update submodule version 2021-02-28 16:51:27 +08:00
HFO4
e699287ffd Modify: mark as success when deleting a file that does not exist;
Fix: minio is not usable in S3 policy
Modify: use batch request to delete S3 files
2021-02-28 16:48:51 +08:00
Cinhi Young
9c78515c72 Fix: email address should be lowercase for requesting Gravatar (#758) 2021-02-08 19:33:09 +08:00
HFO4
3b22b4fd25 Update version number 2021-01-06 18:18:24 +08:00
HFO4
08d998b41e Update submodule 2021-01-06 17:38:20 +08:00
Breeze Chen
488e62f762 Fix qiniu last modify time. (#691) 2021-01-06 17:01:24 +08:00
HFO4
f35ad3fe0a Fix: #663 2021-01-06 16:35:31 +08:00
HFO4
61e6d9b591 Update version number 2020-12-10 17:26:39 +08:00
HFO4
feb1134a7c Update submodule 2020-12-10 17:05:06 +08:00
HFO4
9f2f14cacf Fix: https://github.com/cloudreve/Cloudreve/issues/504 2020-12-10 16:59:45 +08:00
HFO4
055ed0e075 Fix: standardize the use of error codes related to login credentials 2020-12-08 20:13:42 +08:00
HFO4
c87109c8b1 Fix: incorrect attr column type in download table 2020-12-08 19:55:23 +08:00
HFO4
8057c4b8bc Update: submodule 2020-12-08 18:53:20 +08:00
HFO4
5ab93a6e0d Test: frontend middleware 2020-12-08 18:15:02 +08:00
HFO4
5d406f1c6a Feat: use history router mode 2020-12-08 17:36:19 +08:00
HFO4
5b44606276 Test: replace cdn proxy url for OneDrive policy 2020-12-08 17:31:37 +08:00
HFO4
bd2bdf253b Feat: using custom reverse proxying in OneDrive file downloading 2020-12-08 17:30:22 +08:00
HFO4
0cfa61e264 Test: user storage calibration script 2020-12-06 16:50:08 +08:00
HFO4
f7c8039116 Feat: execute database script to calibrate user storage 2020-12-06 16:49:49 +08:00
HFO4
6486e8799b Fix: user storage might be returned twice when canceling uploading request (#645) 2020-12-03 18:10:10 +08:00
HFO4
7279be2924 Fix: user storage might be returned twice when OneDrive uploading canceled in WebDAV requests 2020-12-01 19:22:52 +08:00
HFO4
33f8419999 Merge remote-tracking branch 'origin/master' 2020-11-29 19:16:24 +08:00
HFO4
a5805b022a Feat: enable using LAN endpoint in serverside request of OSS policy (#399) 2020-11-29 19:15:35 +08:00
Archerx
ae89b402f6 fix: statik data needs to be initialized (#640)
Co-authored-by: xuc2 <xuc2@knownsec.com>
2020-11-25 21:23:26 +08:00
HFO4
0d210e87b3 Fix: aria2 task failed due to limited size of attr filed in DB 2020-11-24 18:47:44 +08:00
HFO4
f0a68236a8 Feat: delete aria2 record in client side (#335) 2020-11-23 19:24:56 +08:00
HFO4
c6110e9e75 Feat: keep folder structure in aria2 transferring 2020-11-23 18:44:13 +08:00
HFO4
d97bc26042 Fix: add recycleLock preventing recycle FileSystem used by another goroutine 2020-11-21 19:32:25 +08:00
HFO4
11c218eb94 Merge branch 'master' of https://github.com/cloudreve/Cloudreve 2020-11-21 18:19:24 +08:00
Loyalsoldier
79b8784934 Comply with Golang semantic import versioning (#630)
* Code: compatible with semantic import versioning

* Tools & Docs: compatible with semantic import versioning

* Clean go.mod & go.sum
2020-11-21 17:34:55 +08:00
HFO4
59d50b1b98 Modify: change Unix to UnixSocket in config section 2020-10-26 15:42:18 +08:00
HFO4
746aa3e8ef Test: s3 policy 2020-10-26 15:33:28 +08:00
HFO4
95f318e069 Feat: adapt minio for S3 policy and fix listing files 2020-10-26 15:06:02 +08:00
HFO4
77394313aa Fix: S3 adaption for minio 2020-10-11 13:05:14 +08:00
HFO4
41eb84a221 Update submodule version 2020-10-10 13:38:39 +08:00
mritd
40414fe6ae chore(dockerfile): update node image to lts-buster (#557)
* chore(dockerfile): update node image to lts-buster

update node image to lts-buster, because the alpine image cannot be obtained on arm/arm64,
it will support `docker buildx` build after the upgrade.

Signed-off-by: mritd <mritd@linux.com>

* chore(docker): update golang build image

update golang build image

Signed-off-by: mritd <mritd@linux.com>
2020-09-09 14:56:14 +08:00
mritd
7df09537e0 fix(db_driver): fix the panic when sqlite3 is used in the conf (#551)
* fix(db_driver): fix the panic when sqlite3 is used in the conf

fix the panic when sqlite3 is used in the conf

ref cloudreve/Cloudreve#550

Signed-off-by: mritd <mritd@linux.com>

* fix(nullpointer): fix possible null pointer error

fix possible null pointer error

Signed-off-by: mritd <mritd@linux.com>
2020-09-03 12:07:38 +08:00
mritd
f478c38307 chore(docker): add dockerfile (#549)
* chore(docker): add dockerfile

add dockerfile

Signed-off-by: mritd <mritd@linux.com>

* chore(docker): fix docker file

fix docker file

Signed-off-by: mritd <mritd@linux.com>

* chore(docker): mv bin file to /cloudreve

mv bin file to /cloudreve

Signed-off-by: mritd <mritd@linux.com>

* chore(docker): remove GOPROXY

remove GOPROXY

Signed-off-by: mritd <mritd@linux.com>
2020-09-03 12:05:00 +08:00
GuerraMorgan
bfd2340732 Add: Unix Socket support (#466)
* Update conf.go

* Update driver.go

* Update session.go

* Update defaults.go

* Update main.go

* Update conf.go

* Update defaults.go
2020-08-12 20:31:28 +08:00
ZZF
dd50ef1c25 添加S3策略的支持 (#425)
* 添加亚马逊S3策略的支持

* 添加CDN支持,公有目录删除无用参数

* 增加Region
2020-06-05 14:45:24 +08:00
HFO4
27bf8ca9b2 Fix: node js version 2020-06-01 20:27:06 +08:00
HFO4
c71a2c5b64 Modify: version number to 3.1.1 2020-06-01 20:22:14 +08:00
HFO4
a7ba357cb8 Fix: hash mark in file name lead to 404 error (#405) 2020-05-30 19:35:28 +08:00
HFO4
14f5982b47 Modify: set node_js version to node_js in travis.yml 2020-05-24 11:31:00 +08:00
HFO4
e607311268 Update: version number for 3.1.0 2020-05-24 10:47:30 +08:00
HFO4
acc5d53bab Update: submodules 2020-05-24 10:36:48 +08:00
HFO4
aa3e8913ab Feat: add ".zip" suffix if not specified while creating compressed file 2020-05-24 10:36:26 +08:00
HFO4
ee0f8e964d Feat: eject internal static files 2020-05-23 13:57:02 +08:00
HFO4
60745ac8ba Merge remote-tracking branch 'origin/master' 2020-05-23 13:19:10 +08:00
HFO4
bfb5b34edc Fix: concurrent logging lock / Feat: listen SSL (#287) 2020-05-23 13:17:48 +08:00
hiCasper
a5000c0621 Modify: update actions (#398) 2020-05-20 11:48:00 +08:00
HFO4
e038350cf0 Merge branch 'master' of https://github.com/cloudreve/Cloudreve 2020-05-19 11:25:12 +08:00
HFO4
5af3c4e244 Fix: directory renaming should not be limited by file extensions (#395) 2020-05-19 11:25:01 +08:00
AaronLiu
7ed14c4d81 Update FUNDING.yml 2020-05-19 11:12:45 +08:00
AaronLiu
869c0006c5 Create FUNDING.yml 2020-05-19 11:11:56 +08:00
HFO4
4c458df666 Fix: failed tests while static files not loaded 2020-05-19 10:34:16 +08:00
HFO4
ed684420a2 Modify: actions should not perform clean before build 2020-05-19 10:26:24 +08:00
HFO4
2076d56f0f Feat: dynamic writing site title, favicon, description, custom html (#286) 2020-05-19 10:24:15 +08:00
HFO4
280308bc05 Modify: increase OneDrive client uploading chunk size to 100MB 2020-05-13 09:09:11 +08:00
HFO4
1172765c58 Feat: option for using SSL connection in mail queue 2020-05-11 09:29:19 +08:00
HFO4
58856612e2 Feat: create empty file in web panel (#305) 2020-05-09 10:35:18 +08:00
HFO4
ee0f224cbb Feat: use Monaco code editor 2020-05-03 13:16:40 +08:00
HFO4
e8a6df9a86 Feat: import file from existing outer folder 2020-05-02 10:22:28 +08:00
HFO4
b02d27ca0a Merge branch 'master' of https://github.com/cloudreve/Cloudreve 2020-05-02 10:15:57 +08:00
TS
51f66eb06b Feat: ICP 备案信息 (#348) 2020-05-01 13:50:16 +08:00
HFO4
0df8a9ba65 Modify: search shares without login required 2020-05-01 09:33:59 +08:00
HFO4
79daf92896 Test: List files in OneDrive 2020-05-01 09:22:27 +08:00
HFO4
8a2be58ef3 Feat: List files in OneDrive 2020-04-30 11:45:35 +08:00
HFO4
ce7784090f Feat: List files in upyun bucket 2020-04-30 10:37:48 +08:00
HFO4
dfb663a6e0 Feat: List files in Qiniu bucket 2020-04-29 10:37:15 +08:00
HFO4
cfaf20926f Feat: List files in COS bucket 2020-04-29 10:36:52 +08:00
HFO4
305497e7cb Test: oss.List 2020-04-29 09:18:26 +08:00
HFO4
52c2422be9 Feat: list oss files 2020-04-28 11:43:32 +08:00
HFO4
1afc750dae Modify: create folder if not exist while importing files 2020-04-28 10:47:40 +08:00
HFO4
960c886496 Test: slave side list file 2020-04-28 10:16:04 +08:00
HFO4
5d579cdadc Feat: slave side list file 2020-04-28 10:02:53 +08:00
HFO4
d5fc5745b4 Fix: failed test due to missing AdminCreateImportTask controller 2020-04-27 14:45:27 +08:00
HFO4
a732025d5a Update: submodules 2020-04-27 14:40:09 +08:00
HFO4
030fd4ac57 Test: task.ImportTask 2020-04-27 14:39:49 +08:00
HFO4
9eeb4b6d19 Feat: task for importing file from existing filesystem 2020-04-27 14:39:16 +08:00
HFO4
36e5b31f73 Modify: add recursive options for list method
Test: local.List
2020-04-27 10:31:34 +08:00
cha0sCat
26d4d34837 Feat: webdav 适配 rclone (#330)
rclone在传输时会反复创建父目录, 导致cloudreve返回409, 最终导致传输失败.
因此检测如果是rclone则忽略文目录已存在错误
2020-04-27 10:01:00 +08:00
HFO4
8c547a05fd Fix: mismatched pointer receiver in oss.Driver 2020-04-24 11:14:32 +08:00
HFO4
f7311f906b Feat: add List method to Handler interface / implement local.List 2020-04-24 11:08:07 +08:00
HFO4
6006ff4d22 Feat: regenerate when thumbnail files not exist 2020-04-24 10:18:01 +08:00
ihipop
034ed956a3 Bad syntax for struct tag value authn (#314) 2020-04-23 09:30:19 +08:00
hiCasper
700c5795f5 Support Github Actions (#310) 2020-04-22 11:15:26 +08:00
HFO4
8c3287d380 Test: Policy.IsThumbExist / filesystem.AddFile 2020-04-22 11:10:43 +08:00
HFO4
304e7b502c Feat: thumbnail support for COS policy 2020-04-22 11:09:33 +08:00
HFO4
514e069113 Refactor: decide if there is a thumbnail based on the file extension 2020-04-22 11:04:35 +08:00
HFO4
7b571499a7 Feat: forcibly delete files in dashboard (#277) 2020-04-21 09:47:54 +08:00
HFO4
8b68d46bdf Update: submodules 2020-04-19 10:12:26 +08:00
HFO4
ab3b59e63d Feat: add option to force delete file record in database 2020-04-19 10:09:16 +08:00
HFO4
9910f8d732 Modify: check static file version when statics folder exists 2020-04-12 12:46:59 +08:00
HFO4
99033d61c6 Add: arm64 build option 2020-04-12 12:00:01 +08:00
HFO4
e9f3a55eb8 Modify: migrate from travis.org to .com 2020-04-11 11:27:17 +08:00
HFO4
3cc9940924 Remove: arm64 build option 2020-04-11 11:17:34 +08:00
HFO4
bcdf94fd93 Fix: missing sys/cdefs.h in CI building 2020-04-11 10:59:51 +08:00
HFO4
3c09ad7c02 Feat: arm64 build option in CI 2020-04-11 10:45:18 +08:00
HFO4
7be0366b1f Dep: update submodule 2020-04-11 10:03:21 +08:00
HFO4
0575b0aa92 Fix: search function should not convert case (#288) 2020-04-11 09:42:22 +08:00
HFO4
2e342806b6 Fix: missing deps in login recaptcha service 2020-04-11 09:15:27 +08:00
HFO4
cf9dc1c24f Dep: update submodule 2020-04-11 09:11:07 +08:00
topjohncian
e58fb82463 Feat: ReCaptcha support (#292)
* Add custom mysql database port.

* Modify: add cloudreve bin file to .gitignore

* Feat:增加后端对ReCaptcha的支持
P.S.必须要执行迁移
2020-04-11 09:09:44 +08:00
HFO4
fa900b166a Feat: check static files version 2020-04-08 10:00:03 +08:00
HFO4
2e43f8ed5b Modify: use INT represent Database port 2020-04-08 09:04:03 +08:00
topjohncian
554493dea4 Add custom mysql database port. (#289) 2020-04-08 08:59:43 +08:00
HFO4
816b537787 Fix: permission error while creating folder recursively 2020-04-07 11:09:27 +08:00
HFO4
e07b09186d Modify: use 644 permission when creating a new physical directory (#274) 2020-04-07 10:56:02 +08:00
HFO4
8c7d075484 Fix: repeated return of capacity when OneDrive WebDAV upload canceled 2020-04-07 10:51:54 +08:00
HFO4
46743f3c1e Fix: unsupported GET parameters in public OSS bucket 2020-04-07 10:15:11 +08:00
HFO4
1a1543f190 Fix: catch internal error in OneDrive OAuth callback 2020-04-07 10:08:34 +08:00
HFO4
4aef12bf7e Merge remote-tracking branch 'origin/master' 2020-04-06 09:05:57 +08:00
HFO4
691c9aeb7d Feat: gzip support for static files 2020-04-06 09:05:30 +08:00
yeungc
6285e45e34 Fix: filename and code typo (#260)
* Rename handller.go to handler.go

* Rename handller.go to handler.go

* Rename handller_test.go to handler_test.go

* Rename handller.go to handler.go

* Rename handller.go to handler.go

* Rename handller.go to handler.go

* Rename handller.go to handler.go

* Fix typo
2020-03-23 13:44:59 +08:00
HFO4
f594d0ab83 Feat: check new versions while booting 2020-03-18 11:36:34 +08:00
HFO4
25d1735c1d Feat: compatible digest algorithm with V2 2020-03-18 11:21:04 +08:00
HFO4
c4c174f560 Fix: redirect should be 302 status in dashboard 2020-03-18 11:11:57 +08:00
HFO4
175c4d781f Fix: absolute path cannot be saved for new policy 2020-03-18 10:45:33 +08:00
HFO4
87fde687eb Fix: files with percent sign (%) cannot be deleted in OneDrive 2020-03-18 10:42:41 +08:00
HFO4
65cf0f57aa Fix: failed test due to policy cache 2020-03-18 09:47:06 +08:00
HFO4
0eb04ed0ea Feat: cache for OneDrive META request 2020-03-18 09:35:00 +08:00
HFO4
96983ddc70 Feat: custom SQLite db file path 2020-03-18 09:03:25 +08:00
HFO4
b98e5efb83 Merge remote-tracking branch 'origin/master' 2020-03-17 16:14:43 +08:00
HFO4
ff2dae80f0 Fix: incorrect thumb path detect while deleting files 2020-03-17 16:11:11 +08:00
HFO4
32c0232105 Fix: file preview URL in share page should not be accessed directly 2020-03-17 15:57:38 +08:00
AaronLiu
a05a3de0e1 Update: typo 2020-03-15 21:21:34 +08:00
HFO4
79f898e0a9 Update: submodule 2020-03-15 10:13:40 +08:00
HFO4
d1ca65461c Fix: incorrect capacity unit for OSS traffic limit 2020-03-15 09:56:50 +08:00
HFO4
15074015b3 Fix: file not deleted when aria2 download complete 2020-03-15 09:18:15 +08:00
HFO4
e1aced0f01 Modify: check DB version before migration 2020-03-14 18:12:26 +08:00
HFO4
75da09c339 Fix: mis-spelling 2020-03-13 11:28:03 +08:00
HFO4
7636e59dfe Update: license link 2020-03-12 19:22:07 +08:00
HFO4
e34e67648f Add: README & License 2020-03-12 19:20:40 +08:00
619 changed files with 150736 additions and 29146 deletions

View File

@@ -0,0 +1,7 @@
[supervisord]
; Daemonize supervisord: the container entrypoint starts it once and then
; launches the main cloudreve process itself, so supervisord must background.
nodaemon=false
[program:background_process]
; aria2 download daemon with its JSON-RPC interface enabled so cloudreve can
; submit offline-download tasks to it.
; NOTE(review): aria2c's --save-session normally expects a session *file*
; path; /cloudreve/data looks like a directory -- confirm this is intended.
command=aria2c --enable-rpc --save-session /cloudreve/data
autostart=true
autorestart=true

15
.build/build-assets.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Build the frontend assets and pack them into the Go application's statics.
#
# Usage: build-assets.sh <version>
#   <version>  version string stamped into assets/package.json before building.
set -e

# Fail fast with a clear message instead of letting `yarn version` fail on an
# empty version string halfway through the build.
if [ -z "$1" ]; then
    echo "usage: $0 <version>" >&2
    exit 1
fi

# Allow node up to 8 GiB of heap; the production asset build is memory hungry.
export NODE_OPTIONS="--max-old-space-size=8192"

cd assets
rm -rf build
# Generous network timeout: registry downloads can be slow in CI.
yarn install --network-timeout 1000000
# Quote "$1" to prevent word splitting/globbing of the version argument.
yarn version --new-version "$1" --no-git-tag-version
yarn run build

# Zip the build output and place the archive where the application embeds it.
cd ../
zip -r - assets/build >assets.zip
mv assets.zip application/statics

2
.build/entrypoint.sh Executable file
View File

@@ -0,0 +1,2 @@
#!/bin/sh
# Container entrypoint.
#
# Start supervisord (which daemonizes and manages the aria2 RPC service per
# aria2.supervisor.conf), then hand control over to cloudreve itself.
supervisord -c ./aria2.supervisor.conf
# exec replaces this shell with cloudreve so it runs as the container's main
# process and receives signals (e.g. SIGTERM from `docker stop`) directly.
exec ./cloudreve

19
.github/DISCUSSION_TEMPLATE/general.yml vendored Normal file
View File

@@ -0,0 +1,19 @@
title: "General Discussion"
body:
- type: checkboxes
attributes:
label: Self Checks
description: "To make sure we get to you in time, please check the following :)"
options:
- label: I have [searched for existing issues](https://github.com/cloudreve/cloudreve/issues), including closed ones.
required: true
- label: I confirm that I am using English to submit this report, otherwise it will be closed. / 请使用英语提交,否则会被关闭。
required: true
- label: "Please do not modify this template :) and fill in all the required fields."
required: true
- type: textarea
attributes:
label: Content
placeholder: Please describe the content you would like to discuss.
validations:
required: true

35
.github/DISCUSSION_TEMPLATE/ideas.yml vendored Normal file
View File

@@ -0,0 +1,35 @@
title: Suggestions for New Features
body:
- type: checkboxes
attributes:
label: Self Checks
description: "To make sure we get to you in time, please check the following :)"
options:
- label: I have [searched for existing issues](https://github.com/cloudreve/cloudreve/issues), including closed ones.
required: true
- label: I confirm that I am using English to submit this report, otherwise it will be closed. / 请使用英语提交,否则会被关闭。
required: true
- label: "Please do not modify this template :) and fill in all the required fields."
required: true
- type: textarea
attributes:
label: 1. Is this request related to a challenge you're experiencing? Tell me about your story.
placeholder: Please describe the specific scenario or problem you're facing as clearly as possible. For instance "I was trying to use [feature] for [specific task], and [what happened]... It was frustrating because...."
validations:
required: true
- type: textarea
attributes:
label: 2. Additional context or comments
placeholder: (Any other information, comments, documentations, links, or screenshots that would provide more clarity. This is the place to add anything else not covered above.)
validations:
required: false
- type: checkboxes
attributes:
label: 3. Can you help us with this feature?
description: Let us know! This is not a commitment, but a starting point for collaboration.
options:
- label: I am interested in contributing to this feature.
required: false
- type: markdown
attributes:
value: Please limit one request per issue.

28
.github/DISCUSSION_TEMPLATE/q-a.yml vendored Normal file
View File

@@ -0,0 +1,28 @@
title: "Q&A"
body:
- type: checkboxes
attributes:
label: Self Checks
description: "To make sure we get to you in time, please check the following :)"
options:
- label: I have [searched for existing issues](https://github.com/cloudreve/cloudreve/issues), including closed ones.
required: true
- label: I confirm that I am using English to submit this report, otherwise it will be closed. / 请使用英语提交,否则会被关闭。
required: true
- label: "Please do not modify this template :) and fill in all the required fields."
required: true
- type: textarea
attributes:
label: 1. Is this request related to a challenge you're experiencing? Tell me about your story.
placeholder: Please describe the specific scenario or problem you're facing as clearly as possible. For instance "I was trying to use [feature] for [specific task], and [what happened]... It was frustrating because...."
validations:
required: true
- type: textarea
attributes:
label: 2. Additional context or comments
placeholder: (Any other information, comments, documentations, links, or screenshots that would provide more clarity. This is the place to add anything else not covered above.)
validations:
required: false
- type: markdown
attributes:
value: Please limit one request per issue.

1
.github/FUNDING.yml vendored Normal file
View File

@@ -0,0 +1 @@
custom: ["https://cloudreve.org/pricing"]

91
.github/ISSUE_TEMPLATE/bug_report.yml vendored Normal file
View File

@@ -0,0 +1,91 @@
name: "🕷️ Bug report"
description: Report errors or unexpected behavior
labels:
- bug
body:
- type: checkboxes
attributes:
label: Self Checks
description: "To make sure we get to you in time, please check the following :)"
options:
- label: I have read the [Contributing Guide](https://docs.cloudreve.org/api/contributing) and [Language Policy](https://github.com/cloudreve/cloudreve/discussions/3335).
required: true
- label: This is only for bug report, if you would like to ask a question, please head to [Discussions](https://github.com/cloudreve/cloudreve/discussions).
required: true
- label: I have [searched for existing issues](https://github.com/cloudreve/cloudreve/issues), including closed ones.
required: true
- label: I confirm that I am using English to submit this report, otherwise it will be closed. / 请使用英语提交,否则会被关闭。
required: true
- label: "Please do not modify this template :) and fill in all the required fields."
required: true
- type: input
attributes:
label: Cloudreve version
description: e.g. 4.14.0
validations:
required: true
- type: dropdown
attributes:
label: Pro or Community Edition
description: What version of Cloudreve are you using?
multiple: true
options:
- Pro
- Community (Open Source)
validations:
required: true
- type: dropdown
attributes:
label: Database type
description: What database are you using?
multiple: true
options:
- MySQL
- PostgreSQL
- SQLite
- I don't know
validations:
required: true
- type: input
attributes:
label: Browser and operating system
description: What browser and operating system are you using?
placeholder: E.g. Chrome 123.0.0 on macOS 14.0.0
validations:
required: false
- type: textarea
attributes:
label: Steps to reproduce
description: We highly suggest including screenshots and a bug report log. Please use the right markdown syntax for code blocks.
placeholder: Having detailed steps helps us reproduce the bug. If you have logs, please use fenced code blocks (triple backticks ```) to format them.
validations:
required: true
- type: textarea
attributes:
label: ✔️ Expected Behavior
description: Describe what you expected to happen.
placeholder: What were you expecting? Please do not copy and paste the steps to reproduce here.
validations:
required: true
- type: textarea
attributes:
label: ❌ Actual Behavior
description: Describe what actually happened.
placeholder: What happened instead? Please do not copy and paste the steps to reproduce here.
validations:
required: false
- type: input
attributes:
label: Additional context information
description: Provide any additional context information that might be helpful.
placeholder: Any additional information that might be helpful.
validations:
required: false

14
.github/ISSUE_TEMPLATE/config.yml vendored Normal file
View File

@@ -0,0 +1,14 @@
blank_issues_enabled: false
contact_links:
- name: "\U0001F4F1 iOS App related issues"
url: "https://github.com/cloudreve/ios-feedback/issues/new"
about: Report issues related to the official iOS/iPadOS client.
- name: "\U0001F5A5 Desktop client related issues"
url: "https://github.com/cloudreve/desktop/issues/new"
about: Report issues related to the official desktop client.
- name: "\U0001F4AC Documentation Issues"
url: "https://github.com/cloudreve/docs/issues/new"
about: Report issues with the documentation, such as typos, outdated information, or missing content. Please provide the specific section and details of the issue.
- name: "\U0001F4E7 Discussions"
url: https://github.com/cloudreve/cloudreve/discussions
about: General discussions and a place to seek help from the community

View File

@@ -0,0 +1,40 @@
name: "⭐ Feature or enhancement request"
description: Propose something new.
labels:
- enhancement
body:
- type: checkboxes
attributes:
label: Self Checks
description: "To make sure we get to you in time, please check the following :)"
options:
- label: I have read the [Contributing Guide](https://docs.cloudreve.org/api/contributing) and [Language Policy](https://github.com/cloudreve/cloudreve/discussions/3335).
required: true
- label: I have [searched for existing issues](https://github.com/cloudreve/cloudreve/issues), including closed ones.
required: true
- label: I confirm that I am using English to submit this report, otherwise it will be closed. / 请使用英语提交,否则会被关闭。
required: true
- label: "Please do not modify this template :) and fill in all the required fields."
required: true
- type: textarea
attributes:
label: 1. Is this request related to a challenge you're experiencing? Tell us about your story.
placeholder: Please describe the specific scenario or problem you're facing as clearly as possible. For instance "I was trying to use [feature] for [specific task], and [what happened]... It was frustrating because...."
validations:
required: true
- type: textarea
attributes:
label: 2. Additional context or comments
placeholder: (Any other information, comments, documentations, links, or screenshots that would provide more clarity. This is the place to add anything else not covered above.)
validations:
required: false
- type: checkboxes
attributes:
label: 3. Can you help us with this feature?
description: Let us know! This is not a commitment, but a starting point for collaboration.
options:
- label: I am interested in contributing to this feature.
required: false
- type: markdown
attributes:
value: Please limit one request per issue.

61
.github/stale.yml vendored Normal file
View File

@@ -0,0 +1,61 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 360
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 30
# Only issues or pull requests with all of these labels are checked for staleness. Defaults to `[]` (disabled)
onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- pinned
- security
- "[Status] Maybe Later"
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: true
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: true
# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: true
# Label to use when marking as stale
staleLabel: wontfix
# Comment to post when marking as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when removing the stale label.
# unmarkComment: >
# Your comment here.
# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
# Your comment here.
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30
# Limit to only `issues` or `pulls`
# only: issues
# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
# daysUntilStale: 30
# markComment: >
# This pull request has been automatically marked as stale because it has not had
# recent activity. It will be closed if no further activity occurs. Thank you
# for your contributions.
# issues:
# exemptLabels:
# - confirmed

8
.gitignore vendored
View File

@@ -7,6 +7,7 @@
*.db
*.bin
/release/
application/statics/assets.zip
# Test binary, build with `go test -c`
*.test
@@ -26,3 +27,10 @@ version.lock
*.ini
conf/conf.ini
/statik/
.vscode/
dist/
data/
tmp/
.devcontainer/
cloudreve

118
.goreleaser.yaml Normal file
View File

@@ -0,0 +1,118 @@
version: 2
before:
hooks:
- go mod tidy
- chmod +x ./.build/build-assets.sh
- ./.build/build-assets.sh {{.Version}}
builds:
- env:
- CGO_ENABLED=0
binary: cloudreve
ldflags:
- -s -w
- -X 'github.com/cloudreve/Cloudreve/v4/application/constants.BackendVersion={{.Tag}}' -X 'github.com/cloudreve/Cloudreve/v4/application/constants.LastCommit={{.ShortCommit}}'
goos:
- linux
- windows
- darwin
- freebsd
goarch:
- amd64
- arm
- arm64
- loong64
goarm:
- 5
- 6
- 7
ignore:
- goos: windows
goarm: 5
- goos: windows
goarm: 6
- goos: windows
goarm: 7
- goos: windows
goarch: loong64
- goos: freebsd
goarch: loong64
- goos: freebsd
goarch: arm
archives:
- formats: ["tar.gz"]
# this name template makes the OS and Arch compatible with the results of uname.
name_template: >-
cloudreve_{{.Tag}}_{{- .Os }}_{{ .Arch }}
{{- if .Arm }}v{{ .Arm }}{{ end }}
# use zip for windows archives
format_overrides:
- goos: windows
formats: ["zip"]
checksum:
name_template: "checksums.txt"
snapshot:
version_template: "{{ incpatch .Version }}-next"
changelog:
sort: asc
filters:
exclude:
- "^docs:"
- "^test:"
release:
draft: true
prerelease: auto
target_commitish: "{{ .Commit }}"
name_template: "{{.Version}}"
dockers:
- dockerfile: Dockerfile
use: buildx
build_flag_templates:
- "--platform=linux/amd64"
- "--provenance=false"
goos: linux
goarch: amd64
goamd64: v1
extra_files:
- .build/aria2.supervisor.conf
- .build/entrypoint.sh
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-amd64"
- dockerfile: Dockerfile
use: buildx
build_flag_templates:
- "--platform=linux/arm64"
- "--provenance=false"
goos: linux
goarch: arm64
extra_files:
- .build/aria2.supervisor.conf
- .build/entrypoint.sh
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-arm64"
docker_manifests:
- name_template: "cloudreve/cloudreve:latest"
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-amd64"
- "cloudreve/cloudreve:{{ .Tag }}-arm64"
- name_template: "cloudreve/cloudreve:v4"
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-amd64"
- "cloudreve/cloudreve:{{ .Tag }}-arm64"
- name_template: "cloudreve/cloudreve:{{ .Tag }}"
image_templates:
- "cloudreve/cloudreve:{{ .Tag }}-amd64"
- "cloudreve/cloudreve:{{ .Tag }}-arm64"

View File

@@ -1,28 +0,0 @@
language: go
go:
- 1.13.x
git:
depth: 1
install:
- go get github.com/rakyll/statik
before_script:
- statik -src=models -f
script:
- go test -coverprofile=coverage.txt -covermode=atomic ./...
after_success:
- bash <(curl -s https://codecov.io/bash)
before_deploy:
- sudo apt-get update
- sudo apt-get -y install gcc-mingw-w64-x86-64
- sudo apt-get -y install gcc-arm-linux-gnueabihf libc6-dev-armhf-cross
- chmod +x ./build.sh
- ./build.sh -r b
deploy:
provider: releases
api_key: $GITHUB_TOKEN
file_glob: true
file: release/*
draft: true
skip_cleanup: true
on:
tags: true

30
Dockerfile Normal file
View File

@@ -0,0 +1,30 @@
FROM alpine:latest
WORKDIR /cloudreve
RUN apk update \
&& apk add --no-cache tzdata vips-tools ffmpeg libreoffice aria2 supervisor font-noto font-noto-cjk libheif libraw-tools\
&& cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
&& echo "Asia/Shanghai" > /etc/timezone \
&& mkdir -p ./data/temp/aria2 \
&& chmod -R 766 ./data/temp/aria2
ENV CR_ENABLE_ARIA2=1 \
CR_SETTING_DEFAULT_thumb_ffmpeg_enabled=1 \
CR_SETTING_DEFAULT_thumb_vips_enabled=1 \
CR_SETTING_DEFAULT_thumb_libreoffice_enabled=1 \
CR_SETTING_DEFAULT_media_meta_ffprobe=1 \
CR_SETTING_DEFAULT_thumb_libraw_enabled=1
COPY .build/aria2.supervisor.conf .build/entrypoint.sh ./
COPY cloudreve ./cloudreve
RUN chmod +x ./cloudreve \
&& chmod +x ./entrypoint.sh
EXPOSE 5212 443
VOLUME ["/cloudreve/data"]
ENTRYPOINT ["sh", "./entrypoint.sh"]

674
LICENSE Normal file
View File

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

View File

@@ -1,2 +1,76 @@
# Backend-V3 [![Build Status](https://travis-ci.com/HFO4/Backend-V3.svg?token=oj9m4tLKnqXfpizaq19A&branch=master)](https://travis-ci.com/HFO4/Backend-V3) [![codecov](https://codecov.io/gh/HFO4/Backend-V3/branch/master/graph/badge.svg?token=R6MIuXEO8P)](https://codecov.io/gh/HFO4/Backend-V3)
Still in development
[中文版本](https://github.com/cloudreve/cloudreve/blob/master/README_zh-CN.md)
<h1 align="center">
<br>
<a href="https://cloudreve.org/" alt="logo" ><img src="https://raw.githubusercontent.com/cloudreve/frontend/master/public/static/img/logo192.png" width="150"/></a>
<br>
Cloudreve
<br>
</h1>
<h4 align="center">Self-hosted file management system with multi-cloud support.</h4>
<p align="center">
<a href="https://dev.azure.com/abslantliu/cloudreve/_build?definitionId=6">
<img src="https://img.shields.io/github/check-runs/cloudreve/cloudreve/master"
alt="Azure pipelines">
</a>
<a href="https://github.com/cloudreve/cloudreve/releases">
<img src="https://img.shields.io/github/v/release/cloudreve/cloudreve?include_prereleases" />
</a>
<a href="https://github.com/cloudreve/cloudreve/releases">
<img src="https://badgen.net/static/release%20size/34%20MB/blue"/>
</a>
<a href="https://hub.docker.com/r/cloudreve/cloudreve">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/cloudreve/cloudreve" />
</a>
</p>
<p align="center">
<a href="https://cloudreve.org">Homepage</a> •
<a href="https://demo.cloudreve.org">Try it</a> •
<a href="https://github.com/cloudreve/cloudreve/discussions">Discussion</a> •
<a href="https://docs.cloudreve.org">Documents</a> •
<a href="https://github.com/cloudreve/cloudreve/releases">Download</a> •
<a href="https://t.me/cloudreve_official">Telegram</a> •
<a href="https://discord.com/invite/WTpMFpZT76">Discord</a>
</p>
![Screenshot](https://raw.githubusercontent.com/cloudreve/docs/master/images/homepage.png)
## :sparkles: Features
- :cloud: Support storing files into Local, Remote node, OneDrive, S3 compatible API, Qiniu Kodo, Aliyun OSS, Tencent COS, Huawei Cloud OBS, Kingsoft Cloud KS3, Upyun.
- :outbox_tray: Upload/Download in directly transmission from client to storage providers.
- 💾 Integrate with Aria2/qBittorrent to download files in background, use multiple download nodes to share the load.
- 📚 Compress/Extract/Preview archived files, download files in batch.
- 💻 WebDAV support covering all storage providers.
- :zap:Drag&Drop to upload files or folders, with parallel resumable upload support.
- :card_file_box: Extract media metadata from files, search files by metadata or tags.
- :family_woman_girl_boy: Multi-users with multi-groups.
- :link: Create share links for files and folders with expiration date.
- :eye_speech_bubble: Preview videos, images, audios, ePub files online; edit texts, diagrams, Markdown, images, Office documents online.
- :art: Customize theme colors, dark mode, PWA application, SPA, i18n.
- :rocket: All-in-one packaging, with all features out of the box.
- 🌈 ... ...
## :hammer_and_wrench: Deploy
To deploy Cloudreve, you can refer to [Getting started](https://docs.cloudreve.org/overview/quickstart) for a quick local deployment to test.
When you're ready to deploy Cloudreve to a production environment, you can refer to [Deploy](https://docs.cloudreve.org/overview/deploy/) for a complete deployment.
## :gear: Build
Please refer to [Build](https://docs.cloudreve.org/overview/build/) for how to build Cloudreve from source code.
## :rocket: Contributing
If you're interested in contributing to Cloudreve, please refer to [Contributing](https://docs.cloudreve.org/api/contributing/) for how to contribute to Cloudreve.
## :alembic: Stacks
- [Go](https://golang.org/) + [Gin](https://github.com/gin-gonic/gin) + [ent](https://github.com/ent/ent)
- [React](https://github.com/facebook/react) + [Redux](https://github.com/reduxjs/redux) + [Material-UI](https://github.com/mui-org/material-ui)
## :scroll: License
GPL V3

77
README_zh-CN.md Normal file
View File

@@ -0,0 +1,77 @@
[English Version](https://github.com/cloudreve/cloudreve/blob/master/README.md)
<h1 align="center">
<br>
<a href="https://cloudreve.org/" alt="logo" ><img src="https://raw.githubusercontent.com/cloudreve/frontend/master/public/static/img/logo192.png" width="150"/></a>
<br>
Cloudreve
<br>
</h1>
<h4 align="center">支持多家云存储驱动的公有云文件系统.</h4>
<p align="center">
<a href="https://dev.azure.com/abslantliu/cloudreve/_build?definitionId=6">
<img src="https://img.shields.io/github/check-runs/cloudreve/cloudreve/master"
alt="Azure pipelines">
</a>
<a href="https://github.com/cloudreve/cloudreve/releases">
<img src="https://img.shields.io/github/v/release/cloudreve/cloudreve?include_prereleases" />
</a>
<a href="https://github.com/cloudreve/cloudreve/releases">
<img src="https://badgen.net/static/release%20size/34%20MB/blue"/>
</a>
<a href="https://hub.docker.com/r/cloudreve/cloudreve">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/cloudreve/cloudreve" />
</a>
</p>
<p align="center">
<a href="https://cloudreve.org">主页</a> •
<a href="https://demo.cloudreve.org">演示</a> •
<a href="https://github.com/cloudreve/cloudreve/discussions">讨论</a> •
<a href="https://docs.cloudreve.org">文档</a> •
<a href="https://github.com/cloudreve/cloudreve/releases">下载</a> •
<a href="https://t.me/cloudreve_official">Telegram</a> •
<a href="https://discord.com/invite/WTpMFpZT76">Discord</a>
</p>
![Screenshot](https://raw.githubusercontent.com/cloudreve/docs/master/images/homepage.png)
## :sparkles: 特性
- :cloud: 支持本机、从机、七牛 Kodo、阿里云 OSS、腾讯云 COS、华为云 OBS、金山云 KS3、又拍云、OneDrive (包括世纪互联版) 、S3 兼容协议 作为存储端
- :outbox_tray: 上传/下载 支持客户端直传,支持下载限速
- 💾 可对接 Aria2/qBittorrent 离线下载,可使用多个从机节点分担下载任务
- 📚 在线 压缩/解压缩/压缩包预览、多文件打包下载
- 💻 覆盖全部存储策略的 WebDAV 协议支持
- :zap: 拖拽上传、目录上传、并行分片上传
- :card_file_box: 提取媒体元数据,通过元数据或标签搜索文件
- :family_woman_girl_boy: 多用户、用户组、多存储策略
- :link: 创建文件、目录的分享链接,可设定自动过期
- :eye_speech_bubble: 视频、图像、音频、 ePub 在线预览文本、Office 文档在线编辑
- :art: 自定义配色、黑暗模式、PWA 应用、全站单页应用、国际化支持
- :rocket: All-in-One 打包,开箱即用
- 🌈 ... ...
## :hammer_and_wrench: 部署
你可以参考 [快速开始](https://docs.cloudreve.org/overview/quickstart) 启动一个本地实例进行体验、测试。
当你准备好将 Cloudreve 部署到生产环境时,可以参考 [部署](https://docs.cloudreve.org/overview/deploy/) 进行完整部署。
## :gear: 构建
你可以参考 [构建](https://docs.cloudreve.org/overview/build/) 从源代码构建 Cloudreve。
## :rocket: 贡献
如果你有兴趣为 Cloudreve 贡献代码,请参考 [贡献](https://docs.cloudreve.org/api/contributing/) 了解如何贡献。
## :alembic: 技术栈
- [Go](https://golang.org/) + [Gin](https://github.com/gin-gonic/gin) + [ent](https://github.com/ent/ent)
- [React](https://github.com/facebook/react) + [Redux](https://github.com/reduxjs/redux) + [Material-UI](https://github.com/mui-org/material-ui)
## :scroll: 许可证
GPL V3

12
SECURITY.md Normal file
View File

@@ -0,0 +1,12 @@
# Security Policy
## Supported Versions
* For security issues with high impact (e.g. related to payments or user permissions), we support 3.8.x and all 4.x versions. But the fix for 4.x will be released only in the latest sub-version.
* For all other security issues, we mainly support version >= 4.x (in which `x` is the latest stable sub-version).
## Reporting a Vulnerability
Please send the details about the security issue to `support@cloudreve.org`. Once the vulnerability is confirmed or fixed, you will get updates from the email thread.
We will reward you with bounty/swag for successful submission of security issues.

247
application/application.go Normal file
View File

@@ -0,0 +1,247 @@
package application
import (
"context"
"errors"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"time"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/crontab"
"github.com/cloudreve/Cloudreve/v4/pkg/email"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/onedrive"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/cloudreve/Cloudreve/v4/routers"
"github.com/gin-gonic/gin"
)
// Server is the top-level lifecycle of a Cloudreve instance: print the
// startup banner, serve traffic, and release resources on shutdown.
type Server interface {
// Start starts the Cloudreve server.
Start() error
// PrintBanner writes the startup banner (version, commit, Pro flag) to stdout.
PrintBanner()
// Close gracefully shuts down the server and its dependencies.
Close()
}
// NewServer constructs a new Cloudreve server instance with given dependency.
func NewServer(dep dependency.Dep) Server {
	srv := &server{dep: dep}
	srv.logger = dep.Logger()
	srv.config = dep.ConfigProvider()
	return srv
}
// server is the default Server implementation, wired up from a dependency
// container.
type server struct {
dep         dependency.Dep      // dependency container supplying all services
logger      logging.Logger      // application logger
dbClient    *ent.Client         // set in Start, master mode only; closed in Close
config      conf.ConfigProvider // configuration accessor
server      *http.Server        // main API server, created in Start
pprofServer *http.Server        // optional pprof server, created when configured
kv          cache.Driver        // KV cache; persisted to disk in Close
mailQueue   email.Driver        // NOTE(review): not referenced in this file — confirm usage
}
// PrintBanner writes the startup ASCII-art banner, including the backend
// version, last commit hash and Pro flag, to stdout.
func (s *server) PrintBanner() {
fmt.Print(`
___ _ _
/ __\ | ___ _ _ __| |_ __ _____ _____
/ / | |/ _ \| | | |/ _ | '__/ _ \ \ / / _ \
/ /___| | (_) | |_| | (_| | | | __/\ V / __/
\____/|_|\___/ \__,_|\__,_|_| \___| \_/ \___|
V` + constants.BackendVersion + ` Commit #` + constants.LastCommit + ` Pro=` + constants.IsPro + `
================================================
`)
}
// Start boots the server: it prepares the KV cache, initializes
// mode-specific dependencies (database, queues, cron jobs and node pool on
// master; slave queue otherwise), then blocks serving HTTP over TLS, a unix
// socket or plain TCP depending on configuration. It returns when an
// initialization step fails or the listener stops.
func (s *server) Start() error {
	// Switch gin into release mode when debug is disabled.
	if !s.config.System().Debug {
		gin.SetMode(gin.ReleaseMode)
	}

	s.kv = s.dep.KV()

	// Delete all cached settings so no stale values survive a restart.
	_ = s.kv.Delete(setting.KvSettingPrefix)

	if memKv, ok := s.kv.(*cache.MemoStore); ok {
		memKv.GarbageCollect(s.logger)
	}

	// TODO: make sure redis is connected in dep before user traffic.
	if s.config.System().Mode == conf.MasterMode {
		s.dbClient = s.dep.DBClient()
		// TODO: make sure all dep is initialized before server start.
		s.dep.LockSystem()
		s.dep.UAParser()

		// Initialize OneDrive credentials so CredManager can refresh them.
		credentials, err := onedrive.RetrieveOneDriveCredentials(context.Background(), s.dep.StoragePolicyClient())
		if err != nil {
			// Fixed typo in the original message ("faield").
			return fmt.Errorf("failed to retrieve OneDrive credentials for CredManager: %w", err)
		}

		if err := s.dep.CredManager().Upsert(context.Background(), credentials...); err != nil {
			return fmt.Errorf("failed to upsert OneDrive credentials to CredManager: %w", err)
		}

		crontab.Register(setting.CronTypeOauthCredRefresh, func(ctx context.Context) {
			dep := dependency.FromContext(ctx)
			cred := dep.CredManager()
			cred.RefreshAll(ctx)
		})

		// Initialize email queue before user traffic starts.
		_ = s.dep.EmailClient(context.Background())

		// Start all queues.
		s.dep.MediaMetaQueue(context.Background()).Start()
		s.dep.EntityRecycleQueue(context.Background()).Start()
		s.dep.IoIntenseQueue(context.Background()).Start()
		s.dep.RemoteDownloadQueue(context.Background()).Start()

		// Start cron jobs.
		c, err := crontab.NewCron(context.Background(), s.dep)
		if err != nil {
			return err
		}
		c.Start()

		// Start node pool.
		if _, err := s.dep.NodePool(context.Background()); err != nil {
			return err
		}
	} else {
		s.dep.SlaveQueue(context.Background()).Start()
	}

	s.dep.ThumbQueue(context.Background()).Start()

	api := routers.InitRouter(s.dep)
	api.TrustedPlatform = s.config.System().ProxyHeader
	s.server = &http.Server{Handler: api}

	// Start pprof server if configured.
	if pprofAddr := s.config.System().Pprof; pprofAddr != "" {
		s.pprofServer = &http.Server{
			Addr:    pprofAddr,
			Handler: http.DefaultServeMux,
		}
		go func() {
			s.logger.Info("pprof server listening on %q", pprofAddr)
			if err := s.pprofServer.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
				s.logger.Error("pprof server error: %s", err)
			}
		}()
	}

	// Serve with TLS when a certificate is configured.
	if s.config.SSL().CertPath != "" {
		s.logger.Info("Listening to %q", s.config.SSL().Listen)
		s.server.Addr = s.config.SSL().Listen
		if err := s.server.ListenAndServeTLS(s.config.SSL().CertPath, s.config.SSL().KeyPath); err != nil && !errors.Is(err, http.ErrServerClosed) {
			return fmt.Errorf("failed to listen to %q: %w", s.config.SSL().Listen, err)
		}
		return nil
	}

	// Serve over a unix socket when configured.
	if s.config.Unix().Listen != "" {
		// Delete a leftover socket file before listening.
		if _, err := os.Stat(s.config.Unix().Listen); err == nil {
			if err = os.Remove(s.config.Unix().Listen); err != nil {
				return fmt.Errorf("failed to delete socket file %q: %w", s.config.Unix().Listen, err)
			}
		}

		s.logger.Info("Listening to %q", s.config.Unix().Listen)
		if err := s.runUnix(s.server); err != nil && !errors.Is(err, http.ErrServerClosed) {
			return fmt.Errorf("failed to listen to %q: %w", s.config.Unix().Listen, err)
		}
		return nil
	}

	// Default: plain TCP.
	s.logger.Info("Listening to %q", s.config.System().Listen)
	s.server.Addr = s.config.System().Listen
	if err := s.server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
		return fmt.Errorf("failed to listen to %q: %w", s.config.System().Listen, err)
	}
	return nil
}
// Close gracefully shuts down the server: it closes the database connection
// and the event hub, shuts down the HTTP and pprof servers, persists the KV
// cache to disk, and finally shuts down the dependency manager. When a grace
// period is configured, the shutdown calls share a deadline of that many
// seconds.
func (s *server) Close() {
	if s.dbClient != nil {
		s.logger.Info("Shutting down database connection...")
		if err := s.dbClient.Close(); err != nil {
			s.logger.Error("Failed to close database connection: %s", err)
		}
	}

	ctx := context.Background()
	// Read GracePeriod through the config provider for both the check and
	// the duration (the original checked the global conf.SystemConfig but
	// read the value from s.config — the same setting via two paths).
	if s.config.System().GracePeriod != 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, time.Duration(s.config.System().GracePeriod)*time.Second)
		defer cancel()
	}

	s.dep.EventHub().Close()

	// Shutdown the main HTTP server.
	if s.server != nil {
		if err := s.server.Shutdown(ctx); err != nil {
			s.logger.Error("Failed to shutdown server: %s", err)
		}
	}

	// Shutdown the pprof server.
	if s.pprofServer != nil {
		if err := s.pprofServer.Shutdown(ctx); err != nil {
			s.logger.Error("Failed to shutdown pprof server: %s", err)
		}
	}

	// Persist the in-memory cache so it can be restored on the next boot.
	if s.kv != nil {
		if err := s.kv.Persist(util.DataPath(cache.DefaultCacheFile)); err != nil {
			s.logger.Warning("Failed to persist cache: %s", err)
		}
	}

	if err := s.dep.Shutdown(ctx); err != nil {
		s.logger.Warning("Failed to shutdown dependency manager: %s", err)
	}
}
// runUnix serves HTTP over a unix domain socket, applying the configured
// file permission to the socket file and removing it when the server stops.
func (s *server) runUnix(server *http.Server) error {
	listener, err := net.Listen("unix", s.config.Unix().Listen)
	if err != nil {
		return err
	}
	defer listener.Close()
	defer os.Remove(s.config.Unix().Listen)

	// Read the permission through the config provider (the original mixed
	// the conf.UnixConfig global with s.config.Unix() for the same values).
	if s.config.Unix().Perm > 0 {
		err = os.Chmod(s.config.Unix().Listen, os.FileMode(s.config.Unix().Perm))
		if err != nil {
			// %o: Perm is a uint32 permission; the original %q on a uint32
			// rendered as "%!q(uint32=...)".
			s.logger.Warning(
				"Failed to set permission to %o for socket file %q: %s",
				s.config.Unix().Perm,
				s.config.Unix().Listen,
				err,
			)
		}
	}

	return server.Serve(listener)
}

View File

@@ -0,0 +1,34 @@
package constants
// These values will be injected at build time, DO NOT EDIT.

// BackendVersion is the current backend version number.
var BackendVersion = "4.14.0"

// IsPro is "true" when this build is the Pro edition, "false" otherwise.
var IsPro = "false"

// IsProBool is the boolean form of IsPro, evaluated once at package init.
var IsProBool = IsPro == "true"

// LastCommit is the commit id of this build (overwritten at build time).
var LastCommit = "000000"

const (
// APIPrefix is the URL prefix shared by all v4 API routes.
APIPrefix = "/api/v4"
// APIPrefixSlave is the URL prefix for slave-node API routes.
APIPrefixSlave = "/api/v4/slave"
// CrHeaderPrefix is the prefix of Cloudreve-specific HTTP headers.
CrHeaderPrefix = "X-Cr-"
)

// CloudreveScheme is the "cloudreve" URI scheme identifier.
const CloudreveScheme = "cloudreve"

type (
// FileSystemType names a logical file system (see constants below).
FileSystemType string
)

// Known file system types. FileSystemUnknown is the fallback value.
const (
FileSystemMy = FileSystemType("my")
FileSystemShare = FileSystemType("share")
FileSystemTrash = FileSystemType("trash")
FileSystemSharedWithMe = FileSystemType("shared_with_me")
FileSystemUnknown = FileSystemType("unknown")
)

View File

@@ -0,0 +1,8 @@
package constants
// Byte size units as powers of two (binary units).
const (
MB = 1 << 20 // mebibyte
GB = 1 << 30 // gibibyte
TB = 1 << 40 // tebibyte
PB = 1 << 50 // pebibyte
)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,175 @@
package dependency
import (
"io/fs"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/email"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/searcher"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/gin-contrib/static"
)
// Option mutates a dependency container during construction.
type Option interface {
	apply(*dependency)
}

// optionFunc adapts a plain function into an Option.
type optionFunc func(*dependency)

func (f optionFunc) apply(o *dependency) {
	f(o)
}

// WithConfigPath sets the path of the config file.
func WithConfigPath(p string) Option {
	return optionFunc(func(d *dependency) { d.configPath = p })
}

// WithLogger sets the default logger.
func WithLogger(l logging.Logger) Option {
	return optionFunc(func(d *dependency) { d.logger = l })
}

// WithConfigProvider sets the default config provider.
func WithConfigProvider(c conf.ConfigProvider) Option {
	return optionFunc(func(d *dependency) { d.configProvider = c })
}

// WithStatics sets the default statics FS.
func WithStatics(c fs.FS) Option {
	return optionFunc(func(d *dependency) { d.statics = c })
}

// WithServerStaticFS sets the default statics FS for the server.
func WithServerStaticFS(c static.ServeFileSystem) Option {
	return optionFunc(func(d *dependency) { d.serverStaticFS = c })
}

// WithProFlag marks whether the current instance is a Pro build.
func WithProFlag(c bool) Option {
	return optionFunc(func(d *dependency) { d.isPro = c })
}

// WithRawEntClient sets the default raw ent client.
func WithRawEntClient(c *ent.Client) Option {
	return optionFunc(func(d *dependency) { d.rawEntClient = c })
}

// WithDbClient sets the default ent client.
func WithDbClient(c *ent.Client) Option {
	return optionFunc(func(d *dependency) { d.dbClient = c })
}

// WithRequiredDbVersion sets the required db version.
func WithRequiredDbVersion(c string) Option {
	return optionFunc(func(d *dependency) { d.requiredDbVersion = c })
}

// WithKV sets the default KV store driver.
func WithKV(c cache.Driver) Option {
	return optionFunc(func(d *dependency) { d.kv = c })
}

// WithSettingClient sets the default setting client.
func WithSettingClient(s inventory.SettingClient) Option {
	return optionFunc(func(d *dependency) { d.settingClient = s })
}

// WithSettingProvider sets the default setting provider.
func WithSettingProvider(s setting.Provider) Option {
	return optionFunc(func(d *dependency) { d.settingProvider = s })
}

// WithUserClient sets the default user client.
func WithUserClient(s inventory.UserClient) Option {
	return optionFunc(func(d *dependency) { d.userClient = s })
}

// WithEmailClient sets the default email client.
func WithEmailClient(s email.Driver) Option {
	return optionFunc(func(d *dependency) { d.emailClient = s })
}

// WithGeneralAuth sets the default general auth.
func WithGeneralAuth(s auth.Auth) Option {
	return optionFunc(func(d *dependency) { d.generalAuth = s })
}

// WithHashIDEncoder sets the default hash id encoder.
func WithHashIDEncoder(s hashid.Encoder) Option {
	return optionFunc(func(d *dependency) { d.hashidEncoder = s })
}

// WithTokenAuth sets the default token auth.
func WithTokenAuth(s auth.TokenAuth) Option {
	return optionFunc(func(d *dependency) { d.tokenAuth = s })
}

// WithFileClient sets the default file client.
func WithFileClient(s inventory.FileClient) Option {
	return optionFunc(func(d *dependency) { d.fileClient = s })
}

// WithShareClient sets the default share client.
func WithShareClient(s inventory.ShareClient) Option {
	return optionFunc(func(d *dependency) { d.shareClient = s })
}

// WithSearchIndexer sets the default search indexer.
func WithSearchIndexer(s searcher.SearchIndexer) Option {
	return optionFunc(func(d *dependency) { d.searchIndexer = s })
}

// WithTextExtractor sets the default text extractor.
func WithTextExtractor(s searcher.TextExtractor) Option {
	return optionFunc(func(d *dependency) { d.textExtractor = s })
}

View File

@@ -0,0 +1,47 @@
package migrator
import (
"fmt"
"io"
"os"
"path/filepath"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
)
// migrateAvatars copies v3 avatar files into the v4 avatar directory for
// every migrated user. Users without an avatar file are skipped silently;
// a failed copy is logged as a warning and migration continues.
func migrateAvatars(m *Migrator) error {
	m.l.Info("Migrating avatars files...")
	avatarRoot := util.RelativePath(m.state.V3AvatarPath)
	for uid := range m.state.UserIDs {
		avatarPath := filepath.Join(avatarRoot, fmt.Sprintf("avatar_%d_2.png", uid))
		// Skip users that have no avatar file on disk.
		if !util.Exists(avatarPath) {
			continue
		}

		m.l.Info("Migrating avatar for user %d", uid)
		v4Path := filepath.Join(util.DataPath("avatar"), fmt.Sprintf("avatar_%d.png", uid))
		// Copy in a helper so both file handles are closed per iteration;
		// the original deferred Close inside the loop, leaking descriptors
		// until the whole migration finished.
		if err := copyAvatarFile(m, avatarPath, v4Path); err != nil {
			return err
		}
	}
	return nil
}

// copyAvatarFile copies a single avatar from src to dst, creating parent
// directories as needed. A copy error is logged and tolerated; open/create
// errors abort the migration.
func copyAvatarFile(m *Migrator, src, dst string) error {
	origin, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("failed to open avatar file: %w", err)
	}
	defer origin.Close()

	dest, err := util.CreatNestedFile(dst)
	if err != nil {
		return fmt.Errorf("failed to create avatar file: %w", err)
	}
	defer dest.Close()

	if _, err := io.Copy(dest, origin); err != nil {
		m.l.Warning("Failed to copy avatar file: %s, skipping...", err)
	}
	return nil
}

View File

@@ -0,0 +1,124 @@
package conf
import (
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/go-ini/ini"
"github.com/go-playground/validator/v10"
)
// database holds the [Database] section of the config file.
type database struct {
Type string // database driver type
User string
Password string
Host string
Name string
TablePrefix string
DBFile string // file path for file-based databases (e.g. sqlite)
Port int
Charset string
UnixSocket bool // connect via unix socket instead of TCP
}

// system holds the [System] section: general server settings.
type system struct {
Mode string `validate:"eq=master|eq=slave"`
Listen string `validate:"required"`
Debug bool
SessionSecret string
HashIDSalt string
GracePeriod int `validate:"gte=0"` // graceful-shutdown timeout in seconds
ProxyHeader string // trusted proxy header for client IP
}

// ssl holds the [SSL] section: TLS listener settings.
type ssl struct {
CertPath string `validate:"omitempty,required"`
KeyPath string `validate:"omitempty,required"`
Listen string `validate:"required"`
}

// unix holds the [UnixSocket] section: unix domain socket listener settings.
type unix struct {
Listen string
Perm uint32 // socket file permission bits
}

// slave holds the [Slave] section: settings used when running as a slave
// storage node.
type slave struct {
Secret string `validate:"omitempty,gte=64"`
CallbackTimeout int `validate:"omitempty,gte=1"` // seconds
SignatureTTL int `validate:"omitempty,gte=1"` // seconds
}

// redis holds the [Redis] section: Redis connection settings.
type redis struct {
Network string
Server string
User string
Password string
DB string
}

// cors holds the [CORS] section: cross-origin request settings.
type cors struct {
AllowOrigins []string
AllowMethods []string
AllowHeaders []string
AllowCredentials bool
ExposeHeaders []string
SameSite string
Secure bool
}

// cfg is the parsed ini file, populated by Init.
var cfg *ini.File
// Init parses the v3 configuration file at path, maps each known section onto
// its package-level config struct, validates the result, and collects the raw
// [OptionOverwrite] keys. Errors are logged through l and returned.
func Init(l logging.Logger, path string) error {
	var err error
	cfg, err = ini.Load(path)
	if err != nil {
		l.Error("Failed to parse config file %q: %s", path, err)
		return err
	}
	sectionTargets := map[string]interface{}{
		"Database":   DatabaseConfig,
		"System":     SystemConfig,
		"SSL":        SSLConfig,
		"UnixSocket": UnixConfig,
		"Redis":      RedisConfig,
		"CORS":       CORSConfig,
		"Slave":      SlaveConfig,
	}
	for name, target := range sectionTargets {
		if err := mapSection(name, target); err != nil {
			l.Error("Failed to parse config section %q: %s", name, err)
			return err
		}
	}
	// Collect raw key/value pairs that overwrite database-stored options.
	for _, key := range cfg.Section("OptionOverwrite").Keys() {
		OptionOverwrite[key.Name()] = key.Value()
	}
	return nil
}
// mapSection maps the named ini section onto confStruct and validates the
// populated struct against its `validate` tags.
func mapSection(section string, confStruct interface{}) error {
	if err := cfg.Section(section).MapTo(confStruct); err != nil {
		return err
	}
	// Validate the mapped values.
	return validator.New().Struct(confStruct)
}

View File

@@ -0,0 +1,55 @@
package conf
// RedisConfig holds the Redis server connection settings.
var RedisConfig = &redis{
	Network:  "tcp",
	Server:   "",
	Password: "",
	DB:       "0",
}

// DatabaseConfig holds the database connection settings.
// Type "UNSET" means no database type was configured explicitly.
var DatabaseConfig = &database{
	Type:       "UNSET",
	Charset:    "utf8",
	DBFile:     "cloudreve.db",
	Port:       3306,
	UnixSocket: false,
}

// SystemConfig holds the general server settings.
var SystemConfig = &system{
	Debug:       false,
	Mode:        "master",
	Listen:      ":5212",
	ProxyHeader: "",
}

// CORSConfig holds the cross-origin settings.
// AllowOrigins "UNSET" means CORS was not configured.
var CORSConfig = &cors{
	AllowOrigins:     []string{"UNSET"},
	AllowMethods:     []string{"PUT", "POST", "GET", "OPTIONS"},
	AllowHeaders:     []string{"Cookie", "X-Cr-Policy", "Authorization", "Content-Length", "Content-Type", "X-Cr-Path", "X-Cr-FileName"},
	AllowCredentials: false,
	ExposeHeaders:    nil,
	SameSite:         "Default",
	Secure:           false,
}

// SlaveConfig holds the storage-slave settings (timeouts in seconds).
var SlaveConfig = &slave{
	CallbackTimeout: 20,
	SignatureTTL:    60,
}

// SSLConfig holds the HTTPS listener settings.
var SSLConfig = &ssl{
	Listen:   ":443",
	CertPath: "",
	KeyPath:  "",
}

// UnixConfig holds the unix-socket listener settings.
var UnixConfig = &unix{
	Listen: "",
}

// OptionOverwrite collects raw [OptionOverwrite] keys that override
// database-stored options.
var OptionOverwrite = map[string]interface{}{}

View File

@@ -0,0 +1,82 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
)
// migrateDirectLink copies v3 source links into the v4 direct_links table in
// batches of 1000, resuming from the offset saved in the migration state.
// v3 file IDs are shifted by LastFolderID because v4 stores files and folders
// in one table; links whose target file was not migrated are skipped with a
// warning. Returns an error on the first failed batch.
func (m *Migrator) migrateDirectLink() error {
	m.l.Info("Migrating direct links...")
	batchSize := 1000
	offset := m.state.DirectLinkOffset
	ctx := context.Background()
	if m.state.DirectLinkOffset > 0 {
		m.l.Info("Resuming direct link migration from offset %d", offset)
	}
	for {
		m.l.Info("Migrating direct links with offset %d", offset)
		var directLinks []model.SourceLink
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&directLinks).Error; err != nil {
			return fmt.Errorf("failed to list v3 direct links: %w", err)
		}
		if len(directLinks) == 0 {
			if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
				// Inserting explicit IDs does not advance Postgres sequences,
				// so bump the sequence past the largest migrated ID.
				m.l.Info("Resetting direct link ID sequence for postgres...")
				m.v4client.DirectLink.ExecContext(ctx, "SELECT SETVAL('direct_links_id_seq', (SELECT MAX(id) FROM direct_links))")
			}
			break
		}
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// Do not call tx.Rollback() here: tx is nil when starting the
			// transaction fails, and rolling it back would panic.
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		for _, dl := range directLinks {
			sourceId := int(dl.FileID) + m.state.LastFolderID
			// Skip links whose target file was not migrated.
			_, err = tx.File.Query().Where(file.ID(sourceId)).First(ctx)
			if err != nil {
				m.l.Warning("File %d not found, skipping direct link %d", sourceId, dl.ID)
				continue
			}
			stm := tx.DirectLink.Create().
				SetCreatedAt(formatTime(dl.CreatedAt)).
				SetUpdatedAt(formatTime(dl.UpdatedAt)).
				SetRawID(int(dl.ID)).
				SetFileID(sourceId).
				SetName(dl.Name).
				SetDownloads(dl.Downloads).
				SetSpeed(0)
			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to create direct link %d: %w", dl.ID, err)
			}
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		offset += batchSize
		m.state.DirectLinkOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after direct link batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing this batch")
		}
	}
	return nil
}

View File

@@ -0,0 +1,189 @@
package migrator
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
)
// migrateFile copies v3 file rows into the v4 files table in batches of 1000,
// resuming from the offset saved in the migration state. Each file gets a
// version entity (and a thumbnail entity when v3 metadata records one); rows
// whose folder, owner or policy was not migrated are skipped. Raw file IDs are
// shifted by LastFolderID because v4 keeps files and folders in one table.
// A unique-name conflict aborts the current batch, records a "<id>_<name>"
// rename in state, and retries the batch from the same offset.
func (m *Migrator) migrateFile() error {
	m.l.Info("Migrating files...")
	batchSize := 1000
	offset := m.state.FileOffset
	ctx := context.Background()
	if m.state.FileConflictRename == nil {
		m.state.FileConflictRename = make(map[uint]string)
	}
	if m.state.EntitySources == nil {
		m.state.EntitySources = make(map[string]int)
	}
	if offset > 0 {
		m.l.Info("Resuming file migration from offset %d", offset)
	}
out:
	for {
		m.l.Info("Migrating files with offset %d", offset)
		var files []model.File
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&files).Error; err != nil {
			return fmt.Errorf("failed to list v3 files: %w", err)
		}
		if len(files) == 0 {
			if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
				// Inserting explicit IDs does not advance Postgres sequences.
				m.l.Info("Resetting file ID sequence for postgres...")
				m.v4client.File.ExecContext(ctx, "SELECT SETVAL('files_id_seq', (SELECT MAX(id) FROM files))")
			}
			break
		}
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// Do not call tx.Rollback() here: tx is nil when starting the
			// transaction fails, and rolling it back would panic.
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		for _, f := range files {
			if _, ok := m.state.FolderIDs[int(f.FolderID)]; !ok {
				m.l.Warning("Folder ID %d for file %d not found, skipping", f.FolderID, f.ID)
				continue
			}
			if _, ok := m.state.UserIDs[int(f.UserID)]; !ok {
				m.l.Warning("User ID %d for file %d not found, skipping", f.UserID, f.ID)
				continue
			}
			if _, ok := m.state.PolicyIDs[int(f.PolicyID)]; !ok {
				m.l.Warning("Policy ID %d for file %d not found, skipping", f.PolicyID, f.ID)
				continue
			}
			// Best-effort decode of the v3 metadata blob; an empty map is
			// acceptable when the JSON is absent or malformed.
			metadata := make(map[string]string)
			if f.Metadata != "" {
				json.Unmarshal([]byte(f.Metadata), &metadata)
			}
			var (
				thumbnail *ent.Entity
				entity    *ent.Entity
				err       error
			)
			if metadata[model.ThumbStatusMetadataKey] == model.ThumbStatusExist {
				size := int64(0)
				if m.state.LocalPolicyIDs[int(f.PolicyID)] {
					thumbFile, err := os.Stat(f.SourceName + m.state.ThumbSuffix)
					if err == nil {
						size = thumbFile.Size()
					} else {
						// Fixed: previously this warning fired even when the
						// thumbnail file was found.
						m.l.Warning("Thumbnail file %s for file %d not found, use 0 size", f.SourceName+m.state.ThumbSuffix, f.ID)
					}
				}
				// Insert thumbnail entity
				thumbnail, err = m.insertEntity(tx, f.SourceName+m.state.ThumbSuffix, int(types.EntityTypeThumbnail), int(f.PolicyID), int(f.UserID), size)
				if err != nil {
					_ = tx.Rollback()
					return fmt.Errorf("failed to insert thumbnail entity: %w", err)
				}
			}
			// Insert file version entity
			entity, err = m.insertEntity(tx, f.SourceName, int(types.EntityTypeVersion), int(f.PolicyID), int(f.UserID), int64(f.Size))
			if err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to insert file version entity: %w", err)
			}
			// Use the conflict-rename recorded by a previous failed attempt,
			// if any.
			fname := f.Name
			if _, ok := m.state.FileConflictRename[f.ID]; ok {
				fname = m.state.FileConflictRename[f.ID]
			}
			stm := tx.File.Create().
				SetCreatedAt(formatTime(f.CreatedAt)).
				SetUpdatedAt(formatTime(f.UpdatedAt)).
				SetName(fname).
				SetRawID(int(f.ID) + m.state.LastFolderID).
				SetOwnerID(int(f.UserID)).
				SetSize(int64(f.Size)).
				SetPrimaryEntity(entity.ID).
				SetFileChildren(int(f.FolderID)).
				SetType(int(types.FileTypeFile)).
				SetStoragePoliciesID(int(f.PolicyID)).
				AddEntities(entity)
			if thumbnail != nil {
				stm.AddEntities(thumbnail)
			}
			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				if ent.IsConstraintError(err) {
					if _, ok := m.state.FileConflictRename[f.ID]; ok {
						return fmt.Errorf("file %d already exists, but new name is already in conflict rename map, please resolve this manually", f.ID)
					}
					m.l.Warning("File %d already exists, will retry with new name in next batch", f.ID)
					m.state.FileConflictRename[f.ID] = fmt.Sprintf("%d_%s", f.ID, f.Name)
					// Retry the whole batch from the same offset with the
					// rename recorded.
					continue out
				}
				return fmt.Errorf("failed to create file %d: %w", f.ID, err)
			}
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		offset += batchSize
		m.state.FileOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after file batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing this batch")
		}
	}
	return nil
}
// insertEntity returns an entity row for the given source path, deduplicated
// per (policy, source) pair: a repeated source gains one more reference
// instead of a new row. Newly created entities are recorded in
// state.EntitySources for later lookups.
func (m *Migrator) insertEntity(tx *ent.Tx, source string, entityType, policyID, createdBy int, size int64) (*ent.Entity, error) {
	ctx := context.Background()
	entityKey := strconv.Itoa(policyID) + "+" + source
	// Reuse an already-migrated entity for the same policy+source when one
	// exists, bumping its reference count.
	if existingId, ok := m.state.EntitySources[entityKey]; ok {
		existing, err := tx.Entity.UpdateOneID(existingId).
			AddReferenceCount(1).
			Save(ctx)
		if err == nil {
			return existing, nil
		}
		m.l.Warning("Failed to update existing entity %d: %s, fallback to create new one.", existingId, err)
	}
	// No reusable entity; create a fresh row.
	created, err := tx.Entity.Create().
		SetSource(source).
		SetType(entityType).
		SetSize(size).
		SetStoragePolicyEntities(policyID).
		SetCreatedBy(createdBy).
		SetReferenceCount(1).
		Save(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to create new entity: %w", err)
	}
	m.state.EntitySources[entityKey] = created.ID
	return created, nil
}

View File

@@ -0,0 +1,147 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
// migrateFolders copies v3 folders into the v4 files table (folders are files
// of type FileTypeFolder in v4) in batches of 1000, resuming from the offset
// saved in state. Root folders (nil parent) get an empty name, matching the
// v4 root convention. Parent edges are attached later by migrateFolderParent.
func (m *Migrator) migrateFolders() error {
	m.l.Info("Migrating folders...")
	batchSize := 1000
	// Start from the saved offset if available
	offset := m.state.FolderOffset
	ctx := context.Background()
	foldersCount := 0
	if m.state.FolderIDs == nil {
		m.state.FolderIDs = make(map[int]bool)
	}
	if offset > 0 {
		m.l.Info("Resuming folder migration from offset %d", offset)
	}
	for {
		m.l.Info("Migrating folders with offset %d", offset)
		var folders []model.Folder
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&folders).Error; err != nil {
			return fmt.Errorf("failed to list v3 folders: %w", err)
		}
		if len(folders) == 0 {
			break
		}
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// Do not call tx.Rollback() here: tx is nil when starting the
			// transaction fails, and rolling it back would panic.
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		batchFoldersCount := 0
		for _, f := range folders {
			if _, ok := m.state.UserIDs[int(f.OwnerID)]; !ok {
				m.l.Warning("Owner ID %d not found, skipping folder %d", f.OwnerID, f.ID)
				continue
			}
			isRoot := f.ParentID == nil
			if isRoot {
				// v4 identifies a user's root folder by an empty name.
				f.Name = ""
			} else if *f.ParentID == 0 {
				m.l.Warning("Parent ID %d not found, skipping folder %d", *f.ParentID, f.ID)
				continue
			}
			stm := tx.File.Create().
				SetRawID(int(f.ID)).
				SetType(int(types.FileTypeFolder)).
				SetCreatedAt(formatTime(f.CreatedAt)).
				SetUpdatedAt(formatTime(f.UpdatedAt)).
				SetName(f.Name).
				SetOwnerID(int(f.OwnerID))
			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to create folder %d: %w", f.ID, err)
			}
			m.state.FolderIDs[int(f.ID)] = true
			// LastFolderID later offsets migrated file IDs so they do not
			// collide with folder IDs in the shared v4 table.
			m.state.LastFolderID = int(f.ID)
			foldersCount++
			batchFoldersCount++
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		// Update the offset in state and save after each batch
		offset += batchSize
		m.state.FolderOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after folder batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing %d folders in this batch", batchFoldersCount)
		}
	}
	m.l.Info("Successfully migrated %d folders", foldersCount)
	return nil
}
// migrateFolderParent fills in the parent edge for every folder migrated by
// migrateFolders, in batches of 1000 resuming from the saved offset. Folders
// whose parent was not migrated are skipped with a warning.
func (m *Migrator) migrateFolderParent() error {
	m.l.Info("Migrating folder parent...")
	batchSize := 1000
	offset := m.state.FolderParentOffset
	ctx := context.Background()
	for {
		m.l.Info("Migrating folder parent with offset %d", offset)
		var folderParents []model.Folder
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&folderParents).Error; err != nil {
			return fmt.Errorf("failed to list v3 folder parents: %w", err)
		}
		if len(folderParents) == 0 {
			break
		}
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// Do not call tx.Rollback() here: tx is nil when starting the
			// transaction fails, and rolling it back would panic.
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		for _, f := range folderParents {
			if f.ParentID != nil {
				if _, ok := m.state.FolderIDs[int(*f.ParentID)]; !ok {
					// Fixed: report the missing parent's ID rather than the
					// child's ID twice.
					m.l.Warning("Folder ID %d not found, skipping folder parent %d", *f.ParentID, f.ID)
					continue
				}
				if _, err := tx.File.UpdateOneID(int(f.ID)).SetParentID(int(*f.ParentID)).Save(ctx); err != nil {
					_ = tx.Rollback()
					return fmt.Errorf("failed to update folder parent %d: %w", f.ID, err)
				}
			}
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		// Update the offset in state and save after each batch
		offset += batchSize
		m.state.FolderParentOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after folder parent batch: %s", err)
		}
	}
	return nil
}

View File

@@ -0,0 +1,92 @@
package migrator
import (
"context"
"encoding/json"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/samber/lo"
)
// migrateGroup copies all v3 user groups into v4, converting the JSON options
// blob into typed GroupSetting values plus a permission boolset. Storage
// policies that were not migrated are filtered out; only the first surviving
// policy is attached, matching the v4 one-policy-per-group model.
func (m *Migrator) migrateGroup() error {
	m.l.Info("Migrating groups...")
	var groups []model.Group
	if err := model.DB.Find(&groups).Error; err != nil {
		return fmt.Errorf("failed to list v3 groups: %w", err)
	}
	for _, group := range groups {
		// Named perms rather than cap to avoid shadowing the builtin cap().
		perms := &boolset.BooleanSet{}
		var (
			opts     model.GroupOption
			policies []int
		)
		if err := json.Unmarshal([]byte(group.Options), &opts); err != nil {
			return fmt.Errorf("failed to unmarshal options for group %q: %w", group.Name, err)
		}
		if err := json.Unmarshal([]byte(group.Policies), &policies); err != nil {
			return fmt.Errorf("failed to unmarshal policies for group %q: %w", group.Name, err)
		}
		// Drop references to policies that were not migrated.
		policies = lo.Filter(policies, func(id int, _ int) bool {
			_, exist := m.state.PolicyIDs[id]
			return exist
		})
		newOpts := &types.GroupSetting{
			CompressSize:          int64(opts.CompressSize),
			DecompressSize:        int64(opts.DecompressSize),
			RemoteDownloadOptions: opts.Aria2Options,
			SourceBatchSize:       opts.SourceBatchSize,
			RedirectedSource:      opts.RedirectedSource,
			Aria2BatchSize:        opts.Aria2BatchSize,
			// v3 has no equivalents for these; use v4 defaults.
			MaxWalkedFiles: 100000,
			TrashRetention: 7 * 24 * 3600,
		}
		// v3 hard-codes group 1 as admin and group 3 as anonymous.
		boolset.Sets(map[types.GroupPermission]bool{
			types.GroupPermissionIsAdmin:          group.ID == 1,
			types.GroupPermissionIsAnonymous:      group.ID == 3,
			types.GroupPermissionShareDownload:    opts.ShareDownload,
			types.GroupPermissionWebDAV:           group.WebDAVEnabled,
			types.GroupPermissionArchiveDownload:  opts.ArchiveDownload,
			types.GroupPermissionArchiveTask:      opts.ArchiveTask,
			types.GroupPermissionWebDAVProxy:      opts.WebDAVProxy,
			types.GroupPermissionRemoteDownload:   opts.Aria2,
			types.GroupPermissionAdvanceDelete:    opts.AdvanceDelete,
			types.GroupPermissionShare:            group.ShareEnabled,
			types.GroupPermissionRedirectedSource: opts.RedirectedSource,
		}, perms)
		stm := m.v4client.Group.Create().
			SetRawID(int(group.ID)).
			SetCreatedAt(formatTime(group.CreatedAt)).
			SetUpdatedAt(formatTime(group.UpdatedAt)).
			SetName(group.Name).
			SetMaxStorage(int64(group.MaxStorage)).
			SetSpeedLimit(group.SpeedLimit).
			SetPermissions(perms).
			SetSettings(newOpts)
		if len(policies) > 0 {
			stm.SetStoragePoliciesID(policies[0])
		}
		if _, err := stm.Save(context.Background()); err != nil {
			return fmt.Errorf("failed to create group %q: %w", group.Name, err)
		}
	}
	if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
		// Inserting explicit IDs does not advance Postgres sequences.
		m.l.Info("Resetting group ID sequence for postgres...")
		m.v4client.Group.ExecContext(context.Background(), "SELECT SETVAL('groups_id_seq', (SELECT MAX(id) FROM groups))")
	}
	return nil
}

View File

@@ -0,0 +1,314 @@
package migrator
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/application/migrator/conf"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
)
// State stores the migration progress so an interrupted run can resume from
// the last persisted step and batch offsets. It is serialized to
// migration_state.json next to the v3 config file.
type State struct {
	// Sets of successfully migrated v3 IDs; later phases skip rows that
	// reference IDs missing from these sets.
	PolicyIDs      map[int]bool `json:"policy_ids,omitempty"`
	LocalPolicyIDs map[int]bool `json:"local_policy_ids,omitempty"`
	UserIDs        map[int]bool `json:"user_ids,omitempty"`
	FolderIDs      map[int]bool `json:"folder_ids,omitempty"`
	// EntitySources maps "policyID+source" to the v4 entity ID, used to
	// deduplicate entities that share the same physical file.
	EntitySources map[string]int `json:"entity_sources,omitempty"`
	// LastFolderID is the highest migrated folder ID; file IDs are shifted by
	// this amount because v4 keeps files and folders in one table.
	LastFolderID int `json:"last_folder_id,omitempty"`
	// Step is the current migration phase (see the Step* constants).
	Step int `json:"step,omitempty"`
	// Per-phase batch offsets for resumable pagination.
	UserOffset        int `json:"user_offset,omitempty"`
	FolderOffset      int `json:"folder_offset,omitempty"`
	FileOffset        int `json:"file_offset,omitempty"`
	ShareOffset       int `json:"share_offset,omitempty"`
	GiftCodeOffset    int `json:"gift_code_offset,omitempty"`
	DirectLinkOffset  int `json:"direct_link_offset,omitempty"`
	WebdavOffset      int `json:"webdav_offset,omitempty"`
	StoragePackOffset int `json:"storage_pack_offset,omitempty"`
	// FileConflictRename maps a v3 file ID to the replacement name used after
	// a unique-name constraint violation.
	FileConflictRename map[uint]string `json:"file_conflict_rename,omitempty"`
	FolderParentOffset int             `json:"folder_parent_offset,omitempty"`
	// ThumbSuffix is the v3 thumbnail sidecar file suffix.
	ThumbSuffix string `json:"thumb_suffix,omitempty"`
	// V3AvatarPath is the v3 avatar directory used by the avatar phase.
	V3AvatarPath string `json:"v3_avatar_path,omitempty"`
}
// Step identifiers for migration phases, executed in ascending order.
// Values are persisted in the state file, so existing constants must never be
// renumbered; the placeholders reserve slots used by non-community editions.
const (
	StepInitial                = 0
	StepSchema                 = 1
	StepSettings               = 2
	StepNode                   = 3
	StepPolicy                 = 4
	StepGroup                  = 5
	StepUser                   = 6
	StepFolders                = 7
	StepFolderParent           = 8
	StepFile                   = 9
	StepShare                  = 10
	StepDirectLink             = 11
	Step_CommunityPlaceholder1 = 12
	Step_CommunityPlaceholder2 = 13
	StepAvatar                 = 14
	StepWebdav                 = 15
	StepCompleted              = 16
	// StateFileName is the file used to persist State between runs.
	StateFileName = "migration_state.json"
)
// Migrator drives the one-way v3 -> v4 data migration. It reads from the v3
// database (via the global model.DB) and writes through a raw v4 ent client,
// persisting its progress to a State file between phases.
type Migrator struct {
	dep       dependency.Dep // dependency container (config, logger)
	l         logging.Logger
	v4client  *ent.Client // raw v4 ent client
	state     *State      // resumable migration progress
	statePath string      // JSON file where state is persisted
}
// NewMigrator builds a Migrator for the v3 installation described by
// v3ConfPath. It resumes from an existing migration_state.json next to the v3
// config when present, then initializes the v3 config/database layers and a
// raw v4 ent client.
func NewMigrator(dep dependency.Dep, v3ConfPath string) (*Migrator, error) {
	m := &Migrator{
		dep: dep,
		l:   dep.Logger(),
		state: &State{
			PolicyIDs:    make(map[int]bool),
			UserIDs:      make(map[int]bool),
			Step:         StepInitial,
			UserOffset:   0,
			FolderOffset: 0,
		},
	}
	// Determine state file path
	configDir := filepath.Dir(v3ConfPath)
	m.statePath = filepath.Join(configDir, StateFileName)
	// Try to load existing state
	if util.Exists(m.statePath) {
		m.l.Info("Found existing migration state file, loading from %s", m.statePath)
		if err := m.loadState(); err != nil {
			return nil, fmt.Errorf("failed to load migration state: %w", err)
		}
		// Human-readable names for every phase. The previous switch was
		// missing folder parent, file, share and direct link, so resuming at
		// those steps logged "unknown".
		stepNames := map[int]string{
			StepInitial:      "initial",
			StepSchema:       "schema creation",
			StepSettings:     "settings migration",
			StepNode:         "node migration",
			StepPolicy:       "policy migration",
			StepGroup:        "group migration",
			StepUser:         "user migration",
			StepFolders:      "folders migration",
			StepFolderParent: "folder parent migration",
			StepFile:         "file migration",
			StepShare:        "share migration",
			StepDirectLink:   "direct link migration",
			StepAvatar:       "avatar migration",
			StepWebdav:       "webdav migration",
			StepCompleted:    "completed",
		}
		stepName, ok := stepNames[m.state.Step]
		if !ok {
			stepName = "unknown"
		}
		m.l.Info("Resumed migration from step %d (%s)", m.state.Step, stepName)
		// Log batch information if applicable
		if m.state.Step == StepUser && m.state.UserOffset > 0 {
			m.l.Info("Will resume user migration from batch offset %d", m.state.UserOffset)
		}
		if m.state.Step == StepFolders && m.state.FolderOffset > 0 {
			m.l.Info("Will resume folder migration from batch offset %d", m.state.FolderOffset)
		}
	}
	err := conf.Init(m.dep.Logger(), v3ConfPath)
	if err != nil {
		return nil, err
	}
	err = model.Init()
	if err != nil {
		return nil, err
	}
	v4client, err := inventory.NewRawEntClient(m.l, m.dep.ConfigProvider())
	if err != nil {
		return nil, err
	}
	m.v4client = v4client
	return m, nil
}
// saveState serializes the current migration state to the JSON state file so
// an interrupted migration can resume where it stopped.
func (m *Migrator) saveState() error {
	payload, err := json.Marshal(m.state)
	if err != nil {
		return fmt.Errorf("failed to marshal state: %w", err)
	}
	return os.WriteFile(m.statePath, payload, 0644)
}
// loadState restores migration progress from the JSON state file into m.state.
func (m *Migrator) loadState() error {
	raw, err := os.ReadFile(m.statePath)
	if err != nil {
		return fmt.Errorf("failed to read state file: %w", err)
	}
	return json.Unmarshal(raw, m.state)
}
// updateStep records that the migration advanced to step and immediately
// persists the state file so a crash resumes from the new step.
func (m *Migrator) updateStep(step int) error {
	m.state.Step = step
	return m.saveState()
}
// Migrate runs every remaining migration phase in order, starting at the step
// recorded in the persisted state. Each completed phase advances and persists
// the step, so a failed run resumes at the phase that broke.
func (m *Migrator) Migrate() error {
	// phase describes one migration stage in the ordered pipeline below.
	type phase struct {
		upTo        int          // run this phase while the saved step is <= upTo
		run         func() error // the migration work itself
		saveOnError bool         // persist state before bubbling up a failure
		onSuccess   func()       // optional state adjustment after success
		next        int          // step recorded once the phase completes
	}
	phases := []phase{
		{StepSchema, func() error {
			m.l.Info("Creating basic v4 table schema...")
			if err := m.v4client.Schema.Create(context.Background()); err != nil {
				return fmt.Errorf("failed creating schema resources: %w", err)
			}
			return nil
		}, false, nil, StepSettings},
		{StepSettings, m.migrateSettings, false, nil, StepNode},
		{StepNode, m.migrateNode, false, nil, StepPolicy},
		{StepPolicy, func() error {
			allPolicyIDs, err := m.migratePolicy()
			if err != nil {
				return err
			}
			m.state.PolicyIDs = allPolicyIDs
			return nil
		}, false, nil, StepGroup},
		{StepGroup, m.migrateGroup, false, nil, StepUser},
		// User and folder phases persist partial progress on failure and
		// reset their batch offsets once fully done.
		{StepUser, m.migrateUser, true, func() { m.state.UserOffset = 0 }, StepFolders},
		{StepFolders, m.migrateFolders, true, func() { m.state.FolderOffset = 0 }, StepFolderParent},
		{StepFolderParent, m.migrateFolderParent, false, nil, StepFile},
		{StepFile, m.migrateFile, false, nil, StepShare},
		{StepShare, m.migrateShare, false, nil, StepDirectLink},
		{StepDirectLink, m.migrateDirectLink, false, nil, StepAvatar},
		{StepAvatar, func() error { return migrateAvatars(m) }, false, nil, StepWebdav},
		{StepWebdav, m.migrateWebdav, false, nil, StepCompleted},
	}
	for _, p := range phases {
		if m.state.Step > p.upTo {
			continue
		}
		if err := p.run(); err != nil {
			if p.saveOnError {
				m.saveState()
			}
			return err
		}
		if p.onSuccess != nil {
			p.onSuccess()
		}
		if err := m.updateStep(p.next); err != nil {
			return fmt.Errorf("failed to update step: %w", err)
		}
	}
	m.l.Info("Migration completed successfully")
	return nil
}
func formatTime(t time.Time) time.Time {
newTime := time.UnixMilli(t.UnixMilli())
return newTime
}

View File

@@ -0,0 +1,288 @@
package dialects
import (
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/jinzhu/gorm"
)
// keyNameRegex matches any run of characters that is not legal in a key name;
// BuildKeyName collapses such runs to "_".
var keyNameRegex = regexp.MustCompile("[^a-zA-Z0-9]+")

// DefaultForeignKeyNamer contains the default foreign key name generator method
type DefaultForeignKeyNamer struct {
}

// commonDialect is a generic gorm dialect; concrete dialects such as sqlite
// embed it and override only the parts that differ.
type commonDialect struct {
	db gorm.SQLCommon
	DefaultForeignKeyNamer
}
// GetName returns the dialect's name.
func (commonDialect) GetName() string {
	return "common"
}

// SetDB stores the database handle used by schema-inspection queries.
func (s *commonDialect) SetDB(db gorm.SQLCommon) {
	s.db = db
}

// BindVar returns the placeholder for bind variable i. gorm replaces the
// "$$$" marker with the real placeholder later.
func (commonDialect) BindVar(i int) string {
	return "$$$" // ?
}

// Quote wraps key in double quotes (ANSI identifier quoting).
func (commonDialect) Quote(key string) string {
	return fmt.Sprintf(`"%s"`, key)
}

// fieldCanAutoIncrement reports whether field should get an auto-increment
// column: an explicit AUTO_INCREMENT tag wins; otherwise primary keys
// auto-increment by default.
func (s *commonDialect) fieldCanAutoIncrement(field *gorm.StructField) bool {
	if value, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
		return strings.ToLower(value) != "false"
	}
	return field.IsPrimaryKey
}
// DataTypeOf maps a gorm struct field to a generic SQL column type, honoring
// size tags and AUTO_INCREMENT settings. It panics on Go kinds it cannot map,
// mirroring gorm's built-in dialects.
func (s *commonDialect) DataTypeOf(field *gorm.StructField) string {
	var dataValue, sqlType, size, additionalType = gorm.ParseFieldStructForDialect(field, s)
	if sqlType == "" {
		switch dataValue.Kind() {
		case reflect.Bool:
			sqlType = "BOOLEAN"
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
			if s.fieldCanAutoIncrement(field) {
				sqlType = "INTEGER AUTO_INCREMENT"
			} else {
				sqlType = "INTEGER"
			}
		case reflect.Int64, reflect.Uint64:
			if s.fieldCanAutoIncrement(field) {
				sqlType = "BIGINT AUTO_INCREMENT"
			} else {
				sqlType = "BIGINT"
			}
		case reflect.Float32, reflect.Float64:
			sqlType = "FLOAT"
		case reflect.String:
			// 65532 is treated as the VARCHAR ceiling; larger or unspecified
			// sizes fall back to the maximum.
			if size > 0 && size < 65532 {
				sqlType = fmt.Sprintf("VARCHAR(%d)", size)
			} else {
				sqlType = "VARCHAR(65532)"
			}
		case reflect.Struct:
			// Only time.Time structs are mapped; other structs fall through
			// and panic below.
			if _, ok := dataValue.Interface().(time.Time); ok {
				sqlType = "TIMESTAMP"
			}
		default:
			if _, ok := dataValue.Interface().([]byte); ok {
				if size > 0 && size < 65532 {
					sqlType = fmt.Sprintf("BINARY(%d)", size)
				} else {
					sqlType = "BINARY(65532)"
				}
			}
		}
	}
	if sqlType == "" {
		panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", dataValue.Type().Name(), dataValue.Kind().String()))
	}
	if strings.TrimSpace(additionalType) == "" {
		return sqlType
	}
	return fmt.Sprintf("%v %v", sqlType, additionalType)
}
// currentDatabaseAndTable splits a possibly schema-qualified table name into
// (database, table). Names without a dot resolve against the dialect's
// current database.
func currentDatabaseAndTable(dialect gorm.Dialect, tableName string) (string, string) {
	if idx := strings.Index(tableName, "."); idx >= 0 {
		// Only the first dot separates schema from table.
		return tableName[:idx], tableName[idx+1:]
	}
	return dialect.CurrentDatabase(), tableName
}
// HasIndex reports whether indexName exists on tableName, consulting
// INFORMATION_SCHEMA. Scan errors are deliberately ignored, leaving count at
// 0, so any query failure reads as "index absent".
func (s commonDialect) HasIndex(tableName string, indexName string) bool {
	var count int
	currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
	s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE table_schema = ? AND table_name = ? AND index_name = ?", currentDatabase, tableName, indexName).Scan(&count)
	return count > 0
}

// RemoveIndex drops indexName; tableName is unused by the generic dialect.
func (s commonDialect) RemoveIndex(tableName string, indexName string) error {
	_, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v", indexName))
	return err
}

// HasForeignKey always reports false: the generic dialect cannot inspect
// foreign keys.
func (s commonDialect) HasForeignKey(tableName string, foreignKeyName string) bool {
	return false
}

// HasTable reports whether tableName exists in the current database.
// Scan errors are ignored (treated as "table absent").
func (s commonDialect) HasTable(tableName string) bool {
	var count int
	currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
	s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? AND table_name = ?", currentDatabase, tableName).Scan(&count)
	return count > 0
}

// HasColumn reports whether columnName exists on tableName.
// Scan errors are ignored (treated as "column absent").
func (s commonDialect) HasColumn(tableName string, columnName string) bool {
	var count int
	currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
	s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count)
	return count > 0
}

// ModifyColumn changes columnName's type via ALTER TABLE ... ALTER COLUMN.
func (s commonDialect) ModifyColumn(tableName string, columnName string, typ string) error {
	_, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v TYPE %v", tableName, columnName, typ))
	return err
}

// CurrentDatabase returns the schema name reported by SELECT DATABASE();
// empty when the query or scan fails.
func (s commonDialect) CurrentDatabase() (name string) {
	s.db.QueryRow("SELECT DATABASE()").Scan(&name)
	return
}

// LimitAndOffsetSQL renders LIMIT/OFFSET clauses; non-numeric or negative
// values are silently dropped.
func (commonDialect) LimitAndOffsetSQL(limit, offset interface{}) (sql string) {
	if limit != nil {
		if parsedLimit, err := strconv.ParseInt(fmt.Sprint(limit), 0, 0); err == nil && parsedLimit >= 0 {
			sql += fmt.Sprintf(" LIMIT %d", parsedLimit)
		}
	}
	if offset != nil {
		if parsedOffset, err := strconv.ParseInt(fmt.Sprint(offset), 0, 0); err == nil && parsedOffset >= 0 {
			sql += fmt.Sprintf(" OFFSET %d", parsedOffset)
		}
	}
	return
}

// SelectFromDummyTable returns the FROM clause for value-only selects (none
// needed for this dialect).
func (commonDialect) SelectFromDummyTable() string {
	return ""
}

// LastInsertIDReturningSuffix returns the RETURNING suffix used to fetch the
// last insert ID (none for this dialect).
func (commonDialect) LastInsertIDReturningSuffix(tableName, columnName string) string {
	return ""
}

// DefaultValueStr returns the clause for inserting a row of all defaults.
func (commonDialect) DefaultValueStr() string {
	return "DEFAULT VALUES"
}
// BuildKeyName returns a valid key name (foreign key, index key) for the given
// table, field and reference; any non-alphanumeric run is collapsed to "_".
func (DefaultForeignKeyNamer) BuildKeyName(kind, tableName string, fields ...string) string {
	keyName := fmt.Sprintf("%s_%s_%s", kind, tableName, strings.Join(fields, "_"))
	keyName = keyNameRegex.ReplaceAllString(keyName, "_")
	return keyName
}

// NormalizeIndexAndColumn returns argument's index name and column name
// without doing anything.
func (commonDialect) NormalizeIndexAndColumn(indexName, columnName string) (string, string) {
	return indexName, columnName
}
// IsByteArrayOrSlice returns true of the reflected value is an array or slice
func IsByteArrayOrSlice(value reflect.Value) bool {
return (value.Kind() == reflect.Array || value.Kind() == reflect.Slice) && value.Type().Elem() == reflect.TypeOf(uint8(0))
}
// sqlite implements the gorm dialect for SQLite on top of commonDialect.
type sqlite struct {
	commonDialect
}

// init registers the dialect with gorm under the name "sqlite".
func init() {
	gorm.RegisterDialect("sqlite", &sqlite{})
}

// GetName returns the dialect's name.
func (sqlite) GetName() string {
	return "sqlite"
}
// DataTypeOf maps a gorm struct field to an SQLite column type. Integer
// primary keys become "integer primary key autoincrement" (SQLite's rowid
// alias); it panics on Go kinds it cannot map, mirroring gorm's built-in
// dialects.
func (s *sqlite) DataTypeOf(field *gorm.StructField) string {
	var dataValue, sqlType, size, additionalType = gorm.ParseFieldStructForDialect(field, s)
	if sqlType == "" {
		switch dataValue.Kind() {
		case reflect.Bool:
			sqlType = "bool"
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
			if s.fieldCanAutoIncrement(field) {
				// Record the decision in the tag so later gorm stages see it.
				field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
				sqlType = "integer primary key autoincrement"
			} else {
				sqlType = "integer"
			}
		case reflect.Int64, reflect.Uint64:
			if s.fieldCanAutoIncrement(field) {
				field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
				sqlType = "integer primary key autoincrement"
			} else {
				sqlType = "bigint"
			}
		case reflect.Float32, reflect.Float64:
			sqlType = "real"
		case reflect.String:
			if size > 0 && size < 65532 {
				sqlType = fmt.Sprintf("varchar(%d)", size)
			} else {
				sqlType = "text"
			}
		case reflect.Struct:
			if _, ok := dataValue.Interface().(time.Time); ok {
				sqlType = "datetime"
			}
		default:
			if IsByteArrayOrSlice(dataValue) {
				sqlType = "blob"
			}
		}
	}
	if sqlType == "" {
		panic(fmt.Sprintf("invalid sql type %s (%s) for sqlite", dataValue.Type().Name(), dataValue.Kind().String()))
	}
	if strings.TrimSpace(additionalType) == "" {
		return sqlType
	}
	return fmt.Sprintf("%v %v", sqlType, additionalType)
}
// HasIndex reports whether indexName exists on tableName by pattern-matching
// the stored CREATE statement in sqlite_master. The index name is
// interpolated into the LIKE pattern via Sprintf; it is a schema identifier
// produced by gorm, not user input. Scan errors leave count at 0 ("absent").
func (s sqlite) HasIndex(tableName string, indexName string) bool {
	var count int
	s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Scan(&count)
	return count > 0
}

// HasTable reports whether tableName exists (scan errors read as absent).
func (s sqlite) HasTable(tableName string) bool {
	var count int
	s.db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Scan(&count)
	return count > 0
}

// HasColumn reports whether columnName appears in tableName's stored CREATE
// statement, matched either quoted or bare.
func (s sqlite) HasColumn(tableName string, columnName string) bool {
	var count int
	s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND (sql LIKE '%%\"%v\" %%' OR sql LIKE '%%%v %%');", columnName, columnName), tableName).Scan(&count)
	return count > 0
}

// CurrentDatabase returns the name of the first attached database from
// "PRAGMA database_list", whose rows are (seq, name, file). All three columns
// are scanned through *string pointers so NULLs do not abort the scan; the
// second column is the database name.
func (s sqlite) CurrentDatabase() (name string) {
	var (
		ifaces   = make([]interface{}, 3)
		pointers = make([]*string, 3)
		i        int
	)
	for i = 0; i < 3; i++ {
		ifaces[i] = &pointers[i]
	}
	if err := s.db.QueryRow("PRAGMA database_list").Scan(ifaces...); err != nil {
		return
	}
	if pointers[1] != nil {
		name = *pointers[1]
	}
	return
}

View File

@@ -0,0 +1,39 @@
package model
import (
"github.com/jinzhu/gorm"
)
// File is the v3 "files" gorm model read during migration.
type File struct {
	// Table fields
	gorm.Model
	Name       string `gorm:"unique_index:idx_only_one"`
	SourceName string `gorm:"type:text"`
	UserID     uint   `gorm:"index:user_id;unique_index:idx_only_one"`
	Size       uint64
	PicInfo    string
	FolderID   uint `gorm:"index:folder_id;unique_index:idx_only_one"`
	PolicyID   uint
	// UploadSessionID is set while an upload is in progress.
	UploadSessionID *string `gorm:"index:session_id;unique_index:session_only_one"`
	Metadata        string  `gorm:"type:text"` // JSON blob, see the metadata keys below
	// Associated models
	Policy Policy `gorm:"PRELOAD:false,association_autoupdate:false"`
	// Fields ignored by the database
	Position           string            `gorm:"-"`
	MetadataSerialized map[string]string `gorm:"-"`
}

// Thumb related metadata
const (
	ThumbStatusNotExist     = ""
	ThumbStatusExist        = "exist"
	ThumbStatusNotAvailable = "not_available"
	ThumbStatusMetadataKey  = "thumb_status"
	ThumbSidecarMetadataKey = "thumb_sidecar"
	ChecksumMetadataKey     = "webdav_checksum"
)

View File

@@ -0,0 +1,18 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Folder is the v3 "folders" table model (a directory in the file tree).
type Folder struct {
	// Table columns.
	gorm.Model
	Name     string `gorm:"unique_index:idx_only_one_name"`                 // folder name, unique per parent via idx_only_one_name
	ParentID *uint  `gorm:"index:parent_id;unique_index:idx_only_one_name"` // parent folder ID; nil for the root folder
	OwnerID  uint   `gorm:"index:owner_id"`                                 // owning user's ID
	// Fields ignored by the database.
	Position      string `gorm:"-"` // path of the folder, filled in by callers
	WebdavDstName string `gorm:"-"` // target name used during WebDAV rename/move
}

View File

@@ -0,0 +1,38 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Group is the v3 user-group model.
type Group struct {
	gorm.Model
	Name          string // display name of the group
	Policies      string // serialized list of storage policy IDs (see PolicyList)
	MaxStorage    uint64 // storage quota in bytes
	ShareEnabled  bool   // whether members may create shares
	WebDAVEnabled bool   // whether members may use WebDAV
	SpeedLimit    int    // download speed limit
	Options       string `json:"-" gorm:"size:4294967295"` // serialized GroupOption (see OptionsSerialized)
	// Fields ignored by the database.
	PolicyList        []uint      `gorm:"-"` // decoded form of Policies
	OptionsSerialized GroupOption `gorm:"-"` // decoded form of Options
}
// GroupOption holds the additional, JSON-serialized settings of a user group.
type GroupOption struct {
	ArchiveDownload  bool                   `json:"archive_download,omitempty"` // allow batch download as archive
	ArchiveTask      bool                   `json:"archive_task,omitempty"`     // allow server-side compression tasks
	CompressSize     uint64                 `json:"compress_size,omitempty"`    // max total size that may be compressed
	DecompressSize   uint64                 `json:"decompress_size,omitempty"`  // max archive size that may be extracted
	OneTimeDownload  bool                   `json:"one_time_download,omitempty"`
	ShareDownload    bool                   `json:"share_download,omitempty"`
	Aria2            bool                   `json:"aria2,omitempty"`         // allow aria2 remote download
	Aria2Options     map[string]interface{} `json:"aria2_options,omitempty"` // per-group aria2 download options
	SourceBatchSize  int                    `json:"source_batch,omitempty"`
	RedirectedSource bool                   `json:"redirected_source,omitempty"`
	Aria2BatchSize   int                    `json:"aria2_batch,omitempty"`
	AdvanceDelete    bool                   `json:"advance_delete,omitempty"`
	WebDAVProxy      bool                   `json:"webdav_proxy,omitempty"`
}

View File

@@ -0,0 +1,96 @@
package model
import (
"fmt"
"time"
"github.com/jinzhu/gorm"
"github.com/cloudreve/Cloudreve/v4/application/migrator/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
_ "github.com/jinzhu/gorm/dialects/mssql"
_ "github.com/jinzhu/gorm/dialects/mysql"
_ "github.com/jinzhu/gorm/dialects/postgres"
)
// DB is the singleton gorm connection to the v3 database, populated by Init.
var DB *gorm.DB
// Init opens the v3 database described by conf.DatabaseConfig and stores the
// connection in the package-level DB. Legacy type spellings ("sqlite3",
// "mariadb") are normalized, and the connection pool is tuned per backend.
// It returns an error for unsupported database types or connection failures.
func Init() error {
	var (
		db         *gorm.DB
		err        error
		confDBType string = conf.DatabaseConfig.Type
	)
	// Accept the legacy "sqlite3" spelling used by existing config files.
	if confDBType == "sqlite3" {
		confDBType = "sqlite"
	}
	// MariaDB is wire-compatible with MySQL; reuse the mysql dialect.
	if confDBType == "mariadb" {
		confDBType = "mysql"
	}
	switch confDBType {
	case "UNSET", "sqlite":
		// No type configured, or explicitly sqlite: open the local DB file.
		db, err = gorm.Open("sqlite3", util.RelativePath(conf.DatabaseConfig.DBFile))
	case "postgres":
		db, err = gorm.Open(confDBType, fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=disable",
			conf.DatabaseConfig.Host,
			conf.DatabaseConfig.User,
			conf.DatabaseConfig.Password,
			conf.DatabaseConfig.Name,
			conf.DatabaseConfig.Port))
	case "mysql", "mssql":
		var host string
		// Connect over a unix socket when configured, TCP otherwise.
		if conf.DatabaseConfig.UnixSocket {
			host = fmt.Sprintf("unix(%s)",
				conf.DatabaseConfig.Host)
		} else {
			host = fmt.Sprintf("(%s:%d)",
				conf.DatabaseConfig.Host,
				conf.DatabaseConfig.Port)
		}
		db, err = gorm.Open(confDBType, fmt.Sprintf("%s:%s@%s/%s?charset=%s&parseTime=True&loc=Local",
			conf.DatabaseConfig.User,
			conf.DatabaseConfig.Password,
			host,
			conf.DatabaseConfig.Name,
			conf.DatabaseConfig.Charset))
	default:
		return fmt.Errorf("unsupported database type %q", confDBType)
	}
	//db.SetLogger(util.Log())
	if err != nil {
		return fmt.Errorf("failed to connect to database: %w", err)
	}
	// Apply the configured table-name prefix to every model.
	gorm.DefaultTableNameHandler = func(db *gorm.DB, defaultTableName string) string {
		return conf.DatabaseConfig.TablePrefix + defaultTableName
	}
	// Log every SQL statement (always on for the migrator).
	db.LogMode(true)
	// Connection pool limits.
	db.DB().SetMaxIdleConns(50)
	if confDBType == "sqlite" || confDBType == "UNSET" {
		// SQLite allows a single writer; cap open connections at 1 to avoid
		// "database is locked" errors.
		db.DB().SetMaxOpenConns(1)
	} else {
		db.DB().SetMaxOpenConns(100)
	}
	// Recycle connections periodically.
	db.DB().SetConnMaxLifetime(time.Second * 30)
	DB = db
	return nil
}

View File

@@ -0,0 +1,51 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Node is the v3 slave-node information model.
type Node struct {
	gorm.Model
	Status       NodeStatus // node status (active / suspended)
	Name         string     // display alias of the node
	Type         ModelType  // node type (master / slave); original comment said "status" — that was a copy-paste slip
	Server       string     // server address
	SlaveKey     string     `gorm:"type:text"` // master -> slave communication secret
	MasterKey    string     `gorm:"type:text"` // slave -> master communication secret
	Aria2Enabled bool       // whether this node can serve as a remote-download node
	Aria2Options string     `gorm:"type:text"` // serialized Aria2Option (see Aria2OptionsSerialized)
	Rank         int        // load-balancing weight
	// Fields ignored by the database.
	Aria2OptionsSerialized Aria2Option `gorm:"-"` // decoded form of Aria2Options
}
// Aria2Option holds the non-public aria2 configuration of a node.
type Aria2Option struct {
	// RPC server address.
	Server string `json:"server,omitempty"`
	// RPC secret token.
	Token string `json:"token,omitempty"`
	// Temporary download directory.
	TempPath string `json:"temp_path,omitempty"`
	// Extra download options (itself a JSON object, decoded by the migrator).
	Options string `json:"options,omitempty"`
	// Download status polling interval.
	Interval int `json:"interval,omitempty"`
	// RPC API request timeout.
	Timeout int `json:"timeout,omitempty"`
}
// NodeStatus is the lifecycle state of a node.
type NodeStatus int

// ModelType distinguishes master from slave nodes.
type ModelType int

const (
	// NodeActive marks a node in service.
	NodeActive NodeStatus = iota
	// NodeSuspend marks a node taken out of rotation.
	NodeSuspend
)

const (
	// SlaveNodeType is a slave (storage/worker) node.
	SlaveNodeType ModelType = iota
	// MasterNodeType is the master node.
	MasterNodeType
)

View File

@@ -0,0 +1,62 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Policy is the v3 storage-policy model.
type Policy struct {
	// Table columns.
	gorm.Model
	Name               string // display name
	Type               string // backend type (local, remote, s3, od, ...)
	Server             string // endpoint / server address
	BucketName         string // bucket name for object-storage backends
	IsPrivate          bool   // whether the bucket/storage is private
	BaseURL            string // public base URL for downloads
	AccessKey          string `gorm:"type:text"`
	SecretKey          string `gorm:"type:text"`
	MaxSize            uint64 // max size of a single upload, in bytes
	AutoRename         bool   // whether uploads are renamed automatically
	DirNameRule        string // template for generated directory names
	FileNameRule       string // template for generated file names
	IsOriginLinkEnable bool   // whether direct source links are allowed
	Options            string `gorm:"type:text"` // serialized PolicyOption (see OptionsSerialized)
	// Fields ignored by the database.
	OptionsSerialized PolicyOption `gorm:"-"` // decoded form of Options
	MasterID          string       `gorm:"-"` // ID of the master this policy belongs to, if any
}
// PolicyOption holds the non-public, JSON-serialized attributes of a storage
// policy.
type PolicyOption struct {
	// Upyun access token.
	Token string `json:"token"`
	// Allowed file extensions.
	FileType []string `json:"file_type"`
	// Allowed MIME type.
	MimeType string `json:"mimetype"`
	// OauthRedirect is the OAuth redirect URL.
	OauthRedirect string `json:"od_redirect,omitempty"`
	// OdProxy is the OneDrive reverse-proxy address.
	OdProxy string `json:"od_proxy,omitempty"`
	// OdDriver is the OneDrive drive locator.
	OdDriver string `json:"od_driver,omitempty"`
	// Region code of the storage backend.
	Region string `json:"region,omitempty"`
	// ServerSideEndpoint is the endpoint used for server-side requests; when
	// empty, Policy.Server is used instead.
	ServerSideEndpoint string `json:"server_side_endpoint,omitempty"`
	// Chunk size for multipart uploads.
	ChunkSize uint64 `json:"chunk_size,omitempty"`
	// Whether space must be reserved up-front for chunked uploads.
	PlaceholderWithSize bool `json:"placeholder_with_size,omitempty"`
	// Per-second API request limit against the storage backend.
	TPSLimit float64 `json:"tps_limit,omitempty"`
	// Per-second API request burst limit.
	TPSLimitBurst int `json:"tps_limit_burst,omitempty"`
	// Set this to `true` to force the request to use path-style addressing,
	// i.e., `http://s3.amazonaws.com/BUCKET/KEY `
	S3ForcePathStyle bool `json:"s3_path_style"`
	// File extensions that support thumbnail generation using native policy API.
	ThumbExts []string `json:"thumb_exts,omitempty"`
}

View File

@@ -0,0 +1,13 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Setting is the v3 system-setting model: a typed key/value pair.
type Setting struct {
	gorm.Model
	Type  string `gorm:"not null"`                            // setting category
	Name  string `gorm:"unique;not null;index:setting_key"`   // unique setting key
	Value string `gorm:"size:65535"`                          // setting value
}

View File

@@ -0,0 +1,27 @@
package model
import (
"time"
"github.com/jinzhu/gorm"
)
// Share is the v3 share model.
type Share struct {
	gorm.Model
	Password        string     // share password; empty means a public (unencrypted) share
	IsDir           bool       // whether the shared source is a directory
	UserID          uint       // ID of the creating user
	SourceID        uint       // ID of the shared source (file or folder, per IsDir)
	Views           int        // view count
	Downloads       int        // download count
	RemainDownloads int        // remaining download quota; negative means unlimited
	Expires         *time.Time // expiry time; nil means never expires
	PreviewEnabled  bool       // whether direct preview is allowed
	SourceName      string     `gorm:"index:source"` // denormalized name used for searching
	// Fields ignored by the database.
	User   User   `gorm:"PRELOAD:false,association_autoupdate:false"`
	File   File   `gorm:"PRELOAD:false,association_autoupdate:false"`
	Folder Folder `gorm:"PRELOAD:false,association_autoupdate:false"`
}

View File

@@ -0,0 +1,16 @@
package model
import (
"github.com/jinzhu/gorm"
)
// SourceLink represent a shared file source link
type SourceLink struct {
	gorm.Model
	FileID    uint   // corresponding file ID
	Name      string // name of the file while creating the source link, for annotation
	Downloads int    // download count
	// Associated models.
	// NOTE(review): the tag value "save_associations:false:false" has a
	// doubled ":false" — gorm tolerates it, but it looks like a typo for
	// "save_associations:false"; kept byte-identical here.
	File File `gorm:"save_associations:false:false"`
}

View File

@@ -0,0 +1,23 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Tag is a user-defined tag.
type Tag struct {
	gorm.Model
	Name       string // tag name
	Icon       string // icon identifier
	Color      string // icon color
	Type       int    // tag kind: file category or directory shortcut (see consts below)
	Expression string `gorm:"type:text"` // search expression (FileTagType) or direct path (DirectoryLinkType)
	UserID     uint   // ID of the creating user
}

const (
	// FileTagType is a file-category tag.
	FileTagType = iota
	// DirectoryLinkType is a directory-shortcut tag.
	DirectoryLinkType
)

View File

@@ -0,0 +1,16 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Task is the v3 background-task model.
type Task struct {
	gorm.Model
	Status   int    // task status
	Type     int    // task type
	UserID   uint   // ID of the initiating user; 0 means system-initiated
	Progress int    // progress indicator
	Error    string `gorm:"type:text"` // error message, if the task failed
	Props    string `gorm:"type:text"` // serialized task properties
}

View File

@@ -0,0 +1,45 @@
package model
import (
"github.com/jinzhu/gorm"
)
// User account status values (note: "Activicated"/"Baned" misspellings are
// part of the v3 API surface and must be kept).
const (
	// Active is a normal, usable account.
	Active = iota
	// NotActivicated is an account that has not been activated yet.
	NotActivicated
	// Baned is a manually banned account.
	Baned
	// OveruseBaned is an account banned for exceeding its quota.
	OveruseBaned
)
// User is the v3 user model.
type User struct {
	// Table columns.
	gorm.Model
	Email     string `gorm:"type:varchar(100);unique_index"` // login email, unique
	Nick      string `gorm:"size:50"`                        // display nickname
	Password  string `json:"-"`                              // hashed password, never serialized
	Status    int    // account status (see Active / NotActivicated / ...)
	GroupID   uint   // user group ID
	Storage   uint64 // storage used, in bytes
	TwoFactor string // TOTP secret; empty when 2FA is disabled
	Avatar    string // avatar identifier
	Options   string `json:"-" gorm:"size:4294967295"` // serialized UserOption (see OptionsSerialized)
	Authn     string `gorm:"size:4294967295"`          // serialized WebAuthn credentials
	// Associated models.
	Group  Group  `gorm:"save_associations:false:false"`
	Policy Policy `gorm:"PRELOAD:false,association_autoupdate:false"`
	// Fields ignored by the database.
	OptionsSerialized UserOption `gorm:"-"` // decoded form of Options
}
// UserOption holds a user's personal preference settings.
type UserOption struct {
	ProfileOff     bool   `json:"profile_off,omitempty"`     // hide the public profile page
	PreferredTheme string `json:"preferred_theme,omitempty"` // preferred UI theme
}

View File

@@ -0,0 +1,16 @@
package model
import (
"github.com/jinzhu/gorm"
)
// Webdav is a v3 WebDAV application account.
type Webdav struct {
	gorm.Model
	Name     string // application name
	Password string `gorm:"unique_index:password_only_on"` // application password, unique per user
	UserID   uint   `gorm:"unique_index:password_only_on"` // owning user's ID
	Root     string `gorm:"type:text"`                     // root directory exposed to this account
	Readonly bool   `gorm:"type:bool"`                     // whether the account is read-only
	UseProxy bool   `gorm:"type:bool"`                     // whether downloads are reverse-proxied
}

View File

@@ -0,0 +1,89 @@
package migrator
import (
"context"
"encoding/json"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent/node"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// migrateNode copies every v3 node row into the v4 schema, translating the
// v3 type/status enums, deriving v4 capability flags, and packing the v3
// aria2 configuration into the v4 NodeSetting. Raw IDs and timestamps are
// preserved. Returns on the first failed row.
func (m *Migrator) migrateNode() error {
	m.l.Info("Migrating nodes...")
	var nodes []model.Node
	if err := model.DB.Find(&nodes).Error; err != nil {
		return fmt.Errorf("failed to list v3 nodes: %w", err)
	}
	for _, n := range nodes {
		// Default to a suspended slave; upgrade below from the v3 fields.
		nodeType := node.TypeSlave
		nodeStatus := node.StatusSuspended
		if n.Type == model.MasterNodeType {
			nodeType = node.TypeMaster
		}
		if n.Status == model.NodeActive {
			nodeStatus = node.StatusActive
		}
		// caps: renamed from "cap", which shadowed the builtin cap().
		caps := &boolset.BooleanSet{}
		settings := &types.NodeSetting{
			Provider: types.DownloaderProviderAria2,
		}
		if n.Aria2Enabled {
			boolset.Sets(map[types.NodeCapability]bool{
				types.NodeCapabilityRemoteDownload: true,
			}, caps)
			aria2Options := &model.Aria2Option{}
			if err := json.Unmarshal([]byte(n.Aria2Options), aria2Options); err != nil {
				return fmt.Errorf("failed to unmarshal aria2 options: %w", err)
			}
			// The v3 "options" field is itself a JSON object of extra
			// downloader flags; decode it into a generic map.
			downloaderOptions := map[string]any{}
			if aria2Options.Options != "" {
				if err := json.Unmarshal([]byte(aria2Options.Options), &downloaderOptions); err != nil {
					return fmt.Errorf("failed to unmarshal aria2 options: %w", err)
				}
			}
			settings.Aria2Setting = &types.Aria2Setting{
				Server:   aria2Options.Server,
				Token:    aria2Options.Token,
				Options:  downloaderOptions,
				TempPath: aria2Options.TempPath,
			}
		}
		if n.Type == model.MasterNodeType {
			// Master nodes get archive create/extract capabilities in v4.
			boolset.Sets(map[types.NodeCapability]bool{
				types.NodeCapabilityExtractArchive: true,
				types.NodeCapabilityCreateArchive:  true,
			}, caps)
		}
		stm := m.v4client.Node.Create().
			SetRawID(int(n.ID)).
			SetCreatedAt(formatTime(n.CreatedAt)).
			SetUpdatedAt(formatTime(n.UpdatedAt)).
			SetName(n.Name).
			SetType(nodeType).
			SetStatus(nodeStatus).
			SetServer(n.Server).
			SetSlaveKey(n.SlaveKey).
			SetCapabilities(caps).
			SetSettings(settings).
			SetWeight(n.Rank)
		if err := stm.Exec(context.Background()); err != nil {
			return fmt.Errorf("failed to create node %q: %w", n.Name, err)
		}
	}
	return nil
}

View File

@@ -0,0 +1,196 @@
package migrator
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent/node"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/samber/lo"
)
// migratePolicy copies every v3 storage policy into the v4 schema and returns
// the set of migrated policy IDs (also recorded in m.state). Remote policies
// additionally get a companion slave node. All writes run inside a single
// transaction which is rolled back on any failure — the previous version
// returned from the per-policy error paths without rolling back, leaking the
// open transaction.
func (m *Migrator) migratePolicy() (map[int]bool, error) {
	m.l.Info("Migrating storage policies...")
	var policies []model.Policy
	if err := model.DB.Find(&policies).Error; err != nil {
		return nil, fmt.Errorf("failed to list v3 storage policies: %w", err)
	}
	if m.state.LocalPolicyIDs == nil {
		m.state.LocalPolicyIDs = make(map[int]bool)
	}
	if m.state.PolicyIDs == nil {
		m.state.PolicyIDs = make(map[int]bool)
	}
	m.l.Info("Found %d v3 storage policies to be migrated.", len(policies))
	// Read the v3 thumbnail-proxy settings so the per-policy flag carries over.
	var (
		thumbProxySettings []model.Setting
		thumbProxyEnabled  bool
		thumbProxyPolicy   []int
	)
	if err := model.DB.Where("name in (?)", []string{"thumb_proxy_enabled", "thumb_proxy_policy"}).Find(&thumbProxySettings).Error; err != nil {
		m.l.Warning("Failed to list v3 thumb proxy settings: %w", err)
	}
	tx, err := m.v4client.Tx(context.Background())
	if err != nil {
		return nil, fmt.Errorf("failed to start transaction: %w", err)
	}
	for _, s := range thumbProxySettings {
		if s.Name == "thumb_proxy_enabled" {
			thumbProxyEnabled = setting.IsTrueValue(s.Value)
		} else if s.Name == "thumb_proxy_policy" {
			if err := json.Unmarshal([]byte(s.Value), &thumbProxyPolicy); err != nil {
				m.l.Warning("Failed to unmarshal v3 thumb proxy policy: %w", err)
			}
		}
	}
	for _, policy := range policies {
		m.l.Info("Migrating storage policy %q...", policy.Name)
		if err := json.Unmarshal([]byte(policy.Options), &policy.OptionsSerialized); err != nil {
			_ = tx.Rollback()
			return nil, fmt.Errorf("failed to unmarshal options for policy %q: %w", policy.Name, err)
		}
		settings := &types.PolicySetting{
			Token:              policy.OptionsSerialized.Token,
			FileType:           policy.OptionsSerialized.FileType,
			OauthRedirect:      policy.OptionsSerialized.OauthRedirect,
			OdDriver:           policy.OptionsSerialized.OdDriver,
			Region:             policy.OptionsSerialized.Region,
			ServerSideEndpoint: policy.OptionsSerialized.ServerSideEndpoint,
			ChunkSize:          int64(policy.OptionsSerialized.ChunkSize),
			TPSLimit:           policy.OptionsSerialized.TPSLimit,
			TPSLimitBurst:      policy.OptionsSerialized.TPSLimitBurst,
			S3ForcePathStyle:   policy.OptionsSerialized.S3ForcePathStyle,
			ThumbExts:          policy.OptionsSerialized.ThumbExts,
		}
		// Thumbnail support: OneDrive can thumbnail anything; other cloud
		// backends get their provider's fixed extension list.
		if policy.Type == types.PolicyTypeOd {
			settings.ThumbSupportAllExts = true
		} else {
			switch policy.Type {
			case types.PolicyTypeCos:
				settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "heif", "heic"}
			case types.PolicyTypeOss:
				settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "heic", "tiff", "avif"}
			case types.PolicyTypeUpyun:
				settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "svg"}
			case types.PolicyTypeQiniu:
				settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "tiff", "avif", "psd"}
			case types.PolicyTypeRemote:
				settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif"}
			}
		}
		// Carry over a custom download proxy, if one was configured.
		if policy.Type != types.PolicyTypeOd && policy.BaseURL != "" {
			settings.CustomProxy = true
			settings.ProxyServer = policy.BaseURL
		} else if policy.OptionsSerialized.OdProxy != "" {
			settings.CustomProxy = true
			settings.ProxyServer = policy.OptionsSerialized.OdProxy
		}
		if policy.Type == types.PolicyTypeCos {
			settings.ChunkSize = 1024 * 1024 * 25
		}
		if thumbProxyEnabled && lo.Contains(thumbProxyPolicy, int(policy.ID)) {
			settings.ThumbGeneratorProxy = true
		}
		// v4 requires a random element in generated paths/names; fall back to
		// safe defaults when the v3 rules contain none.
		mustContain := []string{"{randomkey16}", "{randomkey8}", "{uuid}"}
		hasRandomElement := false
		for _, c := range mustContain {
			if strings.Contains(policy.FileNameRule, c) {
				hasRandomElement = true
				break
			}
			if strings.Contains(policy.DirNameRule, c) {
				hasRandomElement = true
				break
			}
		}
		if !hasRandomElement {
			if policy.DirNameRule == "" {
				policy.DirNameRule = "uploads/{uid}/{path}"
			}
			policy.FileNameRule = "{uid}_{randomkey8}_{originname}"
			m.l.Warning("Storage policy %q has no random element in file name rule, using default file name rule.", policy.Name)
		}
		stm := tx.StoragePolicy.Create().
			SetRawID(int(policy.ID)).
			SetCreatedAt(formatTime(policy.CreatedAt)).
			SetUpdatedAt(formatTime(policy.UpdatedAt)).
			SetName(policy.Name).
			SetType(policy.Type).
			SetServer(policy.Server).
			SetBucketName(policy.BucketName).
			SetIsPrivate(policy.IsPrivate).
			SetAccessKey(policy.AccessKey).
			SetSecretKey(policy.SecretKey).
			SetMaxSize(int64(policy.MaxSize)).
			SetDirNameRule(policy.DirNameRule).
			SetFileNameRule(policy.FileNameRule).
			SetSettings(settings)
		if policy.Type == types.PolicyTypeRemote {
			m.l.Info("Storage policy %q is remote, creating node for it...", policy.Name)
			bs := &boolset.BooleanSet{}
			n, err := tx.Node.Create().
				SetName(policy.Name).
				SetStatus(node.StatusActive).
				SetServer(policy.Server).
				SetSlaveKey(policy.SecretKey).
				SetType(node.TypeSlave).
				SetCapabilities(bs).
				SetSettings(&types.NodeSetting{
					Provider: types.DownloaderProviderAria2,
				}).
				Save(context.Background())
			if err != nil {
				_ = tx.Rollback()
				return nil, fmt.Errorf("failed to create node for storage policy %q: %w", policy.Name, err)
			}
			stm.SetNodeID(n.ID)
		}
		if _, err := stm.Save(context.Background()); err != nil {
			_ = tx.Rollback()
			return nil, fmt.Errorf("failed to create storage policy %q: %w", policy.Name, err)
		}
		m.state.PolicyIDs[int(policy.ID)] = true
		if policy.Type == types.PolicyTypeLocal {
			m.state.LocalPolicyIDs[int(policy.ID)] = true
		}
	}
	if err := tx.Commit(); err != nil {
		return nil, fmt.Errorf("failed to commit transaction: %w", err)
	}
	if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
		// Postgres sequences don't follow explicit raw IDs; bump them so
		// future inserts don't collide with migrated rows.
		m.l.Info("Resetting storage policy ID sequence for postgres...")
		m.v4client.StoragePolicy.ExecContext(context.Background(), "SELECT SETVAL('storage_policies_id_seq', (SELECT MAX(id) FROM storage_policies))")
		m.l.Info("Resetting node ID sequence for postgres...")
		m.v4client.Node.ExecContext(context.Background(), "SELECT SETVAL('nodes_id_seq', (SELECT MAX(id) FROM nodes))")
	}
	return m.state.PolicyIDs, nil
}

View File

@@ -0,0 +1,213 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/conf"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
)
// TODO:
// 1. Policy thumb proxy migration
type (
	// settignMigrator transforms one v3 setting pair into zero or more v4
	// pairs. allSettings carries the complete v3 key/value map so a migrator
	// can consult other keys. (Name is a typo for "settingMigrator", kept to
	// avoid breaking references elsewhere in the package.)
	settignMigrator func(allSettings map[string]string, name, value string) ([]settingMigrated, error)
	// settingMigrated is a single v4 setting pair produced by a migrator.
	settingMigrated struct {
		name  string
		value string
	}
	// PackProduct is a v3 purchasable capacity-pack product.
	PackProduct struct {
		ID    int64  `json:"id"`
		Name  string `json:"name"`
		Size  uint64 `json:"size"`
		Time  int64  `json:"time"`
		Price int    `json:"price"`
		Score int    `json:"score"`
	}
	// GroupProducts is a v3 purchasable membership-group product.
	GroupProducts struct {
		ID        int64    `json:"id"`
		Name      string   `json:"name"`
		GroupID   uint     `json:"group_id"`
		Time      int64    `json:"time"`
		Price     int      `json:"price"`
		Score     int      `json:"score"`
		Des       []string `json:"des"`
		Highlight bool     `json:"highlight"`
	}
)

// noopMigrator drops a v3 setting that has no v4 counterpart.
var noopMigrator = func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
	return nil, nil
}
// migrators maps a v3 setting key to its migration behavior. Keys mapped to
// noopMigrator are dropped (no v4 equivalent); other entries rename the key
// or fan one v3 value out into several v4 settings. Any v3 key absent from
// this map is carried over unchanged by migrateSettings.
var migrators = map[string]settignMigrator{
	"siteKeywords":                   noopMigrator,
	"over_used_template":             noopMigrator,
	"download_timeout":               noopMigrator,
	"preview_timeout":                noopMigrator,
	"doc_preview_timeout":            noopMigrator,
	"slave_node_retry":               noopMigrator,
	"slave_ping_interval":            noopMigrator,
	"slave_recover_interval":         noopMigrator,
	"slave_transfer_timeout":         noopMigrator,
	"onedrive_monitor_timeout":       noopMigrator,
	"onedrive_source_timeout":        noopMigrator,
	"share_download_session_timeout": noopMigrator,
	"onedrive_callback_check":        noopMigrator,
	"mail_activation_template":       noopMigrator,
	"mail_reset_pwd_template":        noopMigrator,
	"appid":                          noopMigrator,
	"appkey":                         noopMigrator,
	"wechat_enabled":                 noopMigrator,
	"wechat_appid":                   noopMigrator,
	"wechat_mchid":                   noopMigrator,
	"wechat_serial_no":               noopMigrator,
	"wechat_api_key":                 noopMigrator,
	"wechat_pk_content":              noopMigrator,
	"hot_share_num":                  noopMigrator,
	"defaultTheme":                   noopMigrator,
	"theme_options":                  noopMigrator,
	"max_worker_num":                 noopMigrator,
	"max_parallel_transfer":          noopMigrator,
	"secret_key":                     noopMigrator,
	"avatar_size_m":                  noopMigrator,
	"avatar_size_s":                  noopMigrator,
	"home_view_method":               noopMigrator,
	"share_view_method":              noopMigrator,
	"cron_recycle_upload_session":    noopMigrator,
	// v4 has no tcaptcha provider; downgrade it to the "normal" captcha.
	"captcha_type": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
		if value == "tcaptcha" {
			value = "normal"
		}
		return []settingMigrated{
			{
				name:  "captcha_type",
				value: value,
			},
		}, nil
	},
	"captcha_TCaptcha_CaptchaAppId": noopMigrator,
	"captcha_TCaptcha_AppSecretKey": noopMigrator,
	"captcha_TCaptcha_SecretId":     noopMigrator,
	"captcha_TCaptcha_SecretKey":    noopMigrator,
	// Renamed key in v4.
	"thumb_file_suffix": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
		return []settingMigrated{
			{
				name:  "thumb_entity_suffix",
				value: value,
			},
		}, nil
	},
	// One v3 global thumbnail size limit fans out to the per-generator
	// limits introduced in v4.
	"thumb_max_src_size": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
		return []settingMigrated{
			{
				name:  "thumb_music_cover_max_size",
				value: value,
			},
			{
				name:  "thumb_libreoffice_max_size",
				value: value,
			},
			{
				name:  "thumb_ffmpeg_max_size",
				value: value,
			},
			{
				name:  "thumb_vips_max_size",
				value: value,
			},
			{
				name:  "thumb_builtin_max_size",
				value: value,
			},
		}, nil
	},
	"initial_files":          noopMigrator,
	"office_preview_service": noopMigrator,
	"phone_required":         noopMigrator,
	"phone_enabled":          noopMigrator,
	// Renamed key in v4.
	"wopi_session_timeout": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
		return []settingMigrated{
			{
				name:  "viewer_session_timeout",
				value: value,
			},
		}, nil
	},
	"custom_payment_enabled":  noopMigrator,
	"custom_payment_endpoint": noopMigrator,
	"custom_payment_secret":   noopMigrator,
	"custom_payment_name":     noopMigrator,
}
// migrateSettings copies v3 key/value settings into v4, renaming, dropping or
// fanning out keys via the migrators table, and seeds hash_id_salt from the
// v3 configuration file. All inserts run in a single transaction.
func (m *Migrator) migrateSettings() error {
	m.l.Info("Migrating settings...")
	// 1. List all settings
	var settings []model.Setting
	if err := model.DB.Find(&settings).Error; err != nil {
		return fmt.Errorf("failed to list v3 settings: %w", err)
	}
	m.l.Info("Found %d v3 setting pairs to be migrated.", len(settings))
	allSettings := make(map[string]string)
	for _, s := range settings {
		allSettings[s.Name] = s.Value
	}
	migratedSettings := make([]settingMigrated, 0)
	for _, s := range settings {
		// Stash values other migration steps consume later.
		if s.Name == "thumb_file_suffix" {
			m.state.ThumbSuffix = s.Value
		}
		if s.Name == "avatar_path" {
			m.state.V3AvatarPath = s.Value
		}
		migrator, ok := migrators[s.Name]
		if ok {
			newSettings, err := migrator(allSettings, s.Name, s.Value)
			if err != nil {
				return fmt.Errorf("failed to migrate setting %q: %w", s.Name, err)
			}
			migratedSettings = append(migratedSettings, newSettings...)
		} else {
			// Unknown keys are carried over unchanged.
			migratedSettings = append(migratedSettings, settingMigrated{
				name:  s.Name,
				value: s.Value,
			})
		}
	}
	// Validate the salt BEFORE opening a transaction: the previous version
	// returned from inside the transaction on this path without rolling back,
	// leaking the open transaction.
	if conf.SystemConfig.HashIDSalt == "" {
		return fmt.Errorf("hash ID salt is not set, please set it from v3 conf file")
	}
	tx, err := m.v4client.Tx(context.Background())
	if err != nil {
		return fmt.Errorf("failed to start transaction: %w", err)
	}
	// Insert hash_id_salt
	if err := tx.Setting.Create().SetName("hash_id_salt").SetValue(conf.SystemConfig.HashIDSalt).Exec(context.Background()); err != nil {
		if err := tx.Rollback(); err != nil {
			return fmt.Errorf("failed to rollback transaction: %w", err)
		}
		return fmt.Errorf("failed to create setting hash_id_salt: %w", err)
	}
	for _, s := range migratedSettings {
		if err := tx.Setting.Create().SetName(s.name).SetValue(s.value).Exec(context.Background()); err != nil {
			if err := tx.Rollback(); err != nil {
				return fmt.Errorf("failed to rollback transaction: %w", err)
			}
			return fmt.Errorf("failed to create setting %q: %w", s.name, err)
		}
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("failed to commit transaction: %w", err)
	}
	return nil
}

View File

@@ -0,0 +1,102 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
)
// migrateShare copies v3 shares into v4 in batches of 1000, resuming from
// m.state.ShareOffset. Shares whose source file or owner no longer exists are
// skipped with a warning. Each batch is one transaction; state is persisted
// after every committed batch so an interrupted run can resume.
func (m *Migrator) migrateShare() error {
	m.l.Info("Migrating shares...")
	batchSize := 1000
	offset := m.state.ShareOffset
	ctx := context.Background()
	if offset > 0 {
		m.l.Info("Resuming share migration from offset %d", offset)
	}
	for {
		m.l.Info("Migrating shares with offset %d", offset)
		var shares []model.Share
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&shares).Error; err != nil {
			return fmt.Errorf("failed to list v3 shares: %w", err)
		}
		if len(shares) == 0 {
			if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
				m.l.Info("Resetting share ID sequence for postgres...")
				m.v4client.Share.ExecContext(ctx, "SELECT SETVAL('shares_id_seq', (SELECT MAX(id) FROM shares))")
			}
			break
		}
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// tx is nil when Tx fails; the previous version called
			// tx.Rollback() here, which would panic with a nil pointer.
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		for _, s := range shares {
			// In v4 files and folders share one table; migrated file IDs were
			// shifted past the last folder ID, so shift file-backed shares too.
			sourceId := int(s.SourceID)
			if !s.IsDir {
				sourceId += m.state.LastFolderID
			}
			// check if file exists
			_, err = tx.File.Query().Where(file.ID(sourceId)).First(ctx)
			if err != nil {
				m.l.Warning("File %d not found, skipping share %d", sourceId, s.ID)
				continue
			}
			// check if user exist
			if _, ok := m.state.UserIDs[int(s.UserID)]; !ok {
				m.l.Warning("User %d not found, skipping share %d", s.UserID, s.ID)
				continue
			}
			stm := tx.Share.Create().
				SetCreatedAt(formatTime(s.CreatedAt)).
				SetUpdatedAt(formatTime(s.UpdatedAt)).
				SetViews(s.Views).
				SetRawID(int(s.ID)).
				SetDownloads(s.Downloads).
				SetFileID(sourceId).
				SetUserID(int(s.UserID))
			if s.Password != "" {
				stm.SetPassword(s.Password)
			}
			if s.Expires != nil {
				stm.SetNillableExpires(s.Expires)
			}
			// Negative v3 quota means unlimited; leave the v4 field unset.
			if s.RemainDownloads >= 0 {
				stm.SetRemainDownloads(s.RemainDownloads)
			}
			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to create share %d: %w", s.ID, err)
			}
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		offset += batchSize
		m.state.ShareOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after share batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing this batch")
		}
	}
	return nil
}

View File

@@ -0,0 +1,109 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
)
// migrateUser copies v3 users into v4 in batches of 1000, resuming from
// m.state.UserOffset, translating the v3 status enum and recording each
// migrated ID in m.state.UserIDs (consulted by later migration steps).
// Each batch is one transaction; state is saved after every committed batch.
func (m *Migrator) migrateUser() error {
	m.l.Info("Migrating users...")
	batchSize := 1000
	// Start from the saved offset if available
	offset := m.state.UserOffset
	ctx := context.Background()
	if m.state.UserIDs == nil {
		m.state.UserIDs = make(map[int]bool)
	}
	// If we're resuming, load existing user IDs
	if len(m.state.UserIDs) > 0 {
		m.l.Info("Resuming user migration from offset %d, %d users already migrated", offset, len(m.state.UserIDs))
	}
	for {
		m.l.Info("Migrating users with offset %d", offset)
		var users []model.User
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&users).Error; err != nil {
			return fmt.Errorf("failed to list v3 users: %w", err)
		}
		if len(users) == 0 {
			if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
				m.l.Info("Resetting user ID sequence for postgres...")
				m.v4client.User.ExecContext(ctx, "SELECT SETVAL('users_id_seq', (SELECT MAX(id) FROM users))")
			}
			break
		}
		// Use the shared ctx (was context.Background(), inconsistent with the
		// sibling migrators).
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// tx is nil when Tx fails; the previous version called
			// tx.Rollback() here, which would panic with a nil pointer.
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		for _, u := range users {
			// Map the v3 status enum onto v4 user statuses.
			userStatus := user.StatusActive
			switch u.Status {
			case model.Active:
				userStatus = user.StatusActive
			case model.NotActivicated:
				userStatus = user.StatusInactive
			case model.Baned:
				userStatus = user.StatusManualBanned
			case model.OveruseBaned:
				userStatus = user.StatusSysBanned
			}
			setting := &types.UserSetting{
				VersionRetention:    true,
				VersionRetentionMax: 10,
			}
			stm := tx.User.Create().
				SetRawID(int(u.ID)).
				SetCreatedAt(formatTime(u.CreatedAt)).
				SetUpdatedAt(formatTime(u.UpdatedAt)).
				SetEmail(u.Email).
				SetNick(u.Nick).
				SetStatus(userStatus).
				SetStorage(int64(u.Storage)).
				SetGroupID(int(u.GroupID)).
				SetSettings(setting).
				SetPassword(u.Password)
			if u.TwoFactor != "" {
				stm.SetTwoFactorSecret(u.TwoFactor)
			}
			if u.Avatar != "" {
				stm.SetAvatar(u.Avatar)
			}
			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to create user %d: %w", u.ID, err)
			}
			m.state.UserIDs[int(u.ID)] = true
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		// Update the offset in state and save after each batch
		offset += batchSize
		m.state.UserOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after user batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing %d users", offset)
		}
	}
	return nil
}

View File

@@ -0,0 +1,93 @@
package migrator
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
)
// migrateWebdav copies v3 WebDAV application accounts into v4 DavAccounts in
// batches of 1000, resuming from m.state.WebdavOffset. Accounts whose owner
// was not migrated are skipped with a warning. The v3 Readonly/UseProxy
// booleans are folded into the v4 option bitset. Each batch is one
// transaction; state is saved after every committed batch.
func (m *Migrator) migrateWebdav() error {
	m.l.Info("Migrating webdav accounts...")
	batchSize := 1000
	offset := m.state.WebdavOffset
	ctx := context.Background()
	if m.state.WebdavOffset > 0 {
		m.l.Info("Resuming webdav migration from offset %d", offset)
	}
	for {
		m.l.Info("Migrating webdav accounts with offset %d", offset)
		var webdavAccounts []model.Webdav
		if err := model.DB.Limit(batchSize).Offset(offset).Find(&webdavAccounts).Error; err != nil {
			return fmt.Errorf("failed to list v3 webdav accounts: %w", err)
		}
		if len(webdavAccounts) == 0 {
			if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
				m.l.Info("Resetting webdav account ID sequence for postgres...")
				m.v4client.DavAccount.ExecContext(ctx, "SELECT SETVAL('dav_accounts_id_seq', (SELECT MAX(id) FROM dav_accounts))")
			}
			break
		}
		tx, err := m.v4client.Tx(ctx)
		if err != nil {
			// tx is nil when Tx fails; the previous version called
			// tx.Rollback() here, which would panic with a nil pointer.
			return fmt.Errorf("failed to start transaction: %w", err)
		}
		for _, webdavAccount := range webdavAccounts {
			if _, ok := m.state.UserIDs[int(webdavAccount.UserID)]; !ok {
				m.l.Warning("User %d not found, skipping webdav account %d", webdavAccount.UserID, webdavAccount.ID)
				continue
			}
			props := types.DavAccountProps{}
			options := boolset.BooleanSet{}
			if webdavAccount.Readonly {
				boolset.Set(int(types.DavAccountReadOnly), true, &options)
			}
			if webdavAccount.UseProxy {
				boolset.Set(int(types.DavAccountProxy), true, &options)
			}
			stm := tx.DavAccount.Create().
				SetCreatedAt(formatTime(webdavAccount.CreatedAt)).
				SetUpdatedAt(formatTime(webdavAccount.UpdatedAt)).
				SetRawID(int(webdavAccount.ID)).
				SetName(webdavAccount.Name).
				SetURI("cloudreve://my" + webdavAccount.Root).
				SetPassword(webdavAccount.Password).
				SetProps(&props).
				SetOptions(&options).
				SetOwnerID(int(webdavAccount.UserID))
			if _, err := stm.Save(ctx); err != nil {
				_ = tx.Rollback()
				return fmt.Errorf("failed to create webdav account %d: %w", webdavAccount.ID, err)
			}
		}
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("failed to commit transaction: %w", err)
		}
		offset += batchSize
		m.state.WebdavOffset = offset
		if err := m.saveState(); err != nil {
			m.l.Warning("Failed to save state after webdav batch: %s", err)
		} else {
			m.l.Info("Saved migration state after processing this batch")
		}
	}
	return nil
}

View File

@@ -0,0 +1,433 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package embed provides access to files embedded in the running Go program.
//
// Go source files that import "embed" can use the //go:embed directive
// to initialize a variable of type string, []byte, or FS with the contents of
// files read from the package directory or subdirectories at compile time.
//
// For example, here are three ways to embed a file named hello.txt
// and then print its contents at run time.
//
// Embedding one file into a string:
//
// import _ "embed"
//
// //go:embed hello.txt
// var s string
// print(s)
//
// Embedding one file into a slice of bytes:
//
// import _ "embed"
//
// //go:embed hello.txt
// var b []byte
// print(string(b))
//
// Embedded one or more files into a file system:
//
// import "embed"
//
// //go:embed hello.txt
// var f embed.FS
// data, _ := f.ReadFile("hello.txt")
// print(string(data))
//
// # Directives
//
// A //go:embed directive above a variable declaration specifies which files to embed,
// using one or more path.Match patterns.
//
// The directive must immediately precede a line containing the declaration of a single variable.
// Only blank lines and // line comments are permitted between the directive and the declaration.
//
// The type of the variable must be a string type, or a slice of a byte type,
// or FS (or an alias of FS).
//
// For example:
//
// package server
//
// import "embed"
//
// // content holds our static web server content.
// //go:embed image/* template/*
// //go:embed html/index.html
// var content embed.FS
//
// The Go build system will recognize the directives and arrange for the declared variable
// (in the example above, content) to be populated with the matching files from the file system.
//
// The //go:embed directive accepts multiple space-separated patterns for
// brevity, but it can also be repeated, to avoid very long lines when there are
// many patterns. The patterns are interpreted relative to the package directory
// containing the source file. The path separator is a forward slash, even on
// Windows systems. Patterns may not contain . or .. or empty path elements,
// nor may they begin or end with a slash. To match everything in the current
// directory, use * instead of .. To allow for naming files with spaces in
// their names, patterns can be written as Go double-quoted or back-quoted
// string literals.
//
// If a pattern names a directory, all files in the subtree rooted at that directory are
// embedded (recursively), except that files with names beginning with . or _
// are excluded. So the variable in the above example is almost equivalent to:
//
// // content is our static web server content.
// //go:embed image template html/index.html
// var content embed.FS
//
// The difference is that image/* embeds image/.tempfile while image does not.
// Neither embeds image/dir/.tempfile.
//
// If a pattern begins with the prefix all:, then the rule for walking directories is changed
// to include those files beginning with . or _. For example, all:image embeds
// both image/.tempfile and image/dir/.tempfile.
//
// The //go:embed directive can be used with both exported and unexported variables,
// depending on whether the package wants to make the data available to other packages.
// It can only be used with variables at package scope, not with local variables.
//
// Patterns must not match files outside the package's module, such as .git/* or symbolic links.
// Patterns must not match files whose names include the special punctuation characters " * < > ? ` ' | / \ and :.
// Matches for empty directories are ignored. After that, each pattern in a //go:embed line
// must match at least one file or non-empty directory.
//
// If any patterns are invalid or have invalid matches, the build will fail.
//
// # Strings and Bytes
//
// The //go:embed line for a variable of type string or []byte can have only a single pattern,
// and that pattern can match only a single file. The string or []byte is initialized with
// the contents of that file.
//
// The //go:embed directive requires importing "embed", even when using a string or []byte.
// In source files that don't refer to embed.FS, use a blank import (import _ "embed").
//
// # File Systems
//
// For embedding a single file, a variable of type string or []byte is often best.
// The FS type enables embedding a tree of files, such as a directory of static
// web server content, as in the example above.
//
// FS implements the io/fs package's FS interface, so it can be used with any package that
// understands file systems, including net/http, text/template, and html/template.
//
// For example, given the content variable in the example above, we can write:
//
// http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.FS(content))))
//
// template.ParseFS(content, "*.tmpl")
//
// # Tools
//
// To support tools that analyze Go packages, the patterns found in //go:embed lines
// are available in “go list” output. See the EmbedPatterns, TestEmbedPatterns,
// and XTestEmbedPatterns fields in the “go help list” output.
package statics
import (
"errors"
"io"
"io/fs"
"time"
)
// An FS is a read-only collection of files, usually initialized with a //go:embed directive.
// When declared without a //go:embed directive, an FS is an empty file system.
//
// An FS is a read-only value, so it is safe to use from multiple goroutines
// simultaneously and also safe to assign values of type FS to each other.
//
// FS implements fs.FS, so it can be used with any package that understands
// file system interfaces, including net/http, text/template, and html/template.
//
// See the package documentation for more details about initializing an FS.
type FS struct {
	// files is populated by NewStaticFS in this package (unlike the real
	// embed package, where the compiler fills it in).
	//
	// The files list is sorted by name but not by simple string comparison.
	// Instead, each file's name takes the form "dir/elem" or "dir/elem/".
	// The optional trailing slash indicates that the file is itself a directory.
	// The files list is sorted first by dir (if dir is missing, it is taken to be ".")
	// and then by base, so this list of files:
	//
	//	p
	//	q/
	//	q/r
	//	q/s/
	//	q/s/t
	//	q/s/u
	//	q/v
	//	w
	//
	// is actually sorted as:
	//
	//	p     # dir=. elem=p
	//	q/    # dir=. elem=q
	//	w     # dir=. elem=w
	//	q/r   # dir=q elem=r
	//	q/s/  # dir=q elem=s
	//	q/v   # dir=q elem=v
	//	q/s/t # dir=q/s elem=t
	//	q/s/u # dir=q/s elem=u
	//
	// This order brings directory contents together in contiguous sections
	// of the list, allowing a directory read to use binary search to find
	// the relevant sequence of entries.
	files *[]file
}
// split breaks name into its directory and final element, following the
// "dir/elem" / "dir/elem/" encoding described in the FS struct comment.
// isDir reports whether the trailing slash (directory marker) was present.
func split(name string) (dir, elem string, isDir bool) {
	isDir = name[len(name)-1] == '/'
	if isDir {
		name = name[:len(name)-1]
	}
	// Scan backwards for the last slash separating dir from elem.
	for i := len(name) - 1; i >= 0; i-- {
		if name[i] == '/' {
			return name[:i], name[i+1:], isDir
		}
	}
	// No slash: the entry lives in the root directory ".".
	return ".", name, isDir
}
// trimSlash strips one trailing slash from name, if any, returning the
// possibly shortened name.
func trimSlash(name string) string {
	if n := len(name); n > 0 && name[n-1] == '/' {
		return name[:n-1]
	}
	return name
}
// Compile-time checks that FS provides the optimized directory- and
// file-reading interfaces from io/fs.
var (
	_ fs.ReadDirFS  = FS{}
	_ fs.ReadFileFS = FS{}
)
// A file is a single file in the FS.
// It implements fs.FileInfo and fs.DirEntry.
type file struct {
	// name uses the "dir/elem" / "dir/elem/" encoding described on FS.files;
	// a trailing slash marks a directory entry.
	name string
	// data holds the complete file contents.
	data string
	hash [16]byte // truncated SHA256 hash
	// modTime is reported by ModTime; NewStaticFS sets it to the binary's
	// build time (see getBuildTime).
	modTime time.Time
}

// Compile-time checks that *file satisfies both metadata interfaces.
var (
	_ fs.FileInfo = (*file)(nil)
	_ fs.DirEntry = (*file)(nil)
)

// Name returns the base element of the file's path.
func (f *file) Name() string { _, elem, _ := split(f.name); return elem }

// Size returns the length of the file contents in bytes.
func (f *file) Size() int64 { return int64(len(f.data)) }

// ModTime returns the recorded modification time.
func (f *file) ModTime() time.Time { return f.modTime }

// IsDir reports whether the entry is a directory (name ends in '/').
func (f *file) IsDir() bool { _, _, isDir := split(f.name); return isDir }

// Sys returns nil; there is no underlying OS data source.
func (f *file) Sys() any { return nil }

// Type returns the type bits of the file's mode.
func (f *file) Type() fs.FileMode { return f.Mode().Type() }

// Info returns the file itself, which implements fs.FileInfo.
func (f *file) Info() (fs.FileInfo, error) { return f, nil }

// Mode returns a read-only mode: dir|0555 for directories, 0444 for files.
func (f *file) Mode() fs.FileMode {
	if f.IsDir() {
		return fs.ModeDir | 0555
	}
	return 0444
}

// dotFile is a file for the root directory,
// which is omitted from the files list in a FS.
var dotFile = &file{name: "./"}
// lookup returns the named file, or nil if it is not present.
func (f FS) lookup(name string) *file {
	if !fs.ValidPath(name) {
		// A well-formed files list never contains an invalid name, so this
		// check is only a backstop against bad input.
		return nil
	}
	if name == "." {
		// The root directory is synthesized; it is not stored in the list.
		return dotFile
	}
	if f.files == nil {
		// Zero-value FS: an empty file system.
		return nil
	}
	// Binary search to find where name would be in the list,
	// and then check if name is at that position. The list is ordered by
	// (dir, elem) — see the comment on FS.files.
	dir, elem, _ := split(name)
	files := *f.files
	i := sortSearch(len(files), func(i int) bool {
		idir, ielem, _ := split(files[i].name)
		return idir > dir || idir == dir && ielem >= elem
	})
	// trimSlash lets "q/s" match the stored directory entry "q/s/".
	if i < len(files) && trimSlash(files[i].name) == name {
		return &files[i]
	}
	return nil
}
// readDir returns the list of files corresponding to the directory dir.
func (f FS) readDir(dir string) []file {
	if f.files == nil {
		return nil
	}
	// The list is sorted by (dir, elem) — see FS.files — so all entries of a
	// directory are contiguous. Two binary searches find the half-open range
	// [i, j) of entries whose parent directory equals dir.
	files := *f.files
	i := sortSearch(len(files), func(i int) bool {
		idir, _, _ := split(files[i].name)
		return idir >= dir
	})
	j := sortSearch(len(files), func(j int) bool {
		jdir, _, _ := split(files[j].name)
		return jdir > dir
	})
	return files[i:j]
}
// Open opens the named file for reading and returns it as an fs.File.
//
// The returned file implements io.Seeker when the file is not a directory.
func (f FS) Open(name string) (fs.File, error) {
	file := f.lookup(name)
	switch {
	case file == nil:
		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
	case file.IsDir():
		return &openDir{file, f.readDir(name), 0}, nil
	default:
		return &openFile{file, 0}, nil
	}
}
// ReadDir reads and returns the entire named directory.
func (f FS) ReadDir(name string) ([]fs.DirEntry, error) {
	opened, err := f.Open(name)
	if err != nil {
		return nil, err
	}
	dir, ok := opened.(*openDir)
	if !ok {
		return nil, &fs.PathError{Op: "read", Path: name, Err: errors.New("not a directory")}
	}
	// Expose each entry through the fs.DirEntry interface.
	entries := make([]fs.DirEntry, len(dir.files))
	for i := range dir.files {
		entries[i] = &dir.files[i]
	}
	return entries, nil
}
// ReadFile reads and returns the content of the named file.
func (f FS) ReadFile(name string) ([]byte, error) {
	opened, err := f.Open(name)
	if err != nil {
		return nil, err
	}
	regular, ok := opened.(*openFile)
	if !ok {
		return nil, &fs.PathError{Op: "read", Path: name, Err: errors.New("is a directory")}
	}
	// Copy out the contents; callers own the returned slice.
	return []byte(regular.f.data), nil
}
// An openFile is a regular file open for reading.
type openFile struct {
	f      *file // the file itself
	offset int64 // current read offset
}

// Compile-time check: regular files support seeking.
var (
	_ io.Seeker = (*openFile)(nil)
)
// Close implements fs.File; there is nothing to release.
func (f *openFile) Close() error { return nil }

// Stat returns the file's metadata.
func (f *openFile) Stat() (fs.FileInfo, error) { return f.f, nil }

// Read copies bytes from the current offset into b, advancing the offset.
func (f *openFile) Read(b []byte) (int, error) {
	size := int64(len(f.f.data))
	switch {
	case f.offset >= size:
		return 0, io.EOF
	case f.offset < 0:
		return 0, &fs.PathError{Op: "read", Path: f.f.name, Err: fs.ErrInvalid}
	}
	n := copy(b, f.f.data[f.offset:])
	f.offset += int64(n)
	return n, nil
}
// Seek repositions the read offset relative to whence and returns the new
// offset. Offsets outside [0, len(data)] are rejected with fs.ErrInvalid;
// an unknown whence leaves offset interpreted as absolute.
func (f *openFile) Seek(offset int64, whence int) (int64, error) {
	// Use the named io.Seek* constants instead of the magic values 0/1/2.
	switch whence {
	case io.SeekStart:
		// offset is already absolute
	case io.SeekCurrent:
		offset += f.offset
	case io.SeekEnd:
		offset += int64(len(f.f.data))
	}
	if offset < 0 || offset > int64(len(f.f.data)) {
		return 0, &fs.PathError{Op: "seek", Path: f.f.name, Err: fs.ErrInvalid}
	}
	f.offset = offset
	return offset, nil
}
// An openDir is a directory open for reading.
type openDir struct {
	f      *file  // the directory file itself
	files  []file // the directory contents
	offset int    // the read offset, an index into the files slice
}
// Close implements fs.File; there is nothing to release.
func (d *openDir) Close() error { return nil }

// Stat returns the directory's metadata.
func (d *openDir) Stat() (fs.FileInfo, error) { return d.f, nil }

// Read always fails: directories cannot be read as byte streams.
func (d *openDir) Read([]byte) (int, error) {
	return 0, &fs.PathError{Op: "read", Path: d.f.name, Err: errors.New("is a directory")}
}

// ReadDir returns up to count entries starting at the current offset,
// following the fs.ReadDirFile contract: count <= 0 returns all remaining
// entries and never io.EOF, while count > 0 returns io.EOF once exhausted.
func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
	n := len(d.files) - d.offset
	if n == 0 {
		if count <= 0 {
			return nil, nil
		}
		return nil, io.EOF
	}
	if count > 0 && n > count {
		n = count
	}
	list := make([]fs.DirEntry, n)
	for i := range list {
		list[i] = &d.files[d.offset+i]
	}
	d.offset += n
	return list, nil
}
// sortSearch is like sort.Search, avoiding an import: it returns the
// smallest index i in [0, n] for which f(i) is true, assuming f is
// monotone (false for every index below some threshold, true at and above it).
func sortSearch(n int, f func(int) bool) int {
	lo, hi := 0, n
	// Invariant: f is false on [0, lo) and true on [hi, n].
	for lo < hi {
		mid := int(uint(lo+hi) >> 1) // unsigned shift avoids overflow of lo+hi
		if f(mid) {
			hi = mid
		} else {
			lo = mid + 1
		}
	}
	return lo
}

View File

@@ -0,0 +1,230 @@
package statics
import (
"archive/zip"
"bufio"
"crypto/sha256"
"debug/buildinfo"
_ "embed"
"encoding/json"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gin-contrib/static"
"io"
"io/fs"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
"time"
)
// StaticFolder is the name of the local folder (under the data path) that,
// when present, overrides the embedded static assets.
const StaticFolder = "statics"

// zipContent holds the embedded assets.zip archive containing the built frontend.
//
//go:embed assets.zip
var zipContent string
// GinFS adapts an http.FileSystem to gin-contrib/static's ServeFileSystem.
type GinFS struct {
	FS http.FileSystem
}

// version mirrors the schema of the frontend's version.json manifest.
type version struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

// Open opens the named file from the wrapped file system.
func (b *GinFS) Open(name string) (http.File, error) {
	return b.FS.Open(name)
}

// Exists reports whether filepath can be opened in the wrapped file system.
// The prefix argument is required by the ServeFileSystem interface and is unused.
func (b *GinFS) Exists(prefix string, filepath string) bool {
	if _, err := b.FS.Open(filepath); err != nil {
		return false
	}
	return true
}
// NewServerStaticFS builds the file system used to serve frontend static
// resources. A local "statics" folder under the data path, when present,
// takes precedence; otherwise the embedded assets under assets/build are
// served. The frontend's version.json is then validated against the backend
// on a best-effort basis: mismatches are only logged so the server can still
// start.
func NewServerStaticFS(l logging.Logger, statics fs.FS, isPro bool) (static.ServeFileSystem, error) {
	var staticFS static.ServeFileSystem
	if util.Exists(util.DataPath(StaticFolder)) {
		l.Info("Folder with %q already exists, it will be used to serve static files.", util.DataPath(StaticFolder))
		staticFS = static.LocalFile(util.DataPath(StaticFolder), false)
	} else {
		// Serve from the embedded assets.
		embedFS, err := fs.Sub(statics, "assets/build")
		if err != nil {
			return nil, fmt.Errorf("failed to initialize static resources: %w", err)
		}
		staticFS = &GinFS{
			FS: http.FS(embedFS),
		}
	}

	// Validate the static resource version; all failures below are non-fatal.
	f, err := staticFS.Open("version.json")
	if err != nil {
		l.Warning("Missing version identifier file in static resources, please delete \"statics\" folder and rebuild it.")
		return staticFS, nil
	}
	// Close the handle in every return path (it was previously leaked).
	defer f.Close()

	b, err := io.ReadAll(f)
	if err != nil {
		l.Warning("Failed to read version identifier file in static resources, please delete \"statics\" folder and rebuild it.")
		return staticFS, nil
	}

	var v version
	if err := json.Unmarshal(b, &v); err != nil {
		l.Warning("Failed to parse version identifier file in static resources: %s", err)
		return staticFS, nil
	}

	staticName := "cloudreve-frontend"
	if isPro {
		staticName += "-pro"
	}
	if v.Name != staticName {
		l.Error("Static resource version mismatch, please delete \"statics\" folder and rebuild it.")
	}
	if v.Version != constants.BackendVersion {
		// Fixed message: separator between "]" and "please" was missing.
		l.Error("Static resource version mismatch [Current %s, Desired: %s], please delete \"statics\" folder and rebuild it.", v.Version, constants.BackendVersion)
	}
	return staticFS, nil
}
// NewStaticFS parses the embedded assets.zip archive into an in-memory FS
// laid out like the standard embed package's file list. It panics via the
// logger on a corrupt archive, since the frontend cannot be served without it.
func NewStaticFS(l logging.Logger) fs.FS {
	zipReader, err := zip.NewReader(strings.NewReader(zipContent), int64(len(zipContent)))
	if err != nil {
		l.Panic("Static resource is not a valid zip file: %s", err)
	}

	var files []file
	// All entries share the binary's build time as their modification time.
	modTime := getBuildTime()
	err = fs.WalkDir(zipReader, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return fmt.Errorf("cannot walk into %q: %w", path, err)
		}
		if path == "." {
			// The root entry is synthesized by FS (dotFile); skip it.
			return nil
		}
		f := file{modTime: modTime}
		if d.IsDir() {
			// Directory entries are encoded with a trailing slash (see FS.files).
			f.name = path + "/"
		} else {
			f.name = path
			rc, err := zipReader.Open(path)
			if err != nil {
				// Fixed typo: "canot" -> "cannot".
				return fmt.Errorf("cannot open %q: %w", path, err)
			}
			defer rc.Close()
			data, err := io.ReadAll(rc)
			if err != nil {
				return fmt.Errorf("cannot read %q: %w", path, err)
			}
			f.data = string(data)
			// Fill hash with the bitwise complement of the truncated SHA-256
			// digest — presumably to distinguish these runtime-built entries
			// from compiler-generated ones; confirm before relying on it.
			hash := sha256.Sum256(data)
			for i := range f.hash {
				f.hash[i] = ^hash[i]
			}
		}
		files = append(files, f)
		return nil
	})
	if err != nil {
		l.Panic("Failed to initialize static resources: %s", err)
	}

	// Sort by (dir, elem) so FS.lookup and FS.readDir can binary search.
	sort.Slice(files, func(i, j int) bool {
		fi, fj := files[i], files[j]
		di, ei, _ := split(fi.name)
		dj, ej, _ := split(fj.name)
		if di != dj {
			return di < dj
		}
		return ei < ej
	})

	var embedFS FS
	embedFS.files = &files
	return embedFS
}
// Eject extracts the embedded static resources under assets/build into the
// local "statics" folder under the data path, so they can be inspected or
// customized. Returns the first error encountered (which aborts the walk).
func Eject(l logging.Logger, statics fs.FS) error {
	embedFS, err := fs.Sub(statics, "assets/build")
	if err != nil {
		l.Panic("Failed to initialize static resources: %s", err)
	}

	// writeOne copies a single embedded file to its on-disk destination.
	writeOne := func(relPath string) error {
		dst := util.DataPath(filepath.Join(StaticFolder, relPath))
		out, err := util.CreatNestedFile(dst)
		if err != nil {
			// Check the error BEFORE deferring Close: the original deferred
			// out.Close() first, dereferencing a nil file on failure.
			return fmt.Errorf("failed to create file %q: %s, skipping...", dst, err)
		}
		defer out.Close()

		l.Info("Ejecting %q...", dst)
		obj, err := embedFS.Open(relPath)
		if err != nil {
			// Previously this error was silently discarded.
			return fmt.Errorf("cannot open %q: %s, skipping...", relPath, err)
		}
		defer obj.Close()

		if _, err := io.Copy(out, bufio.NewReader(obj)); err != nil {
			return fmt.Errorf("cannot write file %q: %s, skipping...", relPath, err)
		}
		return nil
	}

	walk := func(relPath string, d fs.DirEntry, err error) error {
		if err != nil {
			return fmt.Errorf("failed to read info of %q: %s, skipping...", relPath, err)
		}
		if d.IsDir() {
			// Directories are created implicitly by CreatNestedFile.
			return nil
		}
		return writeOne(relPath)
	}

	if err := fs.WalkDir(embedFS, ".", walk); err != nil {
		return fmt.Errorf("failed to eject static resources: %w", err)
	}
	l.Info("Finish ejecting static resources.")
	return nil
}
// getBuildTime returns the binary's VCS commit time when it is recorded in
// the executable's build info, falling back to the current time otherwise.
func getBuildTime() (buildTime time.Time) {
	fallback := time.Now()
	exe, err := os.Executable()
	if err != nil {
		return fallback
	}
	info, err := buildinfo.ReadFile(exe)
	if err != nil {
		return fallback
	}
	for _, s := range info.Settings {
		// Only a non-empty vcs.time setting terminates the scan.
		if s.Key != "vcs.time" || s.Value == "" {
			continue
		}
		if t, err := time.Parse(time.RFC3339, s.Value); err == nil {
			return t
		}
		break
	}
	return fallback
}

2
assets

Submodule assets updated: 53aa9ace96...8f98777045

47
azure-pipelines.yml Normal file
View File

@@ -0,0 +1,47 @@
trigger:
tags:
include:
- '*'
variables:
GO_VERSION: "1.25.5"
NODE_VERSION: "22.x"
DOCKER_BUILDKIT: 1
pool:
name: Default
jobs:
- job: Release
steps:
- checkout: self
submodules: true
persistCredentials: true
- task: NodeTool@0
inputs:
versionSpec: '$(NODE_VERSION)'
displayName: 'Install Node.js'
- task: GoTool@0
inputs:
version: "$(GO_VERSION)"
displayName: Install Go
- task: Docker@2
inputs:
containerRegistry: "CR DockerHub"
command: "login"
addPipelineData: false
addBaseImageData: false
- task: CmdLine@2
displayName: "Install tonistiigi/binfmt"
inputs:
script: |
docker run --privileged --rm tonistiigi/binfmt --install all
- task: goreleaser@0
condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/'))
inputs:
version: "latest"
distribution: "goreleaser"
workdir: "$(Build.SourcesDirectory)"
args: "release --timeout 60m -p 4"
env:
GITHUB_TOKEN: $(GITHUB_TOKEN)

View File

@@ -1,21 +0,0 @@
package bootstrap
import (
"fmt"
"github.com/HFO4/cloudreve/pkg/conf"
)
// InitApplication 初始化应用常量
func InitApplication() {
fmt.Print(`
___ _ _
/ __\ | ___ _ _ __| |_ __ _____ _____
/ / | |/ _ \| | | |/ _ | '__/ _ \ \ / / _ \
/ /___| | (_) | |_| | (_| | | | __/\ V / __/
\____/|_|\___/ \__,_|\__,_|_| \___| \_/ \___|
V` + conf.BackendVersion + ` Commit #` + conf.LastCommit + ` Pro=` + conf.IsPro + `
================================================
`)
}

View File

@@ -1,33 +0,0 @@
package bootstrap
import (
model "github.com/HFO4/cloudreve/models"
"github.com/HFO4/cloudreve/pkg/aria2"
"github.com/HFO4/cloudreve/pkg/auth"
"github.com/HFO4/cloudreve/pkg/cache"
"github.com/HFO4/cloudreve/pkg/conf"
"github.com/HFO4/cloudreve/pkg/crontab"
"github.com/HFO4/cloudreve/pkg/email"
"github.com/HFO4/cloudreve/pkg/task"
"github.com/gin-gonic/gin"
)
// Init 初始化启动
func Init(path string) {
InitApplication()
conf.Init(path)
// Debug 关闭时,切换为生产模式
if !conf.SystemConfig.Debug {
gin.SetMode(gin.ReleaseMode)
}
cache.Init()
if conf.SystemConfig.Mode == "master" {
model.Init()
task.Init()
aria2.Init(false)
email.Init()
crontab.Init()
InitStatic()
}
auth.Init()
}

View File

@@ -1,48 +0,0 @@
package bootstrap
import (
"github.com/HFO4/cloudreve/pkg/util"
_ "github.com/HFO4/cloudreve/statik"
"github.com/gin-contrib/static"
"github.com/rakyll/statik/fs"
"net/http"
)
type GinFS struct {
FS http.FileSystem
}
// StaticFS 内置静态文件资源
var StaticFS static.ServeFileSystem
// Open 打开文件
func (b *GinFS) Open(name string) (http.File, error) {
return b.FS.Open(name)
}
// Exists 文件是否存在
func (b *GinFS) Exists(prefix string, filepath string) bool {
if _, err := b.FS.Open(filepath); err != nil {
return false
}
return true
}
// InitStatic 初始化静态资源文件
func InitStatic() {
var err error
if util.Exists(util.RelativePath("statics")) {
util.Log().Info("检测到 statics 目录存在,将使用此目录下的静态资源文件")
StaticFS = static.LocalFile(util.RelativePath("statics"), false)
} else {
StaticFS = &GinFS{}
StaticFS.(*GinFS).FS, err = fs.New()
if err != nil {
util.Log().Panic("无法初始化静态资源, %s", err)
}
}
}

128
build.sh
View File

@@ -1,128 +0,0 @@
#!/bin/bash
REPO=$(cd $(dirname $0); pwd)
COMMIT_SHA=$(git rev-parse --short HEAD)
VERSION=$(git describe --tags)
ASSETS="false"
BINARY="false"
RELEASE="false"
debugInfo () {
echo "Repo: $REPO"
echo "Build assets: $ASSETS"
echo "Build binary: $BINARY"
echo "Release: $RELEASE"
echo "Version: $VERSION"
echo "Commit: $COMMIT_SHA"
}
buildAssets () {
cd $REPO
rm -rf assets/build
rm -f statik/statik.go
export CI=false
cd $REPO/assets
yarn install
yarn run build
if ! [ -x "$(command -v statik)" ]; then
export CGO_ENABLED=0
go get github.com/rakyll/statik
fi
cd $REPO
statik -src=assets/build/ -include=*.html,*.js,*.json,*.css,*.png,*.svg,*.ico -f
}
buildBinary () {
cd $REPO
go build -a -o cloudreve -ldflags " -X 'github.com/HFO4/cloudreve/pkg/conf.BackendVersion=$VERSION' -X 'github.com/HFO4/cloudreve/pkg/conf.LastCommit=$COMMIT_SHA'"
}
_build() {
local osarch=$1
IFS=/ read -r -a arr <<<"$osarch"
os="${arr[0]}"
arch="${arr[1]}"
gcc="${arr[2]}"
# Go build to build the binary.
export GOOS=$os
export GOARCH=$arch
export CC=$gcc
export CGO_ENABLED=1
out="release/cloudreve_${VERSION}_${os}_${arch}"
go build -a -o "${out}" -ldflags " -X 'github.com/HFO4/cloudreve/pkg/conf.BackendVersion=$VERSION' -X 'github.com/HFO4/cloudreve/pkg/conf.LastCommit=$COMMIT_SHA'"
if [ "$os" = "windows" ]; then
mv $out release/cloudreve.exe
zip -j -q "${out}.zip" release/cloudreve.exe
rm -f "release/cloudreve.exe"
else
mv $out release/cloudreve
tar -zcvf "${out}.tar.gz" -C release cloudreve
rm -f "release/cloudreve"
fi
}
release(){
cd $REPO
## List of architectures and OS to test coss compilation.
SUPPORTED_OSARCH="linux/amd64/gcc linux/arm/arm-linux-gnueabihf-gcc windows/amd64/x86_64-w64-mingw32-gcc"
echo "Release builds for OS/Arch/CC: ${SUPPORTED_OSARCH}"
for each_osarch in ${SUPPORTED_OSARCH}; do
_build "${each_osarch}"
done
}
usage() {
echo "Usage: $0 [-a] [-c] [-b] [-r]" 1>&2;
exit 1;
}
while getopts "bacr:d" o; do
case "${o}" in
b)
ASSETS="true"
BINARY="true"
;;
a)
ASSETS="true"
;;
c)
BINARY="true"
;;
r)
ASSETS="true"
RELEASE="true"
;;
d)
DEBUG="true"
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
if [ "$DEBUG" = "true" ]; then
debugInfo
fi
if [ "$ASSETS" = "true" ]; then
buildAssets
fi
if [ "$BINARY" = "true" ]; then
buildBinary
fi
if [ "$RELEASE" = "true" ]; then
release
fi

30
cmd/eject.go Normal file
View File

@@ -0,0 +1,30 @@
package cmd
import (
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/application/statics"
"github.com/spf13/cobra"
"os"
)
func init() {
	rootCmd.AddCommand(ejectCmd)
}

// ejectCmd implements "cloudreve eject": it extracts the embedded static
// frontend files into the local "statics" folder (see statics.Eject) so
// they can be inspected or customized.
var ejectCmd = &cobra.Command{
	Use:   "eject",
	Short: "Eject all embedded static files",
	Run: func(cmd *cobra.Command, args []string) {
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
			dependency.WithProFlag(constants.IsPro == "true"),
		)
		logger := dep.Logger()
		if err := statics.Eject(dep.Logger(), dep.Statics()); err != nil {
			logger.Error("Failed to eject static files: %s", err)
			os.Exit(1)
		}
	},
}

230
cmd/masterkey.go Normal file
View File

@@ -0,0 +1,230 @@
package cmd
import (
"context"
"crypto/rand"
"encoding/base64"
"fmt"
"io"
"os"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/spf13/cobra"
)
var (
	// outputToFile, when set, redirects the generated key to a file instead of stdout.
	outputToFile string
	// newMasterKeyFile is the path to the base64-encoded replacement key
	// consumed by the rotate subcommand.
	newMasterKeyFile string
)

func init() {
	rootCmd.AddCommand(masterKeyCmd)
	masterKeyCmd.AddCommand(masterKeyGenerateCmd)
	masterKeyCmd.AddCommand(masterKeyGetCmd)
	masterKeyCmd.AddCommand(masterKeyRotateCmd)
	masterKeyGenerateCmd.Flags().StringVarP(&outputToFile, "output", "o", "", "Output master key to file instead of stdout")
	masterKeyRotateCmd.Flags().StringVarP(&newMasterKeyFile, "new-key", "n", "", "Path to file containing the new master key (base64 encoded).")
}

// masterKeyCmd is the parent "master-key" command; it only prints help and
// delegates real work to its subcommands.
var masterKeyCmd = &cobra.Command{
	Use:   "master-key",
	Short: "Master encryption key management",
	Long:  "Manage master encryption keys for file encryption. Use subcommands to generate, get, or rotate keys.",
	Run: func(cmd *cobra.Command, args []string) {
		_ = cmd.Help()
	},
}
// masterKeyGenerateCmd creates a random 256-bit key from the CSPRNG and
// prints it (or writes it to --output) in base64.
var masterKeyGenerateCmd = &cobra.Command{
	Use:   "generate",
	Short: "Generate a new master encryption key",
	Long:  "Generate a new random 32-byte (256-bit) master encryption key and output it in base64 format.",
	Run: func(cmd *cobra.Command, args []string) {
		// Generate 32-byte random key
		key := make([]byte, 32)
		if _, err := io.ReadFull(rand.Reader, key); err != nil {
			fmt.Fprintf(os.Stderr, "Error: Failed to generate random key: %v\n", err)
			os.Exit(1)
		}
		// Encode to base64
		encodedKey := base64.StdEncoding.EncodeToString(key)
		if outputToFile != "" {
			// Write to file with owner-only permissions, since this is a secret.
			if err := os.WriteFile(outputToFile, []byte(encodedKey), 0600); err != nil {
				fmt.Fprintf(os.Stderr, "Error: Failed to write key to file: %v\n", err)
				os.Exit(1)
			}
			fmt.Printf("Master key generated and saved to: %s\n", outputToFile)
		} else {
			// Output to stdout
			fmt.Println(encodedKey)
		}
	},
}
// masterKeyGetCmd prints the current master key in base64 from whichever
// vault backend is configured (setting, env, or file).
var masterKeyGetCmd = &cobra.Command{
	Use:   "get",
	Short: "Get the current master encryption key",
	Long:  "Retrieve and display the current master encryption key from the configured vault (setting, env, or file).",
	Run: func(cmd *cobra.Command, args []string) {
		ctx := context.Background()
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
		)
		logger := dep.Logger()
		// Get the master key vault
		vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
		// Retrieve the master key
		key, err := vault.GetMasterKey(ctx)
		if err != nil {
			logger.Error("Failed to get master key: %s", err)
			os.Exit(1)
		}
		// Encode to base64 and display; the empty Println separates the key
		// from any log output above.
		encodedKey := base64.StdEncoding.EncodeToString(key)
		fmt.Println("")
		fmt.Println(encodedKey)
	},
}
// masterKeyRotateCmd re-encrypts every per-file encryption key with a new
// master key supplied via --new-key, then updates the stored master key when
// it lives in the settings database. The statement order below is critical:
// file keys are rewritten BEFORE the master key setting is swapped.
//
// NOTE(review): --new-key is not validated for emptiness before os.ReadFile,
// and the per-entity updates are not wrapped in a transaction — a failure
// midway leaves some keys encrypted under each master key; confirm whether
// that is acceptable for this tool.
var masterKeyRotateCmd = &cobra.Command{
	Use:   "rotate",
	Short: "Rotate the master encryption key",
	Long: `Rotate the master encryption key by re-encrypting all encrypted file keys with a new master key.
This operation:
1. Retrieves the current master key
2. Loads a new master key from file
3. Re-encrypts all file encryption keys with the new master key
4. Updates the master key in the settings database
Warning: This is a critical operation. Make sure to backup your database before proceeding.`,
	Run: func(cmd *cobra.Command, args []string) {
		ctx := context.Background()
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
		)
		logger := dep.Logger()
		logger.Info("Starting master key rotation...")
		// Get the old master key
		vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
		oldMasterKey, err := vault.GetMasterKey(ctx)
		if err != nil {
			logger.Error("Failed to get current master key: %s", err)
			os.Exit(1)
		}
		logger.Info("Retrieved current master key")
		// Get or generate the new master key
		var newMasterKey []byte
		// Load from file (base64-encoded, must decode to exactly 32 bytes)
		keyData, err := os.ReadFile(newMasterKeyFile)
		if err != nil {
			logger.Error("Failed to read new master key file: %s", err)
			os.Exit(1)
		}
		newMasterKey, err = base64.StdEncoding.DecodeString(string(keyData))
		if err != nil {
			logger.Error("Failed to decode new master key: %s", err)
			os.Exit(1)
		}
		if len(newMasterKey) != 32 {
			logger.Error("Invalid new master key: must be 32 bytes (256 bits), got %d bytes", len(newMasterKey))
			os.Exit(1)
		}
		logger.Info("Loaded new master key from file: %s", newMasterKeyFile)
		// Query all entities with encryption metadata
		db := dep.DBClient()
		entities, err := db.Entity.Query().
			Where(entity.Not(entity.PropsIsNil())).
			All(ctx)
		if err != nil {
			logger.Error("Failed to query entities: %s", err)
			os.Exit(1)
		}
		logger.Info("Found %d entities to check for encryption", len(entities))
		// Re-encrypt each entity's encryption key. Any failure aborts the
		// whole run, leaving remaining keys under the old master key.
		encryptedCount := 0
		for _, ent := range entities {
			// Entities without encryption metadata are untouched.
			if ent.Props == nil || ent.Props.EncryptMetadata == nil {
				continue
			}
			encMeta := ent.Props.EncryptMetadata
			// Decrypt the file key with old master key
			decryptedFileKey, err := encrypt.DecryptWithMasterKey(oldMasterKey, encMeta.Key)
			if err != nil {
				logger.Error("Failed to decrypt key for entity %d: %s", ent.ID, err)
				os.Exit(1)
			}
			// Re-encrypt the file key with new master key
			newEncryptedKey, err := encrypt.EncryptWithMasterKey(newMasterKey, decryptedFileKey)
			if err != nil {
				logger.Error("Failed to re-encrypt key for entity %d: %s", ent.ID, err)
				os.Exit(1)
			}
			// Update the entity with a copied Props so only this entity's
			// metadata is replaced.
			newProps := *ent.Props
			newProps.EncryptMetadata = &types.EncryptMetadata{
				Algorithm:    encMeta.Algorithm,
				Key:          newEncryptedKey,
				KeyPlainText: nil, // Don't store plaintext
				IV:           encMeta.IV,
			}
			err = db.Entity.UpdateOne(ent).
				SetProps(&newProps).
				Exec(ctx)
			if err != nil {
				logger.Error("Failed to update entity %d: %s", ent.ID, err)
				os.Exit(1)
			}
			encryptedCount++
		}
		logger.Info("Re-encrypted %d file keys", encryptedCount)
		// Update the master key in settings — only possible automatically when
		// the vault backend is the settings database; env/file vaults require
		// a manual update by the operator.
		keyStore := dep.SettingProvider().MasterEncryptKeyVault(ctx)
		if keyStore == setting.MasterEncryptKeyVaultTypeSetting {
			encodedNewKey := base64.StdEncoding.EncodeToString(newMasterKey)
			err = dep.SettingClient().Set(ctx, map[string]string{
				"encrypt_master_key": encodedNewKey,
			})
			if err != nil {
				logger.Error("Failed to update master key in settings: %s", err)
				logger.Error("WARNING: File keys have been re-encrypted but master key update failed!")
				logger.Error("Please manually update the encrypt_master_key setting.")
				os.Exit(1)
			}
		} else {
			logger.Info("Current master key is stored in %q", keyStore)
			if keyStore == setting.MasterEncryptKeyVaultTypeEnv {
				logger.Info("Please update the new master encryption key in your \"CR_ENCRYPT_MASTER_KEY\" environment variable.")
			} else if keyStore == setting.MasterEncryptKeyVaultTypeFile {
				logger.Info("Please update the new master encryption key in your key file: %q", dep.SettingProvider().MasterEncryptKeyFile(ctx))
			}
			logger.Info("Last step: Please manually update the new master encryption key in your ENV or key file.")
		}
		logger.Info("Master key rotation completed successfully")
	},
}

69
cmd/migrate.go Normal file
View File

@@ -0,0 +1,69 @@
package cmd
import (
"os"
"path/filepath"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/application/migrator"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/spf13/cobra"
)
var (
	// v3ConfPath points at the legacy v3 conf.ini; required by migrate.
	v3ConfPath string
	// forceReset discards any saved migration state and restarts from scratch.
	forceReset bool
)

func init() {
	rootCmd.AddCommand(migrateCmd)
	migrateCmd.PersistentFlags().StringVar(&v3ConfPath, "v3-conf", "", "Path to the v3 config file")
	migrateCmd.PersistentFlags().BoolVar(&forceReset, "force-reset", false, "Force reset migration state and start from beginning")
}
// migrateCmd implements "cloudreve migrate": a resumable migration of a v3
// deployment's data into the v4 schema, checkpointed so interrupted runs can
// resume from the last successful step.
var migrateCmd = &cobra.Command{
	Use:   "migrate",
	Short: "Migrate from v3 to v4",
	Run: func(cmd *cobra.Command, args []string) {
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
			dependency.WithRequiredDbVersion(constants.BackendVersion),
			dependency.WithProFlag(constants.IsPro == "true"),
		)
		logger := dep.Logger()
		logger.Info("Migrating from v3 to v4...")
		if v3ConfPath == "" {
			// Fixed hint: the flag is registered as --v3-conf (double dash).
			logger.Error("v3 config file is required, please use --v3-conf to specify the path.")
			os.Exit(1)
		}

		// Migration progress is checkpointed next to the v3 config file.
		stateFilePath := filepath.Join(filepath.Dir(v3ConfPath), "migration_state.json")
		if util.Exists(stateFilePath) && !forceReset {
			logger.Info("Found existing migration state file at %s. Migration will resume from the last successful step.", stateFilePath)
			logger.Info("If you want to start migration from the beginning, please use --force-reset flag.")
		} else if forceReset && util.Exists(stateFilePath) {
			logger.Info("Force resetting migration state. Will start from the beginning.")
			if err := os.Remove(stateFilePath); err != nil {
				logger.Error("Failed to remove migration state file: %s", err)
				os.Exit(1)
			}
		}

		// Renamed from "migrator" to avoid shadowing the migrator package.
		m, err := migrator.NewMigrator(dep, v3ConfPath)
		if err != nil {
			logger.Error("Failed to create migrator: %s", err)
			os.Exit(1)
		}
		if err := m.Migrate(); err != nil {
			logger.Error("Failed to migrate: %s", err)
			logger.Info("Migration failed but state has been saved. You can retry with the same command to resume from the last successful step.")
			os.Exit(1)
		}
		logger.Info("Migration from v3 to v4 completed successfully.")
	},
}

44
cmd/root.go Normal file
View File

@@ -0,0 +1,44 @@
package cmd
import (
	"errors"
	"fmt"
	"os"

	"github.com/cloudreve/Cloudreve/v4/pkg/util"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)
var (
	// confPath is the config file path shared by all subcommands (-c/--conf).
	confPath string
	// licenseKey holds the Pro license key; it is bound to a flag by the
	// server subcommand's init.
	licenseKey string
)
// init declares the global flags shared by every subcommand.
func init() {
	// NOTE(review): util.DataPath("conf.ini") is evaluated here, before flags
	// are parsed, so --use-working-dir presumably cannot influence this
	// default — confirm that this is intended.
	rootCmd.PersistentFlags().StringVarP(&confPath, "conf", "c", util.DataPath("conf.ini"), "Path to the config file")
	rootCmd.PersistentFlags().BoolVarP(&util.UseWorkingDir, "use-working-dir", "w", false, "Use working directory, instead of executable directory")
}
// rootCmd is the bare "cloudreve" command. Its Run is intentionally a no-op:
// Execute rewrites a bare invocation into the "server" subcommand.
var rootCmd = &cobra.Command{
	Use:   "cloudreve",
	Short: "Cloudreve is a server-side self-hosted cloud storage platform",
	Long: `Self-hosted file management and sharing system, supports multiple storage providers.
Complete documentation is available at https://docs.cloudreve.org/`,
	Run: func(cmd *cobra.Command, args []string) {
		// Intentionally empty; see Execute for the default-server redirect.
	},
}
// Execute runs the root command. When the invocation does not name a known
// subcommand (e.g. "cloudreve -c conf.ini"), the original arguments are
// forwarded to the default "server" subcommand for backward compatibility.
func Execute() {
	cmd, _, err := rootCmd.Find(os.Args[1:])
	// Redirect to the default server cmd if no cmd is given. Help requests
	// (pflag.ErrHelp) are left for the root command itself to print; use
	// errors.Is so a wrapped ErrHelp is also recognized.
	if err == nil && cmd.Use == rootCmd.Use && !errors.Is(cmd.Flags().Parse(os.Args[1:]), pflag.ErrHelp) {
		args := append([]string{"server"}, os.Args[1:]...)
		rootCmd.SetArgs(args)
	}
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}

55
cmd/server.go Normal file
View File

@@ -0,0 +1,55 @@
package cmd
import (
"os"
"os/signal"
"syscall"
"github.com/cloudreve/Cloudreve/v4/application"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/spf13/cobra"
)
// init registers the server subcommand and its Pro license-key flag.
func init() {
	rootCmd.AddCommand(serverCmd)
	// licenseKey is declared in root.go and shared with the server command.
	serverCmd.PersistentFlags().StringVarP(&licenseKey, "license-key", "l", "", "License key of your Cloudreve Pro")
}
// serverCmd builds the dependency container, starts the Cloudreve server,
// and blocks until the server exits or a termination signal arrives.
var serverCmd = &cobra.Command{
	Use:   "server",
	Short: "Start a Cloudreve server with the given config file",
	Run: func(cmd *cobra.Command, args []string) {
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
			dependency.WithProFlag(constants.IsProBool),
			dependency.WithRequiredDbVersion(constants.BackendVersion),
		)
		server := application.NewServer(dep)
		logger := dep.Logger()
		server.PrintBanner()
		// Graceful shutdown after received signal.
		// Buffer of 1 so a signal is not dropped if it arrives before the
		// shutdown goroutine is receiving.
		sigChan := make(chan os.Signal, 1)
		signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT)
		go shutdown(sigChan, logger, server)
		if err := server.Start(); err != nil {
			// NOTE(review): os.Exit skips the deferred receive below.
			logger.Error("Failed to start server: %s", err)
			os.Exit(1)
		}
		// Presumably waits for the shutdown goroutine to close sigChan so
		// server.Close has completed before Run returns — confirm intent,
		// since a defer registered after Start returns runs immediately
		// once the channel is closed.
		defer func() {
			<-sigChan
		}()
	},
}
// shutdown blocks until a termination signal is delivered on sigChan,
// gracefully closes the server, then closes sigChan to release any
// goroutine still waiting on it.
func shutdown(sigChan chan os.Signal, logger logging.Logger, server application.Server) {
	received := <-sigChan
	logger.Info("Signal %s received, shutting down server...", received)
	server.Close()
	close(sigChan)
}

47
docker-compose.yml Normal file
View File

@@ -0,0 +1,47 @@
services:
  cloudreve:
    image: cloudreve/cloudreve:latest
    container_name: cloudreve-backend
    depends_on:
      - postgresql
      - redis
    restart: unless-stopped
    ports:
      # Web UI / API port.
      - 5212:5212
      # NOTE(review): 6888 TCP+UDP is also exposed — presumably for the
      # built-in downloader/peer service; confirm before hardening firewalls.
      - 6888:6888
      - 6888:6888/udp
    environment:
      # CR_CONF_* entries override keys in conf.ini (Section.Key form).
      - CR_CONF_Database.Type=postgres
      - CR_CONF_Database.Host=postgresql
      - CR_CONF_Database.User=cloudreve
      - CR_CONF_Database.Name=cloudreve
      - CR_CONF_Database.Port=5432
      - CR_CONF_Redis.Server=redis:6379
    volumes:
      - backend_data:/cloudreve/data
  postgresql:
    # Best practice: Pin to major version.
    # NOTE: For major version jumps:
    # backup & consult https://www.postgresql.org/docs/current/pgupgrade.html
    image: postgres:17
    container_name: postgresql
    restart: unless-stopped
    environment:
      - POSTGRES_USER=cloudreve
      - POSTGRES_DB=cloudreve
      # NOTE(review): "trust" disables password authentication; acceptable
      # only on an isolated compose network. Consider POSTGRES_PASSWORD.
      - POSTGRES_HOST_AUTH_METHOD=trust
    volumes:
      - database_postgres:/var/lib/postgresql/data
  redis:
    image: redis:latest
    container_name: redis
    restart: unless-stopped
    volumes:
      - redis_data:/data
volumes:
  backend_data:
  database_postgres:
  redis_data:

3137
ent/client.go Normal file

File diff suppressed because it is too large Load Diff

242
ent/davaccount.go Normal file
View File

@@ -0,0 +1,242 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"encoding/json"
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// DavAccount is the model entity for the DavAccount schema.
// NOTE(review): this file is generated by ent ("DO NOT EDIT" header above);
// change the schema and regenerate rather than hand-editing.
type DavAccount struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// URI holds the value of the "uri" field.
	URI string `json:"uri,omitempty"`
	// Password holds the value of the "password" field.
	// Excluded from JSON output (json:"-") so credentials are not serialized.
	Password string `json:"-"`
	// Options holds the value of the "options" field.
	Options *boolset.BooleanSet `json:"options,omitempty"`
	// Props holds the value of the "props" field.
	Props *types.DavAccountProps `json:"props,omitempty"`
	// OwnerID holds the value of the "owner_id" field.
	OwnerID int `json:"owner_id,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the DavAccountQuery when eager-loading is set.
	Edges DavAccountEdges `json:"edges"`
	// selectValues stores values selected through modifiers; see Value.
	selectValues sql.SelectValues
}

// DavAccountEdges holds the relations/edges for other nodes in the graph.
type DavAccountEdges struct {
	// Owner holds the value of the owner edge.
	Owner *User `json:"owner,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [1]bool
}

// OwnerOrErr returns the Owner value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e DavAccountEdges) OwnerOrErr() (*User, error) {
	if e.loadedTypes[0] {
		if e.Owner == nil {
			// Edge was loaded but was not found.
			return nil, &NotFoundError{label: user.Label}
		}
		return e.Owner, nil
	}
	return nil, &NotLoadedError{edge: "owner"}
}
// scanValues returns the types for scanning values from sql.Rows.
// Generated by ent: maps each requested column to a scan destination.
func (*DavAccount) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case davaccount.FieldProps:
			// JSON column; decoded in assignValues.
			values[i] = new([]byte)
		case davaccount.FieldOptions:
			values[i] = new(boolset.BooleanSet)
		case davaccount.FieldID, davaccount.FieldOwnerID:
			values[i] = new(sql.NullInt64)
		case davaccount.FieldName, davaccount.FieldURI, davaccount.FieldPassword:
			values[i] = new(sql.NullString)
		case davaccount.FieldCreatedAt, davaccount.FieldUpdatedAt, davaccount.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		default:
			// Unknown columns (e.g. modifier-selected) are kept generically.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the DavAccount fields.
func (da *DavAccount) assignValues(columns []string, values []any) error {
	// Extra values beyond the known columns are tolerated; fewer is an error.
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case davaccount.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			da.ID = int(value.Int64)
		case davaccount.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				da.CreatedAt = value.Time
			}
		case davaccount.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				da.UpdatedAt = value.Time
			}
		case davaccount.FieldDeletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				// Nullable field: stored behind a pointer.
				da.DeletedAt = new(time.Time)
				*da.DeletedAt = value.Time
			}
		case davaccount.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				da.Name = value.String
			}
		case davaccount.FieldURI:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field uri", values[i])
			} else if value.Valid {
				da.URI = value.String
			}
		case davaccount.FieldPassword:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field password", values[i])
			} else if value.Valid {
				da.Password = value.String
			}
		case davaccount.FieldOptions:
			if value, ok := values[i].(*boolset.BooleanSet); !ok {
				return fmt.Errorf("unexpected type %T for field options", values[i])
			} else if value != nil {
				da.Options = value
			}
		case davaccount.FieldProps:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field props", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &da.Props); err != nil {
					return fmt.Errorf("unmarshal field props: %w", err)
				}
			}
		case davaccount.FieldOwnerID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field owner_id", values[i])
			} else if value.Valid {
				da.OwnerID = int(value.Int64)
			}
		default:
			da.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the DavAccount.
// This includes values selected through modifiers, order, etc.
func (da *DavAccount) Value(name string) (ent.Value, error) {
	return da.selectValues.Get(name)
}

// QueryOwner queries the "owner" edge of the DavAccount entity.
func (da *DavAccount) QueryOwner() *UserQuery {
	return NewDavAccountClient(da.config).QueryOwner(da)
}

// Update returns a builder for updating this DavAccount.
// Note that you need to call DavAccount.Unwrap() before calling this method if this DavAccount
// was returned from a transaction, and the transaction was committed or rolled back.
func (da *DavAccount) Update() *DavAccountUpdateOne {
	return NewDavAccountClient(da.config).UpdateOne(da)
}

// Unwrap unwraps the DavAccount entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (da *DavAccount) Unwrap() *DavAccount {
	// Panics on a non-transactional entity; this is ent's generated contract.
	_tx, ok := da.config.driver.(*txDriver)
	if !ok {
		panic("ent: DavAccount is not a transactional entity")
	}
	da.config.driver = _tx.drv
	return da
}
// String implements the fmt.Stringer.
// The password field is deliberately masked as "<sensitive>".
func (da *DavAccount) String() string {
	var builder strings.Builder
	builder.WriteString("DavAccount(")
	builder.WriteString(fmt.Sprintf("id=%v, ", da.ID))
	builder.WriteString("created_at=")
	builder.WriteString(da.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(da.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	if v := da.DeletedAt; v != nil {
		builder.WriteString("deleted_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(da.Name)
	builder.WriteString(", ")
	builder.WriteString("uri=")
	builder.WriteString(da.URI)
	builder.WriteString(", ")
	builder.WriteString("password=<sensitive>")
	builder.WriteString(", ")
	builder.WriteString("options=")
	builder.WriteString(fmt.Sprintf("%v", da.Options))
	builder.WriteString(", ")
	builder.WriteString("props=")
	builder.WriteString(fmt.Sprintf("%v", da.Props))
	builder.WriteString(", ")
	builder.WriteString("owner_id=")
	builder.WriteString(fmt.Sprintf("%v", da.OwnerID))
	builder.WriteByte(')')
	return builder.String()
}

// SetOwner manually set the edge as loaded state.
// NOTE(review): receiver is named "e" while every sibling method uses "da";
// harmless, but worth aligning when the template is next touched.
func (e *DavAccount) SetOwner(v *User) {
	e.Edges.Owner = v
	e.Edges.loadedTypes[0] = true
}

// DavAccounts is a parsable slice of DavAccount.
type DavAccounts []*DavAccount

View File

@@ -0,0 +1,144 @@
// Code generated by ent, DO NOT EDIT.
package davaccount
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
// NOTE(review): generated by ent; regenerate rather than hand-editing.
const (
	// Label holds the string label denoting the davaccount type in the database.
	Label = "dav_account"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldURI holds the string denoting the uri field in the database.
	FieldURI = "uri"
	// FieldPassword holds the string denoting the password field in the database.
	FieldPassword = "password"
	// FieldOptions holds the string denoting the options field in the database.
	FieldOptions = "options"
	// FieldProps holds the string denoting the props field in the database.
	FieldProps = "props"
	// FieldOwnerID holds the string denoting the owner_id field in the database.
	FieldOwnerID = "owner_id"
	// EdgeOwner holds the string denoting the owner edge name in mutations.
	EdgeOwner = "owner"
	// Table holds the table name of the davaccount in the database.
	Table = "dav_accounts"
	// OwnerTable is the table that holds the owner relation/edge.
	OwnerTable = "dav_accounts"
	// OwnerInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	OwnerInverseTable = "users"
	// OwnerColumn is the table column denoting the owner relation/edge.
	OwnerColumn = "owner_id"
)

// Columns holds all SQL columns for davaccount fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldName,
	FieldURI,
	FieldPassword,
	FieldOptions,
	FieldProps,
	FieldOwnerID,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}

// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	Hooks        [1]ent.Hook
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)
// OrderOption defines the ordering options for the DavAccount queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByURI orders the results by the uri field.
func ByURI(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldURI, opts...).ToFunc()
}

// ByPassword orders the results by the password field.
func ByPassword(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPassword, opts...).ToFunc()
}

// ByOwnerID orders the results by the owner_id field.
func ByOwnerID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldOwnerID, opts...).ToFunc()
}

// ByOwnerField orders the results by owner field.
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
	}
}

// newOwnerStep builds the graph step for traversing the owner (M2O) edge.
func newOwnerStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(OwnerInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
	)
}

530
ent/davaccount/where.go Normal file
View File

@@ -0,0 +1,530 @@
// Code generated by ent, DO NOT EDIT.
package davaccount
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// ID filters vertices based on their ID field.
func ID(id int) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.DavAccount {
return predicate.DavAccount(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldUpdatedAt, v))
}
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldDeletedAt, v))
}
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldName, v))
}
// URI applies equality check predicate on the "uri" field. It's identical to URIEQ.
func URI(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldURI, v))
}
// Password applies equality check predicate on the "password" field. It's identical to PasswordEQ.
func Password(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldPassword, v))
}
// Options applies equality check predicate on the "options" field. It's identical to OptionsEQ.
func Options(v *boolset.BooleanSet) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldOptions, v))
}
// OwnerID applies equality check predicate on the "owner_id" field. It's identical to OwnerIDEQ.
func OwnerID(v int) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldOwnerID, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLTE(FieldUpdatedAt, v))
}
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldDeletedAt, v))
}
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNEQ(FieldDeletedAt, v))
}
// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldIn(FieldDeletedAt, vs...))
}
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNotIn(FieldDeletedAt, vs...))
}
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGT(FieldDeletedAt, v))
}
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGTE(FieldDeletedAt, v))
}
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLT(FieldDeletedAt, v))
}
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLTE(FieldDeletedAt, v))
}
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.DavAccount {
return predicate.DavAccount(sql.FieldIsNull(FieldDeletedAt))
}
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.DavAccount {
return predicate.DavAccount(sql.FieldNotNull(FieldDeletedAt))
}
// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldName, v))
}
// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNEQ(FieldName, v))
}
// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldIn(FieldName, vs...))
}
// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNotIn(FieldName, vs...))
}
// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGT(FieldName, v))
}
// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGTE(FieldName, v))
}
// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLT(FieldName, v))
}
// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLTE(FieldName, v))
}
// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldContains(FieldName, v))
}
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldHasPrefix(FieldName, v))
}
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldHasSuffix(FieldName, v))
}
// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEqualFold(FieldName, v))
}
// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldContainsFold(FieldName, v))
}
// URIEQ applies the EQ predicate on the "uri" field.
func URIEQ(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldEQ(FieldURI, v))
}
// URINEQ applies the NEQ predicate on the "uri" field.
func URINEQ(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNEQ(FieldURI, v))
}
// URIIn applies the In predicate on the "uri" field.
func URIIn(vs ...string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldIn(FieldURI, vs...))
}
// URINotIn applies the NotIn predicate on the "uri" field.
func URINotIn(vs ...string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldNotIn(FieldURI, vs...))
}
// URIGT applies the GT predicate on the "uri" field.
func URIGT(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGT(FieldURI, v))
}
// URIGTE applies the GTE predicate on the "uri" field.
func URIGTE(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldGTE(FieldURI, v))
}
// URILT applies the LT predicate on the "uri" field.
func URILT(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLT(FieldURI, v))
}
// URILTE applies the LTE predicate on the "uri" field.
func URILTE(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldLTE(FieldURI, v))
}
// URIContains applies the Contains predicate on the "uri" field.
func URIContains(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldContains(FieldURI, v))
}
// URIHasPrefix applies the HasPrefix predicate on the "uri" field.
func URIHasPrefix(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldHasPrefix(FieldURI, v))
}
// URIHasSuffix applies the HasSuffix predicate on the "uri" field.
func URIHasSuffix(v string) predicate.DavAccount {
return predicate.DavAccount(sql.FieldHasSuffix(FieldURI, v))
}
// Case-insensitive match predicates for the "uri" field, and the full set of
// string predicates for the "password" field. All are generated by ent and
// simply wrap the corresponding sql.Field* helper in a predicate.DavAccount.
//
// NOTE(review): these compare against the stored password column as-is;
// whether it holds plaintext or a digest is not visible from this file.

// URIEqualFold applies the EqualFold predicate on the "uri" field.
func URIEqualFold(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEqualFold(FieldURI, v))
}

// URIContainsFold applies the ContainsFold predicate on the "uri" field.
func URIContainsFold(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldContainsFold(FieldURI, v))
}

// PasswordEQ applies the EQ predicate on the "password" field.
func PasswordEQ(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldPassword, v))
}

// PasswordNEQ applies the NEQ predicate on the "password" field.
func PasswordNEQ(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldPassword, v))
}

// PasswordIn applies the In predicate on the "password" field.
func PasswordIn(vs ...string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldPassword, vs...))
}

// PasswordNotIn applies the NotIn predicate on the "password" field.
func PasswordNotIn(vs ...string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldPassword, vs...))
}

// PasswordGT applies the GT predicate on the "password" field.
func PasswordGT(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGT(FieldPassword, v))
}

// PasswordGTE applies the GTE predicate on the "password" field.
func PasswordGTE(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGTE(FieldPassword, v))
}

// PasswordLT applies the LT predicate on the "password" field.
func PasswordLT(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLT(FieldPassword, v))
}

// PasswordLTE applies the LTE predicate on the "password" field.
func PasswordLTE(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLTE(FieldPassword, v))
}

// PasswordContains applies the Contains predicate on the "password" field.
func PasswordContains(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldContains(FieldPassword, v))
}

// PasswordHasPrefix applies the HasPrefix predicate on the "password" field.
func PasswordHasPrefix(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldHasPrefix(FieldPassword, v))
}

// PasswordHasSuffix applies the HasSuffix predicate on the "password" field.
func PasswordHasSuffix(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldHasSuffix(FieldPassword, v))
}

// PasswordEqualFold applies the EqualFold predicate on the "password" field.
func PasswordEqualFold(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEqualFold(FieldPassword, v))
}

// PasswordContainsFold applies the ContainsFold predicate on the "password" field.
func PasswordContainsFold(v string) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldContainsFold(FieldPassword, v))
}
// Predicates for the "options" (a *boolset.BooleanSet stored as bytes),
// "props" (nullable JSON), and "owner_id" (int FK) fields. Generated by ent.
// Only ordering/equality predicates exist for "options"; "props" supports
// only null checks because it is a JSON column.

// OptionsEQ applies the EQ predicate on the "options" field.
func OptionsEQ(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldOptions, v))
}

// OptionsNEQ applies the NEQ predicate on the "options" field.
func OptionsNEQ(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldOptions, v))
}

// OptionsIn applies the In predicate on the "options" field.
func OptionsIn(vs ...*boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldOptions, vs...))
}

// OptionsNotIn applies the NotIn predicate on the "options" field.
func OptionsNotIn(vs ...*boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldOptions, vs...))
}

// OptionsGT applies the GT predicate on the "options" field.
func OptionsGT(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGT(FieldOptions, v))
}

// OptionsGTE applies the GTE predicate on the "options" field.
func OptionsGTE(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldGTE(FieldOptions, v))
}

// OptionsLT applies the LT predicate on the "options" field.
func OptionsLT(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLT(FieldOptions, v))
}

// OptionsLTE applies the LTE predicate on the "options" field.
func OptionsLTE(v *boolset.BooleanSet) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldLTE(FieldOptions, v))
}

// PropsIsNil applies the IsNil predicate on the "props" field.
func PropsIsNil() predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIsNull(FieldProps))
}

// PropsNotNil applies the NotNil predicate on the "props" field.
func PropsNotNil() predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotNull(FieldProps))
}

// OwnerIDEQ applies the EQ predicate on the "owner_id" field.
func OwnerIDEQ(v int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldEQ(FieldOwnerID, v))
}

// OwnerIDNEQ applies the NEQ predicate on the "owner_id" field.
func OwnerIDNEQ(v int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNEQ(FieldOwnerID, v))
}

// OwnerIDIn applies the In predicate on the "owner_id" field.
func OwnerIDIn(vs ...int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldIn(FieldOwnerID, vs...))
}

// OwnerIDNotIn applies the NotIn predicate on the "owner_id" field.
func OwnerIDNotIn(vs ...int) predicate.DavAccount {
	return predicate.DavAccount(sql.FieldNotIn(FieldOwnerID, vs...))
}
// Edge predicates for the "owner" edge (a many-to-one, inverse edge to User)
// and the standard boolean combinators. Generated by ent.

// HasOwner applies the HasEdge predicate on the "owner" edge.
func HasOwner() predicate.DavAccount {
	return predicate.DavAccount(func(s *sql.Selector) {
		// M2O with inverse=true: DavAccount holds the FK to its owning User.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates).
func HasOwnerWith(preds ...predicate.User) predicate.DavAccount {
	return predicate.DavAccount(func(s *sql.Selector) {
		step := newOwnerStep()
		// All supplied User predicates are ANDed onto the joined selector.
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.DavAccount) predicate.DavAccount {
	return predicate.DavAccount(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.DavAccount) predicate.DavAccount {
	return predicate.DavAccount(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.DavAccount) predicate.DavAccount {
	return predicate.DavAccount(sql.NotPredicates(p))
}

968
ent/davaccount_create.go Normal file
View File

@@ -0,0 +1,968 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// DavAccountCreate is the builder for creating a DavAccount entity.
// All Set* methods record the value on the underlying mutation and return the
// builder for chaining; nothing touches the database until Save/Exec.
type DavAccountCreate struct {
	config
	mutation *DavAccountMutation
	hooks    []Hook
	conflict []sql.ConflictOption // populated by OnConflict/OnConflictColumns for upserts
}

// SetCreatedAt sets the "created_at" field.
func (dac *DavAccountCreate) SetCreatedAt(t time.Time) *DavAccountCreate {
	dac.mutation.SetCreatedAt(t)
	return dac
}

// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (dac *DavAccountCreate) SetNillableCreatedAt(t *time.Time) *DavAccountCreate {
	if t != nil {
		dac.SetCreatedAt(*t)
	}
	return dac
}

// SetUpdatedAt sets the "updated_at" field.
func (dac *DavAccountCreate) SetUpdatedAt(t time.Time) *DavAccountCreate {
	dac.mutation.SetUpdatedAt(t)
	return dac
}

// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (dac *DavAccountCreate) SetNillableUpdatedAt(t *time.Time) *DavAccountCreate {
	if t != nil {
		dac.SetUpdatedAt(*t)
	}
	return dac
}

// SetDeletedAt sets the "deleted_at" field.
func (dac *DavAccountCreate) SetDeletedAt(t time.Time) *DavAccountCreate {
	dac.mutation.SetDeletedAt(t)
	return dac
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dac *DavAccountCreate) SetNillableDeletedAt(t *time.Time) *DavAccountCreate {
	if t != nil {
		dac.SetDeletedAt(*t)
	}
	return dac
}

// SetName sets the "name" field.
func (dac *DavAccountCreate) SetName(s string) *DavAccountCreate {
	dac.mutation.SetName(s)
	return dac
}

// SetURI sets the "uri" field.
func (dac *DavAccountCreate) SetURI(s string) *DavAccountCreate {
	dac.mutation.SetURI(s)
	return dac
}

// SetPassword sets the "password" field.
func (dac *DavAccountCreate) SetPassword(s string) *DavAccountCreate {
	dac.mutation.SetPassword(s)
	return dac
}

// SetOptions sets the "options" field.
func (dac *DavAccountCreate) SetOptions(bs *boolset.BooleanSet) *DavAccountCreate {
	dac.mutation.SetOptions(bs)
	return dac
}

// SetProps sets the "props" field.
func (dac *DavAccountCreate) SetProps(tap *types.DavAccountProps) *DavAccountCreate {
	dac.mutation.SetProps(tap)
	return dac
}

// SetOwnerID sets the "owner_id" field.
func (dac *DavAccountCreate) SetOwnerID(i int) *DavAccountCreate {
	dac.mutation.SetOwnerID(i)
	return dac
}

// SetOwner sets the "owner" edge to the User entity.
func (dac *DavAccountCreate) SetOwner(u *User) *DavAccountCreate {
	// The edge is edge-field backed: setting the owner just sets owner_id.
	return dac.SetOwnerID(u.ID)
}

// Mutation returns the DavAccountMutation object of the builder.
func (dac *DavAccountCreate) Mutation() *DavAccountMutation {
	return dac.mutation
}
// Save creates the DavAccount in the database.
// Defaults are applied first; the actual insert runs through withHooks so all
// registered hooks wrap sqlSave.
func (dac *DavAccountCreate) Save(ctx context.Context) (*DavAccount, error) {
	if err := dac.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, dac.sqlSave, dac.mutation, dac.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (dac *DavAccountCreate) SaveX(ctx context.Context) *DavAccount {
	v, err := dac.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (dac *DavAccountCreate) Exec(ctx context.Context) error {
	_, err := dac.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dac *DavAccountCreate) ExecX(ctx context.Context) {
	if err := dac.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
// It errors (rather than panics) when the schema's default funcs were not
// registered, which happens if ent/runtime was not imported.
func (dac *DavAccountCreate) defaults() error {
	if _, ok := dac.mutation.CreatedAt(); !ok {
		if davaccount.DefaultCreatedAt == nil {
			return fmt.Errorf("ent: uninitialized davaccount.DefaultCreatedAt (forgotten import ent/runtime?)")
		}
		v := davaccount.DefaultCreatedAt()
		dac.mutation.SetCreatedAt(v)
	}
	if _, ok := dac.mutation.UpdatedAt(); !ok {
		if davaccount.DefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized davaccount.DefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := davaccount.DefaultUpdatedAt()
		dac.mutation.SetUpdatedAt(v)
	}
	return nil
}
// check runs all checks and user-defined validators on the builder.
func (dac *DavAccountCreate) check() error {
	if _, ok := dac.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DavAccount.created_at"`)}
	}
	if _, ok := dac.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DavAccount.updated_at"`)}
	}
	if _, ok := dac.mutation.Name(); !ok {
		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "DavAccount.name"`)}
	}
	if _, ok := dac.mutation.URI(); !ok {
		return &ValidationError{Name: "uri", err: errors.New(`ent: missing required field "DavAccount.uri"`)}
	}
	if _, ok := dac.mutation.Password(); !ok {
		return &ValidationError{Name: "password", err: errors.New(`ent: missing required field "DavAccount.password"`)}
	}
	if _, ok := dac.mutation.Options(); !ok {
		return &ValidationError{Name: "options", err: errors.New(`ent: missing required field "DavAccount.options"`)}
	}
	if _, ok := dac.mutation.OwnerID(); !ok {
		return &ValidationError{Name: "owner_id", err: errors.New(`ent: missing required field "DavAccount.owner_id"`)}
	}
	// The same OwnerID presence check is emitted twice on purpose: once for
	// the "owner_id" field and once for the required "owner" edge it backs.
	if _, ok := dac.mutation.OwnerID(); !ok {
		return &ValidationError{Name: "owner", err: errors.New(`ent: missing required edge "DavAccount.owner"`)}
	}
	return nil
}

// sqlSave validates the builder, builds the create spec, executes the INSERT,
// and copies the database-assigned ID back onto the node and mutation.
func (dac *DavAccountCreate) sqlSave(ctx context.Context) (*DavAccount, error) {
	if err := dac.check(); err != nil {
		return nil, err
	}
	_node, _spec := dac.createSpec()
	if err := sqlgraph.CreateNode(ctx, dac.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Drivers report the new ID as int64; narrow to the schema's int type.
	id := _spec.ID.Value.(int64)
	_node.ID = int(id)
	dac.mutation.id = &_node.ID
	dac.mutation.done = true
	return _node, nil
}
// createSpec translates the mutation into a sqlgraph.CreateSpec and a
// partially-populated node. Each field that was set on the mutation is copied
// both into the spec (for the INSERT) and onto the returned node (so the
// caller gets the values back without a re-read).
func (dac *DavAccountCreate) createSpec() (*DavAccount, *sqlgraph.CreateSpec) {
	var (
		_node = &DavAccount{config: dac.config}
		_spec = sqlgraph.NewCreateSpec(davaccount.Table, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
	)
	if id, ok := dac.mutation.ID(); ok {
		// An explicitly-set ID (see SetRawID) is passed through to the insert.
		_node.ID = id
		id64 := int64(id)
		_spec.ID.Value = id64
	}
	_spec.OnConflict = dac.conflict
	if value, ok := dac.mutation.CreatedAt(); ok {
		_spec.SetField(davaccount.FieldCreatedAt, field.TypeTime, value)
		_node.CreatedAt = value
	}
	if value, ok := dac.mutation.UpdatedAt(); ok {
		_spec.SetField(davaccount.FieldUpdatedAt, field.TypeTime, value)
		_node.UpdatedAt = value
	}
	if value, ok := dac.mutation.DeletedAt(); ok {
		_spec.SetField(davaccount.FieldDeletedAt, field.TypeTime, value)
		// Nullable field: the node stores a pointer.
		_node.DeletedAt = &value
	}
	if value, ok := dac.mutation.Name(); ok {
		_spec.SetField(davaccount.FieldName, field.TypeString, value)
		_node.Name = value
	}
	if value, ok := dac.mutation.URI(); ok {
		_spec.SetField(davaccount.FieldURI, field.TypeString, value)
		_node.URI = value
	}
	if value, ok := dac.mutation.Password(); ok {
		_spec.SetField(davaccount.FieldPassword, field.TypeString, value)
		_node.Password = value
	}
	if value, ok := dac.mutation.Options(); ok {
		// boolset.BooleanSet is persisted as a bytes column.
		_spec.SetField(davaccount.FieldOptions, field.TypeBytes, value)
		_node.Options = value
	}
	if value, ok := dac.mutation.Props(); ok {
		_spec.SetField(davaccount.FieldProps, field.TypeJSON, value)
		_node.Props = value
	}
	if nodes := dac.mutation.OwnerIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   davaccount.OwnerTable,
			Columns: []string{davaccount.OwnerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		// M2O edge: exactly one owner; mirror it onto the FK field.
		_node.OwnerID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.DavAccount.Create().
//		SetCreatedAt(v).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.DavAccountUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (dac *DavAccountCreate) OnConflict(opts ...sql.ConflictOption) *DavAccountUpsertOne {
	dac.conflict = opts
	return &DavAccountUpsertOne{
		create: dac,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (dac *DavAccountCreate) OnConflictColumns(columns ...string) *DavAccountUpsertOne {
	dac.conflict = append(dac.conflict, sql.ConflictColumns(columns...))
	return &DavAccountUpsertOne{
		create: dac,
	}
}

type (
	// DavAccountUpsertOne is the builder for "upsert"-ing
	//  one DavAccount node.
	DavAccountUpsertOne struct {
		create *DavAccountCreate
	}

	// DavAccountUpsert is the "OnConflict" setter.
	// It wraps sql.UpdateSet to expose typed setters for the update clause.
	DavAccountUpsert struct {
		*sql.UpdateSet
	}
)
// Typed setters on DavAccountUpsert for the conflict-update clause.
// Set* assigns a literal value; Update* uses the SQL "excluded" row (the value
// that was proposed for insertion); Clear* writes NULL. created_at and
// owner-edge mechanics are intentionally absent: created_at is immutable after
// create.

// SetUpdatedAt sets the "updated_at" field.
func (u *DavAccountUpsert) SetUpdatedAt(v time.Time) *DavAccountUpsert {
	u.Set(davaccount.FieldUpdatedAt, v)
	return u
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateUpdatedAt() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldUpdatedAt)
	return u
}

// SetDeletedAt sets the "deleted_at" field.
func (u *DavAccountUpsert) SetDeletedAt(v time.Time) *DavAccountUpsert {
	u.Set(davaccount.FieldDeletedAt, v)
	return u
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateDeletedAt() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldDeletedAt)
	return u
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DavAccountUpsert) ClearDeletedAt() *DavAccountUpsert {
	u.SetNull(davaccount.FieldDeletedAt)
	return u
}

// SetName sets the "name" field.
func (u *DavAccountUpsert) SetName(v string) *DavAccountUpsert {
	u.Set(davaccount.FieldName, v)
	return u
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateName() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldName)
	return u
}

// SetURI sets the "uri" field.
func (u *DavAccountUpsert) SetURI(v string) *DavAccountUpsert {
	u.Set(davaccount.FieldURI, v)
	return u
}

// UpdateURI sets the "uri" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateURI() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldURI)
	return u
}

// SetPassword sets the "password" field.
func (u *DavAccountUpsert) SetPassword(v string) *DavAccountUpsert {
	u.Set(davaccount.FieldPassword, v)
	return u
}

// UpdatePassword sets the "password" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdatePassword() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldPassword)
	return u
}

// SetOptions sets the "options" field.
func (u *DavAccountUpsert) SetOptions(v *boolset.BooleanSet) *DavAccountUpsert {
	u.Set(davaccount.FieldOptions, v)
	return u
}

// UpdateOptions sets the "options" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateOptions() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldOptions)
	return u
}

// SetProps sets the "props" field.
func (u *DavAccountUpsert) SetProps(v *types.DavAccountProps) *DavAccountUpsert {
	u.Set(davaccount.FieldProps, v)
	return u
}

// UpdateProps sets the "props" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateProps() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldProps)
	return u
}

// ClearProps clears the value of the "props" field.
func (u *DavAccountUpsert) ClearProps() *DavAccountUpsert {
	u.SetNull(davaccount.FieldProps)
	return u
}

// SetOwnerID sets the "owner_id" field.
func (u *DavAccountUpsert) SetOwnerID(v int) *DavAccountUpsert {
	u.Set(davaccount.FieldOwnerID, v)
	return u
}

// UpdateOwnerID sets the "owner_id" field to the value that was provided on create.
func (u *DavAccountUpsert) UpdateOwnerID() *DavAccountUpsert {
	u.SetExcluded(davaccount.FieldOwnerID)
	return u
}
// UpdateNewValues updates the mutable fields using the new values that were set on create.
// Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *DavAccountUpsertOne) UpdateNewValues() *DavAccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: keep the existing row's value on conflict.
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(davaccount.FieldCreatedAt)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *DavAccountUpsertOne) Ignore() *DavAccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *DavAccountUpsertOne) DoNothing() *DavAccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the DavAccountCreate.OnConflict
// documentation for more info.
func (u *DavAccountUpsertOne) Update(set func(*DavAccountUpsert)) *DavAccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&DavAccountUpsert{UpdateSet: update})
	}))
	return u
}
// Convenience forwarders on DavAccountUpsertOne: each delegates to the
// matching DavAccountUpsert method inside an Update callback, so callers can
// chain field updates without writing the closure themselves.

// SetUpdatedAt sets the "updated_at" field.
func (u *DavAccountUpsertOne) SetUpdatedAt(v time.Time) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateUpdatedAt() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *DavAccountUpsertOne) SetDeletedAt(v time.Time) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateDeletedAt() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DavAccountUpsertOne) ClearDeletedAt() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.ClearDeletedAt()
	})
}

// SetName sets the "name" field.
func (u *DavAccountUpsertOne) SetName(v string) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetName(v)
	})
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateName() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateName()
	})
}

// SetURI sets the "uri" field.
func (u *DavAccountUpsertOne) SetURI(v string) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetURI(v)
	})
}

// UpdateURI sets the "uri" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateURI() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateURI()
	})
}

// SetPassword sets the "password" field.
func (u *DavAccountUpsertOne) SetPassword(v string) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetPassword(v)
	})
}

// UpdatePassword sets the "password" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdatePassword() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdatePassword()
	})
}

// SetOptions sets the "options" field.
func (u *DavAccountUpsertOne) SetOptions(v *boolset.BooleanSet) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetOptions(v)
	})
}

// UpdateOptions sets the "options" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateOptions() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateOptions()
	})
}

// SetProps sets the "props" field.
func (u *DavAccountUpsertOne) SetProps(v *types.DavAccountProps) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetProps(v)
	})
}

// UpdateProps sets the "props" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateProps() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateProps()
	})
}

// ClearProps clears the value of the "props" field.
func (u *DavAccountUpsertOne) ClearProps() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.ClearProps()
	})
}

// SetOwnerID sets the "owner_id" field.
func (u *DavAccountUpsertOne) SetOwnerID(v int) *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetOwnerID(v)
	})
}

// UpdateOwnerID sets the "owner_id" field to the value that was provided on create.
func (u *DavAccountUpsertOne) UpdateOwnerID() *DavAccountUpsertOne {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateOwnerID()
	})
}
// Exec executes the query.
func (u *DavAccountUpsertOne) Exec(ctx context.Context) error {
	// An upsert without any conflict options is a programming error.
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for DavAccountCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
func (u *DavAccountUpsertOne) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

// Exec executes the UPSERT query and returns the inserted/updated ID.
func (u *DavAccountUpsertOne) ID(ctx context.Context) (id int, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		// Named result: "id" is the zero value here.
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
func (u *DavAccountUpsertOne) IDX(ctx context.Context) int {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// SetRawID sets an explicit primary-key value on the create mutation.
// NOTE(review): this is a project-customized addition to the ent template
// (note the "m" receiver, unlike "dac" elsewhere); presumably used for
// migration/import flows that preserve IDs — confirm against callers.
func (m *DavAccountCreate) SetRawID(t int) *DavAccountCreate {
	m.mutation.SetRawID(t)
	return m
}
// DavAccountCreateBulk is the builder for creating many DavAccount entities in bulk.
type DavAccountCreateBulk struct {
	config
	err      error // deferred construction error, surfaced by Save/Exec
	builders []*DavAccountCreate
	conflict []sql.ConflictOption
}

// Save creates the DavAccount entities in the database.
// It chains every builder's hooks into one mutator pipeline; the last mutator
// in the chain performs a single batched INSERT for all specs.
func (dacb *DavAccountCreateBulk) Save(ctx context.Context) ([]*DavAccount, error) {
	if dacb.err != nil {
		return nil, dacb.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(dacb.builders))
	nodes := make([]*DavAccount, len(dacb.builders))
	mutators := make([]Mutator, len(dacb.builders))
	for i := range dacb.builders {
		func(i int, root context.Context) {
			builder := dacb.builders[i]
			// NOTE(review): defaults() can return an error (uninitialized
			// runtime defaults) which is dropped here; check() below would
			// then report the missing fields instead.
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*DavAccountMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not last: trigger the next builder's mutator chain.
					_, err = mutators[i+1].Mutate(root, dacb.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = dacb.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, dacb.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Wrap with this builder's hooks, outermost hook first.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain from the first mutator.
		if _, err := mutators[0].Mutate(ctx, dacb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// SaveX is like Save, but panics if an error occurs.
func (dacb *DavAccountCreateBulk) SaveX(ctx context.Context) []*DavAccount {
	v, err := dacb.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (dacb *DavAccountCreateBulk) Exec(ctx context.Context) error {
	_, err := dacb.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dacb *DavAccountCreateBulk) ExecX(ctx context.Context) {
	if err := dacb.Exec(ctx); err != nil {
		panic(err)
	}
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.DavAccount.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.DavAccountUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (dacb *DavAccountCreateBulk) OnConflict(opts ...sql.ConflictOption) *DavAccountUpsertBulk {
	dacb.conflict = opts
	return &DavAccountUpsertBulk{
		create: dacb,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (dacb *DavAccountCreateBulk) OnConflictColumns(columns ...string) *DavAccountUpsertBulk {
	dacb.conflict = append(dacb.conflict, sql.ConflictColumns(columns...))
	return &DavAccountUpsertBulk{
		create: dacb,
	}
}
// DavAccountUpsertBulk is the builder for "upsert"-ing
// a bulk of DavAccount nodes.
type DavAccountUpsertBulk struct {
	create *DavAccountCreateBulk
}

// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *DavAccountUpsertBulk) UpdateNewValues() *DavAccountUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: if any builder set it, keep the existing
		// row's value on conflict for the whole batch.
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(davaccount.FieldCreatedAt)
			}
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.DavAccount.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *DavAccountUpsertBulk) Ignore() *DavAccountUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *DavAccountUpsertBulk) DoNothing() *DavAccountUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the DavAccountCreateBulk.OnConflict
// documentation for more info.
func (u *DavAccountUpsertBulk) Update(set func(*DavAccountUpsert)) *DavAccountUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&DavAccountUpsert{UpdateSet: update})
	}))
	return u
}
// Convenience forwarders on DavAccountUpsertBulk, mirroring the UpsertOne
// variants: each delegates to the matching DavAccountUpsert method inside an
// Update callback so the same conflict-update clause applies to the batch.

// SetUpdatedAt sets the "updated_at" field.
func (u *DavAccountUpsertBulk) SetUpdatedAt(v time.Time) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateUpdatedAt() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *DavAccountUpsertBulk) SetDeletedAt(v time.Time) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateDeletedAt() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DavAccountUpsertBulk) ClearDeletedAt() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.ClearDeletedAt()
	})
}

// SetName sets the "name" field.
func (u *DavAccountUpsertBulk) SetName(v string) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetName(v)
	})
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateName() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateName()
	})
}

// SetURI sets the "uri" field.
func (u *DavAccountUpsertBulk) SetURI(v string) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetURI(v)
	})
}

// UpdateURI sets the "uri" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateURI() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateURI()
	})
}

// SetPassword sets the "password" field.
func (u *DavAccountUpsertBulk) SetPassword(v string) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetPassword(v)
	})
}

// UpdatePassword sets the "password" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdatePassword() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdatePassword()
	})
}

// SetOptions sets the "options" field.
func (u *DavAccountUpsertBulk) SetOptions(v *boolset.BooleanSet) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetOptions(v)
	})
}

// UpdateOptions sets the "options" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateOptions() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateOptions()
	})
}

// SetProps sets the "props" field.
func (u *DavAccountUpsertBulk) SetProps(v *types.DavAccountProps) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetProps(v)
	})
}

// UpdateProps sets the "props" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateProps() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateProps()
	})
}

// ClearProps clears the value of the "props" field.
func (u *DavAccountUpsertBulk) ClearProps() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.ClearProps()
	})
}

// SetOwnerID sets the "owner_id" field.
func (u *DavAccountUpsertBulk) SetOwnerID(v int) *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.SetOwnerID(v)
	})
}

// UpdateOwnerID sets the "owner_id" field to the value that was provided on create.
func (u *DavAccountUpsertBulk) UpdateOwnerID() *DavAccountUpsertBulk {
	return u.Update(func(s *DavAccountUpsert) {
		s.UpdateOwnerID()
	})
}
// Exec executes the query.
func (u *DavAccountUpsertBulk) Exec(ctx context.Context) error {
if u.create.err != nil {
return u.create.err
}
for i, b := range u.create.builders {
if len(b.conflict) != 0 {
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the DavAccountCreateBulk instead", i)
}
}
if len(u.create.conflict) == 0 {
return errors.New("ent: missing options for DavAccountCreateBulk.OnConflict")
}
return u.create.Exec(ctx)
}
// ExecX is like Exec, but panics if an error occurs.
//
// It routes through Exec (instead of calling u.create.Exec directly) so the
// same builder-conflict and missing-OnConflict validations apply on the
// panicking path; previously those checks were silently skipped.
// NOTE(review): this file is generated by ent — mirror this change in the
// generator/template or it will be lost on the next regeneration.
func (u *DavAccountUpsertBulk) ExecX(ctx context.Context) {
	if err := u.Exec(ctx); err != nil {
		panic(err)
	}
}

88
ent/davaccount_delete.go Normal file
View File

@@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// DavAccountDelete is the builder for deleting a DavAccount entity.
type DavAccountDelete struct {
	config
	hooks    []Hook
	mutation *DavAccountMutation
}

// Where appends a list predicates to the DavAccountDelete builder.
func (dad *DavAccountDelete) Where(ps ...predicate.DavAccount) *DavAccountDelete {
	dad.mutation.Where(ps...)
	return dad
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (dad *DavAccountDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, dad.sqlExec, dad.mutation, dad.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (dad *DavAccountDelete) ExecX(ctx context.Context) int {
	n, err := dad.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds and runs the DELETE statement, applying all accumulated
// predicates, and reports the number of rows removed.
func (dad *DavAccountDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(davaccount.Table, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
	if ps := dad.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, dad.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		// Surface FK/unique violations as ent's typed ConstraintError so
		// callers can detect them with errors.As.
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	dad.mutation.done = true
	return affected, err
}

// DavAccountDeleteOne is the builder for deleting a single DavAccount entity.
type DavAccountDeleteOne struct {
	dad *DavAccountDelete
}

// Where appends a list predicates to the DavAccountDelete builder.
func (dado *DavAccountDeleteOne) Where(ps ...predicate.DavAccount) *DavAccountDeleteOne {
	dado.dad.mutation.Where(ps...)
	return dado
}

// Exec executes the deletion query.
func (dado *DavAccountDeleteOne) Exec(ctx context.Context) error {
	n, err := dado.dad.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		// Deleting zero rows for a single-entity delete is reported as not-found.
		return &NotFoundError{davaccount.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (dado *DavAccountDeleteOne) ExecX(ctx context.Context) {
	if err := dado.Exec(ctx); err != nil {
		panic(err)
	}
}

605
ent/davaccount_query.go Normal file
View File

@@ -0,0 +1,605 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/user"
)
// DavAccountQuery is the builder for querying DavAccount entities.
type DavAccountQuery struct {
	config
	ctx        *QueryContext
	order      []davaccount.OrderOption
	inters     []Interceptor
	predicates []predicate.DavAccount
	withOwner  *UserQuery
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the DavAccountQuery builder.
func (daq *DavAccountQuery) Where(ps ...predicate.DavAccount) *DavAccountQuery {
	daq.predicates = append(daq.predicates, ps...)
	return daq
}

// Limit the number of records to be returned by this query.
func (daq *DavAccountQuery) Limit(limit int) *DavAccountQuery {
	daq.ctx.Limit = &limit
	return daq
}

// Offset to start from.
func (daq *DavAccountQuery) Offset(offset int) *DavAccountQuery {
	daq.ctx.Offset = &offset
	return daq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (daq *DavAccountQuery) Unique(unique bool) *DavAccountQuery {
	daq.ctx.Unique = &unique
	return daq
}

// Order specifies how the records should be ordered.
func (daq *DavAccountQuery) Order(o ...davaccount.OrderOption) *DavAccountQuery {
	daq.order = append(daq.order, o...)
	return daq
}

// QueryOwner chains the current query on the "owner" edge.
// The returned UserQuery is evaluated lazily: the graph step from
// DavAccount to User is resolved when the new query executes.
func (daq *DavAccountQuery) QueryOwner() *UserQuery {
	query := (&UserClient{config: daq.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := daq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := daq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(davaccount.Table, davaccount.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, davaccount.OwnerTable, davaccount.OwnerColumn),
		)
		fromU = sqlgraph.SetNeighbors(daq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
// First returns the first DavAccount entity from the query.
// Returns a *NotFoundError when no DavAccount was found.
func (daq *DavAccountQuery) First(ctx context.Context) (*DavAccount, error) {
	nodes, err := daq.Limit(1).All(setContextOp(ctx, daq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{davaccount.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A not-found result returns nil rather than panicking.
func (daq *DavAccountQuery) FirstX(ctx context.Context) *DavAccount {
	node, err := daq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first DavAccount ID from the query.
// Returns a *NotFoundError when no DavAccount ID was found.
func (daq *DavAccountQuery) FirstID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = daq.Limit(1).IDs(setContextOp(ctx, daq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{davaccount.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
// A not-found result returns the zero ID rather than panicking.
func (daq *DavAccountQuery) FirstIDX(ctx context.Context) int {
	id, err := daq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}

// Only returns a single DavAccount entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one DavAccount entity is found.
// Returns a *NotFoundError when no DavAccount entities are found.
func (daq *DavAccountQuery) Only(ctx context.Context) (*DavAccount, error) {
	// Limit(2) is enough to distinguish "exactly one" from "more than one".
	nodes, err := daq.Limit(2).All(setContextOp(ctx, daq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{davaccount.Label}
	default:
		return nil, &NotSingularError{davaccount.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (daq *DavAccountQuery) OnlyX(ctx context.Context) *DavAccount {
	node, err := daq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only DavAccount ID in the query.
// Returns a *NotSingularError when more than one DavAccount ID is found.
// Returns a *NotFoundError when no entities are found.
func (daq *DavAccountQuery) OnlyID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = daq.Limit(2).IDs(setContextOp(ctx, daq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{davaccount.Label}
	default:
		err = &NotSingularError{davaccount.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (daq *DavAccountQuery) OnlyIDX(ctx context.Context) int {
	id, err := daq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// All executes the query and returns a list of DavAccounts.
func (daq *DavAccountQuery) All(ctx context.Context) ([]*DavAccount, error) {
	ctx = setContextOp(ctx, daq.ctx, "All")
	if err := daq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*DavAccount, *DavAccountQuery]()
	return withInterceptors[[]*DavAccount](ctx, daq, qr, daq.inters)
}

// AllX is like All, but panics if an error occurs.
func (daq *DavAccountQuery) AllX(ctx context.Context) []*DavAccount {
	nodes, err := daq.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of DavAccount IDs.
func (daq *DavAccountQuery) IDs(ctx context.Context) (ids []int, err error) {
	// Traversal paths may join other tables; deduplicate IDs unless the
	// caller set uniqueness explicitly.
	if daq.ctx.Unique == nil && daq.path != nil {
		daq.Unique(true)
	}
	ctx = setContextOp(ctx, daq.ctx, "IDs")
	if err = daq.Select(davaccount.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (daq *DavAccountQuery) IDsX(ctx context.Context) []int {
	ids, err := daq.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}

// Count returns the count of the given query.
func (daq *DavAccountQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, daq.ctx, "Count")
	if err := daq.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, daq, querierCount[*DavAccountQuery](), daq.inters)
}

// CountX is like Count, but panics if an error occurs.
func (daq *DavAccountQuery) CountX(ctx context.Context) int {
	count, err := daq.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (daq *DavAccountQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, daq.ctx, "Exist")
	switch _, err := daq.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (daq *DavAccountQuery) ExistX(ctx context.Context) bool {
	exist, err := daq.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
// Clone returns a duplicate of the DavAccountQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (daq *DavAccountQuery) Clone() *DavAccountQuery {
	if daq == nil {
		return nil
	}
	return &DavAccountQuery{
		config:     daq.config,
		ctx:        daq.ctx.Clone(),
		order:      append([]davaccount.OrderOption{}, daq.order...),
		inters:     append([]Interceptor{}, daq.inters...),
		predicates: append([]predicate.DavAccount{}, daq.predicates...),
		withOwner:  daq.withOwner.Clone(),
		// clone intermediate query.
		sql:  daq.sql.Clone(),
		path: daq.path,
	}
}

// WithOwner tells the query-builder to eager-load the nodes that are connected to
// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
func (daq *DavAccountQuery) WithOwner(opts ...func(*UserQuery)) *DavAccountQuery {
	query := (&UserClient{config: daq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	daq.withOwner = query
	return daq
}

// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.DavAccount.Query().
//		GroupBy(davaccount.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (daq *DavAccountQuery) GroupBy(field string, fields ...string) *DavAccountGroupBy {
	daq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &DavAccountGroupBy{build: daq}
	grbuild.flds = &daq.ctx.Fields
	grbuild.label = davaccount.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.DavAccount.Query().
//		Select(davaccount.FieldCreatedAt).
//		Scan(ctx, &v)
func (daq *DavAccountQuery) Select(fields ...string) *DavAccountSelect {
	daq.ctx.Fields = append(daq.ctx.Fields, fields...)
	sbuild := &DavAccountSelect{DavAccountQuery: daq}
	sbuild.label = davaccount.Label
	sbuild.flds, sbuild.scan = &daq.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a DavAccountSelect configured with the given aggregations.
func (daq *DavAccountQuery) Aggregate(fns ...AggregateFunc) *DavAccountSelect {
	return daq.Select().Aggregate(fns...)
}
// prepareQuery validates interceptors and selected fields, and resolves the
// traversal path (if this query was derived from another via QueryXxx) into
// the base selector before execution.
func (daq *DavAccountQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range daq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, daq); err != nil {
				return err
			}
		}
	}
	for _, f := range daq.ctx.Fields {
		if !davaccount.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if daq.path != nil {
		prev, err := daq.path(ctx)
		if err != nil {
			return err
		}
		daq.sql = prev
	}
	return nil
}

// sqlAll scans all matching rows into DavAccount nodes and eager-loads the
// requested edges (currently only "owner").
func (daq *DavAccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DavAccount, error) {
	var (
		nodes       = []*DavAccount{}
		_spec       = daq.querySpec()
		loadedTypes = [1]bool{
			daq.withOwner != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*DavAccount).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &DavAccount{config: daq.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, daq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := daq.withOwner; query != nil {
		if err := daq.loadOwner(ctx, query, nodes, nil,
			func(n *DavAccount, e *User) { n.Edges.Owner = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}

// loadOwner batch-loads the "owner" edge for all nodes in a single query,
// then assigns each User back to the DavAccounts referencing it.
func (daq *DavAccountQuery) loadOwner(ctx context.Context, query *UserQuery, nodes []*DavAccount, init func(*DavAccount), assign func(*DavAccount, *User)) error {
	ids := make([]int, 0, len(nodes))
	nodeids := make(map[int][]*DavAccount)
	for i := range nodes {
		fk := nodes[i].OwnerID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(user.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "owner_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
// sqlCount executes a COUNT over the current query spec.
func (daq *DavAccountQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := daq.querySpec()
	_spec.Node.Columns = daq.ctx.Fields
	if len(daq.ctx.Fields) > 0 {
		_spec.Unique = daq.ctx.Unique != nil && *daq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, daq.driver, _spec)
}

// querySpec translates the builder state (fields, predicates, ordering,
// limit/offset) into a sqlgraph.QuerySpec.
func (daq *DavAccountQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(davaccount.Table, davaccount.Columns, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
	_spec.From = daq.sql
	if unique := daq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if daq.path != nil {
		// Derived (traversal) queries default to unique results.
		_spec.Unique = true
	}
	if fields := daq.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always selected so nodes can be identified.
		_spec.Node.Columns = append(_spec.Node.Columns, davaccount.FieldID)
		for i := range fields {
			if fields[i] != davaccount.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		if daq.withOwner != nil {
			// Eager-loading "owner" requires the FK column to be present.
			_spec.Node.AddColumnOnce(davaccount.FieldOwnerID)
		}
	}
	if ps := daq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := daq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := daq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := daq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}

// sqlQuery builds the raw SELECT statement for this query, reusing the
// intermediate selector when the query was derived from a traversal.
func (daq *DavAccountQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(daq.driver.Dialect())
	t1 := builder.Table(davaccount.Table)
	columns := daq.ctx.Fields
	if len(columns) == 0 {
		columns = davaccount.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if daq.sql != nil {
		selector = daq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if daq.ctx.Unique != nil && *daq.ctx.Unique {
		selector.Distinct()
	}
	for _, p := range daq.predicates {
		p(selector)
	}
	for _, p := range daq.order {
		p(selector)
	}
	if offset := daq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := daq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
// DavAccountGroupBy is the group-by builder for DavAccount entities.
type DavAccountGroupBy struct {
	selector
	build *DavAccountQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (dagb *DavAccountGroupBy) Aggregate(fns ...AggregateFunc) *DavAccountGroupBy {
	dagb.fns = append(dagb.fns, fns...)
	return dagb
}

// Scan applies the selector query and scans the result into the given value.
func (dagb *DavAccountGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, dagb.build.ctx, "GroupBy")
	if err := dagb.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*DavAccountQuery, *DavAccountGroupBy](ctx, dagb.build, dagb, dagb.build.inters, v)
}

// sqlScan builds the GROUP BY statement (grouped columns + aggregations)
// and scans the rows into v.
func (dagb *DavAccountGroupBy) sqlScan(ctx context.Context, root *DavAccountQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(dagb.fns))
	for _, fn := range dagb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*dagb.flds)+len(dagb.fns))
		for _, f := range *dagb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*dagb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := dagb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

// DavAccountSelect is the builder for selecting fields of DavAccount entities.
type DavAccountSelect struct {
	*DavAccountQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (das *DavAccountSelect) Aggregate(fns ...AggregateFunc) *DavAccountSelect {
	das.fns = append(das.fns, fns...)
	return das
}

// Scan applies the selector query and scans the result into the given value.
func (das *DavAccountSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, das.ctx, "Select")
	if err := das.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*DavAccountQuery, *DavAccountSelect](ctx, das.DavAccountQuery, das, das.inters, v)
}

// sqlScan appends any aggregations to the selected columns and scans the
// resulting rows into v.
func (das *DavAccountSelect) sqlScan(ctx context.Context, root *DavAccountQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(das.fns))
	for _, fn := range das.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*das.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := das.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

565
ent/davaccount_update.go Normal file
View File

@@ -0,0 +1,565 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
)
// DavAccountUpdate is the builder for updating DavAccount entities.
type DavAccountUpdate struct {
	config
	hooks    []Hook
	mutation *DavAccountMutation
}

// Where appends a list predicates to the DavAccountUpdate builder.
func (dau *DavAccountUpdate) Where(ps ...predicate.DavAccount) *DavAccountUpdate {
	dau.mutation.Where(ps...)
	return dau
}

// SetUpdatedAt sets the "updated_at" field.
func (dau *DavAccountUpdate) SetUpdatedAt(t time.Time) *DavAccountUpdate {
	dau.mutation.SetUpdatedAt(t)
	return dau
}

// SetDeletedAt sets the "deleted_at" field.
func (dau *DavAccountUpdate) SetDeletedAt(t time.Time) *DavAccountUpdate {
	dau.mutation.SetDeletedAt(t)
	return dau
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dau *DavAccountUpdate) SetNillableDeletedAt(t *time.Time) *DavAccountUpdate {
	if t != nil {
		dau.SetDeletedAt(*t)
	}
	return dau
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (dau *DavAccountUpdate) ClearDeletedAt() *DavAccountUpdate {
	dau.mutation.ClearDeletedAt()
	return dau
}

// SetName sets the "name" field.
func (dau *DavAccountUpdate) SetName(s string) *DavAccountUpdate {
	dau.mutation.SetName(s)
	return dau
}

// SetNillableName sets the "name" field if the given value is not nil.
func (dau *DavAccountUpdate) SetNillableName(s *string) *DavAccountUpdate {
	if s != nil {
		dau.SetName(*s)
	}
	return dau
}

// SetURI sets the "uri" field.
func (dau *DavAccountUpdate) SetURI(s string) *DavAccountUpdate {
	dau.mutation.SetURI(s)
	return dau
}

// SetNillableURI sets the "uri" field if the given value is not nil.
func (dau *DavAccountUpdate) SetNillableURI(s *string) *DavAccountUpdate {
	if s != nil {
		dau.SetURI(*s)
	}
	return dau
}

// SetPassword sets the "password" field.
func (dau *DavAccountUpdate) SetPassword(s string) *DavAccountUpdate {
	dau.mutation.SetPassword(s)
	return dau
}

// SetNillablePassword sets the "password" field if the given value is not nil.
func (dau *DavAccountUpdate) SetNillablePassword(s *string) *DavAccountUpdate {
	if s != nil {
		dau.SetPassword(*s)
	}
	return dau
}

// SetOptions sets the "options" field.
func (dau *DavAccountUpdate) SetOptions(bs *boolset.BooleanSet) *DavAccountUpdate {
	dau.mutation.SetOptions(bs)
	return dau
}

// SetProps sets the "props" field.
func (dau *DavAccountUpdate) SetProps(tap *types.DavAccountProps) *DavAccountUpdate {
	dau.mutation.SetProps(tap)
	return dau
}

// ClearProps clears the value of the "props" field.
func (dau *DavAccountUpdate) ClearProps() *DavAccountUpdate {
	dau.mutation.ClearProps()
	return dau
}

// SetOwnerID sets the "owner_id" field.
func (dau *DavAccountUpdate) SetOwnerID(i int) *DavAccountUpdate {
	dau.mutation.SetOwnerID(i)
	return dau
}

// SetNillableOwnerID sets the "owner_id" field if the given value is not nil.
func (dau *DavAccountUpdate) SetNillableOwnerID(i *int) *DavAccountUpdate {
	if i != nil {
		dau.SetOwnerID(*i)
	}
	return dau
}

// SetOwner sets the "owner" edge to the User entity.
func (dau *DavAccountUpdate) SetOwner(u *User) *DavAccountUpdate {
	return dau.SetOwnerID(u.ID)
}

// Mutation returns the DavAccountMutation object of the builder.
func (dau *DavAccountUpdate) Mutation() *DavAccountMutation {
	return dau.mutation
}

// ClearOwner clears the "owner" edge to the User entity.
func (dau *DavAccountUpdate) ClearOwner() *DavAccountUpdate {
	dau.mutation.ClearOwner()
	return dau
}
// Save executes the query and returns the number of nodes affected by the update operation.
func (dau *DavAccountUpdate) Save(ctx context.Context) (int, error) {
	if err := dau.defaults(); err != nil {
		return 0, err
	}
	return withHooks(ctx, dau.sqlSave, dau.mutation, dau.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (dau *DavAccountUpdate) SaveX(ctx context.Context) int {
	affected, err := dau.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (dau *DavAccountUpdate) Exec(ctx context.Context) error {
	_, err := dau.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dau *DavAccountUpdate) ExecX(ctx context.Context) {
	if err := dau.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
// It stamps "updated_at" via the schema's UpdateDefault function when the
// caller did not set it explicitly.
func (dau *DavAccountUpdate) defaults() error {
	if _, ok := dau.mutation.UpdatedAt(); !ok {
		if davaccount.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized davaccount.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := davaccount.UpdateDefaultUpdatedAt()
		dau.mutation.SetUpdatedAt(v)
	}
	return nil
}

// check runs all checks and user-defined validators on the builder.
// The "owner" edge is required, so clearing it without providing a new
// owner ID is rejected.
func (dau *DavAccountUpdate) check() error {
	if _, ok := dau.mutation.OwnerID(); dau.mutation.OwnerCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "DavAccount.owner"`)
	}
	return nil
}
// sqlSave validates the builder, translates the mutation into an UPDATE
// spec (field sets/clears plus owner-edge add/clear), executes it, and
// returns the number of affected rows.
func (dau *DavAccountUpdate) sqlSave(ctx context.Context) (n int, err error) {
	if err := dau.check(); err != nil {
		return n, err
	}
	_spec := sqlgraph.NewUpdateSpec(davaccount.Table, davaccount.Columns, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
	if ps := dau.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := dau.mutation.UpdatedAt(); ok {
		_spec.SetField(davaccount.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := dau.mutation.DeletedAt(); ok {
		_spec.SetField(davaccount.FieldDeletedAt, field.TypeTime, value)
	}
	if dau.mutation.DeletedAtCleared() {
		_spec.ClearField(davaccount.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := dau.mutation.Name(); ok {
		_spec.SetField(davaccount.FieldName, field.TypeString, value)
	}
	if value, ok := dau.mutation.URI(); ok {
		_spec.SetField(davaccount.FieldURI, field.TypeString, value)
	}
	if value, ok := dau.mutation.Password(); ok {
		_spec.SetField(davaccount.FieldPassword, field.TypeString, value)
	}
	if value, ok := dau.mutation.Options(); ok {
		_spec.SetField(davaccount.FieldOptions, field.TypeBytes, value)
	}
	if value, ok := dau.mutation.Props(); ok {
		_spec.SetField(davaccount.FieldProps, field.TypeJSON, value)
	}
	if dau.mutation.PropsCleared() {
		_spec.ClearField(davaccount.FieldProps, field.TypeJSON)
	}
	// The owner edge is a M2O foreign key; clearing and (re)setting it are
	// expressed as edge clear/add operations on the update spec.
	if dau.mutation.OwnerCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   davaccount.OwnerTable,
			Columns: []string{davaccount.OwnerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := dau.mutation.OwnerIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   davaccount.OwnerTable,
			Columns: []string{davaccount.OwnerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if n, err = sqlgraph.UpdateNodes(ctx, dau.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{davaccount.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	dau.mutation.done = true
	return n, nil
}
// DavAccountUpdateOne is the builder for updating a single DavAccount entity.
type DavAccountUpdateOne struct {
	config
	fields   []string
	hooks    []Hook
	mutation *DavAccountMutation
}

// SetUpdatedAt sets the "updated_at" field.
func (dauo *DavAccountUpdateOne) SetUpdatedAt(t time.Time) *DavAccountUpdateOne {
	dauo.mutation.SetUpdatedAt(t)
	return dauo
}

// SetDeletedAt sets the "deleted_at" field.
func (dauo *DavAccountUpdateOne) SetDeletedAt(t time.Time) *DavAccountUpdateOne {
	dauo.mutation.SetDeletedAt(t)
	return dauo
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dauo *DavAccountUpdateOne) SetNillableDeletedAt(t *time.Time) *DavAccountUpdateOne {
	if t != nil {
		dauo.SetDeletedAt(*t)
	}
	return dauo
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (dauo *DavAccountUpdateOne) ClearDeletedAt() *DavAccountUpdateOne {
	dauo.mutation.ClearDeletedAt()
	return dauo
}

// SetName sets the "name" field.
func (dauo *DavAccountUpdateOne) SetName(s string) *DavAccountUpdateOne {
	dauo.mutation.SetName(s)
	return dauo
}

// SetNillableName sets the "name" field if the given value is not nil.
func (dauo *DavAccountUpdateOne) SetNillableName(s *string) *DavAccountUpdateOne {
	if s != nil {
		dauo.SetName(*s)
	}
	return dauo
}

// SetURI sets the "uri" field.
func (dauo *DavAccountUpdateOne) SetURI(s string) *DavAccountUpdateOne {
	dauo.mutation.SetURI(s)
	return dauo
}

// SetNillableURI sets the "uri" field if the given value is not nil.
func (dauo *DavAccountUpdateOne) SetNillableURI(s *string) *DavAccountUpdateOne {
	if s != nil {
		dauo.SetURI(*s)
	}
	return dauo
}

// SetPassword sets the "password" field.
func (dauo *DavAccountUpdateOne) SetPassword(s string) *DavAccountUpdateOne {
	dauo.mutation.SetPassword(s)
	return dauo
}

// SetNillablePassword sets the "password" field if the given value is not nil.
func (dauo *DavAccountUpdateOne) SetNillablePassword(s *string) *DavAccountUpdateOne {
	if s != nil {
		dauo.SetPassword(*s)
	}
	return dauo
}

// SetOptions sets the "options" field.
func (dauo *DavAccountUpdateOne) SetOptions(bs *boolset.BooleanSet) *DavAccountUpdateOne {
	dauo.mutation.SetOptions(bs)
	return dauo
}

// SetProps sets the "props" field.
func (dauo *DavAccountUpdateOne) SetProps(tap *types.DavAccountProps) *DavAccountUpdateOne {
	dauo.mutation.SetProps(tap)
	return dauo
}

// ClearProps clears the value of the "props" field.
func (dauo *DavAccountUpdateOne) ClearProps() *DavAccountUpdateOne {
	dauo.mutation.ClearProps()
	return dauo
}

// SetOwnerID sets the "owner_id" field.
func (dauo *DavAccountUpdateOne) SetOwnerID(i int) *DavAccountUpdateOne {
	dauo.mutation.SetOwnerID(i)
	return dauo
}

// SetNillableOwnerID sets the "owner_id" field if the given value is not nil.
func (dauo *DavAccountUpdateOne) SetNillableOwnerID(i *int) *DavAccountUpdateOne {
	if i != nil {
		dauo.SetOwnerID(*i)
	}
	return dauo
}

// SetOwner sets the "owner" edge to the User entity.
func (dauo *DavAccountUpdateOne) SetOwner(u *User) *DavAccountUpdateOne {
	return dauo.SetOwnerID(u.ID)
}

// Mutation returns the DavAccountMutation object of the builder.
func (dauo *DavAccountUpdateOne) Mutation() *DavAccountMutation {
	return dauo.mutation
}

// ClearOwner clears the "owner" edge to the User entity.
func (dauo *DavAccountUpdateOne) ClearOwner() *DavAccountUpdateOne {
	dauo.mutation.ClearOwner()
	return dauo
}
// Where appends a list predicates to the DavAccountUpdate builder.
func (dauo *DavAccountUpdateOne) Where(ps ...predicate.DavAccount) *DavAccountUpdateOne {
dauo.mutation.Where(ps...)
return dauo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (dauo *DavAccountUpdateOne) Select(field string, fields ...string) *DavAccountUpdateOne {
dauo.fields = append([]string{field}, fields...)
return dauo
}
// Save executes the query and returns the updated DavAccount entity.
// Defaults (e.g. updated_at) are applied before hooks and the SQL update run.
func (dauo *DavAccountUpdateOne) Save(ctx context.Context) (*DavAccount, error) {
	if err := dauo.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, dauo.sqlSave, dauo.mutation, dauo.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (dauo *DavAccountUpdateOne) SaveX(ctx context.Context) *DavAccount {
	node, err := dauo.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity, discarding the updated entity.
func (dauo *DavAccountUpdateOne) Exec(ctx context.Context) error {
	_, err := dauo.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dauo *DavAccountUpdateOne) ExecX(ctx context.Context) {
	if err := dauo.Exec(ctx); err != nil {
		panic(err)
	}
}
// defaults sets the default values of the builder before save.
// It stamps updated_at via davaccount.UpdateDefaultUpdatedAt unless the
// caller already set it; a nil hook func means ent/runtime was not imported.
func (dauo *DavAccountUpdateOne) defaults() error {
	if _, ok := dauo.mutation.UpdatedAt(); !ok {
		if davaccount.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized davaccount.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := davaccount.UpdateDefaultUpdatedAt()
		dauo.mutation.SetUpdatedAt(v)
	}
	return nil
}
// check runs all checks and user-defined validators on the builder.
// The "owner" edge is required: clearing it without setting a replacement
// owner_id is an error.
func (dauo *DavAccountUpdateOne) check() error {
	if _, ok := dauo.mutation.OwnerID(); dauo.mutation.OwnerCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "DavAccount.owner"`)
	}
	return nil
}
// sqlSave validates the builder, translates the mutation into a sqlgraph
// update spec (fields, predicates, edge clear/add operations), executes the
// single-node UPDATE, and scans the resulting row back into a DavAccount.
func (dauo *DavAccountUpdateOne) sqlSave(ctx context.Context) (_node *DavAccount, err error) {
	if err := dauo.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(davaccount.Table, davaccount.Columns, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
	// UpdateOne requires a concrete target ID on the mutation.
	id, ok := dauo.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DavAccount.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Honor a partial column selection (Select); the ID column is always included.
	if fields := dauo.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, davaccount.FieldID)
		for _, f := range fields {
			if !davaccount.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != davaccount.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply any Where() predicates on top of the ID match.
	if ps := dauo.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Copy mutated fields (and field clears) into the spec.
	if value, ok := dauo.mutation.UpdatedAt(); ok {
		_spec.SetField(davaccount.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := dauo.mutation.DeletedAt(); ok {
		_spec.SetField(davaccount.FieldDeletedAt, field.TypeTime, value)
	}
	if dauo.mutation.DeletedAtCleared() {
		_spec.ClearField(davaccount.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := dauo.mutation.Name(); ok {
		_spec.SetField(davaccount.FieldName, field.TypeString, value)
	}
	if value, ok := dauo.mutation.URI(); ok {
		_spec.SetField(davaccount.FieldURI, field.TypeString, value)
	}
	if value, ok := dauo.mutation.Password(); ok {
		_spec.SetField(davaccount.FieldPassword, field.TypeString, value)
	}
	if value, ok := dauo.mutation.Options(); ok {
		_spec.SetField(davaccount.FieldOptions, field.TypeBytes, value)
	}
	if value, ok := dauo.mutation.Props(); ok {
		_spec.SetField(davaccount.FieldProps, field.TypeJSON, value)
	}
	if dauo.mutation.PropsCleared() {
		_spec.ClearField(davaccount.FieldProps, field.TypeJSON)
	}
	// Owner (M2O, inverse) edge: clearing and re-adding are expressed as
	// separate edge specs; for M2O a "clear then add" is effectively an FK swap.
	if dauo.mutation.OwnerCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   davaccount.OwnerTable,
			Columns: []string{davaccount.OwnerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := dauo.mutation.OwnerIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   davaccount.OwnerTable,
			Columns: []string{davaccount.OwnerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Scan the updated row back into a fresh entity.
	_node = &DavAccount{config: dauo.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, dauo.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{davaccount.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	dauo.mutation.done = true
	return _node, nil
}

212
ent/directlink.go Normal file
View File

@@ -0,0 +1,212 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/file"
)
// DirectLink is the model entity for the DirectLink schema.
// It represents a permanent direct-download link pointing at a single File.
type DirectLink struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	// Pointer so a NULL (not soft-deleted) is distinguishable from a zero time.
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// Downloads holds the value of the "downloads" field.
	Downloads int `json:"downloads,omitempty"`
	// FileID holds the value of the "file_id" field (FK to the files table).
	FileID int `json:"file_id,omitempty"`
	// Speed holds the value of the "speed" field.
	Speed int `json:"speed,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the DirectLinkQuery when eager-loading is set.
	Edges DirectLinkEdges `json:"edges"`
	// selectValues stores any extra columns selected via modifiers.
	selectValues sql.SelectValues
}
// DirectLinkEdges holds the relations/edges for other nodes in the graph.
type DirectLinkEdges struct {
	// File holds the value of the file edge.
	File *File `json:"file,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 corresponds to the "file" edge.
	loadedTypes [1]bool
}
// FileOrErr returns the File value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e DirectLinkEdges) FileOrErr() (*File, error) {
	// The edge was never requested during eager-loading.
	if !e.loadedTypes[0] {
		return nil, &NotLoadedError{edge: "file"}
	}
	// The edge was requested but the related row does not exist.
	if e.File == nil {
		return nil, &NotFoundError{label: file.Label}
	}
	return e.File, nil
}
// scanValues returns the types for scanning values from sql.Rows.
// Each column is mapped to a nullable scanner matching its database type;
// unknown columns (selected via modifiers) fall back to sql.UnknownType.
func (*DirectLink) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case directlink.FieldID, directlink.FieldDownloads, directlink.FieldFileID, directlink.FieldSpeed:
			values[i] = new(sql.NullInt64)
		case directlink.FieldName:
			values[i] = new(sql.NullString)
		case directlink.FieldCreatedAt, directlink.FieldUpdatedAt, directlink.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the DirectLink fields. values must be positionally aligned with columns,
// exactly as produced by scanValues.
func (dl *DirectLink) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case directlink.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			dl.ID = int(value.Int64)
		case directlink.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				dl.CreatedAt = value.Time
			}
		case directlink.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				dl.UpdatedAt = value.Time
			}
		case directlink.FieldDeletedAt:
			// Nullable field: only allocate the pointer when the column is non-NULL.
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				dl.DeletedAt = new(time.Time)
				*dl.DeletedAt = value.Time
			}
		case directlink.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				dl.Name = value.String
			}
		case directlink.FieldDownloads:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field downloads", values[i])
			} else if value.Valid {
				dl.Downloads = int(value.Int64)
			}
		case directlink.FieldFileID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field file_id", values[i])
			} else if value.Valid {
				dl.FileID = int(value.Int64)
			}
		case directlink.FieldSpeed:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field speed", values[i])
			} else if value.Valid {
				dl.Speed = int(value.Int64)
			}
		default:
			// Columns selected via modifiers are kept for later retrieval via Value.
			dl.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the DirectLink.
// This includes values selected through modifiers, order, etc.
func (dl *DirectLink) Value(name string) (ent.Value, error) {
	return dl.selectValues.Get(name)
}

// QueryFile queries the "file" edge of the DirectLink entity.
func (dl *DirectLink) QueryFile() *FileQuery {
	return NewDirectLinkClient(dl.config).QueryFile(dl)
}
// Update returns a builder for updating this DirectLink.
// Note that you need to call DirectLink.Unwrap() before calling this method if this DirectLink
// was returned from a transaction, and the transaction was committed or rolled back.
func (dl *DirectLink) Update() *DirectLinkUpdateOne {
	return NewDirectLinkClient(dl.config).UpdateOne(dl)
}

// Unwrap unwraps the DirectLink entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics if the entity is not transactional (programmer error, not a runtime condition).
func (dl *DirectLink) Unwrap() *DirectLink {
	_tx, ok := dl.config.driver.(*txDriver)
	if !ok {
		panic("ent: DirectLink is not a transactional entity")
	}
	dl.config.driver = _tx.drv
	return dl
}
// String implements the fmt.Stringer.
// Output shape: DirectLink(id=…, created_at=…, …, speed=…); deleted_at is
// omitted (but its separator kept) when the entity is not soft-deleted.
func (dl *DirectLink) String() string {
	var builder strings.Builder
	builder.WriteString("DirectLink(")
	builder.WriteString(fmt.Sprintf("id=%v, ", dl.ID))
	builder.WriteString("created_at=")
	builder.WriteString(dl.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(dl.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	if v := dl.DeletedAt; v != nil {
		builder.WriteString("deleted_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(dl.Name)
	builder.WriteString(", ")
	builder.WriteString("downloads=")
	builder.WriteString(fmt.Sprintf("%v", dl.Downloads))
	builder.WriteString(", ")
	builder.WriteString("file_id=")
	builder.WriteString(fmt.Sprintf("%v", dl.FileID))
	builder.WriteString(", ")
	builder.WriteString("speed=")
	builder.WriteString(fmt.Sprintf("%v", dl.Speed))
	builder.WriteByte(')')
	return builder.String()
}
// SetFile manually sets the "file" edge as loaded, attaching v to the entity.
// This lets callers populate the edge without a database query (e.g. when the
// File was fetched separately). Receiver renamed from "e" to "dl" for
// consistency with every other *DirectLink method in this file.
func (dl *DirectLink) SetFile(v *File) {
	dl.Edges.File = v
	dl.Edges.loadedTypes[0] = true
}
// DirectLinks is a parsable slice of DirectLink.
type DirectLinks []*DirectLink

View File

@@ -0,0 +1,138 @@
// Code generated by ent, DO NOT EDIT.
package directlink
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
// Column names, edge names, and table metadata for the directlink schema.
const (
	// Label holds the string label denoting the directlink type in the database.
	Label = "direct_link"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldDownloads holds the string denoting the downloads field in the database.
	FieldDownloads = "downloads"
	// FieldFileID holds the string denoting the file_id field in the database.
	FieldFileID = "file_id"
	// FieldSpeed holds the string denoting the speed field in the database.
	FieldSpeed = "speed"
	// EdgeFile holds the string denoting the file edge name in mutations.
	EdgeFile = "file"
	// Table holds the table name of the directlink in the database.
	Table = "direct_links"
	// FileTable is the table that holds the file relation/edge.
	// Same as Table because the FK lives on the direct_links side (M2O).
	FileTable = "direct_links"
	// FileInverseTable is the table name for the File entity.
	// It exists in this package in order to avoid circular dependency with the "file" package.
	FileInverseTable = "files"
	// FileColumn is the table column denoting the file relation/edge.
	FileColumn = "file_id"
)
// Columns holds all SQL columns for directlink fields, in schema order.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldName,
	FieldDownloads,
	FieldFileID,
	FieldSpeed,
}
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks and Interceptors are wired in by ent/runtime at init time.
	Hooks        [1]ent.Hook
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)
// OrderOption defines the ordering options for the DirectLink queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByDownloads orders the results by the downloads field.
func ByDownloads(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDownloads, opts...).ToFunc()
}

// ByFileID orders the results by the file_id field.
func ByFileID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldFileID, opts...).ToFunc()
}

// BySpeed orders the results by the speed field.
func BySpeed(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSpeed, opts...).ToFunc()
}
// ByFileField orders the results by file field, joining through the
// M2O "file" edge to sort by a column of the related File row.
func ByFileField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newFileStep(), sql.OrderByField(field, opts...))
	}
}

// newFileStep builds the graph traversal step for the inverse M2O "file" edge.
func newFileStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(FileInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, FileTable, FileColumn),
	)
}

424
ent/directlink/where.go Normal file
View File

@@ -0,0 +1,424 @@
// Code generated by ent, DO NOT EDIT.
package directlink
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// ID filters vertices based on their ID field.
func ID(id int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLTE(FieldID, id))
}
// Shorthand equality predicates; each is identical to its *EQ counterpart.

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldUpdatedAt, v))
}

// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldDeletedAt, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldName, v))
}

// Downloads applies equality check predicate on the "downloads" field. It's identical to DownloadsEQ.
func Downloads(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldDownloads, v))
}

// FileID applies equality check predicate on the "file_id" field. It's identical to FileIDEQ.
func FileID(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldFileID, v))
}

// Speed applies equality check predicate on the "speed" field. It's identical to SpeedEQ.
func Speed(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldSpeed, v))
}
// Comparison predicates for the created_at and updated_at timestamp fields.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLTE(FieldUpdatedAt, v))
}
// Predicates for the nullable deleted_at field; IsNil/NotNil match the
// soft-delete state (NULL means not deleted).

// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldDeletedAt, v))
}

// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNEQ(FieldDeletedAt, v))
}

// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldIn(FieldDeletedAt, vs...))
}

// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNotIn(FieldDeletedAt, vs...))
}

// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGT(FieldDeletedAt, v))
}

// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGTE(FieldDeletedAt, v))
}

// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLT(FieldDeletedAt, v))
}

// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLTE(FieldDeletedAt, v))
}

// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.DirectLink {
	return predicate.DirectLink(sql.FieldIsNull(FieldDeletedAt))
}

// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNotNull(FieldDeletedAt))
}
// String predicates for the name field, including substring and
// case-insensitive (fold) variants.

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldHasSuffix(FieldName, v))
}

// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldContainsFold(FieldName, v))
}
// Numeric comparison predicates for the downloads counter field.

// DownloadsEQ applies the EQ predicate on the "downloads" field.
func DownloadsEQ(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldDownloads, v))
}

// DownloadsNEQ applies the NEQ predicate on the "downloads" field.
func DownloadsNEQ(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNEQ(FieldDownloads, v))
}

// DownloadsIn applies the In predicate on the "downloads" field.
func DownloadsIn(vs ...int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldIn(FieldDownloads, vs...))
}

// DownloadsNotIn applies the NotIn predicate on the "downloads" field.
func DownloadsNotIn(vs ...int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNotIn(FieldDownloads, vs...))
}

// DownloadsGT applies the GT predicate on the "downloads" field.
func DownloadsGT(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGT(FieldDownloads, v))
}

// DownloadsGTE applies the GTE predicate on the "downloads" field.
func DownloadsGTE(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGTE(FieldDownloads, v))
}

// DownloadsLT applies the LT predicate on the "downloads" field.
func DownloadsLT(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLT(FieldDownloads, v))
}

// DownloadsLTE applies the LTE predicate on the "downloads" field.
func DownloadsLTE(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLTE(FieldDownloads, v))
}
// Predicates for the file_id foreign-key field. Only equality/membership
// forms are generated because the field backs an edge, not an ordinal value.

// FileIDEQ applies the EQ predicate on the "file_id" field.
func FileIDEQ(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldFileID, v))
}

// FileIDNEQ applies the NEQ predicate on the "file_id" field.
func FileIDNEQ(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNEQ(FieldFileID, v))
}

// FileIDIn applies the In predicate on the "file_id" field.
func FileIDIn(vs ...int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldIn(FieldFileID, vs...))
}

// FileIDNotIn applies the NotIn predicate on the "file_id" field.
func FileIDNotIn(vs ...int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNotIn(FieldFileID, vs...))
}
// Numeric comparison predicates for the speed field.

// SpeedEQ applies the EQ predicate on the "speed" field.
func SpeedEQ(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldEQ(FieldSpeed, v))
}

// SpeedNEQ applies the NEQ predicate on the "speed" field.
func SpeedNEQ(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNEQ(FieldSpeed, v))
}

// SpeedIn applies the In predicate on the "speed" field.
func SpeedIn(vs ...int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldIn(FieldSpeed, vs...))
}

// SpeedNotIn applies the NotIn predicate on the "speed" field.
func SpeedNotIn(vs ...int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldNotIn(FieldSpeed, vs...))
}

// SpeedGT applies the GT predicate on the "speed" field.
func SpeedGT(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGT(FieldSpeed, v))
}

// SpeedGTE applies the GTE predicate on the "speed" field.
func SpeedGTE(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldGTE(FieldSpeed, v))
}

// SpeedLT applies the LT predicate on the "speed" field.
func SpeedLT(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLT(FieldSpeed, v))
}

// SpeedLTE applies the LTE predicate on the "speed" field.
func SpeedLTE(v int) predicate.DirectLink {
	return predicate.DirectLink(sql.FieldLTE(FieldSpeed, v))
}
// HasFile applies the HasEdge predicate on the "file" edge.
// Matches DirectLinks that have an associated File row.
func HasFile() predicate.DirectLink {
	return predicate.DirectLink(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, FileTable, FileColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasFileWith applies the HasEdge predicate on the "file" edge with a given conditions (other predicates).
// All given predicates are AND-ed together against the joined File row.
func HasFileWith(preds ...predicate.File) predicate.DirectLink {
	return predicate.DirectLink(func(s *sql.Selector) {
		step := newFileStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.DirectLink) predicate.DirectLink {
	return predicate.DirectLink(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.DirectLink) predicate.DirectLink {
	return predicate.DirectLink(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.DirectLink) predicate.DirectLink {
	return predicate.DirectLink(sql.NotPredicates(p))
}

883
ent/directlink_create.go Normal file
View File

@@ -0,0 +1,883 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/file"
)
// DirectLinkCreate is the builder for creating a DirectLink entity.
type DirectLinkCreate struct {
	config
	mutation *DirectLinkMutation
	hooks    []Hook
	// conflict stores upsert (ON CONFLICT) options, if any.
	conflict []sql.ConflictOption
}
// SetCreatedAt sets the "created_at" field.
func (dlc *DirectLinkCreate) SetCreatedAt(t time.Time) *DirectLinkCreate {
	dlc.mutation.SetCreatedAt(t)
	return dlc
}

// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (dlc *DirectLinkCreate) SetNillableCreatedAt(t *time.Time) *DirectLinkCreate {
	if t != nil {
		dlc.SetCreatedAt(*t)
	}
	return dlc
}

// SetUpdatedAt sets the "updated_at" field.
func (dlc *DirectLinkCreate) SetUpdatedAt(t time.Time) *DirectLinkCreate {
	dlc.mutation.SetUpdatedAt(t)
	return dlc
}

// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (dlc *DirectLinkCreate) SetNillableUpdatedAt(t *time.Time) *DirectLinkCreate {
	if t != nil {
		dlc.SetUpdatedAt(*t)
	}
	return dlc
}

// SetDeletedAt sets the "deleted_at" field.
func (dlc *DirectLinkCreate) SetDeletedAt(t time.Time) *DirectLinkCreate {
	dlc.mutation.SetDeletedAt(t)
	return dlc
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dlc *DirectLinkCreate) SetNillableDeletedAt(t *time.Time) *DirectLinkCreate {
	if t != nil {
		dlc.SetDeletedAt(*t)
	}
	return dlc
}

// SetName sets the "name" field.
func (dlc *DirectLinkCreate) SetName(s string) *DirectLinkCreate {
	dlc.mutation.SetName(s)
	return dlc
}

// SetDownloads sets the "downloads" field.
func (dlc *DirectLinkCreate) SetDownloads(i int) *DirectLinkCreate {
	dlc.mutation.SetDownloads(i)
	return dlc
}

// SetFileID sets the "file_id" field.
func (dlc *DirectLinkCreate) SetFileID(i int) *DirectLinkCreate {
	dlc.mutation.SetFileID(i)
	return dlc
}

// SetSpeed sets the "speed" field.
func (dlc *DirectLinkCreate) SetSpeed(i int) *DirectLinkCreate {
	dlc.mutation.SetSpeed(i)
	return dlc
}
// SetFile sets the "file" edge to the File entity.
func (dlc *DirectLinkCreate) SetFile(f *File) *DirectLinkCreate {
return dlc.SetFileID(f.ID)
}
// Mutation returns the DirectLinkMutation object of the builder.
func (dlc *DirectLinkCreate) Mutation() *DirectLinkMutation {
return dlc.mutation
}
// Save creates the DirectLink in the database.
func (dlc *DirectLinkCreate) Save(ctx context.Context) (*DirectLink, error) {
	// Apply schema defaults (created_at/updated_at) before hooks observe the mutation.
	if err := dlc.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, dlc.sqlSave, dlc.mutation, dlc.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (dlc *DirectLinkCreate) SaveX(ctx context.Context) *DirectLink {
	v, err := dlc.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query, discarding the created entity.
func (dlc *DirectLinkCreate) Exec(ctx context.Context) error {
	_, err := dlc.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dlc *DirectLinkCreate) ExecX(ctx context.Context) {
	if err := dlc.Exec(ctx); err != nil {
		panic(err)
	}
}
// defaults sets the default values of the builder before save.
// It errors (rather than panics) when the schema's default funcs were never
// registered, which happens if the ent/runtime package was not imported.
func (dlc *DirectLinkCreate) defaults() error {
	if _, ok := dlc.mutation.CreatedAt(); !ok {
		if directlink.DefaultCreatedAt == nil {
			return fmt.Errorf("ent: uninitialized directlink.DefaultCreatedAt (forgotten import ent/runtime?)")
		}
		v := directlink.DefaultCreatedAt()
		dlc.mutation.SetCreatedAt(v)
	}
	if _, ok := dlc.mutation.UpdatedAt(); !ok {
		if directlink.DefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized directlink.DefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := directlink.DefaultUpdatedAt()
		dlc.mutation.SetUpdatedAt(v)
	}
	return nil
}

// check runs all checks and user-defined validators on the builder.
// Every required field and edge must be present in the mutation.
func (dlc *DirectLinkCreate) check() error {
	if _, ok := dlc.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DirectLink.created_at"`)}
	}
	if _, ok := dlc.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DirectLink.updated_at"`)}
	}
	if _, ok := dlc.mutation.Name(); !ok {
		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "DirectLink.name"`)}
	}
	if _, ok := dlc.mutation.Downloads(); !ok {
		return &ValidationError{Name: "downloads", err: errors.New(`ent: missing required field "DirectLink.downloads"`)}
	}
	if _, ok := dlc.mutation.FileID(); !ok {
		return &ValidationError{Name: "file_id", err: errors.New(`ent: missing required field "DirectLink.file_id"`)}
	}
	if _, ok := dlc.mutation.Speed(); !ok {
		return &ValidationError{Name: "speed", err: errors.New(`ent: missing required field "DirectLink.speed"`)}
	}
	// The "file" edge is backed by the same file_id column, so its presence
	// is validated through FileID as well.
	if _, ok := dlc.mutation.FileID(); !ok {
		return &ValidationError{Name: "file", err: errors.New(`ent: missing required edge "DirectLink.file"`)}
	}
	return nil
}
// sqlSave validates the builder and executes the INSERT, returning the
// created entity with its database-assigned ID.
func (dlc *DirectLinkCreate) sqlSave(ctx context.Context) (*DirectLink, error) {
	if err := dlc.check(); err != nil {
		return nil, err
	}
	_node, _spec := dlc.createSpec()
	if err := sqlgraph.CreateNode(ctx, dlc.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Drivers report the auto-increment ID as int64; narrow it to the
	// entity's int ID type.
	id := _spec.ID.Value.(int64)
	_node.ID = int(id)
	dlc.mutation.id = &_node.ID
	dlc.mutation.done = true
	return _node, nil
}

// createSpec translates the staged mutation into a sqlgraph.CreateSpec and
// the in-memory node that mirrors the row being inserted.
func (dlc *DirectLinkCreate) createSpec() (*DirectLink, *sqlgraph.CreateSpec) {
	var (
		_node = &DirectLink{config: dlc.config}
		_spec = sqlgraph.NewCreateSpec(directlink.Table, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
	)
	if id, ok := dlc.mutation.ID(); ok {
		// Caller supplied an explicit ID (see SetRawID).
		_node.ID = id
		id64 := int64(id)
		_spec.ID.Value = id64
	}
	_spec.OnConflict = dlc.conflict
	if value, ok := dlc.mutation.CreatedAt(); ok {
		_spec.SetField(directlink.FieldCreatedAt, field.TypeTime, value)
		_node.CreatedAt = value
	}
	if value, ok := dlc.mutation.UpdatedAt(); ok {
		_spec.SetField(directlink.FieldUpdatedAt, field.TypeTime, value)
		_node.UpdatedAt = value
	}
	if value, ok := dlc.mutation.DeletedAt(); ok {
		_spec.SetField(directlink.FieldDeletedAt, field.TypeTime, value)
		_node.DeletedAt = &value
	}
	if value, ok := dlc.mutation.Name(); ok {
		_spec.SetField(directlink.FieldName, field.TypeString, value)
		_node.Name = value
	}
	if value, ok := dlc.mutation.Downloads(); ok {
		_spec.SetField(directlink.FieldDownloads, field.TypeInt, value)
		_node.Downloads = value
	}
	if value, ok := dlc.mutation.Speed(); ok {
		_spec.SetField(directlink.FieldSpeed, field.TypeInt, value)
		_node.Speed = value
	}
	// The file edge is many-to-one and inverse; the FK lives on this table
	// (directlink.FileColumn), so the edge also populates _node.FileID.
	if nodes := dlc.mutation.FileIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   directlink.FileTable,
			Columns: []string{directlink.FileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_node.FileID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.DirectLink.Create().
//		SetCreatedAt(v).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.DirectLinkUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (dlc *DirectLinkCreate) OnConflict(opts ...sql.ConflictOption) *DirectLinkUpsertOne {
	dlc.conflict = opts
	return &DirectLinkUpsertOne{
		create: dlc,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.DirectLink.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (dlc *DirectLinkCreate) OnConflictColumns(columns ...string) *DirectLinkUpsertOne {
	dlc.conflict = append(dlc.conflict, sql.ConflictColumns(columns...))
	return &DirectLinkUpsertOne{
		create: dlc,
	}
}

type (
	// DirectLinkUpsertOne is the builder for "upsert"-ing
	//  one DirectLink node.
	DirectLinkUpsertOne struct {
		create *DirectLinkCreate
	}

	// DirectLinkUpsert is the "OnConflict" setter.
	// It wraps the SQL UPDATE SET clause applied on conflict.
	DirectLinkUpsert struct {
		*sql.UpdateSet
	}
)
// SetUpdatedAt sets the "updated_at" field.
func (u *DirectLinkUpsert) SetUpdatedAt(v time.Time) *DirectLinkUpsert {
	u.Set(directlink.FieldUpdatedAt, v)
	return u
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
// (i.e. the SQL "excluded" pseudo-row of the conflicting INSERT.)
func (u *DirectLinkUpsert) UpdateUpdatedAt() *DirectLinkUpsert {
	u.SetExcluded(directlink.FieldUpdatedAt)
	return u
}

// SetDeletedAt sets the "deleted_at" field.
func (u *DirectLinkUpsert) SetDeletedAt(v time.Time) *DirectLinkUpsert {
	u.Set(directlink.FieldDeletedAt, v)
	return u
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateDeletedAt() *DirectLinkUpsert {
	u.SetExcluded(directlink.FieldDeletedAt)
	return u
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DirectLinkUpsert) ClearDeletedAt() *DirectLinkUpsert {
	u.SetNull(directlink.FieldDeletedAt)
	return u
}

// SetName sets the "name" field.
func (u *DirectLinkUpsert) SetName(v string) *DirectLinkUpsert {
	u.Set(directlink.FieldName, v)
	return u
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateName() *DirectLinkUpsert {
	u.SetExcluded(directlink.FieldName)
	return u
}

// SetDownloads sets the "downloads" field.
func (u *DirectLinkUpsert) SetDownloads(v int) *DirectLinkUpsert {
	u.Set(directlink.FieldDownloads, v)
	return u
}

// UpdateDownloads sets the "downloads" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateDownloads() *DirectLinkUpsert {
	u.SetExcluded(directlink.FieldDownloads)
	return u
}

// AddDownloads adds v to the "downloads" field.
func (u *DirectLinkUpsert) AddDownloads(v int) *DirectLinkUpsert {
	u.Add(directlink.FieldDownloads, v)
	return u
}

// SetFileID sets the "file_id" field.
func (u *DirectLinkUpsert) SetFileID(v int) *DirectLinkUpsert {
	u.Set(directlink.FieldFileID, v)
	return u
}

// UpdateFileID sets the "file_id" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateFileID() *DirectLinkUpsert {
	u.SetExcluded(directlink.FieldFileID)
	return u
}

// SetSpeed sets the "speed" field.
func (u *DirectLinkUpsert) SetSpeed(v int) *DirectLinkUpsert {
	u.Set(directlink.FieldSpeed, v)
	return u
}

// UpdateSpeed sets the "speed" field to the value that was provided on create.
func (u *DirectLinkUpsert) UpdateSpeed() *DirectLinkUpsert {
	u.SetExcluded(directlink.FieldSpeed)
	return u
}

// AddSpeed adds v to the "speed" field.
func (u *DirectLinkUpsert) AddSpeed(v int) *DirectLinkUpsert {
	u.Add(directlink.FieldSpeed, v)
	return u
}
// UpdateNewValues updates the mutable fields using the new values that were set on create.
// Using this option is equivalent to using:
//
//	client.DirectLink.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *DirectLinkUpsertOne) UpdateNewValues() *DirectLinkUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: keep the stored value when the caller set
		// one on create, instead of overwriting it on conflict.
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(directlink.FieldCreatedAt)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.DirectLink.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *DirectLinkUpsertOne) Ignore() *DirectLinkUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *DirectLinkUpsertOne) DoNothing() *DirectLinkUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the DirectLinkCreate.OnConflict
// documentation for more info.
func (u *DirectLinkUpsertOne) Update(set func(*DirectLinkUpsert)) *DirectLinkUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&DirectLinkUpsert{UpdateSet: update})
	}))
	return u
}
// SetUpdatedAt sets the "updated_at" field.
// These methods delegate to DirectLinkUpsert via Update.
func (u *DirectLinkUpsertOne) SetUpdatedAt(v time.Time) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateUpdatedAt() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *DirectLinkUpsertOne) SetDeletedAt(v time.Time) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateDeletedAt() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DirectLinkUpsertOne) ClearDeletedAt() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.ClearDeletedAt()
	})
}

// SetName sets the "name" field.
func (u *DirectLinkUpsertOne) SetName(v string) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetName(v)
	})
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateName() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateName()
	})
}

// SetDownloads sets the "downloads" field.
func (u *DirectLinkUpsertOne) SetDownloads(v int) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetDownloads(v)
	})
}

// AddDownloads adds v to the "downloads" field.
func (u *DirectLinkUpsertOne) AddDownloads(v int) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.AddDownloads(v)
	})
}

// UpdateDownloads sets the "downloads" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateDownloads() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateDownloads()
	})
}

// SetFileID sets the "file_id" field.
func (u *DirectLinkUpsertOne) SetFileID(v int) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetFileID(v)
	})
}

// UpdateFileID sets the "file_id" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateFileID() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateFileID()
	})
}

// SetSpeed sets the "speed" field.
func (u *DirectLinkUpsertOne) SetSpeed(v int) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetSpeed(v)
	})
}

// AddSpeed adds v to the "speed" field.
func (u *DirectLinkUpsertOne) AddSpeed(v int) *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.AddSpeed(v)
	})
}

// UpdateSpeed sets the "speed" field to the value that was provided on create.
func (u *DirectLinkUpsertOne) UpdateSpeed() *DirectLinkUpsertOne {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateSpeed()
	})
}
// Exec executes the query.
func (u *DirectLinkUpsertOne) Exec(ctx context.Context) error {
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for DirectLinkCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
func (u *DirectLinkUpsertOne) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

// ID executes the UPSERT query and returns the inserted/updated ID.
func (u *DirectLinkUpsertOne) ID(ctx context.Context) (id int, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
func (u *DirectLinkUpsertOne) IDX(ctx context.Context) int {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// SetRawID forces an explicit primary-key value for the created row
// instead of relying on the database's auto-increment.
func (m *DirectLinkCreate) SetRawID(t int) *DirectLinkCreate {
	m.mutation.SetRawID(t)
	return m
}
// DirectLinkCreateBulk is the builder for creating many DirectLink entities in bulk.
type DirectLinkCreateBulk struct {
	config
	err      error                // deferred construction error, surfaced on Save
	builders []*DirectLinkCreate  // one single-entity builder per row
	conflict []sql.ConflictOption // shared ON CONFLICT clauses for the batch
}
// Save creates the DirectLink entities in the database.
//
// Each builder's hooks are chained so that the last mutator in the chain
// performs one batched INSERT for all rows.
func (dlcb *DirectLinkCreateBulk) Save(ctx context.Context) ([]*DirectLink, error) {
	if dlcb.err != nil {
		return nil, dlcb.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(dlcb.builders))
	nodes := make([]*DirectLink, len(dlcb.builders))
	mutators := make([]Mutator, len(dlcb.builders))
	for i := range dlcb.builders {
		// Apply schema defaults before hooks observe the mutation, and
		// propagate the error instead of discarding it (the single-entity
		// Save checks this error too; previously it was silently ignored,
		// losing the "forgotten import ent/runtime?" diagnostic).
		if err := dlcb.builders[i].defaults(); err != nil {
			return nil, err
		}
		func(i int, root context.Context) {
			builder := dlcb.builders[i]
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*DirectLinkMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					_, err = mutators[i+1].Mutate(root, dlcb.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = dlcb.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, dlcb.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		if _, err := mutators[0].Mutate(ctx, dlcb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// SaveX is like Save, but panics if an error occurs.
func (dlcb *DirectLinkCreateBulk) SaveX(ctx context.Context) []*DirectLink {
	v, err := dlcb.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query, discarding the created entities.
func (dlcb *DirectLinkCreateBulk) Exec(ctx context.Context) error {
	_, err := dlcb.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dlcb *DirectLinkCreateBulk) ExecX(ctx context.Context) {
	if err := dlcb.Exec(ctx); err != nil {
		panic(err)
	}
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.DirectLink.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.DirectLinkUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (dlcb *DirectLinkCreateBulk) OnConflict(opts ...sql.ConflictOption) *DirectLinkUpsertBulk {
	dlcb.conflict = opts
	return &DirectLinkUpsertBulk{
		create: dlcb,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.DirectLink.CreateBulk(builders...).
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (dlcb *DirectLinkCreateBulk) OnConflictColumns(columns ...string) *DirectLinkUpsertBulk {
	dlcb.conflict = append(dlcb.conflict, sql.ConflictColumns(columns...))
	return &DirectLinkUpsertBulk{
		create: dlcb,
	}
}
// DirectLinkUpsertBulk is the builder for "upsert"-ing
// a bulk of DirectLink nodes.
type DirectLinkUpsertBulk struct {
	create *DirectLinkCreateBulk
}

// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.DirectLink.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *DirectLinkUpsertBulk) UpdateNewValues() *DirectLinkUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: if any builder supplied it explicitly,
		// keep the stored value on conflict rather than overwriting it.
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(directlink.FieldCreatedAt)
			}
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.DirectLink.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *DirectLinkUpsertBulk) Ignore() *DirectLinkUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *DirectLinkUpsertBulk) DoNothing() *DirectLinkUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the DirectLinkCreateBulk.OnConflict
// documentation for more info.
func (u *DirectLinkUpsertBulk) Update(set func(*DirectLinkUpsert)) *DirectLinkUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&DirectLinkUpsert{UpdateSet: update})
	}))
	return u
}
// SetUpdatedAt sets the "updated_at" field.
// These methods delegate to DirectLinkUpsert via Update.
func (u *DirectLinkUpsertBulk) SetUpdatedAt(v time.Time) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateUpdatedAt() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *DirectLinkUpsertBulk) SetDeletedAt(v time.Time) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateDeletedAt() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *DirectLinkUpsertBulk) ClearDeletedAt() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.ClearDeletedAt()
	})
}

// SetName sets the "name" field.
func (u *DirectLinkUpsertBulk) SetName(v string) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetName(v)
	})
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateName() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateName()
	})
}

// SetDownloads sets the "downloads" field.
func (u *DirectLinkUpsertBulk) SetDownloads(v int) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetDownloads(v)
	})
}

// AddDownloads adds v to the "downloads" field.
func (u *DirectLinkUpsertBulk) AddDownloads(v int) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.AddDownloads(v)
	})
}

// UpdateDownloads sets the "downloads" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateDownloads() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateDownloads()
	})
}

// SetFileID sets the "file_id" field.
func (u *DirectLinkUpsertBulk) SetFileID(v int) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetFileID(v)
	})
}

// UpdateFileID sets the "file_id" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateFileID() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateFileID()
	})
}

// SetSpeed sets the "speed" field.
func (u *DirectLinkUpsertBulk) SetSpeed(v int) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.SetSpeed(v)
	})
}

// AddSpeed adds v to the "speed" field.
func (u *DirectLinkUpsertBulk) AddSpeed(v int) *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.AddSpeed(v)
	})
}

// UpdateSpeed sets the "speed" field to the value that was provided on create.
func (u *DirectLinkUpsertBulk) UpdateSpeed() *DirectLinkUpsertBulk {
	return u.Update(func(s *DirectLinkUpsert) {
		s.UpdateSpeed()
	})
}
// Exec executes the query.
func (u *DirectLinkUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	// Conflict options must be set once on the bulk builder; per-builder
	// options would silently be ignored by the batch INSERT, so reject them.
	for i, b := range u.create.builders {
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the DirectLinkCreateBulk instead", i)
		}
	}
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for DirectLinkCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
func (u *DirectLinkUpsertBulk) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

88
ent/directlink_delete.go Normal file
View File

@@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// DirectLinkDelete is the builder for deleting a DirectLink entity.
type DirectLinkDelete struct {
	config
	hooks    []Hook              // mutation middleware applied around Exec
	mutation *DirectLinkMutation // carries the delete predicates
}

// Where appends a list predicates to the DirectLinkDelete builder.
func (dld *DirectLinkDelete) Where(ps ...predicate.DirectLink) *DirectLinkDelete {
	dld.mutation.Where(ps...)
	return dld
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (dld *DirectLinkDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, dld.sqlExec, dld.mutation, dld.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (dld *DirectLinkDelete) ExecX(ctx context.Context) int {
	n, err := dld.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}
// sqlExec builds the DELETE spec from the accumulated predicates and runs it,
// returning the number of affected rows.
func (dld *DirectLinkDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(directlink.Table, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
	if ps := dld.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, dld.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	dld.mutation.done = true
	return affected, err
}
// DirectLinkDeleteOne is the builder for deleting a single DirectLink entity.
type DirectLinkDeleteOne struct {
	dld *DirectLinkDelete
}

// Where appends a list predicates to the DirectLinkDelete builder.
func (dldo *DirectLinkDeleteOne) Where(ps ...predicate.DirectLink) *DirectLinkDeleteOne {
	dldo.dld.mutation.Where(ps...)
	return dldo
}

// Exec executes the deletion query.
// It reports NotFoundError when no row matched the predicates.
func (dldo *DirectLinkDeleteOne) Exec(ctx context.Context) error {
	n, err := dldo.dld.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{directlink.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (dldo *DirectLinkDeleteOne) ExecX(ctx context.Context) {
	if err := dldo.Exec(ctx); err != nil {
		panic(err)
	}
}

605
ent/directlink_query.go Normal file
View File

@@ -0,0 +1,605 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// DirectLinkQuery is the builder for querying DirectLink entities.
type DirectLinkQuery struct {
	config
	ctx        *QueryContext
	order      []directlink.OrderOption
	inters     []Interceptor
	predicates []predicate.DirectLink
	withFile   *FileQuery // non-nil when the "file" edge is eager-loaded
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the DirectLinkQuery builder.
func (dlq *DirectLinkQuery) Where(ps ...predicate.DirectLink) *DirectLinkQuery {
	dlq.predicates = append(dlq.predicates, ps...)
	return dlq
}

// Limit the number of records to be returned by this query.
func (dlq *DirectLinkQuery) Limit(limit int) *DirectLinkQuery {
	dlq.ctx.Limit = &limit
	return dlq
}

// Offset to start from.
func (dlq *DirectLinkQuery) Offset(offset int) *DirectLinkQuery {
	dlq.ctx.Offset = &offset
	return dlq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (dlq *DirectLinkQuery) Unique(unique bool) *DirectLinkQuery {
	dlq.ctx.Unique = &unique
	return dlq
}

// Order specifies how the records should be ordered.
func (dlq *DirectLinkQuery) Order(o ...directlink.OrderOption) *DirectLinkQuery {
	dlq.order = append(dlq.order, o...)
	return dlq
}
// QueryFile chains the current query on the "file" edge.
// The returned FileQuery resolves lazily: the traversal step is built from
// this query's selector only when the chained query executes.
func (dlq *DirectLinkQuery) QueryFile() *FileQuery {
	query := (&FileClient{config: dlq.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := dlq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := dlq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(directlink.Table, directlink.FieldID, selector),
			sqlgraph.To(file.Table, file.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, directlink.FileTable, directlink.FileColumn),
		)
		fromU = sqlgraph.SetNeighbors(dlq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
// First returns the first DirectLink entity from the query.
// Returns a *NotFoundError when no DirectLink was found.
func (dlq *DirectLinkQuery) First(ctx context.Context) (*DirectLink, error) {
	nodes, err := dlq.Limit(1).All(setContextOp(ctx, dlq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{directlink.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A NotFoundError is swallowed and reported as a nil result.
func (dlq *DirectLinkQuery) FirstX(ctx context.Context) *DirectLink {
	node, err := dlq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first DirectLink ID from the query.
// Returns a *NotFoundError when no DirectLink ID was found.
func (dlq *DirectLinkQuery) FirstID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = dlq.Limit(1).IDs(setContextOp(ctx, dlq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{directlink.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
func (dlq *DirectLinkQuery) FirstIDX(ctx context.Context) int {
	id, err := dlq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
// Only returns a single DirectLink entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one DirectLink entity is found.
// Returns a *NotFoundError when no DirectLink entities are found.
func (dlq *DirectLinkQuery) Only(ctx context.Context) (*DirectLink, error) {
	// Limit(2) is enough to distinguish zero/one/many without scanning everything.
	nodes, err := dlq.Limit(2).All(setContextOp(ctx, dlq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{directlink.Label}
	default:
		return nil, &NotSingularError{directlink.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (dlq *DirectLinkQuery) OnlyX(ctx context.Context) *DirectLink {
	node, err := dlq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only DirectLink ID in the query.
// Returns a *NotSingularError when more than one DirectLink ID is found.
// Returns a *NotFoundError when no entities are found.
func (dlq *DirectLinkQuery) OnlyID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = dlq.Limit(2).IDs(setContextOp(ctx, dlq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{directlink.Label}
	default:
		err = &NotSingularError{directlink.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (dlq *DirectLinkQuery) OnlyIDX(ctx context.Context) int {
	id, err := dlq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// All executes the query and returns a list of DirectLinks.
func (dlq *DirectLinkQuery) All(ctx context.Context) ([]*DirectLink, error) {
	ctx = setContextOp(ctx, dlq.ctx, "All")
	if err := dlq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*DirectLink, *DirectLinkQuery]()
	return withInterceptors[[]*DirectLink](ctx, dlq, qr, dlq.inters)
}

// AllX is like All, but panics if an error occurs.
func (dlq *DirectLinkQuery) AllX(ctx context.Context) []*DirectLink {
	nodes, err := dlq.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of DirectLink IDs.
func (dlq *DirectLinkQuery) IDs(ctx context.Context) (ids []int, err error) {
	// A traversal (non-nil path) may produce duplicate rows; default to
	// DISTINCT unless the caller chose explicitly via Unique.
	if dlq.ctx.Unique == nil && dlq.path != nil {
		dlq.Unique(true)
	}
	ctx = setContextOp(ctx, dlq.ctx, "IDs")
	if err = dlq.Select(directlink.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (dlq *DirectLinkQuery) IDsX(ctx context.Context) []int {
	ids, err := dlq.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
// Count returns the count of the given query.
func (dlq *DirectLinkQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, dlq.ctx, "Count")
if err := dlq.prepareQuery(ctx); err != nil {
return 0, err
}
return withInterceptors[int](ctx, dlq, querierCount[*DirectLinkQuery](), dlq.inters)
}
// CountX is like Count, but panics if an error occurs.
func (dlq *DirectLinkQuery) CountX(ctx context.Context) int {
count, err := dlq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (dlq *DirectLinkQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, dlq.ctx, "Exist")
	// Probing for the first ID is enough to decide existence.
	_, err := dlq.FirstID(ctx)
	if IsNotFound(err) {
		return false, nil
	}
	if err != nil {
		return false, fmt.Errorf("ent: check existence: %w", err)
	}
	return true, nil
}

// ExistX is like Exist, but panics if an error occurs.
func (dlq *DirectLinkQuery) ExistX(ctx context.Context) bool {
	ok, err := dlq.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return ok
}
// Clone returns a duplicate of the DirectLinkQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
// Cloning a nil receiver returns nil so chained clones stay safe.
func (dlq *DirectLinkQuery) Clone() *DirectLinkQuery {
	if dlq == nil {
		return nil
	}
	return &DirectLinkQuery{
		config:     dlq.config,
		ctx:        dlq.ctx.Clone(),
		order:      append([]directlink.OrderOption{}, dlq.order...),
		inters:     append([]Interceptor{}, dlq.inters...),
		predicates: append([]predicate.DirectLink{}, dlq.predicates...),
		withFile:   dlq.withFile.Clone(),
		// clone intermediate query.
		sql:  dlq.sql.Clone(),
		path: dlq.path,
	}
}

// WithFile tells the query-builder to eager-load the nodes that are connected to
// the "file" edge. The optional arguments are used to configure the query builder of the edge.
func (dlq *DirectLinkQuery) WithFile(opts ...func(*FileQuery)) *DirectLinkQuery {
	query := (&FileClient{config: dlq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	dlq.withFile = query
	return dlq
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.DirectLink.Query().
//		GroupBy(directlink.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (dlq *DirectLinkQuery) GroupBy(field string, fields ...string) *DirectLinkGroupBy {
	// The grouped fields replace any previously selected columns.
	dlq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &DirectLinkGroupBy{build: dlq}
	grbuild.flds = &dlq.ctx.Fields
	grbuild.label = directlink.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.DirectLink.Query().
//		Select(directlink.FieldCreatedAt).
//		Scan(ctx, &v)
func (dlq *DirectLinkQuery) Select(fields ...string) *DirectLinkSelect {
	dlq.ctx.Fields = append(dlq.ctx.Fields, fields...)
	sbuild := &DirectLinkSelect{DirectLinkQuery: dlq}
	sbuild.label = directlink.Label
	sbuild.flds, sbuild.scan = &dlq.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a DirectLinkSelect configured with the given aggregations.
func (dlq *DirectLinkQuery) Aggregate(fns ...AggregateFunc) *DirectLinkSelect {
	return dlq.Select().Aggregate(fns...)
}
// prepareQuery validates interceptors and selected fields, and resolves the
// intermediate traversal path (if any) into the underlying SQL selector.
func (dlq *DirectLinkQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range dlq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		// Traversers get a chance to inspect/modify the query before execution.
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, dlq); err != nil {
				return err
			}
		}
	}
	for _, f := range dlq.ctx.Fields {
		if !directlink.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	// A non-nil path means this query was reached through an edge traversal;
	// materialize it into the base selector.
	if dlq.path != nil {
		prev, err := dlq.path(ctx)
		if err != nil {
			return err
		}
		dlq.sql = prev
	}
	return nil
}
// sqlAll scans all matching DirectLink rows and eager-loads the requested
// edges ("file"). hooks may mutate the query spec before execution.
func (dlq *DirectLinkQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DirectLink, error) {
	var (
		nodes = []*DirectLink{}
		_spec = dlq.querySpec()
		// One flag per declared edge; marks which edges were eager-loaded.
		loadedTypes = [1]bool{
			dlq.withFile != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*DirectLink).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &DirectLink{config: dlq.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, dlq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	// Second query: batch-load the "file" edge for all fetched nodes.
	if query := dlq.withFile; query != nil {
		if err := dlq.loadFile(ctx, query, nodes, nil,
			func(n *DirectLink, e *File) { n.Edges.File = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// loadFile batch-loads the M2O "file" edge: it collects the distinct file IDs
// referenced by nodes, fetches them in a single query, and assigns each File
// back to every DirectLink that references it. init is unused for M2O edges.
func (dlq *DirectLinkQuery) loadFile(ctx context.Context, query *FileQuery, nodes []*DirectLink, init func(*DirectLink), assign func(*DirectLink, *File)) error {
	ids := make([]int, 0, len(nodes))
	// Map FK -> all DirectLinks sharing it, so a File fans out to every owner.
	nodeids := make(map[int][]*DirectLink)
	for i := range nodes {
		fk := nodes[i].FileID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(file.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			// Should be impossible: the IN clause was built from nodeids keys.
			return fmt.Errorf(`unexpected foreign-key "file_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
// sqlCount executes a COUNT over the query. When specific fields are selected,
// uniqueness (DISTINCT) is honored only if the caller configured it.
func (dlq *DirectLinkQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := dlq.querySpec()
	_spec.Node.Columns = dlq.ctx.Fields
	if len(dlq.ctx.Fields) > 0 {
		_spec.Unique = dlq.ctx.Unique != nil && *dlq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, dlq.driver, _spec)
}
// querySpec translates the builder state (fields, predicates, order,
// limit/offset) into a sqlgraph.QuerySpec for execution.
func (dlq *DirectLinkQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(directlink.Table, directlink.Columns, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
	_spec.From = dlq.sql
	if unique := dlq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if dlq.path != nil {
		// Traversal queries default to DISTINCT to avoid duplicate rows.
		_spec.Unique = true
	}
	if fields := dlq.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected first, then the requested fields.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, directlink.FieldID)
		for i := range fields {
			if fields[i] != directlink.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// Eager-loading "file" needs the FK column even if not requested.
		if dlq.withFile != nil {
			_spec.Node.AddColumnOnce(directlink.FieldFileID)
		}
	}
	if ps := dlq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := dlq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := dlq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := dlq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
// sqlQuery builds the raw SQL selector for this query, used by the group-by
// and select builders. It mirrors querySpec but yields a *sql.Selector.
func (dlq *DirectLinkQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(dlq.driver.Dialect())
	t1 := builder.Table(directlink.Table)
	columns := dlq.ctx.Fields
	if len(columns) == 0 {
		columns = directlink.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	// If an intermediate selector exists (edge traversal), build on top of it.
	if dlq.sql != nil {
		selector = dlq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if dlq.ctx.Unique != nil && *dlq.ctx.Unique {
		selector.Distinct()
	}
	for _, p := range dlq.predicates {
		p(selector)
	}
	for _, p := range dlq.order {
		p(selector)
	}
	if offset := dlq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := dlq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
// DirectLinkGroupBy is the group-by builder for DirectLink entities.
type DirectLinkGroupBy struct {
	selector
	// build is the underlying query whose ctx.Fields define the group columns.
	build *DirectLinkQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (dlgb *DirectLinkGroupBy) Aggregate(fns ...AggregateFunc) *DirectLinkGroupBy {
	dlgb.fns = append(dlgb.fns, fns...)
	return dlgb
}

// Scan applies the selector query and scans the result into the given value.
func (dlgb *DirectLinkGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, dlgb.build.ctx, "GroupBy")
	if err := dlgb.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*DirectLinkQuery, *DirectLinkGroupBy](ctx, dlgb.build, dlgb, dlgb.build.inters, v)
}

// sqlScan executes the GROUP BY query and scans rows into v. It selects the
// grouped columns plus aggregations unless columns were already selected.
func (dlgb *DirectLinkGroupBy) sqlScan(ctx context.Context, root *DirectLinkQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(dlgb.fns))
	for _, fn := range dlgb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*dlgb.flds)+len(dlgb.fns))
		for _, f := range *dlgb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*dlgb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := dlgb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
// DirectLinkSelect is the builder for selecting fields of DirectLink entities.
type DirectLinkSelect struct {
	*DirectLinkQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (dls *DirectLinkSelect) Aggregate(fns ...AggregateFunc) *DirectLinkSelect {
	dls.fns = append(dls.fns, fns...)
	return dls
}

// Scan applies the selector query and scans the result into the given value.
func (dls *DirectLinkSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, dls.ctx, "Select")
	if err := dls.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*DirectLinkQuery, *DirectLinkSelect](ctx, dls.DirectLinkQuery, dls, dls.inters, v)
}

// sqlScan executes the SELECT query and scans rows into v, appending any
// aggregation expressions to (or replacing) the selected columns.
func (dls *DirectLinkSelect) sqlScan(ctx context.Context, root *DirectLinkQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(dls.fns))
	for _, fn := range dls.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*dls.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		// Pure aggregation query: select only the aggregate expressions.
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := dls.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

549
ent/directlink_update.go Normal file
View File

@@ -0,0 +1,549 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// DirectLinkUpdate is the builder for updating DirectLink entities.
type DirectLinkUpdate struct {
	config
	hooks    []Hook
	mutation *DirectLinkMutation
}

// Where appends a list predicates to the DirectLinkUpdate builder.
func (dlu *DirectLinkUpdate) Where(ps ...predicate.DirectLink) *DirectLinkUpdate {
	dlu.mutation.Where(ps...)
	return dlu
}

// SetUpdatedAt sets the "updated_at" field.
func (dlu *DirectLinkUpdate) SetUpdatedAt(t time.Time) *DirectLinkUpdate {
	dlu.mutation.SetUpdatedAt(t)
	return dlu
}

// SetDeletedAt sets the "deleted_at" field.
func (dlu *DirectLinkUpdate) SetDeletedAt(t time.Time) *DirectLinkUpdate {
	dlu.mutation.SetDeletedAt(t)
	return dlu
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dlu *DirectLinkUpdate) SetNillableDeletedAt(t *time.Time) *DirectLinkUpdate {
	if t != nil {
		dlu.SetDeletedAt(*t)
	}
	return dlu
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (dlu *DirectLinkUpdate) ClearDeletedAt() *DirectLinkUpdate {
	dlu.mutation.ClearDeletedAt()
	return dlu
}

// SetName sets the "name" field.
func (dlu *DirectLinkUpdate) SetName(s string) *DirectLinkUpdate {
	dlu.mutation.SetName(s)
	return dlu
}

// SetNillableName sets the "name" field if the given value is not nil.
func (dlu *DirectLinkUpdate) SetNillableName(s *string) *DirectLinkUpdate {
	if s != nil {
		dlu.SetName(*s)
	}
	return dlu
}

// SetDownloads sets the "downloads" field.
// The Reset discards any pending AddDownloads delta so the value is absolute.
func (dlu *DirectLinkUpdate) SetDownloads(i int) *DirectLinkUpdate {
	dlu.mutation.ResetDownloads()
	dlu.mutation.SetDownloads(i)
	return dlu
}

// SetNillableDownloads sets the "downloads" field if the given value is not nil.
func (dlu *DirectLinkUpdate) SetNillableDownloads(i *int) *DirectLinkUpdate {
	if i != nil {
		dlu.SetDownloads(*i)
	}
	return dlu
}

// AddDownloads adds i to the "downloads" field.
func (dlu *DirectLinkUpdate) AddDownloads(i int) *DirectLinkUpdate {
	dlu.mutation.AddDownloads(i)
	return dlu
}

// SetFileID sets the "file_id" field.
func (dlu *DirectLinkUpdate) SetFileID(i int) *DirectLinkUpdate {
	dlu.mutation.SetFileID(i)
	return dlu
}

// SetNillableFileID sets the "file_id" field if the given value is not nil.
func (dlu *DirectLinkUpdate) SetNillableFileID(i *int) *DirectLinkUpdate {
	if i != nil {
		dlu.SetFileID(*i)
	}
	return dlu
}

// SetSpeed sets the "speed" field.
// The Reset discards any pending AddSpeed delta so the value is absolute.
func (dlu *DirectLinkUpdate) SetSpeed(i int) *DirectLinkUpdate {
	dlu.mutation.ResetSpeed()
	dlu.mutation.SetSpeed(i)
	return dlu
}

// SetNillableSpeed sets the "speed" field if the given value is not nil.
func (dlu *DirectLinkUpdate) SetNillableSpeed(i *int) *DirectLinkUpdate {
	if i != nil {
		dlu.SetSpeed(*i)
	}
	return dlu
}

// AddSpeed adds i to the "speed" field.
func (dlu *DirectLinkUpdate) AddSpeed(i int) *DirectLinkUpdate {
	dlu.mutation.AddSpeed(i)
	return dlu
}

// SetFile sets the "file" edge to the File entity.
func (dlu *DirectLinkUpdate) SetFile(f *File) *DirectLinkUpdate {
	return dlu.SetFileID(f.ID)
}

// Mutation returns the DirectLinkMutation object of the builder.
func (dlu *DirectLinkUpdate) Mutation() *DirectLinkMutation {
	return dlu.mutation
}

// ClearFile clears the "file" edge to the File entity.
func (dlu *DirectLinkUpdate) ClearFile() *DirectLinkUpdate {
	dlu.mutation.ClearFile()
	return dlu
}
// Save executes the query and returns the number of nodes affected by the update operation.
func (dlu *DirectLinkUpdate) Save(ctx context.Context) (int, error) {
	if err := dlu.defaults(); err != nil {
		return 0, err
	}
	return withHooks(ctx, dlu.sqlSave, dlu.mutation, dlu.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (dlu *DirectLinkUpdate) SaveX(ctx context.Context) int {
	affected, err := dlu.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (dlu *DirectLinkUpdate) Exec(ctx context.Context) error {
	_, err := dlu.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dlu *DirectLinkUpdate) ExecX(ctx context.Context) {
	if err := dlu.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
// It stamps "updated_at" unless the caller set it explicitly.
func (dlu *DirectLinkUpdate) defaults() error {
	if _, ok := dlu.mutation.UpdatedAt(); !ok {
		if directlink.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized directlink.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := directlink.UpdateDefaultUpdatedAt()
		dlu.mutation.SetUpdatedAt(v)
	}
	return nil
}

// check runs all checks and user-defined validators on the builder.
// The "file" edge is required, so it may be replaced but never cleared.
func (dlu *DirectLinkUpdate) check() error {
	if _, ok := dlu.mutation.FileID(); dlu.mutation.FileCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "DirectLink.file"`)
	}
	return nil
}
// sqlSave translates the mutation into an UPDATE spec and executes it,
// returning the number of affected rows.
func (dlu *DirectLinkUpdate) sqlSave(ctx context.Context) (n int, err error) {
	if err := dlu.check(); err != nil {
		return n, err
	}
	_spec := sqlgraph.NewUpdateSpec(directlink.Table, directlink.Columns, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
	// WHERE predicates limit which rows are updated.
	if ps := dlu.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Translate mutated fields into SET/ADD clauses.
	if value, ok := dlu.mutation.UpdatedAt(); ok {
		_spec.SetField(directlink.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := dlu.mutation.DeletedAt(); ok {
		_spec.SetField(directlink.FieldDeletedAt, field.TypeTime, value)
	}
	if dlu.mutation.DeletedAtCleared() {
		_spec.ClearField(directlink.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := dlu.mutation.Name(); ok {
		_spec.SetField(directlink.FieldName, field.TypeString, value)
	}
	if value, ok := dlu.mutation.Downloads(); ok {
		_spec.SetField(directlink.FieldDownloads, field.TypeInt, value)
	}
	if value, ok := dlu.mutation.AddedDownloads(); ok {
		_spec.AddField(directlink.FieldDownloads, field.TypeInt, value)
	}
	if value, ok := dlu.mutation.Speed(); ok {
		_spec.SetField(directlink.FieldSpeed, field.TypeInt, value)
	}
	if value, ok := dlu.mutation.AddedSpeed(); ok {
		_spec.AddField(directlink.FieldSpeed, field.TypeInt, value)
	}
	// Rebind the "file" edge: clear the old target first, then add the new one.
	if dlu.mutation.FileCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   directlink.FileTable,
			Columns: []string{directlink.FileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := dlu.mutation.FileIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   directlink.FileTable,
			Columns: []string{directlink.FileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Normalize driver errors into ent error types.
	if n, err = sqlgraph.UpdateNodes(ctx, dlu.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{directlink.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	dlu.mutation.done = true
	return n, nil
}
// DirectLinkUpdateOne is the builder for updating a single DirectLink entity.
type DirectLinkUpdateOne struct {
	config
	// fields are the columns to return in the updated entity (see Select).
	fields   []string
	hooks    []Hook
	mutation *DirectLinkMutation
}

// SetUpdatedAt sets the "updated_at" field.
func (dluo *DirectLinkUpdateOne) SetUpdatedAt(t time.Time) *DirectLinkUpdateOne {
	dluo.mutation.SetUpdatedAt(t)
	return dluo
}

// SetDeletedAt sets the "deleted_at" field.
func (dluo *DirectLinkUpdateOne) SetDeletedAt(t time.Time) *DirectLinkUpdateOne {
	dluo.mutation.SetDeletedAt(t)
	return dluo
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (dluo *DirectLinkUpdateOne) SetNillableDeletedAt(t *time.Time) *DirectLinkUpdateOne {
	if t != nil {
		dluo.SetDeletedAt(*t)
	}
	return dluo
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (dluo *DirectLinkUpdateOne) ClearDeletedAt() *DirectLinkUpdateOne {
	dluo.mutation.ClearDeletedAt()
	return dluo
}

// SetName sets the "name" field.
func (dluo *DirectLinkUpdateOne) SetName(s string) *DirectLinkUpdateOne {
	dluo.mutation.SetName(s)
	return dluo
}

// SetNillableName sets the "name" field if the given value is not nil.
func (dluo *DirectLinkUpdateOne) SetNillableName(s *string) *DirectLinkUpdateOne {
	if s != nil {
		dluo.SetName(*s)
	}
	return dluo
}

// SetDownloads sets the "downloads" field.
// The Reset discards any pending AddDownloads delta so the value is absolute.
func (dluo *DirectLinkUpdateOne) SetDownloads(i int) *DirectLinkUpdateOne {
	dluo.mutation.ResetDownloads()
	dluo.mutation.SetDownloads(i)
	return dluo
}

// SetNillableDownloads sets the "downloads" field if the given value is not nil.
func (dluo *DirectLinkUpdateOne) SetNillableDownloads(i *int) *DirectLinkUpdateOne {
	if i != nil {
		dluo.SetDownloads(*i)
	}
	return dluo
}

// AddDownloads adds i to the "downloads" field.
func (dluo *DirectLinkUpdateOne) AddDownloads(i int) *DirectLinkUpdateOne {
	dluo.mutation.AddDownloads(i)
	return dluo
}

// SetFileID sets the "file_id" field.
func (dluo *DirectLinkUpdateOne) SetFileID(i int) *DirectLinkUpdateOne {
	dluo.mutation.SetFileID(i)
	return dluo
}

// SetNillableFileID sets the "file_id" field if the given value is not nil.
func (dluo *DirectLinkUpdateOne) SetNillableFileID(i *int) *DirectLinkUpdateOne {
	if i != nil {
		dluo.SetFileID(*i)
	}
	return dluo
}

// SetSpeed sets the "speed" field.
// The Reset discards any pending AddSpeed delta so the value is absolute.
func (dluo *DirectLinkUpdateOne) SetSpeed(i int) *DirectLinkUpdateOne {
	dluo.mutation.ResetSpeed()
	dluo.mutation.SetSpeed(i)
	return dluo
}

// SetNillableSpeed sets the "speed" field if the given value is not nil.
func (dluo *DirectLinkUpdateOne) SetNillableSpeed(i *int) *DirectLinkUpdateOne {
	if i != nil {
		dluo.SetSpeed(*i)
	}
	return dluo
}

// AddSpeed adds i to the "speed" field.
func (dluo *DirectLinkUpdateOne) AddSpeed(i int) *DirectLinkUpdateOne {
	dluo.mutation.AddSpeed(i)
	return dluo
}

// SetFile sets the "file" edge to the File entity.
func (dluo *DirectLinkUpdateOne) SetFile(f *File) *DirectLinkUpdateOne {
	return dluo.SetFileID(f.ID)
}

// Mutation returns the DirectLinkMutation object of the builder.
func (dluo *DirectLinkUpdateOne) Mutation() *DirectLinkMutation {
	return dluo.mutation
}

// ClearFile clears the "file" edge to the File entity.
func (dluo *DirectLinkUpdateOne) ClearFile() *DirectLinkUpdateOne {
	dluo.mutation.ClearFile()
	return dluo
}

// Where appends a list predicates to the DirectLinkUpdate builder.
func (dluo *DirectLinkUpdateOne) Where(ps ...predicate.DirectLink) *DirectLinkUpdateOne {
	dluo.mutation.Where(ps...)
	return dluo
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (dluo *DirectLinkUpdateOne) Select(field string, fields ...string) *DirectLinkUpdateOne {
	dluo.fields = append([]string{field}, fields...)
	return dluo
}
// Save executes the query and returns the updated DirectLink entity.
func (dluo *DirectLinkUpdateOne) Save(ctx context.Context) (*DirectLink, error) {
	if err := dluo.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, dluo.sqlSave, dluo.mutation, dluo.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (dluo *DirectLinkUpdateOne) SaveX(ctx context.Context) *DirectLink {
	node, err := dluo.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (dluo *DirectLinkUpdateOne) Exec(ctx context.Context) error {
	_, err := dluo.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (dluo *DirectLinkUpdateOne) ExecX(ctx context.Context) {
	if err := dluo.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
// It stamps "updated_at" unless the caller set it explicitly.
func (dluo *DirectLinkUpdateOne) defaults() error {
	if _, ok := dluo.mutation.UpdatedAt(); !ok {
		if directlink.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized directlink.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := directlink.UpdateDefaultUpdatedAt()
		dluo.mutation.SetUpdatedAt(v)
	}
	return nil
}

// check runs all checks and user-defined validators on the builder.
// The "file" edge is required, so it may be replaced but never cleared.
func (dluo *DirectLinkUpdateOne) check() error {
	if _, ok := dluo.mutation.FileID(); dluo.mutation.FileCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "DirectLink.file"`)
	}
	return nil
}
// sqlSave translates the mutation into an UPDATE spec keyed by the entity ID,
// executes it, and scans the updated row back into a DirectLink.
func (dluo *DirectLinkUpdateOne) sqlSave(ctx context.Context) (_node *DirectLink, err error) {
	if err := dluo.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(directlink.Table, directlink.Columns, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
	// Updating a single entity requires its ID to be present in the mutation.
	id, ok := dluo.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DirectLink.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Honor Select(): return only the requested columns (ID always included).
	if fields := dluo.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, directlink.FieldID)
		for _, f := range fields {
			if !directlink.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != directlink.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	if ps := dluo.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Translate mutated fields into SET/ADD clauses.
	if value, ok := dluo.mutation.UpdatedAt(); ok {
		_spec.SetField(directlink.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := dluo.mutation.DeletedAt(); ok {
		_spec.SetField(directlink.FieldDeletedAt, field.TypeTime, value)
	}
	if dluo.mutation.DeletedAtCleared() {
		_spec.ClearField(directlink.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := dluo.mutation.Name(); ok {
		_spec.SetField(directlink.FieldName, field.TypeString, value)
	}
	if value, ok := dluo.mutation.Downloads(); ok {
		_spec.SetField(directlink.FieldDownloads, field.TypeInt, value)
	}
	if value, ok := dluo.mutation.AddedDownloads(); ok {
		_spec.AddField(directlink.FieldDownloads, field.TypeInt, value)
	}
	if value, ok := dluo.mutation.Speed(); ok {
		_spec.SetField(directlink.FieldSpeed, field.TypeInt, value)
	}
	if value, ok := dluo.mutation.AddedSpeed(); ok {
		_spec.AddField(directlink.FieldSpeed, field.TypeInt, value)
	}
	// Rebind the "file" edge: clear the old target first, then add the new one.
	if dluo.mutation.FileCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   directlink.FileTable,
			Columns: []string{directlink.FileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := dluo.mutation.FileIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   directlink.FileTable,
			Columns: []string{directlink.FileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Scan the updated row back into a fresh entity.
	_node = &DirectLink{config: dluo.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	// Normalize driver errors into ent error types.
	if err = sqlgraph.UpdateNode(ctx, dluo.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{directlink.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	dluo.mutation.done = true
	return _node, nil
}

638
ent/ent.go Normal file
View File

@@ -0,0 +1,638 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"errors"
"fmt"
"reflect"
"sync"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/group"
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
"github.com/cloudreve/Cloudreve/v4/ent/node"
"github.com/cloudreve/Cloudreve/v4/ent/oauthclient"
"github.com/cloudreve/Cloudreve/v4/ent/oauthgrant"
"github.com/cloudreve/Cloudreve/v4/ent/passkey"
"github.com/cloudreve/Cloudreve/v4/ent/setting"
"github.com/cloudreve/Cloudreve/v4/ent/share"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/ent/task"
"github.com/cloudreve/Cloudreve/v4/ent/user"
)
// ent aliases to avoid import conflicts in user's code.
type (
	Op            = ent.Op
	Hook          = ent.Hook
	Value         = ent.Value
	Query         = ent.Query
	QueryContext  = ent.QueryContext
	Querier       = ent.Querier
	QuerierFunc   = ent.QuerierFunc
	Interceptor   = ent.Interceptor
	InterceptFunc = ent.InterceptFunc
	Traverser     = ent.Traverser
	TraverseFunc  = ent.TraverseFunc
	Policy        = ent.Policy
	Mutator       = ent.Mutator
	Mutation      = ent.Mutation
	MutateFunc    = ent.MutateFunc
)

// clientCtxKey is the unexported context key under which a *Client is stored.
type clientCtxKey struct{}

// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
	c, _ := ctx.Value(clientCtxKey{}).(*Client)
	return c
}

// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
	return context.WithValue(parent, clientCtxKey{}, c)
}

// txCtxKey is the unexported context key under which a *Tx is stored.
type txCtxKey struct{}

// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
	tx, _ := ctx.Value(txCtxKey{}).(*Tx)
	return tx
}

// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
	return context.WithValue(parent, txCtxKey{}, tx)
}
// OrderFunc applies an ordering on the sql selector.
// Deprecated: Use Asc/Desc functions or the package builders instead.
type OrderFunc func(*sql.Selector)

var (
	// initCheck guards the one-time construction of columnCheck.
	initCheck   sync.Once
	columnCheck sql.ColumnCheck
)

// columnChecker checks if the column exists in the given table.
// The table→validator map is built lazily on first use and then reused.
func checkColumn(table, column string) error {
	initCheck.Do(func() {
		columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
			davaccount.Table:    davaccount.ValidColumn,
			directlink.Table:    directlink.ValidColumn,
			entity.Table:        entity.ValidColumn,
			file.Table:          file.ValidColumn,
			fsevent.Table:       fsevent.ValidColumn,
			group.Table:         group.ValidColumn,
			metadata.Table:      metadata.ValidColumn,
			node.Table:          node.ValidColumn,
			oauthclient.Table:   oauthclient.ValidColumn,
			oauthgrant.Table:    oauthgrant.ValidColumn,
			passkey.Table:       passkey.ValidColumn,
			setting.Table:       setting.ValidColumn,
			share.Table:         share.ValidColumn,
			storagepolicy.Table: storagepolicy.ValidColumn,
			task.Table:          task.ValidColumn,
			user.Table:          user.ValidColumn,
		})
	})
	return columnCheck(table, column)
}
// Asc applies the given fields in ASC order.
// Invalid columns are reported on the selector rather than panicking.
func Asc(fields ...string) func(*sql.Selector) {
	return func(s *sql.Selector) {
		for _, f := range fields {
			if err := checkColumn(s.TableName(), f); err != nil {
				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
			}
			s.OrderBy(sql.Asc(s.C(f)))
		}
	}
}

// Desc applies the given fields in DESC order.
// Invalid columns are reported on the selector rather than panicking.
func Desc(fields ...string) func(*sql.Selector) {
	return func(s *sql.Selector) {
		for _, f := range fields {
			if err := checkColumn(s.TableName(), f); err != nil {
				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
			}
			s.OrderBy(sql.Desc(s.C(f)))
		}
	}
}
// AggregateFunc applies an aggregation step on the group-by traversal/selector.
// It returns the SQL expression to add to the SELECT list.
type AggregateFunc func(*sql.Selector) string

// As is a pseudo aggregation function for renaming another other functions with custom names. For example:
//
//	GroupBy(field1, field2).
//	Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
//	Scan(ctx, &v)
func As(fn AggregateFunc, end string) AggregateFunc {
	return func(s *sql.Selector) string {
		return sql.As(fn(s), end)
	}
}

// Count applies the "count" aggregation function on each group.
func Count() AggregateFunc {
	return func(s *sql.Selector) string {
		return sql.Count("*")
	}
}
// Max applies the "max" aggregation function on the given field of each group.
// An invalid column is reported on the selector and yields an empty expression.
func Max(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Max(s.C(field))
	}
}

// Mean applies the "mean" aggregation function on the given field of each group.
func Mean(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Avg(s.C(field))
	}
}

// Min applies the "min" aggregation function on the given field of each group.
func Min(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Min(s.C(field))
	}
}

// Sum applies the "sum" aggregation function on the given field of each group.
func Sum(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Sum(s.C(field))
	}
}
// ValidationError returns when validating a field or edge fails.
type ValidationError struct {
	Name string // Field or edge name.
	err  error  // Underlying cause, exposed via Unwrap.
}

// Error implements the error interface.
func (e *ValidationError) Error() string {
	return e.err.Error()
}

// Unwrap implements the errors.Wrapper interface.
func (e *ValidationError) Unwrap() error {
	return e.err
}

// IsValidationError returns a boolean indicating whether the error is a validation error.
// It walks the wrapped error chain via errors.As.
func IsValidationError(err error) bool {
	if err == nil {
		return false
	}
	var e *ValidationError
	return errors.As(err, &e)
}
// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
type NotFoundError struct {
	label string // Entity type label included in the error message.
}

// Error implements the error interface.
func (e *NotFoundError) Error() string {
	return "ent: " + e.label + " not found"
}

// IsNotFound returns a boolean indicating whether the error is a not found error.
func IsNotFound(err error) bool {
	if err == nil {
		return false
	}
	var e *NotFoundError
	return errors.As(err, &e)
}

// MaskNotFound masks not found error.
// It converts a not-found error to nil and returns any other error unchanged.
func MaskNotFound(err error) error {
	if IsNotFound(err) {
		return nil
	}
	return err
}
// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database.
type NotSingularError struct {
	label string // Entity type label included in the error message.
}

// Error implements the error interface.
func (e *NotSingularError) Error() string {
	return "ent: " + e.label + " not singular"
}

// IsNotSingular returns a boolean indicating whether the error is a not singular error.
func IsNotSingular(err error) bool {
	if err == nil {
		return false
	}
	var e *NotSingularError
	return errors.As(err, &e)
}

// NotLoadedError returns when trying to get a node that was not loaded by the query.
type NotLoadedError struct {
	edge string // Edge name that was accessed without eager-loading.
}

// Error implements the error interface.
func (e *NotLoadedError) Error() string {
	return "ent: " + e.edge + " edge was not loaded"
}

// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
func IsNotLoaded(err error) bool {
	if err == nil {
		return false
	}
	var e *NotLoadedError
	return errors.As(err, &e)
}
// ConstraintError returns when trying to create/update one or more entities and
// one or more of their constraints failed. For example, violation of edge or
// field uniqueness.
type ConstraintError struct {
	msg  string // Human-readable description of the violated constraint.
	wrap error  // Underlying driver error, exposed via Unwrap.
}

// Error implements the error interface.
// NOTE(review): value receiver here vs. pointer receiver on Unwrap below —
// this mixed-receiver style is what ent codegen emits; errors.As in
// IsConstraintError targets *ConstraintError, consistent with Unwrap.
func (e ConstraintError) Error() string {
	return "ent: constraint failed: " + e.msg
}

// Unwrap implements the errors.Wrapper interface.
func (e *ConstraintError) Unwrap() error {
	return e.wrap
}

// IsConstraintError returns a boolean indicating whether the error is a constraint failure.
func IsConstraintError(err error) bool {
	if err == nil {
		return false
	}
	var e *ConstraintError
	return errors.As(err, &e)
}
// selector embedded by the different Select/GroupBy builders.
type selector struct {
	label string                           // Entity label used in NotFoundError messages.
	flds  *[]string                        // Selected fields; pointer so builder mutations are visible.
	fns   []AggregateFunc                  // Aggregations to apply.
	scan  func(context.Context, any) error // Executes the query and scans results into the target.
}

// ScanX is like Scan, but panics if an error occurs.
func (s *selector) ScanX(ctx context.Context, v any) {
	if err := s.scan(ctx, v); err != nil {
		panic(err)
	}
}
// Strings returns list of strings from a selector. It is only allowed when selecting one field.
func (s *selector) Strings(ctx context.Context) ([]string, error) {
	if len(*s.flds) > 1 {
		return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field")
	}
	var v []string
	if err := s.scan(ctx, &v); err != nil {
		return nil, err
	}
	return v, nil
}

// StringsX is like Strings, but panics if an error occurs.
func (s *selector) StringsX(ctx context.Context) []string {
	v, err := s.Strings(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// String returns a single string from a selector. It is only allowed when selecting one field.
// Zero rows yield a *NotFoundError; more than one row yields a generic error.
func (s *selector) String(ctx context.Context) (_ string, err error) {
	var v []string
	if v, err = s.Strings(ctx); err != nil {
		return
	}
	switch len(v) {
	case 1:
		return v[0], nil
	case 0:
		err = &NotFoundError{s.label}
	default:
		err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v))
	}
	return
}

// StringX is like String, but panics if an error occurs.
func (s *selector) StringX(ctx context.Context) string {
	v, err := s.String(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
// Ints returns list of ints from a selector. It is only allowed when selecting one field.
func (s *selector) Ints(ctx context.Context) ([]int, error) {
	if len(*s.flds) > 1 {
		return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field")
	}
	var v []int
	if err := s.scan(ctx, &v); err != nil {
		return nil, err
	}
	return v, nil
}

// IntsX is like Ints, but panics if an error occurs.
func (s *selector) IntsX(ctx context.Context) []int {
	v, err := s.Ints(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Int returns a single int from a selector. It is only allowed when selecting one field.
// Zero rows yield a *NotFoundError; more than one row yields a generic error.
func (s *selector) Int(ctx context.Context) (_ int, err error) {
	var v []int
	if v, err = s.Ints(ctx); err != nil {
		return
	}
	switch len(v) {
	case 1:
		return v[0], nil
	case 0:
		err = &NotFoundError{s.label}
	default:
		err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v))
	}
	return
}

// IntX is like Int, but panics if an error occurs.
func (s *selector) IntX(ctx context.Context) int {
	v, err := s.Int(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
// Float64s returns list of float64s from a selector. It is only allowed when selecting one field.
func (s *selector) Float64s(ctx context.Context) ([]float64, error) {
	if len(*s.flds) > 1 {
		return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field")
	}
	var v []float64
	if err := s.scan(ctx, &v); err != nil {
		return nil, err
	}
	return v, nil
}

// Float64sX is like Float64s, but panics if an error occurs.
func (s *selector) Float64sX(ctx context.Context) []float64 {
	v, err := s.Float64s(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Float64 returns a single float64 from a selector. It is only allowed when selecting one field.
// Zero rows yield a *NotFoundError; more than one row yields a generic error.
func (s *selector) Float64(ctx context.Context) (_ float64, err error) {
	var v []float64
	if v, err = s.Float64s(ctx); err != nil {
		return
	}
	switch len(v) {
	case 1:
		return v[0], nil
	case 0:
		err = &NotFoundError{s.label}
	default:
		err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v))
	}
	return
}

// Float64X is like Float64, but panics if an error occurs.
func (s *selector) Float64X(ctx context.Context) float64 {
	v, err := s.Float64(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
// Bools returns list of bools from a selector. It is only allowed when selecting one field.
func (s *selector) Bools(ctx context.Context) ([]bool, error) {
	if len(*s.flds) > 1 {
		return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field")
	}
	var v []bool
	if err := s.scan(ctx, &v); err != nil {
		return nil, err
	}
	return v, nil
}

// BoolsX is like Bools, but panics if an error occurs.
func (s *selector) BoolsX(ctx context.Context) []bool {
	v, err := s.Bools(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Bool returns a single bool from a selector. It is only allowed when selecting one field.
// Zero rows yield a *NotFoundError; more than one row yields a generic error.
func (s *selector) Bool(ctx context.Context) (_ bool, err error) {
	var v []bool
	if v, err = s.Bools(ctx); err != nil {
		return
	}
	switch len(v) {
	case 1:
		return v[0], nil
	case 0:
		err = &NotFoundError{s.label}
	default:
		err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v))
	}
	return
}

// BoolX is like Bool, but panics if an error occurs.
func (s *selector) BoolX(ctx context.Context) bool {
	v, err := s.Bool(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
// withHooks invokes the builder operation with the given hooks, if any.
// When hooks exist, exec is wrapped as the innermost Mutator and the hooks
// are applied around it; the final result is asserted back to type V.
func withHooks[V Value, M any, PM interface {
	*M
	Mutation
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
	if len(hooks) == 0 {
		// Fast path: no hooks registered, run the operation directly.
		return exec(ctx)
	}
	var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
		mutationT, ok := any(m).(PM)
		if !ok {
			return nil, fmt.Errorf("unexpected mutation type %T", m)
		}
		// Set the mutation to the builder.
		*mutation = *mutationT
		return exec(ctx)
	})
	// Chain in reverse so hooks[0] becomes the outermost wrapper.
	for i := len(hooks) - 1; i >= 0; i-- {
		if hooks[i] == nil {
			return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
		}
		mut = hooks[i](mut)
	}
	v, err := mut.Mutate(ctx, mutation)
	if err != nil {
		return value, err
	}
	nv, ok := v.(V)
	if !ok {
		return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
	}
	return nv, nil
}
// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
	// An existing QueryContext takes precedence; leave the context untouched.
	if existing := ent.QueryFromContext(ctx); existing != nil {
		return ctx
	}
	qc.Op = op
	return ent.NewQueryContext(ctx, qc)
}
// querierAll returns a Querier that executes the query's sqlAll method,
// used as the innermost step of the interceptor chain for list queries.
func querierAll[V Value, Q interface {
	sqlAll(context.Context, ...queryHook) (V, error)
}]() Querier {
	return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		return query.sqlAll(ctx)
	})
}

// querierCount returns a Querier that executes the query's sqlCount method,
// used as the innermost step of the interceptor chain for count queries.
func querierCount[Q interface {
	sqlCount(context.Context) (int, error)
}]() Querier {
	return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		return query.sqlCount(ctx)
	})
}
// withInterceptors wraps the querier with the given interceptors (inters[0]
// outermost), executes the query, and asserts the result to type V.
func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
	// Chain in reverse so inters[0] becomes the outermost interceptor.
	for i := len(inters) - 1; i >= 0; i-- {
		qr = inters[i].Intercept(qr)
	}
	rv, err := qr.Query(ctx, q)
	if err != nil {
		return v, err
	}
	vt, ok := rv.(V)
	if !ok {
		// Report rv, not vt: on a failed assertion vt is the zero value of V,
		// so %T on vt would misreport the actual offending type.
		return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", rv, q, v)
	}
	return vt, nil
}
// scanWithInterceptors runs the select/group-by scan through the interceptor
// chain, scanning results into v. If v is a settable pointer, the intercepted
// result is copied back into it so interceptors can replace the value.
func scanWithInterceptors[Q1 ent.Query, Q2 interface {
	sqlScan(context.Context, Q1, any) error
}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
	rv := reflect.ValueOf(v)
	var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q1)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
			return nil, err
		}
		// Dereference the scan target so interceptors see the value, not the pointer.
		if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
			return rv.Elem().Interface(), nil
		}
		return v, nil
	})
	// Chain in reverse so inters[0] becomes the outermost interceptor.
	for i := len(inters) - 1; i >= 0; i-- {
		qr = inters[i].Intercept(qr)
	}
	vv, err := qr.Query(ctx, rootQuery)
	if err != nil {
		return err
	}
	// Copy the (possibly replaced) result back into the caller's pointer,
	// handling both pointer-to-value and pointer-to-pointer targets.
	switch rv2 := reflect.ValueOf(vv); {
	case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
	case rv.Type() == rv2.Type():
		rv.Elem().Set(rv2.Elem())
	case rv.Elem().Type() == rv2.Type():
		rv.Elem().Set(rv2)
	}
	return nil
}

// queryHook describes an internal hook for the different sqlAll methods.
type queryHook func(context.Context, *sqlgraph.QuerySpec)

29
ent/entc.go Normal file
View File

@@ -0,0 +1,29 @@
//go:build ignore

package main

import (
	"log"

	"entgo.io/ent/entc"
	"entgo.io/ent/entc/gen"
)

// main runs the ent code generator over ./schema with the project's feature
// set and custom templates. Invoked via `go generate` (the file is excluded
// from normal builds by the ignore build tag).
func main() {
	if err := entc.Generate("./schema", &gen.Config{
		Features: []gen.Feature{
			gen.FeatureIntercept,
			gen.FeatureSnapshot,
			// FeatureUpsert was duplicated in the original list; once is enough.
			gen.FeatureUpsert,
			gen.FeatureExecQuery,
		},
		Templates: []*gen.Template{
			gen.MustParse(gen.NewTemplate("edge_helper").ParseFiles("templates/edgehelper.tmpl")),
			gen.MustParse(gen.NewTemplate("mutation_helper").ParseFiles("templates/mutationhelper.tmpl")),
			gen.MustParse(gen.NewTemplate("create_helper").ParseFiles("templates/createhelper.tmpl")),
		},
	}); err != nil {
		log.Fatal("running ent codegen:", err)
	}
}

317
ent/entity.go Normal file
View File

@@ -0,0 +1,317 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"encoding/json"
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/gofrs/uuid"
)
// Entity is the model entity for the Entity schema.
type Entity struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	// Nil when the row has no deletion timestamp.
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Type holds the value of the "type" field.
	Type int `json:"type,omitempty"`
	// Source holds the value of the "source" field.
	Source string `json:"source,omitempty"`
	// Size holds the value of the "size" field.
	Size int64 `json:"size,omitempty"`
	// ReferenceCount holds the value of the "reference_count" field.
	ReferenceCount int `json:"reference_count,omitempty"`
	// StoragePolicyEntities holds the value of the "storage_policy_entities" field.
	StoragePolicyEntities int `json:"storage_policy_entities,omitempty"`
	// CreatedBy holds the value of the "created_by" field.
	CreatedBy int `json:"created_by,omitempty"`
	// UploadSessionID holds the value of the "upload_session_id" field.
	// Nil when the column is NULL.
	UploadSessionID *uuid.UUID `json:"upload_session_id,omitempty"`
	// Props holds the value of the "props" field.
	Props *types.EntityProps `json:"props,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the EntityQuery when eager-loading is set.
	Edges EntityEdges `json:"edges"`
	// selectValues stores columns selected via modifiers/custom selects.
	selectValues sql.SelectValues
}
// EntityEdges holds the relations/edges for other nodes in the graph.
type EntityEdges struct {
	// File holds the value of the file edge.
	File []*File `json:"file,omitempty"`
	// User holds the value of the user edge.
	User *User `json:"user,omitempty"`
	// StoragePolicy holds the value of the storage_policy edge.
	StoragePolicy *StoragePolicy `json:"storage_policy,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index order: 0=file, 1=user, 2=storage_policy.
	loadedTypes [3]bool
}

// FileOrErr returns the File value or an error if the edge
// was not loaded in eager-loading.
func (e EntityEdges) FileOrErr() ([]*File, error) {
	if e.loadedTypes[0] {
		return e.File, nil
	}
	return nil, &NotLoadedError{edge: "file"}
}

// UserOrErr returns the User value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e EntityEdges) UserOrErr() (*User, error) {
	if e.loadedTypes[1] {
		if e.User == nil {
			// Edge was loaded but was not found.
			return nil, &NotFoundError{label: user.Label}
		}
		return e.User, nil
	}
	return nil, &NotLoadedError{edge: "user"}
}

// StoragePolicyOrErr returns the StoragePolicy value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e EntityEdges) StoragePolicyOrErr() (*StoragePolicy, error) {
	if e.loadedTypes[2] {
		if e.StoragePolicy == nil {
			// Edge was loaded but was not found.
			return nil, &NotFoundError{label: storagepolicy.Label}
		}
		return e.StoragePolicy, nil
	}
	return nil, &NotLoadedError{edge: "storage_policy"}
}
// scanValues returns the types for scanning values from sql.Rows.
// Each returned element is a pointer suitable for database/sql Scan,
// matched positionally to the requested columns.
func (*Entity) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case entity.FieldUploadSessionID:
			// Nullable UUID: NullScanner distinguishes NULL from a zero UUID.
			values[i] = &sql.NullScanner{S: new(uuid.UUID)}
		case entity.FieldProps:
			// JSON column is scanned as raw bytes and unmarshaled in assignValues.
			values[i] = new([]byte)
		case entity.FieldID, entity.FieldType, entity.FieldSize, entity.FieldReferenceCount, entity.FieldStoragePolicyEntities, entity.FieldCreatedBy:
			values[i] = new(sql.NullInt64)
		case entity.FieldSource:
			values[i] = new(sql.NullString)
		case entity.FieldCreatedAt, entity.FieldUpdatedAt, entity.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		default:
			// Unknown columns (custom selects/modifiers) are captured generically.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Entity fields. The values slice must align positionally with columns
// (extra trailing values are tolerated; fewer are an error).
func (e *Entity) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case entity.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			e.ID = int(value.Int64)
		case entity.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				e.CreatedAt = value.Time
			}
		case entity.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				e.UpdatedAt = value.Time
			}
		case entity.FieldDeletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				// Nullable field: allocate only when the column is non-NULL.
				e.DeletedAt = new(time.Time)
				*e.DeletedAt = value.Time
			}
		case entity.FieldType:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field type", values[i])
			} else if value.Valid {
				e.Type = int(value.Int64)
			}
		case entity.FieldSource:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field source", values[i])
			} else if value.Valid {
				e.Source = value.String
			}
		case entity.FieldSize:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field size", values[i])
			} else if value.Valid {
				e.Size = value.Int64
			}
		case entity.FieldReferenceCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field reference_count", values[i])
			} else if value.Valid {
				e.ReferenceCount = int(value.Int64)
			}
		case entity.FieldStoragePolicyEntities:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field storage_policy_entities", values[i])
			} else if value.Valid {
				e.StoragePolicyEntities = int(value.Int64)
			}
		case entity.FieldCreatedBy:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field created_by", values[i])
			} else if value.Valid {
				e.CreatedBy = int(value.Int64)
			}
		case entity.FieldUploadSessionID:
			if value, ok := values[i].(*sql.NullScanner); !ok {
				return fmt.Errorf("unexpected type %T for field upload_session_id", values[i])
			} else if value.Valid {
				e.UploadSessionID = new(uuid.UUID)
				*e.UploadSessionID = *value.S.(*uuid.UUID)
			}
		case entity.FieldProps:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field props", values[i])
			} else if value != nil && len(*value) > 0 {
				// JSON column: decode raw bytes into the typed Props struct.
				if err := json.Unmarshal(*value, &e.Props); err != nil {
					return fmt.Errorf("unmarshal field props: %w", err)
				}
			}
		default:
			// Columns selected via modifiers are retained for Value(name) lookups.
			e.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Entity.
// This includes values selected through modifiers, order, etc.
func (e *Entity) Value(name string) (ent.Value, error) {
	return e.selectValues.Get(name)
}

// QueryFile queries the "file" edge of the Entity entity.
func (e *Entity) QueryFile() *FileQuery {
	return NewEntityClient(e.config).QueryFile(e)
}

// QueryUser queries the "user" edge of the Entity entity.
func (e *Entity) QueryUser() *UserQuery {
	return NewEntityClient(e.config).QueryUser(e)
}

// QueryStoragePolicy queries the "storage_policy" edge of the Entity entity.
func (e *Entity) QueryStoragePolicy() *StoragePolicyQuery {
	return NewEntityClient(e.config).QueryStoragePolicy(e)
}

// Update returns a builder for updating this Entity.
// Note that you need to call Entity.Unwrap() before calling this method if this Entity
// was returned from a transaction, and the transaction was committed or rolled back.
func (e *Entity) Update() *EntityUpdateOne {
	return NewEntityClient(e.config).UpdateOne(e)
}
// Unwrap unwraps the Entity entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics if the entity's driver is not a transactional driver.
func (e *Entity) Unwrap() *Entity {
	_tx, ok := e.config.driver.(*txDriver)
	if !ok {
		panic("ent: Entity is not a transactional entity")
	}
	e.config.driver = _tx.drv
	return e
}
// String implements the fmt.Stringer.
// Nullable fields (deleted_at, upload_session_id) are printed only when set.
func (e *Entity) String() string {
	var builder strings.Builder
	builder.WriteString("Entity(")
	builder.WriteString(fmt.Sprintf("id=%v, ", e.ID))
	builder.WriteString("created_at=")
	builder.WriteString(e.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(e.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	if v := e.DeletedAt; v != nil {
		builder.WriteString("deleted_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("type=")
	builder.WriteString(fmt.Sprintf("%v", e.Type))
	builder.WriteString(", ")
	builder.WriteString("source=")
	builder.WriteString(e.Source)
	builder.WriteString(", ")
	builder.WriteString("size=")
	builder.WriteString(fmt.Sprintf("%v", e.Size))
	builder.WriteString(", ")
	builder.WriteString("reference_count=")
	builder.WriteString(fmt.Sprintf("%v", e.ReferenceCount))
	builder.WriteString(", ")
	builder.WriteString("storage_policy_entities=")
	builder.WriteString(fmt.Sprintf("%v", e.StoragePolicyEntities))
	builder.WriteString(", ")
	builder.WriteString("created_by=")
	builder.WriteString(fmt.Sprintf("%v", e.CreatedBy))
	builder.WriteString(", ")
	if v := e.UploadSessionID; v != nil {
		builder.WriteString("upload_session_id=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	builder.WriteString("props=")
	builder.WriteString(fmt.Sprintf("%v", e.Props))
	builder.WriteByte(')')
	return builder.String()
}
// SetFile manually set the edge as loaded state.
// Useful when the edge was fetched outside the normal eager-loading flow.
func (e *Entity) SetFile(v []*File) {
	e.Edges.File = v
	e.Edges.loadedTypes[0] = true
}

// SetUser manually set the edge as loaded state.
func (e *Entity) SetUser(v *User) {
	e.Edges.User = v
	e.Edges.loadedTypes[1] = true
}

// SetStoragePolicy manually set the edge as loaded state.
func (e *Entity) SetStoragePolicy(v *StoragePolicy) {
	e.Edges.StoragePolicy = v
	e.Edges.loadedTypes[2] = true
}

// Entities is a parsable slice of Entity.
type Entities []*Entity

224
ent/entity/entity.go Normal file
View File

@@ -0,0 +1,224 @@
// Code generated by ent, DO NOT EDIT.
package entity
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
	// Label holds the string label denoting the entity type in the database.
	Label = "entity"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldType holds the string denoting the type field in the database.
	FieldType = "type"
	// FieldSource holds the string denoting the source field in the database.
	FieldSource = "source"
	// FieldSize holds the string denoting the size field in the database.
	FieldSize = "size"
	// FieldReferenceCount holds the string denoting the reference_count field in the database.
	FieldReferenceCount = "reference_count"
	// FieldStoragePolicyEntities holds the string denoting the storage_policy_entities field in the database.
	FieldStoragePolicyEntities = "storage_policy_entities"
	// FieldCreatedBy holds the string denoting the created_by field in the database.
	FieldCreatedBy = "created_by"
	// FieldUploadSessionID holds the string denoting the upload_session_id field in the database.
	FieldUploadSessionID = "upload_session_id"
	// FieldProps holds the string denoting the props field in the database.
	// NOTE(review): the column literal "recycle_options" does not match the
	// field name "props" — presumably a custom storage key from the schema;
	// confirm against the schema definition before relying on the literal.
	FieldProps = "recycle_options"
	// EdgeFile holds the string denoting the file edge name in mutations.
	EdgeFile = "file"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// EdgeStoragePolicy holds the string denoting the storage_policy edge name in mutations.
	EdgeStoragePolicy = "storage_policy"
	// Table holds the table name of the entity in the database.
	Table = "entities"
	// FileTable is the table that holds the file relation/edge. The primary key declared below.
	FileTable = "file_entities"
	// FileInverseTable is the table name for the File entity.
	// It exists in this package in order to avoid circular dependency with the "file" package.
	FileInverseTable = "files"
	// UserTable is the table that holds the user relation/edge.
	UserTable = "entities"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "created_by"
	// StoragePolicyTable is the table that holds the storage_policy relation/edge.
	StoragePolicyTable = "entities"
	// StoragePolicyInverseTable is the table name for the StoragePolicy entity.
	// It exists in this package in order to avoid circular dependency with the "storagepolicy" package.
	StoragePolicyInverseTable = "storage_policies"
	// StoragePolicyColumn is the table column denoting the storage_policy relation/edge.
	StoragePolicyColumn = "storage_policy_entities"
)
// Columns holds all SQL columns for entity fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldType,
	FieldSource,
	FieldSize,
	FieldReferenceCount,
	FieldStoragePolicyEntities,
	FieldCreatedBy,
	FieldUploadSessionID,
	FieldProps,
}

var (
	// FilePrimaryKey and FileColumn2 are the table columns denoting the
	// primary key for the file relation (M2M).
	FilePrimaryKey = []string{"file_id", "entity_id"}
)
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks and Interceptors are populated by the runtime package.
	Hooks        [1]ent.Hook
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// DefaultReferenceCount holds the default value on creation for the "reference_count" field.
	DefaultReferenceCount int
)
// OrderOption defines the ordering options for the Entity queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldType, opts...).ToFunc()
}

// BySource orders the results by the source field.
func BySource(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSource, opts...).ToFunc()
}

// BySize orders the results by the size field.
func BySize(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSize, opts...).ToFunc()
}

// ByReferenceCount orders the results by the reference_count field.
func ByReferenceCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldReferenceCount, opts...).ToFunc()
}

// ByStoragePolicyEntities orders the results by the storage_policy_entities field.
func ByStoragePolicyEntities(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStoragePolicyEntities, opts...).ToFunc()
}

// ByCreatedBy orders the results by the created_by field.
func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedBy, opts...).ToFunc()
}

// ByUploadSessionID orders the results by the upload_session_id field.
func ByUploadSessionID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUploadSessionID, opts...).ToFunc()
}
// ByFileCount orders the results by file count.
func ByFileCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newFileStep(), opts...)
	}
}

// ByFile orders the results by file terms.
func ByFile(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newFileStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByUserField orders the results by user field.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}

// ByStoragePolicyField orders the results by storage_policy field.
func ByStoragePolicyField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newStoragePolicyStep(), sql.OrderByField(field, opts...))
	}
}
// newFileStep builds the graph traversal step for the M2M "file" edge
// (inverse side, joined through the file_entities table).
func newFileStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(FileInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2M, true, FileTable, FilePrimaryKey...),
	)
}

// newUserStep builds the graph traversal step for the M2O "user" edge
// (inverse side, via the created_by column on this table).
func newUserStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
	)
}

// newStoragePolicyStep builds the graph traversal step for the M2O
// "storage_policy" edge (inverse side, via storage_policy_entities).
func newStoragePolicyStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(StoragePolicyInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, StoragePolicyTable, StoragePolicyColumn),
	)
}

616
ent/entity/where.go Normal file
View File

@@ -0,0 +1,616 @@
// Code generated by ent, DO NOT EDIT.
package entity
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/gofrs/uuid"
)
// ID filters vertices based on their ID field.
func ID(id int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.Entity {
	return predicate.Entity(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.Entity {
	return predicate.Entity(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.Entity {
	return predicate.Entity(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.Entity {
	return predicate.Entity(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.Entity {
	return predicate.Entity(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.Entity {
	return predicate.Entity(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.Entity {
	return predicate.Entity(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.Entity {
	return predicate.Entity(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldUpdatedAt, v))
}
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldDeletedAt, v))
}
// Type applies equality check predicate on the "type" field. It's identical to TypeEQ.
func Type(v int) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldType, v))
}
// Source applies equality check predicate on the "source" field. It's identical to SourceEQ.
func Source(v string) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldSource, v))
}
// Size applies equality check predicate on the "size" field. It's identical to SizeEQ.
func Size(v int64) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldSize, v))
}
// ReferenceCount applies equality check predicate on the "reference_count" field. It's identical to ReferenceCountEQ.
func ReferenceCount(v int) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldReferenceCount, v))
}
// StoragePolicyEntities applies equality check predicate on the "storage_policy_entities" field. It's identical to StoragePolicyEntitiesEQ.
func StoragePolicyEntities(v int) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldStoragePolicyEntities, v))
}
// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ.
func CreatedBy(v int) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldCreatedBy, v))
}
// UploadSessionID applies equality check predicate on the "upload_session_id" field. It's identical to UploadSessionIDEQ.
func UploadSessionID(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldUploadSessionID, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldLTE(FieldCreatedAt, v))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldLTE(FieldUpdatedAt, v))
}
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldDeletedAt, v))
}
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldNEQ(FieldDeletedAt, v))
}
// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldDeletedAt, vs...))
}
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldDeletedAt, vs...))
}
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldGT(FieldDeletedAt, v))
}
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldGTE(FieldDeletedAt, v))
}
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldLT(FieldDeletedAt, v))
}
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.Entity {
return predicate.Entity(sql.FieldLTE(FieldDeletedAt, v))
}
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
func DeletedAtIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldDeletedAt))
}
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldDeletedAt))
}
// TypeEQ applies the EQ predicate on the "type" field.
func TypeEQ(v int) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldType, v))
}
// TypeNEQ applies the NEQ predicate on the "type" field.
func TypeNEQ(v int) predicate.Entity {
return predicate.Entity(sql.FieldNEQ(FieldType, v))
}
// TypeIn applies the In predicate on the "type" field.
func TypeIn(vs ...int) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldType, vs...))
}
// TypeNotIn applies the NotIn predicate on the "type" field.
func TypeNotIn(vs ...int) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldType, vs...))
}
// TypeGT applies the GT predicate on the "type" field.
func TypeGT(v int) predicate.Entity {
return predicate.Entity(sql.FieldGT(FieldType, v))
}
// TypeGTE applies the GTE predicate on the "type" field.
func TypeGTE(v int) predicate.Entity {
return predicate.Entity(sql.FieldGTE(FieldType, v))
}
// TypeLT applies the LT predicate on the "type" field.
func TypeLT(v int) predicate.Entity {
return predicate.Entity(sql.FieldLT(FieldType, v))
}
// TypeLTE applies the LTE predicate on the "type" field.
func TypeLTE(v int) predicate.Entity {
return predicate.Entity(sql.FieldLTE(FieldType, v))
}
// SourceEQ applies the EQ predicate on the "source" field.
func SourceEQ(v string) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldSource, v))
}
// SourceNEQ applies the NEQ predicate on the "source" field.
func SourceNEQ(v string) predicate.Entity {
return predicate.Entity(sql.FieldNEQ(FieldSource, v))
}
// SourceIn applies the In predicate on the "source" field.
func SourceIn(vs ...string) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldSource, vs...))
}
// SourceNotIn applies the NotIn predicate on the "source" field.
func SourceNotIn(vs ...string) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldSource, vs...))
}
// SourceGT applies the GT predicate on the "source" field.
func SourceGT(v string) predicate.Entity {
return predicate.Entity(sql.FieldGT(FieldSource, v))
}
// SourceGTE applies the GTE predicate on the "source" field.
func SourceGTE(v string) predicate.Entity {
return predicate.Entity(sql.FieldGTE(FieldSource, v))
}
// SourceLT applies the LT predicate on the "source" field.
func SourceLT(v string) predicate.Entity {
return predicate.Entity(sql.FieldLT(FieldSource, v))
}
// SourceLTE applies the LTE predicate on the "source" field.
func SourceLTE(v string) predicate.Entity {
return predicate.Entity(sql.FieldLTE(FieldSource, v))
}
// SourceContains applies the Contains predicate on the "source" field.
func SourceContains(v string) predicate.Entity {
return predicate.Entity(sql.FieldContains(FieldSource, v))
}
// SourceHasPrefix applies the HasPrefix predicate on the "source" field.
func SourceHasPrefix(v string) predicate.Entity {
return predicate.Entity(sql.FieldHasPrefix(FieldSource, v))
}
// SourceHasSuffix applies the HasSuffix predicate on the "source" field.
func SourceHasSuffix(v string) predicate.Entity {
return predicate.Entity(sql.FieldHasSuffix(FieldSource, v))
}
// SourceEqualFold applies the EqualFold predicate on the "source" field.
func SourceEqualFold(v string) predicate.Entity {
return predicate.Entity(sql.FieldEqualFold(FieldSource, v))
}
// SourceContainsFold applies the ContainsFold predicate on the "source" field.
func SourceContainsFold(v string) predicate.Entity {
return predicate.Entity(sql.FieldContainsFold(FieldSource, v))
}
// SizeEQ applies the EQ predicate on the "size" field.
func SizeEQ(v int64) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldSize, v))
}
// SizeNEQ applies the NEQ predicate on the "size" field.
func SizeNEQ(v int64) predicate.Entity {
return predicate.Entity(sql.FieldNEQ(FieldSize, v))
}
// SizeIn applies the In predicate on the "size" field.
func SizeIn(vs ...int64) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldSize, vs...))
}
// SizeNotIn applies the NotIn predicate on the "size" field.
func SizeNotIn(vs ...int64) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldSize, vs...))
}
// SizeGT applies the GT predicate on the "size" field.
func SizeGT(v int64) predicate.Entity {
return predicate.Entity(sql.FieldGT(FieldSize, v))
}
// SizeGTE applies the GTE predicate on the "size" field.
func SizeGTE(v int64) predicate.Entity {
return predicate.Entity(sql.FieldGTE(FieldSize, v))
}
// SizeLT applies the LT predicate on the "size" field.
func SizeLT(v int64) predicate.Entity {
return predicate.Entity(sql.FieldLT(FieldSize, v))
}
// SizeLTE applies the LTE predicate on the "size" field.
func SizeLTE(v int64) predicate.Entity {
return predicate.Entity(sql.FieldLTE(FieldSize, v))
}
// ReferenceCountEQ applies the EQ predicate on the "reference_count" field.
func ReferenceCountEQ(v int) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldReferenceCount, v))
}
// ReferenceCountNEQ applies the NEQ predicate on the "reference_count" field.
func ReferenceCountNEQ(v int) predicate.Entity {
return predicate.Entity(sql.FieldNEQ(FieldReferenceCount, v))
}
// ReferenceCountIn applies the In predicate on the "reference_count" field.
func ReferenceCountIn(vs ...int) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldReferenceCount, vs...))
}
// ReferenceCountNotIn applies the NotIn predicate on the "reference_count" field.
func ReferenceCountNotIn(vs ...int) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldReferenceCount, vs...))
}
// ReferenceCountGT applies the GT predicate on the "reference_count" field.
func ReferenceCountGT(v int) predicate.Entity {
return predicate.Entity(sql.FieldGT(FieldReferenceCount, v))
}
// ReferenceCountGTE applies the GTE predicate on the "reference_count" field.
func ReferenceCountGTE(v int) predicate.Entity {
return predicate.Entity(sql.FieldGTE(FieldReferenceCount, v))
}
// ReferenceCountLT applies the LT predicate on the "reference_count" field.
func ReferenceCountLT(v int) predicate.Entity {
return predicate.Entity(sql.FieldLT(FieldReferenceCount, v))
}
// ReferenceCountLTE applies the LTE predicate on the "reference_count" field.
func ReferenceCountLTE(v int) predicate.Entity {
return predicate.Entity(sql.FieldLTE(FieldReferenceCount, v))
}
// StoragePolicyEntitiesEQ applies the EQ predicate on the "storage_policy_entities" field.
func StoragePolicyEntitiesEQ(v int) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldStoragePolicyEntities, v))
}
// StoragePolicyEntitiesNEQ applies the NEQ predicate on the "storage_policy_entities" field.
func StoragePolicyEntitiesNEQ(v int) predicate.Entity {
return predicate.Entity(sql.FieldNEQ(FieldStoragePolicyEntities, v))
}
// StoragePolicyEntitiesIn applies the In predicate on the "storage_policy_entities" field.
func StoragePolicyEntitiesIn(vs ...int) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldStoragePolicyEntities, vs...))
}
// StoragePolicyEntitiesNotIn applies the NotIn predicate on the "storage_policy_entities" field.
func StoragePolicyEntitiesNotIn(vs ...int) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldStoragePolicyEntities, vs...))
}
// CreatedByEQ applies the EQ predicate on the "created_by" field.
func CreatedByEQ(v int) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldCreatedBy, v))
}
// CreatedByNEQ applies the NEQ predicate on the "created_by" field.
func CreatedByNEQ(v int) predicate.Entity {
return predicate.Entity(sql.FieldNEQ(FieldCreatedBy, v))
}
// CreatedByIn applies the In predicate on the "created_by" field.
func CreatedByIn(vs ...int) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldCreatedBy, vs...))
}
// CreatedByNotIn applies the NotIn predicate on the "created_by" field.
func CreatedByNotIn(vs ...int) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldCreatedBy, vs...))
}
// CreatedByIsNil applies the IsNil predicate on the "created_by" field.
func CreatedByIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldCreatedBy))
}
// CreatedByNotNil applies the NotNil predicate on the "created_by" field.
func CreatedByNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldCreatedBy))
}
// UploadSessionIDEQ applies the EQ predicate on the "upload_session_id" field.
func UploadSessionIDEQ(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldEQ(FieldUploadSessionID, v))
}
// UploadSessionIDNEQ applies the NEQ predicate on the "upload_session_id" field.
func UploadSessionIDNEQ(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldNEQ(FieldUploadSessionID, v))
}
// UploadSessionIDIn applies the In predicate on the "upload_session_id" field.
func UploadSessionIDIn(vs ...uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldIn(FieldUploadSessionID, vs...))
}
// UploadSessionIDNotIn applies the NotIn predicate on the "upload_session_id" field.
func UploadSessionIDNotIn(vs ...uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldNotIn(FieldUploadSessionID, vs...))
}
// UploadSessionIDGT applies the GT predicate on the "upload_session_id" field.
func UploadSessionIDGT(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldGT(FieldUploadSessionID, v))
}
// UploadSessionIDGTE applies the GTE predicate on the "upload_session_id" field.
func UploadSessionIDGTE(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldGTE(FieldUploadSessionID, v))
}
// UploadSessionIDLT applies the LT predicate on the "upload_session_id" field.
func UploadSessionIDLT(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldLT(FieldUploadSessionID, v))
}
// UploadSessionIDLTE applies the LTE predicate on the "upload_session_id" field.
func UploadSessionIDLTE(v uuid.UUID) predicate.Entity {
return predicate.Entity(sql.FieldLTE(FieldUploadSessionID, v))
}
// UploadSessionIDIsNil applies the IsNil predicate on the "upload_session_id" field.
func UploadSessionIDIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldUploadSessionID))
}
// UploadSessionIDNotNil applies the NotNil predicate on the "upload_session_id" field.
func UploadSessionIDNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldUploadSessionID))
}
// PropsIsNil applies the IsNil predicate on the "props" field.
func PropsIsNil() predicate.Entity {
return predicate.Entity(sql.FieldIsNull(FieldProps))
}
// PropsNotNil applies the NotNil predicate on the "props" field.
func PropsNotNil() predicate.Entity {
return predicate.Entity(sql.FieldNotNull(FieldProps))
}
// HasFile applies the HasEdge predicate on the "file" edge.
func HasFile() predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.Edge(sqlgraph.M2M, true, FileTable, FilePrimaryKey...),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasFileWith applies the HasEdge predicate on the "file" edge with a given conditions (other predicates).
func HasFileWith(preds ...predicate.File) predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := newFileStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// HasUser applies the HasEdge predicate on the "user" edge.
func HasUser() predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
func HasUserWith(preds ...predicate.User) predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := newUserStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// HasStoragePolicy applies the HasEdge predicate on the "storage_policy" edge.
func HasStoragePolicy() predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, StoragePolicyTable, StoragePolicyColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasStoragePolicyWith applies the HasEdge predicate on the "storage_policy" edge with a given conditions (other predicates).
func HasStoragePolicyWith(preds ...predicate.StoragePolicy) predicate.Entity {
return predicate.Entity(func(s *sql.Selector) {
step := newStoragePolicyStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Entity) predicate.Entity {
return predicate.Entity(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Entity) predicate.Entity {
return predicate.Entity(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Entity) predicate.Entity {
return predicate.Entity(sql.NotPredicates(p))
}

1267
ent/entity_create.go Normal file

File diff suppressed because it is too large Load Diff

88
ent/entity_delete.go Normal file
View File

@@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
// NOTE(review): generated delete builders for the Entity type. Do not hand-edit;
// change the Entity schema and re-run code generation instead.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// EntityDelete is the builder for deleting a Entity entity.
type EntityDelete struct {
config
hooks []Hook
mutation *EntityMutation
}
// Where appends a list predicates to the EntityDelete builder.
func (ed *EntityDelete) Where(ps ...predicate.Entity) *EntityDelete {
ed.mutation.Where(ps...)
return ed
}
// Exec executes the deletion query and returns how many vertices were deleted.
func (ed *EntityDelete) Exec(ctx context.Context) (int, error) {
return withHooks(ctx, ed.sqlExec, ed.mutation, ed.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
func (ed *EntityDelete) ExecX(ctx context.Context) int {
n, err := ed.Exec(ctx)
if err != nil {
panic(err)
}
return n
}
// sqlExec builds and runs the DELETE statement, applying the builder's
// predicates, and converts constraint violations into *ConstraintError.
func (ed *EntityDelete) sqlExec(ctx context.Context) (int, error) {
_spec := sqlgraph.NewDeleteSpec(entity.Table, sqlgraph.NewFieldSpec(entity.FieldID, field.TypeInt))
if ps := ed.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
affected, err := sqlgraph.DeleteNodes(ctx, ed.driver, _spec)
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
ed.mutation.done = true
return affected, err
}
// EntityDeleteOne is the builder for deleting a single Entity entity.
type EntityDeleteOne struct {
ed *EntityDelete
}
// Where appends a list predicates to the EntityDelete builder.
func (edo *EntityDeleteOne) Where(ps ...predicate.Entity) *EntityDeleteOne {
edo.ed.mutation.Where(ps...)
return edo
}
// Exec executes the deletion query.
func (edo *EntityDeleteOne) Exec(ctx context.Context) error {
n, err := edo.ed.Exec(ctx)
switch {
case err != nil:
return err
case n == 0:
// No row matched the predicates: surface ent's standard not-found error.
return &NotFoundError{entity.Label}
default:
return nil
}
}
// ExecX is like Exec, but panics if an error occurs.
func (edo *EntityDeleteOne) ExecX(ctx context.Context) {
if err := edo.Exec(ctx); err != nil {
panic(err)
}
}

786
ent/entity_query.go Normal file
View File

@@ -0,0 +1,786 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"database/sql/driver"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/entity"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/ent/user"
)
// EntityQuery is the builder for querying Entity entities.
type EntityQuery struct {
config
ctx *QueryContext
order []entity.OrderOption
inters []Interceptor
predicates []predicate.Entity
withFile *FileQuery
withUser *UserQuery
withStoragePolicy *StoragePolicyQuery
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Where adds a new predicate for the EntityQuery builder.
func (eq *EntityQuery) Where(ps ...predicate.Entity) *EntityQuery {
eq.predicates = append(eq.predicates, ps...)
return eq
}
// Limit the number of records to be returned by this query.
func (eq *EntityQuery) Limit(limit int) *EntityQuery {
eq.ctx.Limit = &limit
return eq
}
// Offset to start from.
func (eq *EntityQuery) Offset(offset int) *EntityQuery {
eq.ctx.Offset = &offset
return eq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (eq *EntityQuery) Unique(unique bool) *EntityQuery {
eq.ctx.Unique = &unique
return eq
}
// Order specifies how the records should be ordered.
func (eq *EntityQuery) Order(o ...entity.OrderOption) *EntityQuery {
eq.order = append(eq.order, o...)
return eq
}
// QueryFile chains the current query on the "file" edge.
func (eq *EntityQuery) QueryFile() *FileQuery {
query := (&FileClient{config: eq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
}
selector := eq.sqlQuery(ctx)
if err := selector.Err(); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(entity.Table, entity.FieldID, selector),
sqlgraph.To(file.Table, file.FieldID),
sqlgraph.Edge(sqlgraph.M2M, true, entity.FileTable, entity.FilePrimaryKey...),
)
fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step)
return fromU, nil
}
return query
}
// QueryUser chains the current query on the "user" edge.
func (eq *EntityQuery) QueryUser() *UserQuery {
query := (&UserClient{config: eq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
}
selector := eq.sqlQuery(ctx)
if err := selector.Err(); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(entity.Table, entity.FieldID, selector),
sqlgraph.To(user.Table, user.FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, entity.UserTable, entity.UserColumn),
)
fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step)
return fromU, nil
}
return query
}
// QueryStoragePolicy chains the current query on the "storage_policy" edge.
func (eq *EntityQuery) QueryStoragePolicy() *StoragePolicyQuery {
query := (&StoragePolicyClient{config: eq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
}
selector := eq.sqlQuery(ctx)
if err := selector.Err(); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(entity.Table, entity.FieldID, selector),
sqlgraph.To(storagepolicy.Table, storagepolicy.FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, entity.StoragePolicyTable, entity.StoragePolicyColumn),
)
fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step)
return fromU, nil
}
return query
}
// First returns the first Entity entity from the query.
// Returns a *NotFoundError when no Entity was found.
func (eq *EntityQuery) First(ctx context.Context) (*Entity, error) {
nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First"))
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, &NotFoundError{entity.Label}
}
return nodes[0], nil
}
// FirstX is like First, but panics if an error occurs.
func (eq *EntityQuery) FirstX(ctx context.Context) *Entity {
node, err := eq.First(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return node
}
// FirstID returns the first Entity ID from the query.
// Returns a *NotFoundError when no Entity ID was found.
func (eq *EntityQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
err = &NotFoundError{entity.Label}
return
}
return ids[0], nil
}
// FirstIDX is like FirstID, but panics if an error occurs.
func (eq *EntityQuery) FirstIDX(ctx context.Context) int {
id, err := eq.FirstID(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return id
}
// Only returns a single Entity entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one Entity entity is found.
// Returns a *NotFoundError when no Entity entities are found.
func (eq *EntityQuery) Only(ctx context.Context) (*Entity, error) {
nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only"))
if err != nil {
return nil, err
}
switch len(nodes) {
case 1:
return nodes[0], nil
case 0:
return nil, &NotFoundError{entity.Label}
default:
return nil, &NotSingularError{entity.Label}
}
}
// OnlyX is like Only, but panics if an error occurs.
func (eq *EntityQuery) OnlyX(ctx context.Context) *Entity {
node, err := eq.Only(ctx)
if err != nil {
panic(err)
}
return node
}
// OnlyID is like Only, but returns the only Entity ID in the query.
// Returns a *NotSingularError when more than one Entity ID is found.
// Returns a *NotFoundError when no entities are found.
func (eq *EntityQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
case 1:
id = ids[0]
case 0:
err = &NotFoundError{entity.Label}
default:
err = &NotSingularError{entity.Label}
}
return
}
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (eq *EntityQuery) OnlyIDX(ctx context.Context) int {
id, err := eq.OnlyID(ctx)
if err != nil {
panic(err)
}
return id
}
// All executes the query and returns a list of Entities.
func (eq *EntityQuery) All(ctx context.Context) ([]*Entity, error) {
ctx = setContextOp(ctx, eq.ctx, "All")
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
}
qr := querierAll[[]*Entity, *EntityQuery]()
return withInterceptors[[]*Entity](ctx, eq, qr, eq.inters)
}
// AllX is like All, but panics if an error occurs.
func (eq *EntityQuery) AllX(ctx context.Context) []*Entity {
nodes, err := eq.All(ctx)
if err != nil {
panic(err)
}
return nodes
}
// IDs executes the query and returns a list of Entity IDs.
func (eq *EntityQuery) IDs(ctx context.Context) (ids []int, err error) {
if eq.ctx.Unique == nil && eq.path != nil {
eq.Unique(true)
}
ctx = setContextOp(ctx, eq.ctx, "IDs")
if err = eq.Select(entity.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (eq *EntityQuery) IDsX(ctx context.Context) []int {
ids, err := eq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
// Count returns the count of the given query.
func (eq *EntityQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, eq.ctx, "Count")
if err := eq.prepareQuery(ctx); err != nil {
return 0, err
}
return withInterceptors[int](ctx, eq, querierCount[*EntityQuery](), eq.inters)
}
// CountX is like Count, but panics if an error occurs.
func (eq *EntityQuery) CountX(ctx context.Context) int {
count, err := eq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (eq *EntityQuery) Exist(ctx context.Context) (bool, error) {
ctx = setContextOp(ctx, eq.ctx, "Exist")
switch _, err := eq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
// ExistX is like Exist, but panics if an error occurs.
func (eq *EntityQuery) ExistX(ctx context.Context) bool {
exist, err := eq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
// Clone returns a duplicate of the EntityQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (eq *EntityQuery) Clone() *EntityQuery {
if eq == nil {
return nil
}
return &EntityQuery{
config: eq.config,
ctx: eq.ctx.Clone(),
order: append([]entity.OrderOption{}, eq.order...),
inters: append([]Interceptor{}, eq.inters...),
predicates: append([]predicate.Entity{}, eq.predicates...),
withFile: eq.withFile.Clone(),
withUser: eq.withUser.Clone(),
withStoragePolicy: eq.withStoragePolicy.Clone(),
// clone intermediate query.
sql: eq.sql.Clone(),
path: eq.path,
}
}
// WithFile tells the query-builder to eager-load the nodes that are connected to
// the "file" edge. The optional arguments are used to configure the query builder of the edge.
func (eq *EntityQuery) WithFile(opts ...func(*FileQuery)) *EntityQuery {
	fq := (&FileClient{config: eq.config}).Query()
	for _, configure := range opts {
		configure(fq)
	}
	eq.withFile = fq
	return eq
}
// WithUser tells the query-builder to eager-load the nodes that are connected to
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
func (eq *EntityQuery) WithUser(opts ...func(*UserQuery)) *EntityQuery {
	uq := (&UserClient{config: eq.config}).Query()
	for _, configure := range opts {
		configure(uq)
	}
	eq.withUser = uq
	return eq
}
// WithStoragePolicy tells the query-builder to eager-load the nodes that are connected to
// the "storage_policy" edge. The optional arguments are used to configure the query builder of the edge.
func (eq *EntityQuery) WithStoragePolicy(opts ...func(*StoragePolicyQuery)) *EntityQuery {
	spq := (&StoragePolicyClient{config: eq.config}).Query()
	for _, configure := range opts {
		configure(spq)
	}
	eq.withStoragePolicy = spq
	return eq
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count     int       `json:"count,omitempty"`
//	}
//
//	client.Entity.Query().
//		GroupBy(entity.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (eq *EntityQuery) GroupBy(field string, fields ...string) *EntityGroupBy {
	// The grouped columns become the query's selected fields.
	eq.ctx.Fields = append([]string{field}, fields...)
	gb := &EntityGroupBy{build: eq}
	gb.flds = &eq.ctx.Fields
	gb.label = entity.Label
	gb.scan = gb.Scan
	return gb
}
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.Entity.Query().
//		Select(entity.FieldCreatedAt).
//		Scan(ctx, &v)
func (eq *EntityQuery) Select(fields ...string) *EntitySelect {
	eq.ctx.Fields = append(eq.ctx.Fields, fields...)
	sel := &EntitySelect{EntityQuery: eq}
	sel.label = entity.Label
	sel.flds = &eq.ctx.Fields
	sel.scan = sel.Scan
	return sel
}
// Aggregate returns a EntitySelect configured with the given aggregations.
func (eq *EntityQuery) Aggregate(fns ...AggregateFunc) *EntitySelect {
	sel := eq.Select()
	return sel.Aggregate(fns...)
}
// prepareQuery runs pre-execution steps: it lets every Traverser interceptor
// inspect or extend the query, validates the selected field names against the
// entity schema, and, if a path function is set, resolves it into the
// intermediate SQL selector (eq.sql).
func (eq *EntityQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range eq.inters {
		if inter == nil {
			// A nil interceptor usually means the ent/runtime package
			// (which wires interceptors at init time) was not imported.
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, eq); err != nil {
				return err
			}
		}
	}
	for _, f := range eq.ctx.Fields {
		if !entity.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if eq.path != nil {
		prev, err := eq.path(ctx)
		if err != nil {
			return err
		}
		eq.sql = prev
	}
	return nil
}
// sqlAll scans all matching Entity rows and, when requested via With*,
// eager-loads the file/user/storage_policy edges onto the returned nodes.
func (eq *EntityQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Entity, error) {
	var (
		nodes = []*Entity{}
		_spec = eq.querySpec()
		// loadedTypes records which edges were requested so that the
		// generated Edges.*OrErr helpers can distinguish "not loaded"
		// from "loaded but empty".
		loadedTypes = [3]bool{
			eq.withFile != nil,
			eq.withUser != nil,
			eq.withStoragePolicy != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*Entity).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		// One Entity node is materialized per scanned row.
		node := &Entity{config: eq.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, eq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	// Each requested edge is loaded with one extra query over all nodes.
	if query := eq.withFile; query != nil {
		if err := eq.loadFile(ctx, query, nodes,
			func(n *Entity) { n.Edges.File = []*File{} },
			func(n *Entity, e *File) { n.Edges.File = append(n.Edges.File, e) }); err != nil {
			return nil, err
		}
	}
	if query := eq.withUser; query != nil {
		if err := eq.loadUser(ctx, query, nodes, nil,
			func(n *Entity, e *User) { n.Edges.User = e }); err != nil {
			return nil, err
		}
	}
	if query := eq.withStoragePolicy; query != nil {
		if err := eq.loadStoragePolicy(ctx, query, nodes, nil,
			func(n *Entity, e *StoragePolicy) { n.Edges.StoragePolicy = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// loadFile eager-loads the M2M "file" edge for all nodes in one query.
// It joins through the join table, prepending the entity-side key column to
// each row so every returned File can be fanned out to all owning Entity
// nodes; nids maps a neighbor File ID to the set of owning entities.
func (eq *EntityQuery) loadFile(ctx context.Context, query *FileQuery, nodes []*Entity, init func(*Entity), assign func(*Entity, *File)) error {
	edgeIDs := make([]driver.Value, len(nodes))
	byID := make(map[int]*Entity)
	nids := make(map[int]map[*Entity]struct{})
	for i, node := range nodes {
		edgeIDs[i] = node.ID
		byID[node.ID] = node
		if init != nil {
			init(node)
		}
	}
	query.Where(func(s *sql.Selector) {
		joinT := sql.Table(entity.FileTable)
		s.Join(joinT).On(s.C(file.FieldID), joinT.C(entity.FilePrimaryKey[0]))
		s.Where(sql.InValues(joinT.C(entity.FilePrimaryKey[1]), edgeIDs...))
		columns := s.SelectedColumns()
		// Prepend the join-table key column so Assign below can route rows.
		s.Select(joinT.C(entity.FilePrimaryKey[1]))
		s.AppendSelect(columns...)
		s.SetDistinct(false)
	})
	if err := query.prepareQuery(ctx); err != nil {
		return err
	}
	qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
			assign := spec.Assign
			values := spec.ScanValues
			spec.ScanValues = func(columns []string) ([]any, error) {
				// Scan the extra leading key column into a NullInt64.
				values, err := values(columns[1:])
				if err != nil {
					return nil, err
				}
				return append([]any{new(sql.NullInt64)}, values...), nil
			}
			spec.Assign = func(columns []string, values []any) error {
				outValue := int(values[0].(*sql.NullInt64).Int64)
				inValue := int(values[1].(*sql.NullInt64).Int64)
				if nids[inValue] == nil {
					// First sighting of this neighbor: record the owner and
					// materialize the File node via the original Assign.
					nids[inValue] = map[*Entity]struct{}{byID[outValue]: {}}
					return assign(columns[1:], values[1:])
				}
				// Duplicate neighbor row: only record the additional owner.
				nids[inValue][byID[outValue]] = struct{}{}
				return nil
			}
		})
	})
	neighbors, err := withInterceptors[[]*File](ctx, query, qr, query.inters)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected "file" node returned %v`, n.ID)
		}
		for kn := range nodes {
			assign(kn, n)
		}
	}
	return nil
}
// loadUser eager-loads the "user" edge by collecting the distinct created_by
// foreign keys, fetching the matching users in one query, and assigning each
// neighbor back to every Entity that references it. init is unused for this
// to-one edge but kept for signature parity with the other loaders.
func (eq *EntityQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*Entity, init func(*Entity), assign func(*Entity, *User)) error {
	fks := make([]int, 0, len(nodes))
	byFK := make(map[int][]*Entity)
	for _, node := range nodes {
		fk := node.CreatedBy
		if _, seen := byFK[fk]; !seen {
			fks = append(fks, fk)
		}
		byFK[fk] = append(byFK[fk], node)
	}
	if len(fks) == 0 {
		return nil
	}
	query.Where(user.IDIn(fks...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, neighbor := range neighbors {
		owners, ok := byFK[neighbor.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "created_by" returned %v`, neighbor.ID)
		}
		for _, owner := range owners {
			assign(owner, neighbor)
		}
	}
	return nil
}
// loadStoragePolicy eager-loads the "storage_policy" edge by collecting the
// distinct storage_policy_entities foreign keys, fetching the policies in one
// query, and assigning each neighbor to every referencing Entity.
func (eq *EntityQuery) loadStoragePolicy(ctx context.Context, query *StoragePolicyQuery, nodes []*Entity, init func(*Entity), assign func(*Entity, *StoragePolicy)) error {
	fks := make([]int, 0, len(nodes))
	byFK := make(map[int][]*Entity)
	for _, node := range nodes {
		fk := node.StoragePolicyEntities
		if _, seen := byFK[fk]; !seen {
			fks = append(fks, fk)
		}
		byFK[fk] = append(byFK[fk], node)
	}
	if len(fks) == 0 {
		return nil
	}
	query.Where(storagepolicy.IDIn(fks...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, neighbor := range neighbors {
		owners, ok := byFK[neighbor.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "storage_policy_entities" returned %v`, neighbor.ID)
		}
		for _, owner := range owners {
			assign(owner, neighbor)
		}
	}
	return nil
}
// sqlCount counts the rows matching the query. When explicit fields were
// selected, uniqueness (DISTINCT) is applied only if it was requested.
func (eq *EntityQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := eq.querySpec()
	_spec.Node.Columns = eq.ctx.Fields
	if len(eq.ctx.Fields) > 0 {
		_spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, eq.driver, _spec)
}
// querySpec translates the builder state (selected fields, predicates,
// ordering, and paging) into a sqlgraph.QuerySpec for execution.
func (eq *EntityQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(entity.Table, entity.Columns, sqlgraph.NewFieldSpec(entity.FieldID, field.TypeInt))
	_spec.From = eq.sql
	if unique := eq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if eq.path != nil {
		// Edge traversals can produce duplicate rows; default to DISTINCT.
		_spec.Unique = true
	}
	if fields := eq.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always fetched first.
		_spec.Node.Columns = append(_spec.Node.Columns, entity.FieldID)
		for i := range fields {
			if fields[i] != entity.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// FK columns must be fetched to resolve eager-loaded to-one edges.
		if eq.withUser != nil {
			_spec.Node.AddColumnOnce(entity.FieldCreatedBy)
		}
		if eq.withStoragePolicy != nil {
			_spec.Node.AddColumnOnce(entity.FieldStoragePolicyEntities)
		}
	}
	if ps := eq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := eq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := eq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := eq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
// sqlQuery builds the raw SELECT statement from the builder state. Unlike
// querySpec, it returns a *sql.Selector used by the GroupBy/Select scan paths.
func (eq *EntityQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(eq.driver.Dialect())
	t1 := builder.Table(entity.Table)
	columns := eq.ctx.Fields
	if len(columns) == 0 {
		columns = entity.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if eq.sql != nil {
		// Reuse the intermediate selector (eq.sql) when one was set.
		selector = eq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if eq.ctx.Unique != nil && *eq.ctx.Unique {
		selector.Distinct()
	}
	for _, p := range eq.predicates {
		p(selector)
	}
	for _, p := range eq.order {
		p(selector)
	}
	if offset := eq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := eq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
// EntityGroupBy is the group-by builder for Entity entities.
type EntityGroupBy struct {
	selector
	// build is the underlying query the GROUP BY statement is executed on.
	build *EntityQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
// Functions are appended to any aggregations added earlier.
func (egb *EntityGroupBy) Aggregate(fns ...AggregateFunc) *EntityGroupBy {
	egb.fns = append(egb.fns, fns...)
	return egb
}
// Scan applies the selector query and scans the result into the given value.
func (egb *EntityGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, egb.build.ctx, "GroupBy")
	// Validate the underlying query before executing the GROUP BY.
	if err := egb.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*EntityQuery, *EntityGroupBy](ctx, egb.build, egb, egb.build.inters, v)
}
// sqlScan builds and executes the GROUP BY statement, scanning rows into v.
func (egb *EntityGroupBy) sqlScan(ctx context.Context, root *EntityQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(egb.fns))
	for _, fn := range egb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// Default selection: the grouped fields followed by the aggregations.
		columns := make([]string, 0, len(*egb.flds)+len(egb.fns))
		for _, f := range *egb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*egb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := egb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
// EntitySelect is the builder for selecting fields of Entity entities.
type EntitySelect struct {
	// The embedded query carries the selected fields and execution state.
	*EntityQuery
	selector
}
// Aggregate adds the given aggregation functions to the selector query.
// Functions are appended to any aggregations added earlier.
func (es *EntitySelect) Aggregate(fns ...AggregateFunc) *EntitySelect {
	es.fns = append(es.fns, fns...)
	return es
}
// Scan applies the selector query and scans the result into the given value.
func (es *EntitySelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, es.ctx, "Select")
	// Validate selected fields and run traversers before executing.
	if err := es.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*EntityQuery, *EntitySelect](ctx, es.EntityQuery, es, es.inters, v)
}
// sqlScan builds and executes the SELECT statement, scanning rows into v.
func (es *EntitySelect) sqlScan(ctx context.Context, root *EntityQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(es.fns))
	for _, fn := range es.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Aggregations either replace the selection (no fields chosen) or
	// are appended after the explicitly selected fields.
	switch n := len(*es.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := es.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

1017
ent/entity_update.go Normal file

File diff suppressed because it is too large Load Diff

84
ent/enttest/enttest.go Normal file
View File

@@ -0,0 +1,84 @@
// Code generated by ent, DO NOT EDIT.
package enttest
import (
"context"
"github.com/cloudreve/Cloudreve/v4/ent"
// required by schema hooks.
_ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
"entgo.io/ent/dialect/sql/schema"
"github.com/cloudreve/Cloudreve/v4/ent/migrate"
)
type (
	// TestingT is the interface that is shared between
	// testing.T and testing.B and used by enttest.
	TestingT interface {
		FailNow()
		Error(...any)
	}
	// Option configures client creation.
	Option func(*options)
	options struct {
		// opts are forwarded to ent.Open / ent.NewClient.
		opts []ent.Option
		// migrateOpts are forwarded to the auto-migration step.
		migrateOpts []schema.MigrateOption
	}
)
// WithOptions forwards options to client creation.
func WithOptions(opts ...ent.Option) Option {
	return func(cfg *options) {
		cfg.opts = append(cfg.opts, opts...)
	}
}
// WithMigrateOptions forwards options to auto migration.
func WithMigrateOptions(opts ...schema.MigrateOption) Option {
	return func(cfg *options) {
		cfg.migrateOpts = append(cfg.migrateOpts, opts...)
	}
}
// newOptions folds the given Option setters into a fresh options value.
func newOptions(opts []Option) *options {
	result := &options{}
	for _, apply := range opts {
		apply(result)
	}
	return result
}
// Open calls ent.Open and auto-run migration.
// Any error fails the test immediately via t.Error + t.FailNow.
func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client {
	cfg := newOptions(opts)
	client, err := ent.Open(driverName, dataSourceName, cfg.opts...)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	migrateSchema(t, client, cfg)
	return client
}
// NewClient calls ent.NewClient and auto-run migration.
func NewClient(t TestingT, opts ...Option) *ent.Client {
	cfg := newOptions(opts)
	client := ent.NewClient(cfg.opts...)
	migrateSchema(t, client, cfg)
	return client
}
// migrateSchema copies the generated migration tables and runs auto
// migration against the client's schema, failing the test on any error.
func migrateSchema(t TestingT, c *ent.Client, o *options) {
	tables, err := schema.CopyTables(migrate.Tables)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil {
		t.Error(err)
		t.FailNow()
	}
}

424
ent/file.go Normal file
View File

@@ -0,0 +1,424 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"encoding/json"
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
)
// File is the model entity for the File schema.
type File struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// Type holds the value of the "type" field.
	Type int `json:"type,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// OwnerID holds the value of the "owner_id" field.
	OwnerID int `json:"owner_id,omitempty"`
	// Size holds the value of the "size" field.
	Size int64 `json:"size,omitempty"`
	// PrimaryEntity holds the value of the "primary_entity" field.
	PrimaryEntity int `json:"primary_entity,omitempty"`
	// FileChildren holds the value of the "file_children" field.
	FileChildren int `json:"file_children,omitempty"`
	// IsSymbolic holds the value of the "is_symbolic" field.
	IsSymbolic bool `json:"is_symbolic,omitempty"`
	// Props holds the value of the "props" field.
	Props *types.FileProps `json:"props,omitempty"`
	// StoragePolicyFiles holds the value of the "storage_policy_files" field.
	StoragePolicyFiles int `json:"storage_policy_files,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the FileQuery when eager-loading is set.
	Edges FileEdges `json:"edges"`
	// selectValues stores values scanned for columns outside the static
	// schema (modifiers, custom selects); retrieved via Value.
	selectValues sql.SelectValues
}
// FileEdges holds the relations/edges for other nodes in the graph.
type FileEdges struct {
	// Owner holds the value of the owner edge.
	Owner *User `json:"owner,omitempty"`
	// StoragePolicies holds the value of the storage_policies edge.
	StoragePolicies *StoragePolicy `json:"storage_policies,omitempty"`
	// Parent holds the value of the parent edge.
	Parent *File `json:"parent,omitempty"`
	// Children holds the value of the children edge.
	Children []*File `json:"children,omitempty"`
	// Metadata holds the value of the metadata edge.
	Metadata []*Metadata `json:"metadata,omitempty"`
	// Entities holds the value of the entities edge.
	Entities []*Entity `json:"entities,omitempty"`
	// Shares holds the value of the shares edge.
	Shares []*Share `json:"shares,omitempty"`
	// DirectLinks holds the value of the direct_links edge.
	DirectLinks []*DirectLink `json:"direct_links,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index order: owner, storage_policies, parent, children,
	// metadata, entities, shares, direct_links.
	loadedTypes [8]bool
}
// OwnerOrErr returns the Owner value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e FileEdges) OwnerOrErr() (*User, error) {
	if !e.loadedTypes[0] {
		return nil, &NotLoadedError{edge: "owner"}
	}
	if e.Owner == nil {
		// Edge was loaded but the node was not found.
		return nil, &NotFoundError{label: user.Label}
	}
	return e.Owner, nil
}
// StoragePoliciesOrErr returns the StoragePolicies value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e FileEdges) StoragePoliciesOrErr() (*StoragePolicy, error) {
	if !e.loadedTypes[1] {
		return nil, &NotLoadedError{edge: "storage_policies"}
	}
	if e.StoragePolicies == nil {
		// Edge was loaded but the node was not found.
		return nil, &NotFoundError{label: storagepolicy.Label}
	}
	return e.StoragePolicies, nil
}
// ParentOrErr returns the Parent value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e FileEdges) ParentOrErr() (*File, error) {
	if !e.loadedTypes[2] {
		return nil, &NotLoadedError{edge: "parent"}
	}
	if e.Parent == nil {
		// Edge was loaded but the node was not found.
		return nil, &NotFoundError{label: file.Label}
	}
	return e.Parent, nil
}
// ChildrenOrErr returns the Children value or an error if the edge
// was not loaded in eager-loading.
func (e FileEdges) ChildrenOrErr() ([]*File, error) {
	if !e.loadedTypes[3] {
		return nil, &NotLoadedError{edge: "children"}
	}
	return e.Children, nil
}
// MetadataOrErr returns the Metadata value or an error if the edge
// was not loaded in eager-loading.
func (e FileEdges) MetadataOrErr() ([]*Metadata, error) {
	if !e.loadedTypes[4] {
		return nil, &NotLoadedError{edge: "metadata"}
	}
	return e.Metadata, nil
}
// EntitiesOrErr returns the Entities value or an error if the edge
// was not loaded in eager-loading.
func (e FileEdges) EntitiesOrErr() ([]*Entity, error) {
	if !e.loadedTypes[5] {
		return nil, &NotLoadedError{edge: "entities"}
	}
	return e.Entities, nil
}
// SharesOrErr returns the Shares value or an error if the edge
// was not loaded in eager-loading.
func (e FileEdges) SharesOrErr() ([]*Share, error) {
	if !e.loadedTypes[6] {
		return nil, &NotLoadedError{edge: "shares"}
	}
	return e.Shares, nil
}
// DirectLinksOrErr returns the DirectLinks value or an error if the edge
// was not loaded in eager-loading.
func (e FileEdges) DirectLinksOrErr() ([]*DirectLink, error) {
	if !e.loadedTypes[7] {
		return nil, &NotLoadedError{edge: "direct_links"}
	}
	return e.DirectLinks, nil
}
// scanValues returns the types for scanning values from sql.Rows.
// Props is scanned as raw []byte and JSON-decoded later in assignValues.
func (*File) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case file.FieldProps:
			values[i] = new([]byte)
		case file.FieldIsSymbolic:
			values[i] = new(sql.NullBool)
		case file.FieldID, file.FieldType, file.FieldOwnerID, file.FieldSize, file.FieldPrimaryEntity, file.FieldFileChildren, file.FieldStoragePolicyFiles:
			values[i] = new(sql.NullInt64)
		case file.FieldName:
			values[i] = new(sql.NullString)
		case file.FieldCreatedAt, file.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		default:
			// Unknown columns (modifiers, custom selects) are captured as-is.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the File fields. Columns not in the static schema are routed to
// selectValues and retrieved via Value.
func (f *File) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case file.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			f.ID = int(value.Int64)
		case file.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				f.CreatedAt = value.Time
			}
		case file.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				f.UpdatedAt = value.Time
			}
		case file.FieldType:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field type", values[i])
			} else if value.Valid {
				f.Type = int(value.Int64)
			}
		case file.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				f.Name = value.String
			}
		case file.FieldOwnerID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field owner_id", values[i])
			} else if value.Valid {
				f.OwnerID = int(value.Int64)
			}
		case file.FieldSize:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field size", values[i])
			} else if value.Valid {
				f.Size = value.Int64
			}
		case file.FieldPrimaryEntity:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field primary_entity", values[i])
			} else if value.Valid {
				f.PrimaryEntity = int(value.Int64)
			}
		case file.FieldFileChildren:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field file_children", values[i])
			} else if value.Valid {
				f.FileChildren = int(value.Int64)
			}
		case file.FieldIsSymbolic:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field is_symbolic", values[i])
			} else if value.Valid {
				f.IsSymbolic = value.Bool
			}
		case file.FieldProps:
			// Props is stored as JSON; empty payloads leave f.Props nil.
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field props", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &f.Props); err != nil {
					return fmt.Errorf("unmarshal field props: %w", err)
				}
			}
		case file.FieldStoragePolicyFiles:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field storage_policy_files", values[i])
			} else if value.Valid {
				f.StoragePolicyFiles = int(value.Int64)
			}
		default:
			f.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the File.
// This includes values selected through modifiers, order, etc.
// The values are stored by assignValues for columns outside the static schema.
func (f *File) Value(name string) (ent.Value, error) {
	return f.selectValues.Get(name)
}
// QueryOwner queries the "owner" edge of the File entity.
func (f *File) QueryOwner() *UserQuery {
	client := NewFileClient(f.config)
	return client.QueryOwner(f)
}
// QueryStoragePolicies queries the "storage_policies" edge of the File entity.
func (f *File) QueryStoragePolicies() *StoragePolicyQuery {
	client := NewFileClient(f.config)
	return client.QueryStoragePolicies(f)
}
// QueryParent queries the "parent" edge of the File entity.
func (f *File) QueryParent() *FileQuery {
	client := NewFileClient(f.config)
	return client.QueryParent(f)
}
// QueryChildren queries the "children" edge of the File entity.
func (f *File) QueryChildren() *FileQuery {
	client := NewFileClient(f.config)
	return client.QueryChildren(f)
}
// QueryMetadata queries the "metadata" edge of the File entity.
func (f *File) QueryMetadata() *MetadataQuery {
	client := NewFileClient(f.config)
	return client.QueryMetadata(f)
}
// QueryEntities queries the "entities" edge of the File entity.
func (f *File) QueryEntities() *EntityQuery {
	client := NewFileClient(f.config)
	return client.QueryEntities(f)
}
// QueryShares queries the "shares" edge of the File entity.
func (f *File) QueryShares() *ShareQuery {
	client := NewFileClient(f.config)
	return client.QueryShares(f)
}
// QueryDirectLinks queries the "direct_links" edge of the File entity.
func (f *File) QueryDirectLinks() *DirectLinkQuery {
	client := NewFileClient(f.config)
	return client.QueryDirectLinks(f)
}
// Update returns a builder for updating this File.
// Note that you need to call File.Unwrap() before calling this method if this File
// was returned from a transaction, and the transaction was committed or rolled back.
func (f *File) Update() *FileUpdateOne {
	client := NewFileClient(f.config)
	return client.UpdateOne(f)
}
// Unwrap unwraps the File entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// It panics when the File was not obtained from a transaction.
func (f *File) Unwrap() *File {
	tx, ok := f.config.driver.(*txDriver)
	if !ok {
		panic("ent: File is not a transactional entity")
	}
	f.config.driver = tx.drv
	return f
}
// String implements the fmt.Stringer.
// It renders every static field in declaration order for debugging/logging.
func (f *File) String() string {
	var builder strings.Builder
	builder.WriteString("File(")
	builder.WriteString(fmt.Sprintf("id=%v, ", f.ID))
	builder.WriteString("created_at=")
	builder.WriteString(f.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(f.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("type=")
	builder.WriteString(fmt.Sprintf("%v", f.Type))
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(f.Name)
	builder.WriteString(", ")
	builder.WriteString("owner_id=")
	builder.WriteString(fmt.Sprintf("%v", f.OwnerID))
	builder.WriteString(", ")
	builder.WriteString("size=")
	builder.WriteString(fmt.Sprintf("%v", f.Size))
	builder.WriteString(", ")
	builder.WriteString("primary_entity=")
	builder.WriteString(fmt.Sprintf("%v", f.PrimaryEntity))
	builder.WriteString(", ")
	builder.WriteString("file_children=")
	builder.WriteString(fmt.Sprintf("%v", f.FileChildren))
	builder.WriteString(", ")
	builder.WriteString("is_symbolic=")
	builder.WriteString(fmt.Sprintf("%v", f.IsSymbolic))
	builder.WriteString(", ")
	builder.WriteString("props=")
	builder.WriteString(fmt.Sprintf("%v", f.Props))
	builder.WriteString(", ")
	builder.WriteString("storage_policy_files=")
	builder.WriteString(fmt.Sprintf("%v", f.StoragePolicyFiles))
	builder.WriteByte(')')
	return builder.String()
}
// SetOwner manually sets the "owner" edge to v and marks it as loaded,
// so a later OwnerOrErr does not report NotLoadedError.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetOwner(v *User) {
	f.Edges.Owner = v
	f.Edges.loadedTypes[0] = true
}
// SetStoragePolicies manually sets the "storage_policies" edge to v and
// marks it as loaded. Receiver renamed e -> f for consistency with the
// other *File methods.
func (f *File) SetStoragePolicies(v *StoragePolicy) {
	f.Edges.StoragePolicies = v
	f.Edges.loadedTypes[1] = true
}
// SetParent manually sets the "parent" edge to v and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetParent(v *File) {
	f.Edges.Parent = v
	f.Edges.loadedTypes[2] = true
}
// SetChildren manually sets the "children" edge to v and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetChildren(v []*File) {
	f.Edges.Children = v
	f.Edges.loadedTypes[3] = true
}
// SetMetadata manually sets the "metadata" edge to v and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetMetadata(v []*Metadata) {
	f.Edges.Metadata = v
	f.Edges.loadedTypes[4] = true
}
// SetEntities manually sets the "entities" edge to v and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetEntities(v []*Entity) {
	f.Edges.Entities = v
	f.Edges.loadedTypes[5] = true
}
// SetShares manually sets the "shares" edge to v and marks it as loaded.
// Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetShares(v []*Share) {
	f.Edges.Shares = v
	f.Edges.loadedTypes[6] = true
}
// SetDirectLinks manually sets the "direct_links" edge to v and marks it as
// loaded. Receiver renamed e -> f for consistency with the other *File methods.
func (f *File) SetDirectLinks(v []*DirectLink) {
	f.Edges.DirectLinks = v
	f.Edges.loadedTypes[7] = true
}
// Files is a parsable slice of File values.
type Files []*File

360
ent/file/file.go Normal file
View File

@@ -0,0 +1,360 @@
// Code generated by ent, DO NOT EDIT.
package file
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
// Label holds the string label denoting the file type in the database.
Label = "file"
// FieldID holds the string denoting the id field in the database.
FieldID = "id"
// FieldCreatedAt holds the string denoting the created_at field in the database.
FieldCreatedAt = "created_at"
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
FieldUpdatedAt = "updated_at"
// FieldType holds the string denoting the type field in the database.
FieldType = "type"
// FieldName holds the string denoting the name field in the database.
FieldName = "name"
// FieldOwnerID holds the string denoting the owner_id field in the database.
FieldOwnerID = "owner_id"
// FieldSize holds the string denoting the size field in the database.
FieldSize = "size"
// FieldPrimaryEntity holds the string denoting the primary_entity field in the database.
FieldPrimaryEntity = "primary_entity"
// FieldFileChildren holds the string denoting the file_children field in the database.
FieldFileChildren = "file_children"
// FieldIsSymbolic holds the string denoting the is_symbolic field in the database.
FieldIsSymbolic = "is_symbolic"
// FieldProps holds the string denoting the props field in the database.
FieldProps = "props"
// FieldStoragePolicyFiles holds the string denoting the storage_policy_files field in the database.
FieldStoragePolicyFiles = "storage_policy_files"
// EdgeOwner holds the string denoting the owner edge name in mutations.
EdgeOwner = "owner"
// EdgeStoragePolicies holds the string denoting the storage_policies edge name in mutations.
EdgeStoragePolicies = "storage_policies"
// EdgeParent holds the string denoting the parent edge name in mutations.
EdgeParent = "parent"
// EdgeChildren holds the string denoting the children edge name in mutations.
EdgeChildren = "children"
// EdgeMetadata holds the string denoting the metadata edge name in mutations.
EdgeMetadata = "metadata"
// EdgeEntities holds the string denoting the entities edge name in mutations.
EdgeEntities = "entities"
// EdgeShares holds the string denoting the shares edge name in mutations.
EdgeShares = "shares"
// EdgeDirectLinks holds the string denoting the direct_links edge name in mutations.
EdgeDirectLinks = "direct_links"
// Table holds the table name of the file in the database.
Table = "files"
// OwnerTable is the table that holds the owner relation/edge.
OwnerTable = "files"
// OwnerInverseTable is the table name for the User entity.
// It exists in this package in order to avoid circular dependency with the "user" package.
OwnerInverseTable = "users"
// OwnerColumn is the table column denoting the owner relation/edge.
OwnerColumn = "owner_id"
// StoragePoliciesTable is the table that holds the storage_policies relation/edge.
StoragePoliciesTable = "files"
// StoragePoliciesInverseTable is the table name for the StoragePolicy entity.
// It exists in this package in order to avoid circular dependency with the "storagepolicy" package.
StoragePoliciesInverseTable = "storage_policies"
// StoragePoliciesColumn is the table column denoting the storage_policies relation/edge.
StoragePoliciesColumn = "storage_policy_files"
// ParentTable is the table that holds the parent relation/edge.
ParentTable = "files"
// ParentColumn is the table column denoting the parent relation/edge.
ParentColumn = "file_children"
// ChildrenTable is the table that holds the children relation/edge.
ChildrenTable = "files"
// ChildrenColumn is the table column denoting the children relation/edge.
ChildrenColumn = "file_children"
// MetadataTable is the table that holds the metadata relation/edge.
MetadataTable = "metadata"
// MetadataInverseTable is the table name for the Metadata entity.
// It exists in this package in order to avoid circular dependency with the "metadata" package.
MetadataInverseTable = "metadata"
// MetadataColumn is the table column denoting the metadata relation/edge.
MetadataColumn = "file_id"
// EntitiesTable is the table that holds the entities relation/edge. The primary key declared below.
EntitiesTable = "file_entities"
// EntitiesInverseTable is the table name for the Entity entity.
// It exists in this package in order to avoid circular dependency with the "entity" package.
EntitiesInverseTable = "entities"
// SharesTable is the table that holds the shares relation/edge.
SharesTable = "shares"
// SharesInverseTable is the table name for the Share entity.
// It exists in this package in order to avoid circular dependency with the "share" package.
SharesInverseTable = "shares"
// SharesColumn is the table column denoting the shares relation/edge.
SharesColumn = "file_shares"
// DirectLinksTable is the table that holds the direct_links relation/edge.
DirectLinksTable = "direct_links"
// DirectLinksInverseTable is the table name for the DirectLink entity.
// It exists in this package in order to avoid circular dependency with the "directlink" package.
DirectLinksInverseTable = "direct_links"
// DirectLinksColumn is the table column denoting the direct_links relation/edge.
DirectLinksColumn = "file_id"
)
// Columns holds all SQL columns for file fields.
// The order mirrors the Field* constant declarations and is consumed by
// ValidColumn; this file is generated by ent — regenerate rather than
// editing by hand.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldType,
	FieldName,
	FieldOwnerID,
	FieldSize,
	FieldPrimaryEntity,
	FieldFileChildren,
	FieldIsSymbolic,
	FieldProps,
	FieldStoragePolicyFiles,
}
var (
	// EntitiesPrimaryKey holds the composite primary-key columns of the
	// join table backing the "entities" M2M relation (see EntitiesTable).
	EntitiesPrimaryKey = []string{"file_id", "entity_id"}
)
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks holds the schema hooks registered for the File entity.
	Hooks [1]ent.Hook
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// DefaultSize holds the default value on creation for the "size" field.
	DefaultSize int64
	// DefaultIsSymbolic holds the default value on creation for the "is_symbolic" field.
	DefaultIsSymbolic bool
)
// OrderOption defines the ordering options for the File queries.
type OrderOption func(*sql.Selector)

// The By* helpers below order query results by a single scalar field of
// the File table. Each returns an OrderOption that appends the matching
// ORDER BY term to the selector.

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldType, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByOwnerID orders the results by the owner_id field.
func ByOwnerID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldOwnerID, opts...).ToFunc()
}

// BySize orders the results by the size field.
func BySize(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSize, opts...).ToFunc()
}

// ByPrimaryEntity orders the results by the primary_entity field.
func ByPrimaryEntity(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPrimaryEntity, opts...).ToFunc()
}

// ByFileChildren orders the results by the file_children field.
func ByFileChildren(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldFileChildren, opts...).ToFunc()
}

// ByIsSymbolic orders the results by the is_symbolic field.
func ByIsSymbolic(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIsSymbolic, opts...).ToFunc()
}

// ByStoragePolicyFiles orders the results by the storage_policy_files field.
func ByStoragePolicyFiles(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStoragePolicyFiles, opts...).ToFunc()
}
// The helpers below order query results by neighbor (edge) terms: for
// unique edges (owner, storage_policies, parent) by a field of the single
// neighbor, and for non-unique edges (children, metadata, entities,
// shares, direct_links) either by the neighbor count or by aggregate
// terms over the neighbors.

// ByOwnerField orders the results by owner field.
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
	}
}

// ByStoragePoliciesField orders the results by storage_policies field.
func ByStoragePoliciesField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newStoragePoliciesStep(), sql.OrderByField(field, opts...))
	}
}

// ByParentField orders the results by parent field.
func ByParentField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newParentStep(), sql.OrderByField(field, opts...))
	}
}

// ByChildrenCount orders the results by children count.
func ByChildrenCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newChildrenStep(), opts...)
	}
}

// ByChildren orders the results by children terms.
func ByChildren(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newChildrenStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByMetadataCount orders the results by metadata count.
func ByMetadataCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newMetadataStep(), opts...)
	}
}

// ByMetadata orders the results by metadata terms.
func ByMetadata(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newMetadataStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByEntitiesCount orders the results by entities count.
func ByEntitiesCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newEntitiesStep(), opts...)
	}
}

// ByEntities orders the results by entities terms.
func ByEntities(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newEntitiesStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// BySharesCount orders the results by shares count.
func BySharesCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newSharesStep(), opts...)
	}
}

// ByShares orders the results by shares terms.
func ByShares(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newSharesStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByDirectLinksCount orders the results by direct_links count.
func ByDirectLinksCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newDirectLinksStep(), opts...)
	}
}

// ByDirectLinks orders the results by direct_links terms.
func ByDirectLinks(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newDirectLinksStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}
// The new*Step constructors below build the sqlgraph traversal step for
// each edge of the File entity. The edge kind (M2O/O2M/M2M) and the
// inverse flag must stay consistent with the edge declarations in the
// schema and with the Edge*/Table constants above.

// newOwnerStep returns the traversal step for the "owner" edge (M2O to users).
func newOwnerStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(OwnerInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
	)
}

// newStoragePoliciesStep returns the traversal step for the "storage_policies" edge (M2O to storage_policies).
func newStoragePoliciesStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(StoragePoliciesInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, StoragePoliciesTable, StoragePoliciesColumn),
	)
}

// newParentStep returns the traversal step for the self-referential "parent" edge (M2O to files).
func newParentStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(Table, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
	)
}

// newChildrenStep returns the traversal step for the self-referential "children" edge (O2M to files).
func newChildrenStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(Table, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
	)
}

// newMetadataStep returns the traversal step for the "metadata" edge (O2M to metadata).
func newMetadataStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(MetadataInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, MetadataTable, MetadataColumn),
	)
}

// newEntitiesStep returns the traversal step for the "entities" edge (M2M via the file_entities join table).
func newEntitiesStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(EntitiesInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2M, false, EntitiesTable, EntitiesPrimaryKey...),
	)
}

// newSharesStep returns the traversal step for the "shares" edge (O2M to shares).
func newSharesStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(SharesInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, SharesTable, SharesColumn),
	)
}

// newDirectLinksStep returns the traversal step for the "direct_links" edge (O2M to direct_links).
func newDirectLinksStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(DirectLinksInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, DirectLinksTable, DirectLinksColumn),
	)
}

680
ent/file/where.go Normal file
View File

@@ -0,0 +1,680 @@
// Code generated by ent, DO NOT EDIT.
package file
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// The ID* predicates below filter File vertices by their int primary key.

// ID filters vertices based on their ID field.
func ID(id int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.File {
	return predicate.File(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.File {
	return predicate.File(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.File {
	return predicate.File(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.File {
	return predicate.File(sql.FieldLTE(FieldID, id))
}
// The predicates below are convenience equality shortcuts: Field(v) is
// identical to FieldEQ(v) for each scalar field.

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.File {
	return predicate.File(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.File {
	return predicate.File(sql.FieldEQ(FieldUpdatedAt, v))
}

// Type applies equality check predicate on the "type" field. It's identical to TypeEQ.
func Type(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldType, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.File {
	return predicate.File(sql.FieldEQ(FieldName, v))
}

// OwnerID applies equality check predicate on the "owner_id" field. It's identical to OwnerIDEQ.
func OwnerID(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldOwnerID, v))
}

// Size applies equality check predicate on the "size" field. It's identical to SizeEQ.
func Size(v int64) predicate.File {
	return predicate.File(sql.FieldEQ(FieldSize, v))
}

// PrimaryEntity applies equality check predicate on the "primary_entity" field. It's identical to PrimaryEntityEQ.
func PrimaryEntity(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldPrimaryEntity, v))
}

// FileChildren applies equality check predicate on the "file_children" field. It's identical to FileChildrenEQ.
func FileChildren(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldFileChildren, v))
}

// IsSymbolic applies equality check predicate on the "is_symbolic" field. It's identical to IsSymbolicEQ.
func IsSymbolic(v bool) predicate.File {
	return predicate.File(sql.FieldEQ(FieldIsSymbolic, v))
}

// StoragePolicyFiles applies equality check predicate on the "storage_policy_files" field. It's identical to StoragePolicyFilesEQ.
func StoragePolicyFiles(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldStoragePolicyFiles, v))
}
// Comparison predicates for the "created_at" and "updated_at" timestamp
// fields. time.Time values are passed through to the SQL layer as-is.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.File {
	return predicate.File(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.File {
	return predicate.File(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.File {
	return predicate.File(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.File {
	return predicate.File(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.File {
	return predicate.File(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.File {
	return predicate.File(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.File {
	return predicate.File(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.File {
	return predicate.File(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.File {
	return predicate.File(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.File {
	return predicate.File(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.File {
	return predicate.File(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.File {
	return predicate.File(sql.FieldLTE(FieldUpdatedAt, v))
}
// Comparison predicates for the int "type" field and the string "name"
// field. The name predicates additionally include substring and
// case-folded variants.

// TypeEQ applies the EQ predicate on the "type" field.
func TypeEQ(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldType, v))
}

// TypeNEQ applies the NEQ predicate on the "type" field.
func TypeNEQ(v int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldType, v))
}

// TypeIn applies the In predicate on the "type" field.
func TypeIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldType, vs...))
}

// TypeNotIn applies the NotIn predicate on the "type" field.
func TypeNotIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldType, vs...))
}

// TypeGT applies the GT predicate on the "type" field.
func TypeGT(v int) predicate.File {
	return predicate.File(sql.FieldGT(FieldType, v))
}

// TypeGTE applies the GTE predicate on the "type" field.
func TypeGTE(v int) predicate.File {
	return predicate.File(sql.FieldGTE(FieldType, v))
}

// TypeLT applies the LT predicate on the "type" field.
func TypeLT(v int) predicate.File {
	return predicate.File(sql.FieldLT(FieldType, v))
}

// TypeLTE applies the LTE predicate on the "type" field.
func TypeLTE(v int) predicate.File {
	return predicate.File(sql.FieldLTE(FieldType, v))
}

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.File {
	return predicate.File(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.File {
	return predicate.File(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.File {
	return predicate.File(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.File {
	return predicate.File(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.File {
	return predicate.File(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.File {
	return predicate.File(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.File {
	return predicate.File(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.File {
	return predicate.File(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.File {
	return predicate.File(sql.FieldHasSuffix(FieldName, v))
}

// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.File {
	return predicate.File(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.File {
	return predicate.File(sql.FieldContainsFold(FieldName, v))
}
// Predicates for "owner_id" (foreign key, equality/membership only),
// "size" (int64, full comparison set), and the nullable foreign-key
// fields "primary_entity" and "file_children" (which additionally get
// IsNil/NotNil variants).

// OwnerIDEQ applies the EQ predicate on the "owner_id" field.
func OwnerIDEQ(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldOwnerID, v))
}

// OwnerIDNEQ applies the NEQ predicate on the "owner_id" field.
func OwnerIDNEQ(v int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldOwnerID, v))
}

// OwnerIDIn applies the In predicate on the "owner_id" field.
func OwnerIDIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldOwnerID, vs...))
}

// OwnerIDNotIn applies the NotIn predicate on the "owner_id" field.
func OwnerIDNotIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldOwnerID, vs...))
}

// SizeEQ applies the EQ predicate on the "size" field.
func SizeEQ(v int64) predicate.File {
	return predicate.File(sql.FieldEQ(FieldSize, v))
}

// SizeNEQ applies the NEQ predicate on the "size" field.
func SizeNEQ(v int64) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldSize, v))
}

// SizeIn applies the In predicate on the "size" field.
func SizeIn(vs ...int64) predicate.File {
	return predicate.File(sql.FieldIn(FieldSize, vs...))
}

// SizeNotIn applies the NotIn predicate on the "size" field.
func SizeNotIn(vs ...int64) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldSize, vs...))
}

// SizeGT applies the GT predicate on the "size" field.
func SizeGT(v int64) predicate.File {
	return predicate.File(sql.FieldGT(FieldSize, v))
}

// SizeGTE applies the GTE predicate on the "size" field.
func SizeGTE(v int64) predicate.File {
	return predicate.File(sql.FieldGTE(FieldSize, v))
}

// SizeLT applies the LT predicate on the "size" field.
func SizeLT(v int64) predicate.File {
	return predicate.File(sql.FieldLT(FieldSize, v))
}

// SizeLTE applies the LTE predicate on the "size" field.
func SizeLTE(v int64) predicate.File {
	return predicate.File(sql.FieldLTE(FieldSize, v))
}

// PrimaryEntityEQ applies the EQ predicate on the "primary_entity" field.
func PrimaryEntityEQ(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldPrimaryEntity, v))
}

// PrimaryEntityNEQ applies the NEQ predicate on the "primary_entity" field.
func PrimaryEntityNEQ(v int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldPrimaryEntity, v))
}

// PrimaryEntityIn applies the In predicate on the "primary_entity" field.
func PrimaryEntityIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldPrimaryEntity, vs...))
}

// PrimaryEntityNotIn applies the NotIn predicate on the "primary_entity" field.
func PrimaryEntityNotIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldPrimaryEntity, vs...))
}

// PrimaryEntityGT applies the GT predicate on the "primary_entity" field.
func PrimaryEntityGT(v int) predicate.File {
	return predicate.File(sql.FieldGT(FieldPrimaryEntity, v))
}

// PrimaryEntityGTE applies the GTE predicate on the "primary_entity" field.
func PrimaryEntityGTE(v int) predicate.File {
	return predicate.File(sql.FieldGTE(FieldPrimaryEntity, v))
}

// PrimaryEntityLT applies the LT predicate on the "primary_entity" field.
func PrimaryEntityLT(v int) predicate.File {
	return predicate.File(sql.FieldLT(FieldPrimaryEntity, v))
}

// PrimaryEntityLTE applies the LTE predicate on the "primary_entity" field.
func PrimaryEntityLTE(v int) predicate.File {
	return predicate.File(sql.FieldLTE(FieldPrimaryEntity, v))
}

// PrimaryEntityIsNil applies the IsNil predicate on the "primary_entity" field.
func PrimaryEntityIsNil() predicate.File {
	return predicate.File(sql.FieldIsNull(FieldPrimaryEntity))
}

// PrimaryEntityNotNil applies the NotNil predicate on the "primary_entity" field.
func PrimaryEntityNotNil() predicate.File {
	return predicate.File(sql.FieldNotNull(FieldPrimaryEntity))
}

// FileChildrenEQ applies the EQ predicate on the "file_children" field.
func FileChildrenEQ(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldFileChildren, v))
}

// FileChildrenNEQ applies the NEQ predicate on the "file_children" field.
func FileChildrenNEQ(v int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldFileChildren, v))
}

// FileChildrenIn applies the In predicate on the "file_children" field.
func FileChildrenIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldFileChildren, vs...))
}

// FileChildrenNotIn applies the NotIn predicate on the "file_children" field.
func FileChildrenNotIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldFileChildren, vs...))
}

// FileChildrenIsNil applies the IsNil predicate on the "file_children" field.
func FileChildrenIsNil() predicate.File {
	return predicate.File(sql.FieldIsNull(FieldFileChildren))
}

// FileChildrenNotNil applies the NotNil predicate on the "file_children" field.
func FileChildrenNotNil() predicate.File {
	return predicate.File(sql.FieldNotNull(FieldFileChildren))
}
// Predicates for the bool "is_symbolic" field, the nullable "props"
// field (null-checks only — no value comparisons are generated for it),
// and the nullable foreign-key field "storage_policy_files".

// IsSymbolicEQ applies the EQ predicate on the "is_symbolic" field.
func IsSymbolicEQ(v bool) predicate.File {
	return predicate.File(sql.FieldEQ(FieldIsSymbolic, v))
}

// IsSymbolicNEQ applies the NEQ predicate on the "is_symbolic" field.
func IsSymbolicNEQ(v bool) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldIsSymbolic, v))
}

// PropsIsNil applies the IsNil predicate on the "props" field.
func PropsIsNil() predicate.File {
	return predicate.File(sql.FieldIsNull(FieldProps))
}

// PropsNotNil applies the NotNil predicate on the "props" field.
func PropsNotNil() predicate.File {
	return predicate.File(sql.FieldNotNull(FieldProps))
}

// StoragePolicyFilesEQ applies the EQ predicate on the "storage_policy_files" field.
func StoragePolicyFilesEQ(v int) predicate.File {
	return predicate.File(sql.FieldEQ(FieldStoragePolicyFiles, v))
}

// StoragePolicyFilesNEQ applies the NEQ predicate on the "storage_policy_files" field.
func StoragePolicyFilesNEQ(v int) predicate.File {
	return predicate.File(sql.FieldNEQ(FieldStoragePolicyFiles, v))
}

// StoragePolicyFilesIn applies the In predicate on the "storage_policy_files" field.
func StoragePolicyFilesIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldIn(FieldStoragePolicyFiles, vs...))
}

// StoragePolicyFilesNotIn applies the NotIn predicate on the "storage_policy_files" field.
func StoragePolicyFilesNotIn(vs ...int) predicate.File {
	return predicate.File(sql.FieldNotIn(FieldStoragePolicyFiles, vs...))
}

// StoragePolicyFilesIsNil applies the IsNil predicate on the "storage_policy_files" field.
func StoragePolicyFilesIsNil() predicate.File {
	return predicate.File(sql.FieldIsNull(FieldStoragePolicyFiles))
}

// StoragePolicyFilesNotNil applies the NotNil predicate on the "storage_policy_files" field.
func StoragePolicyFilesNotNil() predicate.File {
	return predicate.File(sql.FieldNotNull(FieldStoragePolicyFiles))
}
// The Has* / Has*With predicates below filter by edge existence. Note
// that the bare Has* variants build their step inline without a
// sqlgraph.To clause — only edge existence is checked — while the
// Has*With variants reuse the full new*Step constructors and apply the
// given neighbor predicates inside the sub-selector.

// HasOwner applies the HasEdge predicate on the "owner" edge.
func HasOwner() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates).
func HasOwnerWith(preds ...predicate.User) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newOwnerStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasStoragePolicies applies the HasEdge predicate on the "storage_policies" edge.
func HasStoragePolicies() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, StoragePoliciesTable, StoragePoliciesColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasStoragePoliciesWith applies the HasEdge predicate on the "storage_policies" edge with a given conditions (other predicates).
func HasStoragePoliciesWith(preds ...predicate.StoragePolicy) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newStoragePoliciesStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasParent applies the HasEdge predicate on the "parent" edge.
func HasParent() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasParentWith applies the HasEdge predicate on the "parent" edge with a given conditions (other predicates).
func HasParentWith(preds ...predicate.File) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newParentStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasChildren applies the HasEdge predicate on the "children" edge.
func HasChildren() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasChildrenWith applies the HasEdge predicate on the "children" edge with a given conditions (other predicates).
func HasChildrenWith(preds ...predicate.File) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newChildrenStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasMetadata applies the HasEdge predicate on the "metadata" edge.
func HasMetadata() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, MetadataTable, MetadataColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasMetadataWith applies the HasEdge predicate on the "metadata" edge with a given conditions (other predicates).
func HasMetadataWith(preds ...predicate.Metadata) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newMetadataStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasEntities applies the HasEdge predicate on the "entities" edge.
func HasEntities() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2M, false, EntitiesTable, EntitiesPrimaryKey...),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasEntitiesWith applies the HasEdge predicate on the "entities" edge with a given conditions (other predicates).
func HasEntitiesWith(preds ...predicate.Entity) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newEntitiesStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasShares applies the HasEdge predicate on the "shares" edge.
func HasShares() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, SharesTable, SharesColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasSharesWith applies the HasEdge predicate on the "shares" edge with a given conditions (other predicates).
func HasSharesWith(preds ...predicate.Share) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newSharesStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasDirectLinks applies the HasEdge predicate on the "direct_links" edge.
func HasDirectLinks() predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, DirectLinksTable, DirectLinksColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasDirectLinksWith applies the HasEdge predicate on the "direct_links" edge with a given conditions (other predicates).
func HasDirectLinksWith(preds ...predicate.DirectLink) predicate.File {
	return predicate.File(func(s *sql.Selector) {
		step := newDirectLinksStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.File) predicate.File {
	return predicate.File(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.File) predicate.File {
	return predicate.File(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.File) predicate.File {
	return predicate.File(sql.NotPredicates(p))
}

1431
ent/file_create.go Normal file

File diff suppressed because it is too large Load Diff

88
ent/file_delete.go Normal file
View File

@@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/cloudreve/Cloudreve/v4/ent/file"
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
// FileDelete is the builder for deleting a File entity.
type FileDelete struct {
	config
	// hooks run around the delete mutation (see withHooks in Exec).
	hooks []Hook
	// mutation accumulates the predicates supplied via Where.
	mutation *FileMutation
}
// Where appends a list predicates to the FileDelete builder.
// It returns the receiver so calls can be chained.
func (fd *FileDelete) Where(ps ...predicate.File) *FileDelete {
	fd.mutation.Where(ps...)
	return fd
}
// Exec executes the deletion query and returns how many vertices were deleted.
// The registered hooks are run around sqlExec via withHooks.
func (fd *FileDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, fd.sqlExec, fd.mutation, fd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
func (fd *FileDelete) ExecX(ctx context.Context) int {
	affected, err := fd.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}
// sqlExec builds the delete spec from the accumulated predicates and runs it
// against the driver, translating constraint violations into ConstraintError.
func (fd *FileDelete) sqlExec(ctx context.Context) (int, error) {
	spec := sqlgraph.NewDeleteSpec(file.Table, sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt))
	if preds := fd.mutation.predicates; len(preds) > 0 {
		spec.Predicate = func(selector *sql.Selector) {
			for _, pred := range preds {
				pred(selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, fd.driver, spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	fd.mutation.done = true
	return affected, err
}
// FileDeleteOne is the builder for deleting a single File entity.
// It wraps a FileDelete and enforces exactly-one semantics in Exec.
type FileDeleteOne struct {
	fd *FileDelete
}
// Where appends a list predicates to the FileDelete builder.
// It delegates to the wrapped FileDelete and returns the receiver for chaining.
func (fdo *FileDeleteOne) Where(ps ...predicate.File) *FileDeleteOne {
	fdo.fd.mutation.Where(ps...)
	return fdo
}
// Exec executes the deletion query.
// It returns NotFoundError when no vertex matched the predicates.
func (fdo *FileDeleteOne) Exec(ctx context.Context) error {
	n, err := fdo.fd.Exec(ctx)
	if err != nil {
		return err
	}
	if n == 0 {
		return &NotFoundError{file.Label}
	}
	return nil
}
// ExecX is like Exec, but panics if an error occurs.
func (fdo *FileDeleteOne) ExecX(ctx context.Context) {
	if err := fdo.Exec(ctx); err != nil {
		panic(err)
	}
}

1156
ent/file_query.go Normal file

File diff suppressed because it is too large Load Diff

1737
ent/file_update.go Normal file

File diff suppressed because it is too large Load Diff

204
ent/fsevent.go Normal file
View File

@@ -0,0 +1,204 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
"github.com/cloudreve/Cloudreve/v4/ent/user"
"github.com/gofrs/uuid"
)
// FsEvent is the model entity for the FsEvent schema.
type FsEvent struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	// Nil when the row has not been soft-deleted.
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Event holds the value of the "event" field.
	Event string `json:"event,omitempty"`
	// Subscriber holds the value of the "subscriber" field.
	Subscriber uuid.UUID `json:"subscriber,omitempty"`
	// UserFsevent holds the value of the "user_fsevent" field.
	// Foreign key to the owning user (see UserColumn in package fsevent).
	UserFsevent int `json:"user_fsevent,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the FsEventQuery when eager-loading is set.
	Edges FsEventEdges `json:"edges"`
	// selectValues stores values selected via modifiers; read back via Value.
	selectValues sql.SelectValues
}
// FsEventEdges holds the relations/edges for other nodes in the graph.
type FsEventEdges struct {
	// User holds the value of the user edge.
	User *User `json:"user,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 corresponds to the "user" edge (see UserOrErr).
	loadedTypes [1]bool
}
// UserOrErr returns the User value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e FsEventEdges) UserOrErr() (*User, error) {
	if !e.loadedTypes[0] {
		return nil, &NotLoadedError{edge: "user"}
	}
	if e.User == nil {
		// Edge was loaded but was not found.
		return nil, &NotFoundError{label: user.Label}
	}
	return e.User, nil
}
// scanValues returns the types for scanning values from sql.Rows.
func (*FsEvent) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i, column := range columns {
		switch column {
		case fsevent.FieldID, fsevent.FieldUserFsevent:
			values[i] = new(sql.NullInt64)
		case fsevent.FieldEvent:
			values[i] = new(sql.NullString)
		case fsevent.FieldCreatedAt, fsevent.FieldUpdatedAt, fsevent.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		case fsevent.FieldSubscriber:
			values[i] = new(uuid.UUID)
		default:
			// Columns selected via modifiers fall through to a generic holder.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the FsEvent fields.
func (fe *FsEvent) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case fsevent.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				// Report the dynamic type of the scanned value (values[i]), not the
				// zero-valued assertion result "value" — on a failed assertion %T on
				// "value" would always print *sql.NullInt64, hiding the actual type.
				// This also matches the error style of every other case below.
				return fmt.Errorf("unexpected type %T for field id", values[i])
			}
			fe.ID = int(value.Int64)
		case fsevent.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				fe.CreatedAt = value.Time
			}
		case fsevent.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				fe.UpdatedAt = value.Time
			}
		case fsevent.FieldDeletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				// DeletedAt is a *time.Time; allocate only when the column is non-NULL.
				fe.DeletedAt = new(time.Time)
				*fe.DeletedAt = value.Time
			}
		case fsevent.FieldEvent:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field event", values[i])
			} else if value.Valid {
				fe.Event = value.String
			}
		case fsevent.FieldSubscriber:
			if value, ok := values[i].(*uuid.UUID); !ok {
				return fmt.Errorf("unexpected type %T for field subscriber", values[i])
			} else if value != nil {
				fe.Subscriber = *value
			}
		case fsevent.FieldUserFsevent:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field user_fsevent", values[i])
			} else if value.Valid {
				fe.UserFsevent = int(value.Int64)
			}
		default:
			// Unknown columns come from modifiers; stash them for Value().
			fe.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the FsEvent.
// This includes values selected through modifiers, order, etc.
func (fe *FsEvent) Value(name string) (ent.Value, error) {
	return fe.selectValues.Get(name)
}
// QueryUser queries the "user" edge of the FsEvent entity.
func (fe *FsEvent) QueryUser() *UserQuery {
	return NewFsEventClient(fe.config).QueryUser(fe)
}
// Update returns a builder for updating this FsEvent.
// Note that you need to call FsEvent.Unwrap() before calling this method if this FsEvent
// was returned from a transaction, and the transaction was committed or rolled back.
func (fe *FsEvent) Update() *FsEventUpdateOne {
	return NewFsEventClient(fe.config).UpdateOne(fe)
}
// Unwrap unwraps the FsEvent entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// It panics when the entity was not obtained from a transaction.
func (fe *FsEvent) Unwrap() *FsEvent {
	if tx, ok := fe.config.driver.(*txDriver); ok {
		fe.config.driver = tx.drv
		return fe
	}
	panic("ent: FsEvent is not a transactional entity")
}
// String implements the fmt.Stringer.
// The output layout (including the double comma when DeletedAt is nil)
// matches the original generated implementation byte for byte.
func (fe *FsEvent) String() string {
	var b strings.Builder
	b.WriteString("FsEvent(")
	fmt.Fprintf(&b, "id=%v, ", fe.ID)
	b.WriteString("created_at=" + fe.CreatedAt.Format(time.ANSIC) + ", ")
	b.WriteString("updated_at=" + fe.UpdatedAt.Format(time.ANSIC))
	b.WriteString(", ")
	if v := fe.DeletedAt; v != nil {
		b.WriteString("deleted_at=" + v.Format(time.ANSIC))
	}
	// Note: the separator is written even when deleted_at was omitted,
	// exactly as the original did.
	b.WriteString(", ")
	b.WriteString("event=" + fe.Event)
	b.WriteString(", ")
	fmt.Fprintf(&b, "subscriber=%v", fe.Subscriber)
	b.WriteString(", ")
	fmt.Fprintf(&b, "user_fsevent=%v", fe.UserFsevent)
	b.WriteByte(')')
	return b.String()
}
// SetUser manually set the edge as loaded state.
// It stores v on the "user" edge and flags loadedTypes[0] so that
// UserOrErr treats the edge as eagerly loaded.
func (e *FsEvent) SetUser(v *User) {
	e.Edges.User = v
	e.Edges.loadedTypes[0] = true
}
// FsEvents is a parsable slice of FsEvent.
type FsEvents []*FsEvent

130
ent/fsevent/fsevent.go Normal file
View File

@@ -0,0 +1,130 @@
// Code generated by ent, DO NOT EDIT.
package fsevent
import (
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
	// Label holds the string label denoting the fsevent type in the database.
	Label = "fs_event"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldEvent holds the string denoting the event field in the database.
	FieldEvent = "event"
	// FieldSubscriber holds the string denoting the subscriber field in the database.
	FieldSubscriber = "subscriber"
	// FieldUserFsevent holds the string denoting the user_fsevent field in the database.
	FieldUserFsevent = "user_fsevent"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// Table holds the table name of the fsevent in the database.
	Table = "fs_events"
	// UserTable is the table that holds the user relation/edge.
	// It equals Table because the edge is stored as a foreign key on fs_events.
	UserTable = "fs_events"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "user_fsevent"
)
// Columns holds all SQL columns for fsevent fields.
// ValidColumn performs a linear scan over this slice.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldEvent,
	FieldSubscriber,
	FieldUserFsevent,
}
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks holds the schema hooks registered by the runtime package.
	Hooks [1]ent.Hook
	// Interceptors holds the schema interceptors registered by the runtime package.
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)
// OrderOption defines the ordering options for the FsEvent queries.
// It is applied to a selector to append ORDER BY terms.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldID, opts...)
	return term.ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldCreatedAt, opts...)
	return term.ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldUpdatedAt, opts...)
	return term.ToFunc()
}
// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldDeletedAt, opts...)
	return term.ToFunc()
}
// ByEvent orders the results by the event field.
func ByEvent(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldEvent, opts...)
	return term.ToFunc()
}
// BySubscriber orders the results by the subscriber field.
func BySubscriber(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldSubscriber, opts...)
	return term.ToFunc()
}
// ByUserFsevent orders the results by the user_fsevent field.
func ByUserFsevent(opts ...sql.OrderTermOption) OrderOption {
	term := sql.OrderByField(FieldUserFsevent, opts...)
	return term.ToFunc()
}
// ByUserField orders the results by user field.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		term := sql.OrderByField(field, opts...)
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), term)
	}
}
// newUserStep builds the graph step describing the M2O "user" edge
// (fs_events.user_fsevent -> users.id).
func newUserStep() *sqlgraph.Step {
	from := sqlgraph.From(Table, FieldID)
	to := sqlgraph.To(UserInverseTable, FieldID)
	edge := sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn)
	return sqlgraph.NewStep(from, to, edge)
}

Some files were not shown because too many files have changed in this diff Show More